Falls back to Python shell if unavailable"""
self.run('python')
- @args('--shell', dest="shell", metavar='<bpython|ipython|python >',
- help='Python shell')
+ @args('--shell', dest="shell",
+ metavar='<bpython|ipython|python >',
+ help='Python shell')
def run(self, shell=None):
"""Runs a Python interactive interpreter."""
if not shell:
class HostCommands(object):
- """List hosts"""
+ """List hosts."""
def list(self, zone=None):
"""Show a list of all physical hosts. Filter by zone.
def __init__(self):
pass
- @args('--version', dest='version', metavar='<version>',
- help='Database version')
+ @args('--version', dest='version',
+ metavar='<version>',
+ help='Database version')
def sync(self, version=None):
"""Sync the database up to the most recent version."""
return migration.db_sync(version)
pass
def list(self):
- print _("%(version)s (%(vcs)s)") % \
- {'version': version.version_string(),
- 'vcs': version.version_string_with_vcs()}
+ print(
+ _("%(version)s (%(vcs)s)") %
+ {'version': version.version_string(),
+ 'vcs': version.version_string_with_vcs()})
def __call__(self):
self.list()
columns = table.columns.keys()
for row in src.query(table).all():
data = dict([(str(column), getattr(row, column))
- for column in columns])
+ for column in columns])
dest.add(new_row(**data))
dest.commit()
for row in src.query(table).all():
if row.resource == 'gigabytes' or row.resource == 'volumes':
data = dict([(str(column), getattr(row, column))
- for column in columns])
+ for column in columns])
dest.add(new_row(**data))
dest.commit()
dest_db = '%s/cinder' % dest_db
self._import_db(src_db, dest_db, backup_db)
- @args('--src', dest='src_tgts', metavar='<src tgts>',
- help='[login@src_host:]/opt/stack/nova/volumes/')
- @args('--dest', dest='dest_tgts', metavar='<dest tgts>',
- help='[login@src_host:/opt/stack/cinder/volumes/]')
+ @args('--src',
+ dest='src_tgts',
+ metavar='<src tgts>',
+ help='[login@src_host:]/opt/stack/nova/volumes/')
+ @args('--dest',
+ dest='dest_tgts',
+ metavar='<dest tgts>',
+ help='[login@src_host:/opt/stack/cinder/volumes/]')
def copy_ptgt_files(self, src_tgts, dest_tgts=None):
"""Copy persistent scsi tgt files from nova to cinder.
class VolumeCommands(object):
- """Methods for dealing with a cloud in an odd state"""
+ """Methods for dealing with a cloud in an odd state."""
- @args('--volume', dest='volume_id', metavar='<volume id>',
- help='Volume ID')
+ @args('--volume',
+ dest='volume_id',
+ metavar='<volume id>',
+ help='Volume ID')
def delete(self, volume_id):
"""Delete a volume, bypassing the check that it
must be available."""
{"method": "delete_volume",
"args": {"volume_id": volume['id']}})
- @args('--volume', dest='volume_id', metavar='<volume id>',
- help='Volume ID')
+ @args('--volume',
+ dest='volume_id',
+ metavar='<volume id>',
+ help='Volume ID')
def reattach(self, volume_id):
"""Re-attach a volume that has previously been attached
to an instance. Typically called after a compute host
class StorageManagerCommands(object):
- """Class for mangaging Storage Backends and Flavors"""
+    """Class for managing Storage Backends and Flavors."""
def flavor_list(self, flavor=None):
ctxt = context.get_admin_context()
for flav in flavors:
print "%-18s\t%-20s\t%s" % (
- flav['id'],
- flav['label'],
- flav['description'])
+ flav['id'],
+ flav['label'],
+ flav['description'])
def flavor_create(self, label, desc):
# TODO(renukaapte) flavor name must be unique
sys.exit(2)
print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'),
- _('Flavor id'),
- _('SR UUID'),
- _('SR Type'),
- _('Config Parameters'),)
+ _('Flavor id'),
+ _('SR UUID'),
+ _('SR Type'),
+ _('Config Parameters'),)
for b in backends:
print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'],
print "error: %s" % ex
sys.exit(2)
- config_params = " ".join(['%s=%s' %
- (key, params[key]) for key in params])
+ config_params = " ".join(
+ ['%s=%s' % (key, params[key]) for key in params])
if 'sr_uuid' in params:
sr_uuid = params['sr_uuid']
c = raw_input('Proceed? (y/n) ')
if c == 'y' or c == 'Y':
try:
- db.sm_backend_conf_update(ctxt, backend['id'],
- dict(created=False,
- flavor_id=flavors['id'],
- sr_type=sr_type,
- config_params=config_params))
+ db.sm_backend_conf_update(
+ ctxt, backend['id'],
+ dict(created=False,
+ flavor_id=flavors['id'],
+ sr_type=sr_type,
+ config_params=config_params))
except exception.DBError, e:
_db_error(e)
return
class GetLogCommands(object):
- """Get logging information"""
+ """Get logging information."""
def errors(self):
- """Get all of the errors from the log files"""
+ """Get all of the errors from the log files."""
error_found = 0
if FLAGS.logdir:
logs = [x for x in os.listdir(FLAGS.logdir) if x.endswith('.log')]
print "No errors in logfiles!"
def syslog(self, num_entries=10):
- """Get <num_entries> of the cinder syslog events"""
+ """Get <num_entries> of the cinder syslog events."""
entries = int(num_entries)
count = 0
log_file = ''
script_name = argv.pop(0)
if len(argv) < 1:
print _("\nOpenStack Cinder version: %(version)s (%(vcs)s)\n") % \
- {'version': version.version_string(),
- 'vcs': version.version_string_with_vcs()}
+ {'version': version.version_string(),
+ 'vcs': version.version_string_with_vcs()}
print script_name + " category action [<args>]"
print _("Available categories:")
for k, _v in CATEGORIES:
_collection_name = None
def _get_links(self, request, identifier):
- return [{
- "rel": "self",
- "href": self._get_href_link(request, identifier),
- },
- {
- "rel": "bookmark",
- "href": self._get_bookmark_link(request, identifier),
- }]
+ return [{"rel": "self",
+ "href": self._get_href_link(request, identifier), },
+ {"rel": "bookmark",
+ "href": self._get_bookmark_link(request, identifier), }]
def _get_next_link(self, request, identifier):
"""Return href string with proper limit and marker params."""
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
-authorize = extensions.soft_extension_authorizer('volume',
- 'extended_snapshot_attributes')
+authorize = extensions.soft_extension_authorizer(
+ 'volume',
+ 'extended_snapshot_attributes')
class ExtendedSnapshotAttributesController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedSnapshotAttributesController, self).__init__(*args,
- **kwargs)
+ **kwargs)
self.volume_api = volume.API()
def _get_snapshots(self, context):
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
- return self._format_quota_set(
- id,
- QUOTAS.get_class_quotas(context, id)
- )
+ return self._format_quota_set(id,
+ QUOTAS.get_class_quotas(context, id))
@wsgi.serializers(xml=QuotaClassTemplate)
def update(self, req, id, body):
resources = []
res = extensions.ResourceExtension('os-quota-sets',
- QuotaSetsController(),
- member_actions={'defaults': 'GET'})
+ QuotaSetsController(),
+ member_actions={'defaults': 'GET'})
resources.append(res)
return resources
def get_resources(self):
resources = []
res = extensions.ResourceExtension('extra_specs',
- VolumeTypeExtraSpecsController(),
- parent=dict(
- member_name='type',
- collection_name='types'))
+ VolumeTypeExtraSpecsController(),
+ parent=dict(member_name='type',
+ collection_name='types')
+ )
resources.append(res)
return resources
class VolumeTypesManageController(wsgi.Controller):
- """ The volume types API controller for the OpenStack API """
+ """The volume types API controller for the OpenStack API."""
_view_builder_class = views_types.ViewBuilder
@wsgi.action("delete")
def _delete(self, req, id):
- """ Deletes an existing volume type """
+ """Deletes an existing volume type."""
context = req.environ['cinder.context']
authorize(context)
class Types_manage(extensions.ExtensionDescriptor):
- """Types manage support"""
+ """Types manage support."""
name = "TypesManage"
alias = "os-types-manage"
class VolumeToImageDeserializer(wsgi.XMLDeserializer):
- """Deserializer to handle xml-formatted requests"""
+ """Deserializer to handle xml-formatted requests."""
def default(self, string):
dom = minidom.parseString(string)
action_node = dom.childNodes[0]
from cinder import wsgi as base_wsgi
-use_forwarded_for_opt = cfg.BoolOpt('use_forwarded_for',
- default=False,
- help='Treat X-Forwarded-For as the canonical remote address. '
- 'Only enable this if you have a sanitizing proxy.')
+use_forwarded_for_opt = cfg.BoolOpt(
+ 'use_forwarded_for',
+ default=False,
+ help='Treat X-Forwarded-For as the canonical remote address. '
+ 'Only enable this if you have a sanitizing proxy.')
FLAGS = flags.FLAGS
FLAGS.register_opt(use_forwarded_for_opt)
for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
FaultWrapper._status_to_type[clazz.code] = clazz
return FaultWrapper._status_to_type.get(
- status, webob.exc.HTTPInternalServerError)()
+ status, webob.exc.HTTPInternalServerError)()
def _error(self, inner, req):
LOG.exception(_("Caught error: %s"), unicode(inner))
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
p_member)
- routes.Mapper.resource(self, member_name,
- collection_name,
- **kwargs)
+ routes.Mapper.resource(self,
+ member_name,
+ collection_name,
+ **kwargs)
class APIRouter(base_wsgi.Router):
"""The hosts admin extension."""
import webob.exc
-from xml.dom import minidom
-from xml.parsers import expat
from cinder.api.openstack import extensions
from cinder.api.openstack import wsgi
from cinder.api.openstack import xmlutil
-from cinder.volume import api as volume_api
from cinder import db
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import utils
+from cinder.volume import api as volume_api
+from xml.dom import minidom
+from xml.parsers import expat
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
context = req.environ['cinder.context']
state = "enabled" if enabled else "disabled"
LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
- result = self.api.set_host_enabled(context, host=host,
- enabled=enabled)
+ result = self.api.set_host_enabled(context,
+ host=host,
+ enabled=enabled)
if result not in ("enabled", "disabled"):
# An error message was returned
raise webob.exc.HTTPBadRequest(explanation=result)
(snap_count, snap_sum) = db.snapshot_data_get_for_project(
context,
project_id)
- resources.append({'resource':
- {'host': host,
- 'project': project_id,
- 'volume_count': str(count),
- 'total_volume_gb': str(sum),
- 'snapshot_count': str(snap_count),
- 'total_snapshot_gb': str(snap_sum)}})
+ resources.append(
+ {'resource':
+ {'host': host,
+ 'project': project_id,
+ 'volume_count': str(count),
+ 'total_volume_gb': str(sum),
+ 'snapshot_count': str(snap_count),
+ 'total_snapshot_gb': str(snap_sum)}})
snap_count_total += int(snap_count)
snap_sum_total += int(snap_sum)
resources[0]['resource']['snapshot_count'] = str(snap_count_total)
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
- HostController(),
- collection_actions={'update': 'PUT'},
- member_actions={"startup": "GET", "shutdown": "GET",
- "reboot": "GET"})]
+ HostController(),
+ collection_actions={
+ 'update': 'PUT'},
+ member_actions={
+ 'startup': 'GET',
+ 'shutdown': 'GET',
+ 'reboot': 'GET'})]
return resources
content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
self.environ['cinder.best_content_type'] = (content_type or
- 'application/json')
+ 'application/json')
return self.environ['cinder.best_content_type']
code=ex_value.code, explanation=unicode(ex_value)))
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
- LOG.error(_('Exception handling resource: %s') % ex_value,
- exc_info=exc_info)
+ LOG.error(_(
+ 'Exception handling resource: %s') %
+ ex_value, exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_("Fault thrown: %s"), unicode(ex_value))
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
- action not in ['action', 'create', 'delete']):
+ action not in ['action', 'create', 'delete']):
# Propagate the error
raise
else:
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
- _fault_names = {
- 400: "badRequest",
- 401: "unauthorized",
- 403: "forbidden",
- 404: "itemNotFound",
- 405: "badMethod",
- 409: "conflictingRequest",
- 413: "overLimit",
- 415: "badMediaType",
- 501: "notImplemented",
- 503: "serviceUnavailable"}
+ _fault_names = {400: "badRequest",
+ 401: "unauthorized",
+ 403: "forbidden",
+ 404: "itemNotFound",
+ 405: "badMethod",
+ 409: "conflictingRequest",
+ 413: "overLimit",
+ 415: "badMediaType",
+ 501: "notImplemented",
+ 503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
-_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*'
- r'(?:=\s*([^;]+|%s))?\s*' %
+_option_header_piece_re = re.compile(
+ r';\s*([^\s;=]+|%s)\s*'
+ r'(?:=\s*([^;]+|%s))?\s*' %
(_quoted_string_re, _quoted_string_re))
LOG = logging.getLogger(__name__)
for (domain, app_url), app in self.applications:
if domain and domain != host and domain != host + ':' + port:
continue
- if (path_info == app_url
- or path_info.startswith(app_url + '/')):
+ if (path_info == app_url or path_info.startswith(app_url + '/')):
return app, app_url
return None, None
if not mime_type or not app:
possible_mime_type, possible_app = self._accept_strategy(
- host, port, environ, supported_content_types)
+ host, port, environ, supported_content_types)
if possible_mime_type and not mime_type:
mime_type = possible_mime_type
if possible_app and not app:
def _setup_routes(self, mapper, ext_mgr):
self.resources['versions'] = versions.create_resource()
mapper.connect("versions", "/",
- controller=self.resources['versions'],
- action='show')
+ controller=self.resources['versions'],
+ action='show')
mapper.redirect("", "/")
raise exception.InvalidParameterValue(err=msg)
if utils.bool_from_str(force):
- new_snapshot = self.volume_api.create_snapshot_force(context,
- volume,
- snapshot.get('display_name'),
- snapshot.get('display_description'))
+ new_snapshot = self.volume_api.create_snapshot_force(
+ context,
+ volume,
+ snapshot.get('display_name'),
+ snapshot.get('display_description'))
else:
- new_snapshot = self.volume_api.create_snapshot(context,
- volume,
- snapshot.get('display_name'),
- snapshot.get('display_description'))
+ new_snapshot = self.volume_api.create_snapshot(
+ context,
+ volume,
+ snapshot.get('display_name'),
+ snapshot.get('display_description'))
retval = _translate_snapshot_detail_view(context, new_snapshot)
# License for the specific language governing permissions and limitations
# under the License.
-""" The volume type & volume types extra specs extension"""
+"""The volume type & volume types extra specs extension."""
from webob import exc
class VolumeTypesController(wsgi.Controller):
- """ The volume types API controller for the OpenStack API """
+ """The volume types API controller for the OpenStack API."""
_view_builder_class = views_types.ViewBuilder
@wsgi.serializers(xml=VolumeTypesTemplate)
def index(self, req):
- """ Returns the list of volume types """
+ """Returns the list of volume types."""
context = req.environ['cinder.context']
vol_types = volume_types.get_all_types(context).values()
return self._view_builder.index(req, vol_types)
@wsgi.serializers(xml=VolumeTypeTemplate)
def show(self, req, id):
- """ Return a single volume type item """
+ """Return a single volume type item."""
context = req.environ['cinder.context']
try:
if req_volume_type:
try:
kwargs['volume_type'] = volume_types.get_volume_type_by_name(
- context, req_volume_type)
+ context, req_volume_type)
except exception.VolumeTypeNotFound:
explanation = 'Volume type not found.'
raise exc.HTTPNotFound(explanation=explanation)
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
- if opt not in allowed_search_options]
+ if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
log_msg = _("Removing options '%(bad_options)s' from query") % locals()
LOG.debug(log_msg)
def _setup_routes(self, mapper, ext_mgr):
self.resources['versions'] = versions.create_resource()
mapper.connect("versions", "/",
- controller=self.resources['versions'],
- action='show')
+ controller=self.resources['versions'],
+ action='show')
mapper.redirect("", "/")
raise exception.InvalidParameterValue(err=msg)
if utils.bool_from_str(force):
- new_snapshot = self.volume_api.create_snapshot_force(context,
- volume,
- snapshot.get('display_name'),
- snapshot.get('display_description'))
+ new_snapshot = self.volume_api.create_snapshot_force(
+ context,
+ volume,
+ snapshot.get('display_name'),
+ snapshot.get('display_description'))
else:
- new_snapshot = self.volume_api.create_snapshot(context,
- volume,
- snapshot.get('display_name'),
- snapshot.get('display_description'))
+ new_snapshot = self.volume_api.create_snapshot(
+ context,
+ volume,
+ snapshot.get('display_name'),
+ snapshot.get('display_description'))
retval = _translate_snapshot_detail_view(context, new_snapshot)
# License for the specific language governing permissions and limitations
# under the License.
-""" The volume type & volume types extra specs extension"""
+"""The volume type & volume types extra specs extension."""
from webob import exc
class VolumeTypesController(wsgi.Controller):
- """ The volume types API controller for the OpenStack API """
+ """The volume types API controller for the OpenStack API."""
_view_builder_class = views_types.ViewBuilder
@wsgi.serializers(xml=VolumeTypesTemplate)
def index(self, req):
- """ Returns the list of volume types """
+ """Returns the list of volume types."""
context = req.environ['cinder.context']
vol_types = volume_types.get_all_types(context).values()
return self._view_builder.index(req, vol_types)
@wsgi.serializers(xml=VolumeTypeTemplate)
def show(self, req, id):
- """ Return a single volume type item """
+ """Return a single volume type item."""
context = req.environ['cinder.context']
try:
if req_volume_type:
try:
kwargs['volume_type'] = volume_types.get_volume_type_by_name(
- context, req_volume_type)
+ context, req_volume_type)
except exception.VolumeTypeNotFound:
explanation = 'Volume type not found.'
raise exc.HTTPNotFound(explanation=explanation)
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
- if opt not in allowed_search_options]
+ if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
log_msg = _("Removing options '%(bad_options)s' from query") % locals()
LOG.debug(log_msg)
def show(self, request, volume_type, brief=False):
"""Trim away extraneous volume type attributes."""
trimmed = dict(id=volume_type.get('id'),
- name=volume_type.get('name'),
- extra_specs=volume_type.get('extra_specs'))
+ name=volume_type.get('name'),
+ extra_specs=volume_type.get('extra_specs'))
return trimmed if brief else dict(volume_type=trimmed)
def index(self, request, volume_types):
version_objs.append({
"id": version['id'],
"status": version['status'],
- "links": [
- {
- "rel": "self",
- "href": self.generate_href(req.path),
- },
- ],
- "media-types": version['media-types'],
- })
+ "links": [{"rel": "self",
+ "href": self.generate_href(req.path), }, ],
+ "media-types": version['media-types'], })
return dict(choices=version_objs)
"id": version['id'],
"status": version['status'],
"updated": version['updated'],
- "links": self._build_links(version),
- })
+ "links": self._build_links(version), })
return dict(versions=version_objs)
reval = copy.deepcopy(version)
reval['links'].insert(0, {
"rel": "self",
- "href": self.base_url.rstrip('/') + '/',
- })
+ "href": self.base_url.rstrip('/') + '/', })
return dict(version=reval)
def _build_links(self, version_data):
"""Generate a container of links that refer to the provided version."""
href = self.generate_href()
- links = [
- {
- "rel": "self",
- "href": href,
- },
- ]
+ links = [{'rel': 'self',
+ 'href': href, }, ]
return links
deprecate_opts = [
cfg.BoolOpt('fatal_deprecations',
default=False,
- help='make deprecations fatal')
- ]
+ help='make deprecations fatal')]
FLAGS = flags.FLAGS
FLAGS.register_opts(deprecate_opts)
"""
if kwargs:
LOG.warn(_('Arguments dropped when creating context: %s') %
- str(kwargs))
+ str(kwargs))
self.user_id = user_id
self.project_id = project_id
help='Template string to be used to generate volume names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
- help='Template string to be used to generate snapshot names'),
- ]
+ help='Template string to be used to generate snapshot names'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(db_opts)
def migration_get_by_instance_and_status(context, instance_uuid, status):
"""Finds a migration by the instance uuid its migrating."""
- return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
- status)
+ return IMPL.migration_get_by_instance_and_status(context,
+ instance_uuid,
+ status)
def migration_get_all_unconfirmed(context, confirm_window):
IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
-def volume_type_extra_specs_update_or_create(context, volume_type_id,
- extra_specs):
+def volume_type_extra_specs_update_or_create(context,
+ volume_type_id,
+ extra_specs):
"""Create or update volume type extra specs. This adds or modifies the
key/value pairs specified in the extra specs dict argument"""
- IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
- extra_specs)
+ IMPL.volume_type_extra_specs_update_or_create(context,
+ volume_type_id,
+ extra_specs)
###################
def volume_glance_metadata_create(context, volume_id, key, value):
"""Update the Glance metadata for the specified volume."""
- return IMPL.volume_glance_metadata_create(context, volume_id,
- key, value)
+ return IMPL.volume_glance_metadata_create(context,
+ volume_id,
+ key,
+ value)
def volume_glance_metadata_get(context, volume_id):
"""Implementation of SQLAlchemy backend."""
import datetime
-import functools
import uuid
import warnings
from sqlalchemy.exc import IntegrityError
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
-from sqlalchemy.orm import joinedload_all
-from sqlalchemy.sql.expression import asc
-from sqlalchemy.sql.expression import desc
-from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql import func
query = query.filter_by(deleted=True)
else:
raise Exception(
- _("Unrecognized read_deleted value '%s'") % read_deleted)
+ _("Unrecognized read_deleted value '%s'") % read_deleted)
if project_only and is_user_context(context):
query = query.filter_by(project_id=context.project_id)
@require_admin_context
def service_get(context, service_id, session=None):
- result = model_query(context, models.Service, session=session).\
- filter_by(id=service_id).\
- first()
+ result = model_query(
+ context,
+ models.Service,
+ session=session).\
+ filter_by(id=service_id).\
+ first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
@require_admin_context
def service_get_all_by_topic(context, topic):
- return model_query(context, models.Service, read_deleted="no").\
- filter_by(disabled=False).\
- filter_by(topic=topic).\
- all()
+ return model_query(
+ context, models.Service, read_deleted="no").\
+ filter_by(disabled=False).\
+ filter_by(topic=topic).\
+ all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
- result = model_query(context, models.Service, read_deleted="no").\
- filter_by(disabled=False).\
- filter_by(host=host).\
- filter_by(topic=topic).\
- first()
+ result = model_query(
+ context, models.Service, read_deleted="no").\
+ filter_by(disabled=False).\
+ filter_by(host=host).\
+ filter_by(topic=topic).\
+ first()
if not result:
raise exception.ServiceNotFound(host=host, topic=topic)
return result
@require_admin_context
def service_get_all_by_host(context, host):
- return model_query(context, models.Service, read_deleted="no").\
- filter_by(host=host).\
- all()
+ return model_query(
+ context, models.Service, read_deleted="no").\
+ filter_by(host=host).\
+ all()
@require_admin_context
return model_query(context, models.Service,
func.coalesce(sort_value, 0),
session=session, read_deleted="no").\
- filter_by(topic=topic).\
- filter_by(disabled=False).\
- outerjoin((subq, models.Service.host == subq.c.host)).\
- order_by(sort_value).\
- all()
+ filter_by(topic=topic).\
+ filter_by(disabled=False).\
+ outerjoin((subq, models.Service.host == subq.c.host)).\
+ order_by(sort_value).\
+ all()
@require_admin_context
subq = model_query(context, models.Volume.host,
func.sum(models.Volume.size).label(label),
session=session, read_deleted="no").\
- group_by(models.Volume.host).\
- subquery()
+ group_by(models.Volume.host).\
+ subquery()
return _service_get_all_topic_subquery(context,
session,
topic,
@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
- filter_by(host=host).\
- filter_by(binary=binary).\
- first()
+ filter_by(host=host).\
+ filter_by(binary=binary).\
+ first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
@require_admin_context
def iscsi_target_count_by_host(context, host):
return model_query(context, models.IscsiTarget).\
- filter_by(host=host).\
- count()
+ filter_by(host=host).\
+ count()
@require_admin_context
def quota_get(context, project_id, resource, session=None):
result = model_query(context, models.Quota, session=session,
read_deleted="no").\
- filter_by(project_id=project_id).\
- filter_by(resource=resource).\
- first()
+ filter_by(project_id=project_id).\
+ filter_by(resource=resource).\
+ first()
if not result:
raise exception.ProjectQuotaNotFound(project_id=project_id)
authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
- filter_by(project_id=project_id).\
- all()
+ filter_by(project_id=project_id).\
+ all()
result = {'project_id': project_id}
for row in rows:
def quota_class_get(context, class_name, resource, session=None):
result = model_query(context, models.QuotaClass, session=session,
read_deleted="no").\
- filter_by(class_name=class_name).\
- filter_by(resource=resource).\
- first()
+ filter_by(class_name=class_name).\
+ filter_by(resource=resource).\
+ first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
authorize_quota_class_context(context, class_name)
rows = model_query(context, models.QuotaClass, read_deleted="no").\
- filter_by(class_name=class_name).\
- all()
+ filter_by(class_name=class_name).\
+ all()
result = {'class_name': class_name}
for row in rows:
with session.begin():
quota_classes = model_query(context, models.QuotaClass,
session=session, read_deleted="no").\
- filter_by(class_name=class_name).\
- all()
+ filter_by(class_name=class_name).\
+ all()
for quota_class_ref in quota_classes:
quota_class_ref.delete(session=session)
def quota_usage_get(context, project_id, resource, session=None):
result = model_query(context, models.QuotaUsage, session=session,
read_deleted="no").\
- filter_by(project_id=project_id).\
- filter_by(resource=resource).\
- first()
+ filter_by(project_id=project_id).\
+ filter_by(resource=resource).\
+ first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
authorize_project_context(context, project_id)
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
- filter_by(project_id=project_id).\
- all()
+ filter_by(project_id=project_id).\
+ all()
result = {'project_id': project_id}
for row in rows:
def reservation_get(context, uuid, session=None):
result = model_query(context, models.Reservation, session=session,
read_deleted="no").\
- filter_by(uuid=uuid).\
- first()
+ filter_by(uuid=uuid).first()
if not result:
raise exception.ReservationNotFound(uuid=uuid)
authorize_project_context(context, project_id)
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
- filter_by(project_id=project_id).\
- all()
+ filter_by(project_id=project_id).all()
result = {'project_id': project_id}
for row in rows:
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
- filter_by(project_id=context.project_id).\
- with_lockmode('update').\
- all()
+ filter_by(project_id=context.project_id).\
+ with_lockmode('update').\
+ all()
return dict((row.resource, row) for row in rows)
return model_query(context, models.Reservation,
read_deleted="no",
session=session).\
- filter(models.Reservation.uuid.in_(reservations)).\
- with_lockmode('update').\
- all()
+ filter(models.Reservation.uuid.in_(reservations)).\
+ with_lockmode('update').\
+ all()
@require_context
with session.begin():
quotas = model_query(context, models.Quota, session=session,
read_deleted="no").\
- filter_by(project_id=project_id).\
- all()
+ filter_by(project_id=project_id).\
+ all()
for quota_ref in quotas:
quota_ref.delete(session=session)
quota_usages = model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
- filter_by(project_id=project_id).\
- all()
+ filter_by(project_id=project_id).\
+ all()
for quota_usage_ref in quota_usages:
quota_usage_ref.delete(session=session)
reservations = model_query(context, models.Reservation,
session=session, read_deleted="no").\
- filter_by(project_id=project_id).\
- all()
+ filter_by(project_id=project_id).\
+ all()
for reservation_ref in reservations:
reservation_ref.delete(session=session)
current_time = timeutils.utcnow()
results = model_query(context, models.Reservation, session=session,
read_deleted="no").\
- filter(models.Reservation.expire < current_time).\
- all()
+ filter(models.Reservation.expire < current_time).\
+ all()
if results:
for reservation in results:
with session.begin():
iscsi_target_ref = model_query(context, models.IscsiTarget,
session=session, read_deleted="no").\
- filter_by(volume=None).\
- filter_by(host=host).\
- with_lockmode('update').\
- first()
+ filter_by(volume=None).\
+ filter_by(host=host).\
+ with_lockmode('update').\
+ first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
func.sum(models.Volume.size),
read_deleted="no",
session=session).\
- filter_by(host=host).\
- first()
+ filter_by(host=host).\
+ first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
func.sum(models.Volume.size),
read_deleted="no",
session=session).\
- filter_by(project_id=project_id).\
- first()
+ filter_by(project_id=project_id).\
+ first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
session = get_session()
with session.begin():
session.query(models.Volume).\
- filter_by(id=volume_id).\
- update({'status': 'deleted',
- 'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ filter_by(id=volume_id).\
+ update({'status': 'deleted',
+ 'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
session.query(models.IscsiTarget).\
- filter_by(volume_id=volume_id).\
- update({'volume_id': None})
+ filter_by(volume_id=volume_id).\
+ update({'volume_id': None})
session.query(models.VolumeMetadata).\
- filter_by(volume_id=volume_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ filter_by(volume_id=volume_id).\
+ update({'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_admin_context
def _volume_get_query(context, session=None, project_only=False):
return model_query(context, models.Volume, session=session,
project_only=project_only).\
- options(joinedload('volume_metadata')).\
- options(joinedload('volume_type'))
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type'))
@require_context
def volume_get(context, volume_id, session=None):
result = _volume_get_query(context, session=session, project_only=True).\
- filter_by(id=volume_id).\
- first()
+ filter_by(id=volume_id).\
+ first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
@require_admin_context
def volume_get_all_by_instance_uuid(context, instance_uuid):
result = model_query(context, models.Volume, read_deleted="no").\
- options(joinedload('volume_metadata')).\
- options(joinedload('volume_type')).\
- filter_by(instance_uuid=instance_uuid).\
- all()
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
+ filter_by(instance_uuid=instance_uuid).\
+ all()
if not result:
return []
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
result = model_query(context, models.IscsiTarget, read_deleted="yes").\
- filter_by(volume_id=volume_id).\
- first()
+ filter_by(volume_id=volume_id).\
+ first()
if not result:
raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
def _volume_metadata_get_query(context, volume_id, session=None):
return model_query(context, models.VolumeMetadata,
session=session, read_deleted="no").\
- filter_by(volume_id=volume_id)
+ filter_by(volume_id=volume_id)
@require_context
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key, session=None):
result = _volume_metadata_get_query(context, volume_id, session=session).\
- filter_by(key=key).\
- first()
+ filter_by(key=key).\
+ first()
if not result:
raise exception.VolumeMetadataNotFound(metadata_key=key,
session = get_session()
with session.begin():
session.query(models.Snapshot).\
- filter_by(id=snapshot_id).\
- update({'status': 'deleted',
- 'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ filter_by(id=snapshot_id).\
+ update({'status': 'deleted',
+ 'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
def snapshot_get(context, snapshot_id, session=None):
result = model_query(context, models.Snapshot, session=session,
project_only=True).\
- filter_by(id=snapshot_id).\
- first()
+ filter_by(id=snapshot_id).\
+ first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
def snapshot_get_all_for_volume(context, volume_id):
return model_query(context, models.Snapshot, read_deleted='no',
project_only=True).\
- filter_by(volume_id=volume_id).all()
+ filter_by(volume_id=volume_id).all()
@require_context
def snapshot_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
return model_query(context, models.Snapshot).\
- filter_by(project_id=project_id).\
- all()
+ filter_by(project_id=project_id).\
+ all()
@require_context
func.sum(models.Snapshot.volume_size),
read_deleted="no",
session=session).\
- filter_by(project_id=project_id).\
- first()
+ filter_by(project_id=project_id).\
+ first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
def migration_get(context, id, session=None):
result = model_query(context, models.Migration, session=session,
read_deleted="yes").\
- filter_by(id=id).\
- first()
+ filter_by(id=id).\
+ first()
if not result:
raise exception.MigrationNotFound(migration_id=id)
@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
result = model_query(context, models.Migration, read_deleted="yes").\
- filter_by(instance_uuid=instance_uuid).\
- filter_by(status=status).\
- first()
+ filter_by(instance_uuid=instance_uuid).\
+ filter_by(status=status).\
+ first()
if not result:
raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
@require_admin_context
def migration_get_all_unconfirmed(context, confirm_window, session=None):
confirm_window = timeutils.utcnow() - datetime.timedelta(
- seconds=confirm_window)
+ seconds=confirm_window)
return model_query(context, models.Migration, session=session,
read_deleted="yes").\
- filter(models.Migration.updated_at <= confirm_window).\
- filter_by(status="finished").\
- all()
+ filter(models.Migration.updated_at <= confirm_window).\
+ filter_by(status="finished").\
+ all()
##################
read_deleted = "yes" if inactive else "no"
rows = model_query(context, models.VolumeTypes,
read_deleted=read_deleted).\
- options(joinedload('extra_specs')).\
- order_by("name").\
- all()
+ options(joinedload('extra_specs')).\
+ order_by("name").\
+ all()
# TODO(sirp): this patern of converting rows to a result with extra_specs
# is repeated quite a bit, might be worth creating a method for it
def volume_type_get(context, id, session=None):
"""Returns a dict describing specific volume_type"""
result = model_query(context, models.VolumeTypes, session=session).\
- options(joinedload('extra_specs')).\
- filter_by(id=id).\
- first()
+ options(joinedload('extra_specs')).\
+ filter_by(id=id).\
+ first()
if not result:
raise exception.VolumeTypeNotFound(volume_type_id=id)
def volume_type_get_by_name(context, name, session=None):
"""Returns a dict describing specific volume_type"""
result = model_query(context, models.VolumeTypes, session=session).\
- options(joinedload('extra_specs')).\
- filter_by(name=name).\
- first()
+ options(joinedload('extra_specs')).\
+ filter_by(name=name).\
+ first()
if not result:
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
session=session)
volume_type_id = volume_type_ref['id']
session.query(models.VolumeTypes).\
- filter_by(id=volume_type_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ filter_by(id=volume_type_id).\
+ update({'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
session.query(models.VolumeTypeExtraSpecs).\
- filter_by(volume_type_id=volume_type_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
+ filter_by(volume_type_id=volume_type_id).\
+ update({'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
-def volume_get_active_by_window(context, begin, end=None,
- project_id=None):
+def volume_get_active_by_window(context,
+ begin,
+ end=None,
+ project_id=None):
"""Return volumes that were active during window."""
session = get_session()
query = session.query(models.Volume)
- query = query.filter(or_(models.Volume.deleted_at == None,
+ query = query.filter(or_(models.Volume.deleted_at.is_(None),
models.Volume.deleted_at > begin))
if end:
query = query.filter(models.Volume.created_at < end)
def _volume_type_extra_specs_query(context, volume_type_id, session=None):
return model_query(context, models.VolumeTypeExtraSpecs, session=session,
read_deleted="no").\
- filter_by(volume_type_id=volume_type_id)
+ filter_by(volume_type_id=volume_type_id)
@require_context
def volume_type_extra_specs_get(context, volume_type_id):
rows = _volume_type_extra_specs_query(context, volume_type_id).\
- all()
+ all()
result = {}
for row in rows:
def volume_type_extra_specs_get_item(context, volume_type_id, key,
session=None):
result = _volume_type_extra_specs_query(
- context, volume_type_id, session=session).\
- filter_by(key=key).\
- first()
+ context, volume_type_id, session=session).\
+ filter_by(key=key).\
+ first()
if not result:
raise exception.VolumeTypeExtraSpecsNotFound(
- extra_specs_key=key, volume_type_id=volume_type_id)
+ extra_specs_key=key,
+ volume_type_id=volume_type_id)
return result
session = get_session()
return session.query(models.VolumeGlanceMetadata).\
- filter_by(volume_id=volume_id).\
- filter_by(deleted=False).all()
+ filter_by(volume_id=volume_id).\
+ filter_by(deleted=False).all()
@require_context
session = get_session()
return session.query(models.VolumeGlanceMetadata).\
- filter_by(snapshot_id=snapshot_id).\
- filter_by(deleted=False).all()
+ filter_by(snapshot_id=snapshot_id).\
+ filter_by(deleted=False).all()
@require_context
with session.begin():
rows = session.query(models.VolumeGlanceMetadata).\
- filter_by(volume_id=volume_id).\
- filter_by(key=key).\
- filter_by(deleted=False).all()
+ filter_by(volume_id=volume_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).all()
if len(rows) > 0:
raise exception.GlanceMetadataExists(key=key,
session = get_session()
metadata = volume_snapshot_glance_metadata_get(context, snapshot_id,
- session=session)
+ session=session)
with session.begin():
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
backend_conf = model_query(context, models.SMBackendConf,
session=session,
read_deleted="yes").\
- filter_by(id=sm_backend_id).\
- first()
+ filter_by(id=sm_backend_id).\
+ first()
if not backend_conf:
raise exception.NotFound(
with session.begin():
model_query(context, models.SMBackendConf, session=session,
read_deleted="yes").\
- filter_by(id=sm_backend_id).\
- delete()
+ filter_by(id=sm_backend_id).\
+ delete()
@require_admin_context
def sm_backend_conf_get(context, sm_backend_id):
result = model_query(context, models.SMBackendConf, read_deleted="yes").\
- filter_by(id=sm_backend_id).\
- first()
+ filter_by(id=sm_backend_id).\
+ first()
if not result:
raise exception.NotFound(_("No backend config with id "
@require_admin_context
def sm_backend_conf_get_by_sr(context, sr_uuid):
return model_query(context, models.SMBackendConf, read_deleted="yes").\
- filter_by(sr_uuid=sr_uuid).\
- first()
+ filter_by(sr_uuid=sr_uuid).\
+ first()
@require_admin_context
def sm_backend_conf_get_all(context):
return model_query(context, models.SMBackendConf, read_deleted="yes").\
- all()
+ all()
####################
def _sm_flavor_get_query(context, sm_flavor_label, session=None):
return model_query(context, models.SMFlavors, session=session,
read_deleted="yes").\
- filter_by(label=sm_flavor_label)
+ filter_by(label=sm_flavor_label)
@require_admin_context
if not result:
raise exception.NotFound(
- _("No sm_flavor called %(sm_flavor)s") % locals())
+ _("No sm_flavor called %(sm_flavor)s") % locals())
return result
def _sm_volume_get_query(context, volume_id, session=None):
return model_query(context, models.SMVolume, session=session,
read_deleted="yes").\
- filter_by(id=volume_id)
+ filter_by(id=volume_id)
def sm_volume_create(context, values):
if not result:
raise exception.NotFound(
- _("No sm_volume with id %(volume_id)s") % locals())
+ _("No sm_volume with id %(volume_id)s") % locals())
return result
# New table
quota_classes = Table('quota_classes', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True),
- Column('class_name',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False), index=True),
- Column('resource',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('hard_limit', Integer(), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True,
+ name=None)),
+ Column('id', Integer(), primary_key=True),
+ Column('class_name',
+ String(length=255,
+ convert_unicode=True,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False),
+ index=True),
+ Column('resource',
+ String(length=255,
+ convert_unicode=True,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column('hard_limit', Integer(), nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
try:
quota_classes.create()
raise
quota_usages = Table('quota_usages', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True),
- Column('project_id',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- index=True),
- Column('resource',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('in_use', Integer(), nullable=False),
- Column('reserved', Integer(), nullable=False),
- Column('until_refresh', Integer(), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True,
+ name=None)),
+ Column('id', Integer(), primary_key=True),
+ Column('project_id',
+ String(length=255, convert_unicode=True,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ index=True),
+ Column('resource',
+ String(length=255, convert_unicode=True,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column('in_use', Integer(), nullable=False),
+ Column('reserved', Integer(), nullable=False),
+ Column('until_refresh', Integer(), nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
try:
quota_usages.create()
raise
reservations = Table('reservations', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True),
- Column('uuid',
- String(length=36, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False), nullable=False),
- Column('usage_id', Integer(), ForeignKey('quota_usages.id'),
- nullable=False),
- Column('project_id',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- index=True),
- Column('resource',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('delta', Integer(), nullable=False),
- Column('expire', DateTime(timezone=False)),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True,
+ name=None)),
+ Column('id', Integer(), primary_key=True),
+ Column('uuid',
+ String(length=36,
+ convert_unicode=True,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=False),
+ Column('usage_id',
+ Integer(),
+ ForeignKey('quota_usages.id'),
+ nullable=False),
+ Column('project_id',
+ String(length=255, convert_unicode=True,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ index=True),
+ Column('resource',
+ String(length=255, convert_unicode=True,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column('delta', Integer(), nullable=False),
+ Column('expire', DateTime(timezone=False)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
try:
reservations.create()
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of tables .
#
- volumes = Table('volumes', meta,
- Column('id', Integer(), primary_key=True, nullable=False),
- mysql_engine='InnoDB'
- )
- snapshots = Table('snapshots', meta,
- Column('id', Integer(), primary_key=True, nullable=False),
- mysql_engine='InnoDB'
- )
+ volumes = Table('volumes',
+ meta,
+ Column('id', Integer(),
+ primary_key=True, nullable=False),
+ mysql_engine='InnoDB')
+ snapshots = Table('snapshots',
+ meta,
+ Column('id', Integer(),
+ primary_key=True, nullable=False),
+ mysql_engine='InnoDB')
# Create new table
- volume_glance_metadata = Table('volume_glance_metadata', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('volume_id', String(length=36), ForeignKey('volumes.id')),
- Column('snapshot_id', String(length=36),
- ForeignKey('snapshots.id')),
- Column('key', String(255)),
- Column('value', Text),
- mysql_engine='InnoDB'
+ volume_glance_metadata = Table(
+ 'volume_glance_metadata',
+ meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('volume_id', String(length=36), ForeignKey('volumes.id')),
+ Column('snapshot_id', String(length=36),
+ ForeignKey('snapshots.id')),
+ Column('key', String(255)),
+ Column('value', Text),
+ mysql_engine='InnoDB'
)
try:
return n, getattr(self, n)
def update(self, values):
- """Make the model object behave like a dict"""
+ """Make the model object behave like a dict."""
for k, v in values.iteritems():
setattr(self, k, v)
class VolumeMetadata(BASE, CinderBase):
- """Represents a metadata key/value pair for a volume"""
+ """Represents a metadata key/value pair for a volume."""
__tablename__ = 'volume_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
class VolumeTypes(BASE, CinderBase):
- """Represent possible volume_types of volumes offered"""
+ """Represent possible volume_types of volumes offered."""
__tablename__ = "volume_types"
id = Column(Integer, primary_key=True)
name = Column(String(255))
class VolumeTypeExtraSpecs(BASE, CinderBase):
- """Represents additional specs as key/value pairs for a volume_type"""
+ """Represents additional specs as key/value pairs for a volume_type."""
__tablename__ = 'volume_type_extra_specs'
id = Column(Integer, primary_key=True)
key = Column(String(255))
class VolumeGlanceMetadata(BASE, CinderBase):
- """Glance metadata for a bootable volume"""
+ """Glance metadata for a bootable volume."""
__tablename__ = 'volume_glance_metadata'
id = Column(Integer, primary_key=True, nullable=False)
volume_id = Column(String(36), ForeignKey('volumes.id'))
class IscsiTarget(BASE, CinderBase):
- """Represents an iscsi target for a given host"""
+ """Represents an iscsi target for a given host."""
__tablename__ = 'iscsi_targets'
__table_args__ = (schema.UniqueConstraint("target_num", "host"),
{'mysql_engine': 'InnoDB'})
_ENGINE.connect()
break
except OperationalError, e:
- if (remaining != 'infinite' and remaining == 0) or \
- not is_db_connection_error(e.args[0]):
+ if ((remaining != 'infinite' and remaining == 0) or
+ not is_db_connection_error(e.args[0])):
raise
return _ENGINE
help='Directory where cinder binaries are installed'),
cfg.StrOpt('state_path',
default='$pybasedir',
- help="Top-level directory for maintaining cinder's state"),
- ]
+ help="Top-level directory for maintaining cinder's state"), ]
debug_opts = [
]
help='A list of the glance api servers available to cinder '
'([hostname|ip]:port)'),
cfg.IntOpt('glance_num_retries',
- default=0,
- help='Number retries when downloading an image from glance'),
+ default=0,
+ help='Number retries when downloading an image from glance'),
cfg.StrOpt('scheduler_topic',
default='cinder-scheduler',
help='the topic scheduler nodes listen on'),
default=60,
help='maximum time since last check-in for up service'),
cfg.StrOpt('volume_api_class',
- default='cinder.volume.api.API',
- help='The full class name of the volume API class to use'),
+ default='cinder.volume.api.API',
+ help='The full class name of the volume API class to use'),
cfg.StrOpt('auth_strategy',
default='noauth',
help='The strategy to use for auth. Supports noauth, keystone, '
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
cfg.BoolOpt('secure_delete',
default=True,
- help='Whether to perform secure delete'),
-]
+ help='Whether to perform secure delete'), ]
FLAGS.register_opts(global_opts)
retry the request according to FLAGS.glance_num_retries.
"""
retry_excs = (glanceclient.exc.ServiceUnavailable,
- glanceclient.exc.InvalidEndpoint,
- glanceclient.exc.CommunicationError)
+ glanceclient.exc.InvalidEndpoint,
+ glanceclient.exc.CommunicationError)
num_attempts = 1 + FLAGS.glance_num_retries
for attempt in xrange(1, num_attempts + 1):
port = self.port
extra = "retrying"
error_msg = _("Error contacting glance server "
- "'%(host)s:%(port)s' for '%(method)s', %(extra)s.")
+ "'%(host)s:%(port)s' for '%(method)s', "
+ "%(extra)s.")
if attempt == num_attempts:
extra = 'done trying'
LOG.exception(error_msg, locals())
- raise exception.GlanceConnectionFailed(
- host=host, port=port, reason=str(e))
+ raise exception.GlanceConnectionFailed(host=host,
+ port=port,
+ reason=str(e))
LOG.exception(error_msg, locals())
time.sleep(1)
return self._translate_from_glance(recv_service_image_meta)
- def update(self, context, image_id, image_meta, data=None,
- purge_props=True):
+ def update(self, context, image_id,
+ image_meta, data=None, purge_props=True):
"""Modify the given image with the new data."""
image_meta = self._translate_to_glance(image_meta)
image_meta['purge_props'] = purge_props
def _translate_image_exception(image_id, exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
- glanceclient.exc.Unauthorized)):
+ glanceclient.exc.Unauthorized)):
return exception.ImageNotAuthorized(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.ImageNotFound(image_id=image_id)
def _translate_plain_exception(exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
- glanceclient.exc.Unauthorized)):
+ glanceclient.exc.Unauthorized)):
return exception.NotAuthorized(exc_value)
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.NotFound(exc_value)
try:
(image_id, glance_host, glance_port) = _parse_image_ref(image_href)
glance_client = GlanceClientWrapper(context=context,
- host=glance_host, port=glance_port)
+ host=glance_host,
+ port=glance_port)
except ValueError:
raise exception.InvalidImageRef(image_href=image_href)
"""Pass data back to the scheduler at a periodic interval."""
if self.last_capabilities:
LOG.debug(_('Notifying Schedulers of capabilities ...'))
- self.scheduler_rpcapi.update_service_capabilities(context,
- self.service_name, self.host, self.last_capabilities)
+ self.scheduler_rpcapi.update_service_capabilities(
+ context,
+ self.service_name,
+ self.host,
+ self.last_capabilities)
help=_('JSON file representing policy')),
cfg.StrOpt('policy_default_rule',
default='default',
- help=_('Rule checked when requested rule is not found')),
- ]
+ help=_('Rule checked when requested rule is not found')), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(policy_opts)
help='number of seconds between subsequent usage refreshes'),
cfg.StrOpt('quota_driver',
default='cinder.quota.DbQuotaDriver',
- help='default driver to use for quota checks'),
- ]
+ help='default driver to use for quota checks'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(quota_opts)
continue
quotas[resource.name] = dict(
- limit=project_quotas.get(resource.name, class_quotas.get(
- resource.name, resource.default)),
- )
+ limit=project_quotas.get(resource.name,
+ class_quotas.get(resource.name,
+ resource.default)), )
# Include usages if desired. This is optional because one
# internal consumer of this interface wants to access the
usage = project_usages.get(resource.name, {})
quotas[resource.name].update(
in_use=usage.get('in_use', 0),
- reserved=usage.get('reserved', 0),
- )
+ reserved=usage.get('reserved', 0), )
return quotas
"""
return self._driver.get_project_quotas(context, self._resources,
- project_id,
- quota_class=quota_class,
- defaults=defaults,
- usages=usages)
+ project_id,
+ quota_class=quota_class,
+ defaults=defaults,
+ usages=usages)
def count(self, context, resource, *args, **kwargs):
"""Count a resource.
def _sync_instances(context, project_id, session):
return dict(zip(('instances', 'cores', 'ram'),
- db.instance_data_get_for_project(
- context, project_id, session=session)))
+ db.instance_data_get_for_project(context,
+ project_id,
+ session=session)))
def _sync_volumes(context, project_id, session):
return dict(zip(('volumes', 'gigabytes'),
- db.volume_data_get_for_project(
- context, project_id, session=session)))
+ db.volume_data_get_for_project(context,
+ project_id,
+ session=session)))
QUOTAS = QuotaEngine()
resources = [
ReservableResource('volumes', _sync_volumes, 'quota_volumes'),
- ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'),
- ]
+ ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'), ]
QUOTAS.register_resources(resources)
updated_volume = driver.volume_update_db(context, volume_id, host)
self.volume_rpcapi.create_volume(context, updated_volume, host,
- snapshot_id, image_id)
+ snapshot_id, image_id)
scheduler_driver_opts = [
cfg.StrOpt('scheduler_host_manager',
default='cinder.scheduler.host_manager.HostManager',
- help='The scheduler host manager class to use'),
- ]
+ help='The scheduler host manager class to use'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(scheduler_driver_opts)
def __init__(self):
self.host_manager = importutils.import_object(
- FLAGS.scheduler_host_manager)
+ FLAGS.scheduler_host_manager)
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
def get_host_list(self):
def update_service_capabilities(self, service_name, host, capabilities):
"""Process a capability update from a service node."""
self.host_manager.update_service_capabilities(service_name,
- host, capabilities)
+ host,
+ capabilities)
def hosts_up(self, context, topic):
"""Return the list of hosts that have a running service for topic."""
LOG = logging.getLogger(__name__)
-scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
- default='cinder.scheduler.simple.SimpleScheduler',
- help='Default driver to use for the scheduler')
+scheduler_driver_opt = cfg.StrOpt(
+ 'scheduler_driver',
+ default='cinder.scheduler.simple.SimpleScheduler',
+ help='Default driver to use for the scheduler')
FLAGS = flags.FLAGS
FLAGS.register_opt(scheduler_driver_opt)
class SchedulerManager(manager.Manager):
- """Chooses a host to create volumes"""
+ """Chooses a host to create volumes."""
RPC_API_VERSION = '1.2'
return self.driver.get_service_capabilities()
def update_service_capabilities(self, context, service_name=None,
- host=None, capabilities=None, **kwargs):
+ host=None, capabilities=None, **kwargs):
"""Process a capability update from a service node."""
if capabilities is None:
capabilities = {}
- self.driver.update_service_capabilities(service_name, host,
- capabilities)
+ self.driver.update_service_capabilities(service_name,
+ host,
+ capabilities)
def create_volume(self, context, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
volume_properties = {'size': size,
'availability_zone': availability_zone,
'volume_type_id': volume_type_id}
- request_spec.update({'volume_id': volume_id,
- 'snapshot_id': snapshot_id,
- 'image_id': image_id,
- 'volume_properties': volume_properties,
- 'volume_type': dict(vol_type).iteritems()})
+ request_spec.update(
+ {'volume_id': volume_id,
+ 'snapshot_id': snapshot_id,
+ 'image_id': image_id,
+ 'volume_properties': volume_properties,
+ 'volume_type': dict(vol_type).iteritems()})
self.driver.schedule_create_volume(context, request_spec,
filter_properties)
RPC_API_VERSION = '1.0'
def __init__(self):
- super(SchedulerAPI, self).__init__(topic=FLAGS.scheduler_topic,
- default_version=self.RPC_API_VERSION)
+ super(SchedulerAPI, self).__init__(
+ topic=FLAGS.scheduler_topic,
+ default_version=self.RPC_API_VERSION)
def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
filter_properties=None):
- return self.cast(ctxt, self.make_msg('create_volume',
- topic=topic,
- volume_id=volume_id,
- snapshot_id=snapshot_id,
- image_id=image_id,
- request_spec=request_spec,
- filter_properties=filter_properties),
- version='1.2')
-
- def update_service_capabilities(self, ctxt, service_name, host,
- capabilities):
+ return self.cast(ctxt, self.make_msg(
+ 'create_volume',
+ topic=topic,
+ volume_id=volume_id,
+ snapshot_id=snapshot_id,
+ image_id=image_id,
+ request_spec=request_spec,
+ filter_properties=filter_properties),
+ version='1.2')
+
+ def update_service_capabilities(self, ctxt,
+ service_name, host,
+ capabilities):
self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
- service_name=service_name, host=host,
- capabilities=capabilities))
+ service_name=service_name, host=host,
+ capabilities=capabilities))
simple_scheduler_opts = [
cfg.IntOpt("max_gigabytes",
default=10000,
- help="maximum number of volume gigabytes to allow per host"),
- ]
+ help="maximum number of volume gigabytes to allow per host"), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(simple_scheduler_opts)
if not utils.service_is_up(service):
raise exception.WillNotSchedule(host=host)
updated_volume = driver.volume_update_db(context, volume_id, host)
- self.volume_rpcapi.create_volume(context, updated_volume,
- host,
- snapshot_id,
- image_id)
+ self.volume_rpcapi.create_volume(context,
+ updated_volume,
+ host,
+ snapshot_id,
+ image_id)
return None
results = db.service_get_all_volume_sorted(elevated)
if utils.service_is_up(service) and not service['disabled']:
updated_volume = driver.volume_update_db(context, volume_id,
service['host'])
- self.volume_rpcapi.create_volume(context, updated_volume,
- service['host'],
- snapshot_id,
- image_id)
+ self.volume_rpcapi.create_volume(context,
+ updated_volume,
+ service['host'],
+ snapshot_id,
+ image_id)
return None
msg = _("Is the appropriate service running?")
raise exception.NoValidHost(reason=msg)
help='IP address for OpenStack Volume API to listen'),
cfg.IntOpt('osapi_volume_listen_port',
default=8776,
- help='port for os volume api to listen'),
- ]
+ help='port for os volume api to listen'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(service_opts)
state_catalog['availability_zone'] = zone
db.service_update(ctxt,
- self.service_id, state_catalog)
+ self.service_id, state_catalog)
# TODO(termie): make this pattern be more elegant.
if getattr(self, 'model_disconnected', False):
help='File name of clean sqlite db'),
cfg.BoolOpt('fake_tests',
default=True,
- help='should we use everything for testing'),
- ]
+ help='should we use everything for testing'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(test_opts)
"""
def __init__(self, stream):
import win32console as win
- red, green, blue, bold = (win.FOREGROUND_RED, win.FOREGROUND_GREEN,
- win.FOREGROUND_BLUE, win.FOREGROUND_INTENSITY)
+ red, green, blue, bold = (win.FOREGROUND_RED,
+ win.FOREGROUND_GREEN,
+ win.FOREGROUND_BLUE,
+ win.FOREGROUND_INTENSITY)
self.stream = stream
self.screenBuffer = win.GetStdHandle(win.STD_OUT_HANDLE)
self._colors = {
'yellow': red | green | bold,
'magenta': red | blue | bold,
'cyan': green | blue | bold,
- 'white': red | green | blue | bold
- }
+ 'white': red | green | blue | bold}
def supported(cls, stream=sys.stdout):
try:
def _makeResult(self):
return CinderTestResult(self.stream,
- self.descriptions,
- self.verbosity,
- self.config,
- show_elapsed=self.show_elapsed)
+ self.descriptions,
+ self.verbosity,
+ self.config,
+ show_elapsed=self.show_elapsed)
def _writeSlowTests(self, result_):
# Pare out 'fast' tests
plugins=core.DefaultPluginManager())
runner = CinderTestRunner(stream=c.stream,
- verbosity=c.verbosity,
- config=c,
- show_elapsed=not hide_elapsed)
+ verbosity=c.verbosity,
+ config=c,
+ show_elapsed=not hide_elapsed)
sys.exit(not core.run(config=c, testRunner=runner, argv=argv))
def _get_default_snapshot_param():
- return {
- 'id': UUID1,
- 'volume_id': 12,
- 'status': 'available',
- 'volume_size': 100,
- 'created_at': None,
- 'display_name': 'Default name',
- 'display_description': 'Default description',
- 'project_id': 'fake',
- 'progress': '0%'
- }
+ return {'id': UUID1,
+ 'volume_id': 12,
+ 'status': 'available',
+ 'volume_size': 100,
+ 'created_at': None,
+ 'display_name': 'Default name',
+ 'display_description': 'Default description',
+ 'project_id': 'fake',
+ 'progress': '0%'}
def fake_snapshot_get(self, context, snapshot_id):
def assertSnapshotAttributes(self, snapshot, project_id, progress):
self.assertEqual(snapshot.get('%sproject_id' % self.prefix),
- project_id)
+ project_id)
self.assertEqual(snapshot.get('%sprogress' % self.prefix), progress)
def test_show(self):
self.assertEqual(res.status_int, 200)
self.assertSnapshotAttributes(self._get_snapshot(res.body),
- project_id='fake',
- progress='0%')
+ project_id='fake',
+ progress='0%')
def test_detail(self):
url = '/v1/fake/snapshots/detail'
self.assertEqual(res.status_int, 200)
for i, snapshot in enumerate(self._get_snapshots(res.body)):
self.assertSnapshotAttributes(snapshot,
- project_id='fake',
- progress='0%')
+ project_id='fake',
+ progress='0%')
def test_no_instance_passthrough_404(self):
app = fakes.wsgi_app()
for _action in self._actions:
req = webob.Request.blank('/v1/fake/volumes/%s/action' %
- self.UUID)
+ self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({_action: None})
req.content_type = 'application/json'
req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id)
res_dict = self.controller._volume_upload_image(req, id, body)
expected = {'os-volume_upload_image': {'id': id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'status': 'uploading',
- 'display_description': 'displaydesc',
- 'size': 1,
- 'volume_type': {'name': 'vol_type_name'},
- 'image_id': 1,
- 'container_format': 'bare',
- 'disk_format': 'raw',
- 'image_name': 'image_name'}}
+ 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'status': 'uploading',
+ 'display_description': 'displaydesc',
+ 'size': 1,
+ 'volume_type': {'name': 'vol_type_name'},
+ 'image_id': 1,
+ 'container_format': 'bare',
+ 'disk_format': 'raw',
+ 'image_name': 'image_name'}}
self.assertDictMatch(res_dict, expected)
def test_copy_volume_to_image_volumenotfound(self):
def test_copy_volume_to_image_invalidvolume(self):
def stub_upload_volume_to_image_service_raise(self, context, volume,
- metadata, force):
+ metadata, force):
raise exception.InvalidVolume
self.stubs.Set(volume_api.API,
"copy_volume_to_image",
def test_copy_volume_to_image_valueerror(self):
def stub_upload_volume_to_image_service_raise(self, context, volume,
- metadata, force):
+ metadata, force):
raise ValueError
self.stubs.Set(volume_api.API,
"copy_volume_to_image",
def test_copy_volume_to_image_remoteerror(self):
def stub_upload_volume_to_image_service_raise(self, context, volume,
- metadata, force):
+ metadata, force):
raise rpc_common.RemoteError
self.stubs.Set(volume_api.API,
"copy_volume_to_image",
res = req.get_response(app())
vol = etree.XML(res.body)
tenant_key = ('{http://docs.openstack.org/volume/ext/'
- 'volume_tenant_attribute/api/v1}tenant_id')
+ 'volume_tenant_attribute/api/v1}tenant_id')
self.assertEqual(vol.get(tenant_key), PROJECT_ID)
def test_list_volumes_detail_xml(self):
res = req.get_response(app())
vol = list(etree.XML(res.body))[0]
tenant_key = ('{http://docs.openstack.org/volume/ext/'
- 'volume_tenant_attribute/api/v1}tenant_id')
+ 'volume_tenant_attribute/api/v1}tenant_id')
self.assertEqual(vol.get(tenant_key), PROJECT_ID)
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('foxnsocks',
- FoxInSocksController())
+ FoxInSocksController())
resources.append(resource)
return resources
extension_set = [
(FoxInSocksServerControllerExtension, 'servers'),
(FoxInSocksFlavorGooseControllerExtension, 'flavors'),
- (FoxInSocksFlavorBandsControllerExtension, 'flavors'),
- ]
+ (FoxInSocksFlavorBandsControllerExtension, 'flavors'), ]
for klass, collection in extension_set:
controller = klass()
ext = extensions.ControllerExtension(self, collection, controller)
for request in requests:
exc = webob.exc.HTTPRequestEntityTooLarge
fault = wsgi.Fault(exc(explanation='sorry',
- headers={'Retry-After': 4}))
+ headers={'Retry-After': 4}))
response = request.get_response(fault)
expected = {
def wsgi_app(inner_app_v1=None, fake_auth=True, fake_auth_context=None,
- use_no_auth=False, ext_mgr=None):
+ use_no_auth=False, ext_mgr=None):
if not inner_app_v1:
inner_app_v1 = router.APIRouter(ext_mgr)
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v1 = fault.FaultWrapper(auth.InjectContext(ctxt,
- inner_app_v1))
+ inner_app_v1))
elif use_no_auth:
api_v1 = fault.FaultWrapper(auth.NoAuthMiddleware(
- limits.RateLimitingMiddleware(inner_app_v1)))
+ limits.RateLimitingMiddleware(inner_app_v1)))
else:
api_v1 = fault.FaultWrapper(auth.AuthMiddleware(
- limits.RateLimitingMiddleware(inner_app_v1)))
+ limits.RateLimitingMiddleware(inner_app_v1)))
mapper = urlmap.URLMap()
mapper['/v1'] = api_v1
kwargs['base_url'] = 'http://localhost/v1'
use_admin_context = kwargs.pop('use_admin_context', False)
out = webob.Request.blank(*args, **kwargs)
- out.environ['cinder.context'] = FakeRequestContext('fake_user', 'fake',
- is_admin=use_admin_context)
+ out.environ['cinder.context'] = FakeRequestContext(
+ 'fake_user',
+ 'fake',
+ is_admin=use_admin_context)
return out
def stub_snapshot(id, **kwargs):
- snapshot = {
- 'id': id,
- 'volume_id': 12,
- 'status': 'available',
- 'volume_size': 100,
- 'created_at': None,
- 'display_name': 'Default name',
- 'display_description': 'Default description',
- 'project_id': 'fake'
- }
+ snapshot = {'id': id,
+ 'volume_id': 12,
+ 'status': 'available',
+ 'volume_size': 100,
+ 'created_at': None,
+ 'display_name': 'Default name',
+ 'display_description': 'Default description',
+ 'project_id': 'fake'}
snapshot.update(kwargs)
return snapshot
extended = ControllerExtended()
resource.register_actions(extended)
- self.assertEqual({
- 'fooAction': extended._action_foo,
- 'barAction': extended._action_bar,
- }, resource.wsgi_actions)
+ self.assertEqual({'fooAction': extended._action_foo,
+ 'barAction': extended._action_bar, },
+ resource.wsgi_actions)
def test_register_extensions(self):
class Controller(object):
# License for the specific language governing permissions and limitations
# under the License.
-from lxml import etree
+import datetime
import webob.exc
from cinder.api.openstack.volume.contrib import hosts as os_hosts
from cinder import context
-import datetime
from cinder import db
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import test
+from lxml import etree
FLAGS = flags.FLAGS
curr_time = timeutils.utcnow()
SERVICE_LIST = [
- {'created_at': created_time, 'updated_at': curr_time,
- 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
- 'availability_zone': 'cinder'},
- {'created_at': created_time, 'updated_at': curr_time,
- 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
- 'availability_zone': 'cinder'},
- {'created_at': created_time, 'updated_at': curr_time,
- 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
- 'availability_zone': 'cinder'},
- {'created_at': created_time, 'updated_at': curr_time,
- 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
- 'availability_zone': 'cinder'}]
+ {'created_at': created_time, 'updated_at': curr_time,
+ 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
+ 'availability_zone': 'cinder'},
+ {'created_at': created_time, 'updated_at': curr_time,
+ 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
+ 'availability_zone': 'cinder'},
+ {'created_at': created_time, 'updated_at': curr_time,
+ 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
+ 'availability_zone': 'cinder'},
+ {'created_at': created_time, 'updated_at': curr_time,
+ 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
+ 'availability_zone': 'cinder'}]
LIST_RESPONSE = [{'service-status': 'available', 'service': 'cinder-volume',
'zone': 'cinder', 'service-state': 'enabled',
cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume')
expected = [host for host in LIST_RESPONSE
- if host['service'] == 'cinder-volume']
+ if host['service'] == 'cinder-volume']
self.assertEqual(cinder_hosts, expected)
def test_list_hosts_with_zone(self):
def test_bad_status_value(self):
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, 'test.host.1', body={'status': 'bad'})
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, 'test.host.1', body={'status': 'disablabc'})
+ self.req, 'test.host.1', body={'status': 'bad'})
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update,
+ self.req,
+ 'test.host.1',
+ body={'status': 'disablabc'})
def test_bad_update_key(self):
bad_body = {'crazy': 'bad'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, 'test.host.1', body=bad_body)
+ self.req, 'test.host.1', body=bad_body)
def test_bad_update_key_and_correct_udpate_key(self):
bad_body = {'status': 'disable', 'crazy': 'bad'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, 'test.host.1', body=bad_body)
+ self.req, 'test.host.1', body=bad_body)
def test_good_udpate_keys(self):
body = {'status': 'disable'}
self.req, 'test.host.1', body=body)
def test_bad_host(self):
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
- self.req, 'bogus_host_name', body={'disabled': 0})
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ self.req,
+ 'bogus_host_name',
+ body={'disabled': 0})
def test_show_forbidden(self):
self.req.environ['cinder.context'].is_admin = False
def setUp(self):
super(ExtensionControllerTest, self).setUp()
- self.ext_list = [
- "TypesManage",
- "TypesExtraSpecs",
- ]
+ self.ext_list = ["TypesManage", "TypesExtraSpecs", ]
self.ext_list.sort()
def test_list_extensions_json(self):
# Make sure that at least Fox in Sox is correct.
(fox_ext, ) = [
x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
- self.assertEqual(fox_ext, {
- 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
- 'name': 'Fox In Socks',
- 'updated': '2011-01-22T13:25:27-06:00',
- 'description': 'The Fox In Socks Extension',
- 'alias': 'FOXNSOX',
- 'links': []
- },
- )
+ self.assertEqual(
+ fox_ext, {'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
+ 'name': 'Fox In Socks',
+ 'updated': '2011-01-22T13:25:27-06:00',
+ 'description': 'The Fox In Socks Extension',
+ 'alias': 'FOXNSOX',
+ 'links': []}, )
for ext in data['extensions']:
url = '/fake/extensions/%s' % ext['alias']
self.assertEqual(200, response.status_int)
data = jsonutils.loads(response.body)
- self.assertEqual(data['extension'], {
- "namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
- "name": "Fox In Socks",
- "updated": "2011-01-22T13:25:27-06:00",
- "description": "The Fox In Socks Extension",
- "alias": "FOXNSOX",
- "links": []})
+ self.assertEqual(
+ data['extension'],
+ {"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
+ "name": "Fox In Socks",
+ "updated": "2011-01-22T13:25:27-06:00",
+ "description": "The Fox In Socks Extension",
+ "alias": "FOXNSOX",
+ "links": []})
def test_get_non_existing_extension_json(self):
app = router.APIRouter()
# Make sure that at least Fox in Sox is correct.
(fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
- self.assertEqual(fox_ext.get('namespace'),
+ self.assertEqual(
+ fox_ext.get('namespace'),
'http://www.fox.in.socks/api/ext/pie/v1.0')
self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00')
- self.assertEqual(fox_ext.findtext('{0}description'.format(NS)),
+ self.assertEqual(
+ fox_ext.findtext('{0}description'.format(NS)),
'The Fox In Socks Extension')
xmlutil.validate_schema(root, 'extensions')
self.assertEqual(root.tag.split('extension')[0], NS)
self.assertEqual(root.get('alias'), 'FOXNSOX')
self.assertEqual(root.get('name'), 'Fox In Socks')
- self.assertEqual(root.get('namespace'),
+ self.assertEqual(
+ root.get('namespace'),
'http://www.fox.in.socks/api/ext/pie/v1.0')
self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00')
- self.assertEqual(root.findtext('{0}description'.format(NS)),
+ self.assertEqual(
+ root.findtext('{0}description'.format(NS)),
'The Fox In Socks Extension')
xmlutil.validate_schema(root, 'extension')
class SelectorTest(test.TestCase):
- obj_for_test = {
- 'test': {
- 'name': 'test',
- 'values': [1, 2, 3],
- 'attrs': {
- 'foo': 1,
- 'bar': 2,
- 'baz': 3,
- },
- },
- }
+ obj_for_test = {'test': {'name': 'test',
+ 'values': [1, 2, 3],
+ 'attrs': {'foo': 1,
+ 'bar': 2,
+ 'baz': 3, }, }, }
def test_empty_selector(self):
sel = xmlutil.Selector()
self.assertEqual(len(elem), 0)
# Create a few children
- children = [
- xmlutil.TemplateElement('child1'),
- xmlutil.TemplateElement('child2'),
- xmlutil.TemplateElement('child3'),
- ]
+ children = [xmlutil.TemplateElement('child1'),
+ xmlutil.TemplateElement('child2'),
+ xmlutil.TemplateElement('child3'), ]
# Extend the parent by those children
elem.extend(children)
self.assertEqual(elem[children[idx].tag], children[idx])
# Ensure that multiple children of the same name are rejected
- children2 = [
- xmlutil.TemplateElement('child4'),
- xmlutil.TemplateElement('child1'),
- ]
+ children2 = [xmlutil.TemplateElement('child4'),
+ xmlutil.TemplateElement('child1'), ]
self.assertRaises(KeyError, elem.extend, children2)
# Also ensure that child4 was not added
self.assertEqual(len(elem), 0)
# Create a few children
- children = [
- xmlutil.TemplateElement('child1'),
- xmlutil.TemplateElement('child2'),
- xmlutil.TemplateElement('child3'),
- ]
+ children = [xmlutil.TemplateElement('child1'),
+ xmlutil.TemplateElement('child2'),
+ xmlutil.TemplateElement('child3'), ]
# Extend the parent by those children
elem.extend(children)
self.assertEqual(len(elem), 0)
# Create a few children
- children = [
- xmlutil.TemplateElement('child1'),
- xmlutil.TemplateElement('child2'),
- xmlutil.TemplateElement('child3'),
- ]
+ children = [xmlutil.TemplateElement('child1'),
+ xmlutil.TemplateElement('child2'),
+ xmlutil.TemplateElement('child3'), ]
# Extend the parent by those children
elem.extend(children)
master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
# Create a couple of slave template element
- slave_elems = [
- xmlutil.TemplateElement('test', attr2=attrs['attr2']),
- xmlutil.TemplateElement('test', attr3=attrs['attr3']),
- ]
+ slave_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']),
+ xmlutil.TemplateElement('test', attr3=attrs['attr3']), ]
# Try the render
elem = master_elem._render(None, None, slave_elems, None)
def test__serialize(self):
# Our test object to serialize
- obj = {
- 'test': {
- 'name': 'foobar',
- 'values': [1, 2, 3, 4],
- 'attrs': {
- 'a': 1,
- 'b': 2,
- 'c': 3,
- 'd': 4,
- },
- 'image': {
- 'name': 'image_foobar',
- 'id': 42,
- },
- },
- }
+ obj = {'test': {'name': 'foobar',
+ 'values': [1, 2, 3, 4],
+ 'attrs': {'a': 1,
+ 'b': 2,
+ 'c': 3,
+ 'd': 4, },
+ 'image': {'name': 'image_foobar', 'id': 42, }, }, }
# Set up our master template
root = xmlutil.TemplateElement('test', selector='test',
},
],
- "absolute": {
- "maxTotalVolumeGigabytes": 512,
- "maxTotalVolumes": 5,
- },
+ "absolute": {"maxTotalVolumeGigabytes": 512,
+ "maxTotalVolumes": 5, },
},
}
body = jsonutils.loads(response.body)
"injected_file_content_bytes": 5}
def test_build_limits(self):
- expected_limits = {"limits": {
- "rate": [{
- "uri": "*",
- "regex": ".*",
- "limit": [{"value": 10,
- "verb": "POST",
- "remaining": 2,
- "unit": "MINUTE",
- "next-available": "2011-07-21T18:17:06Z"}]},
- {"uri": "*/volumes",
- "regex": "^/volumes",
- "limit": [{"value": 50,
- "verb": "POST",
- "remaining": 10,
- "unit": "DAY",
- "next-available": "2011-07-21T18:17:06Z"}]}],
- "absolute": {"maxServerMeta": 1,
- "maxImageMeta": 1,
- "maxPersonality": 5,
- "maxPersonalitySize": 5}}}
+ tdate = "2011-07-21T18:17:06Z"
+ expected_limits = \
+ {"limits": {"rate": [{"uri": "*",
+ "regex": ".*",
+ "limit": [{"value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "next-available": tdate}]},
+ {"uri": "*/volumes",
+ "regex": "^/volumes",
+ "limit": [{"value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "next-available": tdate}]}],
+ "absolute": {"maxServerMeta": 1,
+ "maxImageMeta": 1,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 5}}}
output = self.view_builder.build(self.rate_limits,
self.absolute_limits)
serializer = limits.LimitsTemplate()
fixture = {
"limits": {
- "rate": [{
- "uri": "*",
- "regex": ".*",
- "limit": [{
- "value": 10,
- "verb": "POST",
- "remaining": 2,
- "unit": "MINUTE",
- "next-available": "2011-12-15T22:42:45Z"}]},
- {"uri": "*/servers",
- "regex": "^/servers",
- "limit": [{
- "value": 50,
- "verb": "POST",
- "remaining": 10,
- "unit": "DAY",
- "next-available": "2011-12-15T22:42:45Z"}]}],
- "absolute": {"maxServerMeta": 1,
- "maxImageMeta": 1,
- "maxPersonality": 5,
- "maxPersonalitySize": 10240}}}
+ "rate": [{
+ "uri": "*",
+ "regex": ".*",
+ "limit": [{
+ "value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "next-available": "2011-12-15T22:42:45Z"}]},
+ {"uri": "*/servers",
+ "regex": "^/servers",
+ "limit": [{
+ "value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "next-available": "2011-12-15T22:42:45Z"}]}],
+ "absolute": {"maxServerMeta": 1,
+ "maxImageMeta": 1,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 10240}}}
output = serializer.serialize(fixture)
root = etree.XML(output)
for j, limit in enumerate(rate_limits):
for key in ['verb', 'value', 'remaining', 'unit',
'next-available']:
- self.assertEqual(limit.get(key),
- str(fixture['limits']['rate'][i]['limit'][j][key]))
+ self.assertEqual(
+ limit.get(key),
+ str(fixture['limits']['rate'][i]['limit'][j][key]))
def test_index_no_limits(self):
serializer = limits.LimitsTemplate()
def _get_default_snapshot_param():
- return {
- 'id': UUID,
- 'volume_id': 12,
- 'status': 'available',
- 'volume_size': 100,
- 'created_at': None,
- 'display_name': 'Default name',
- 'display_description': 'Default description',
- }
+ return {'id': UUID,
+ 'volume_id': 12,
+ 'status': 'available',
+ 'volume_size': 100,
+ 'created_at': None,
+ 'display_name': 'Default name',
+ 'display_description': 'Default description', }
def stub_snapshot_create(self, context, volume_id, name, description):
self.stubs.Set(db, 'snapshot_get_all_by_project',
fakes.stub_snapshot_get_all_by_project)
self.stubs.Set(db, 'snapshot_get_all',
- fakes.stub_snapshot_get_all)
+ fakes.stub_snapshot_get_all)
def test_snapshot_create(self):
self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create)
self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
snapshot = {"volume_id": '12',
- "force": False,
- "display_name": "Snapshot Test Name",
- "display_description": "Snapshot Test Desc"}
+ "force": False,
+ "display_name": "Snapshot Test Name",
+ "display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v1/snapshots')
resp_dict = self.controller.create(req, body)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['display_name'],
- snapshot['display_name'])
+ snapshot['display_name'])
self.assertEqual(resp_dict['snapshot']['display_description'],
- snapshot['display_description'])
+ snapshot['display_description'])
def test_snapshot_create_force(self):
- self.stubs.Set(volume.api.API, "create_snapshot_force",
- stub_snapshot_create)
+ self.stubs.Set(volume.api.API,
+ "create_snapshot_force",
+ stub_snapshot_create)
self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
snapshot = {"volume_id": '12',
- "force": True,
- "display_name": "Snapshot Test Name",
- "display_description": "Snapshot Test Desc"}
+ "force": True,
+ "display_name": "Snapshot Test Name",
+ "display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v1/snapshots')
resp_dict = self.controller.create(req, body)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['display_name'],
- snapshot['display_name'])
+ snapshot['display_name'])
self.assertEqual(resp_dict['snapshot']['display_description'],
- snapshot['display_description'])
+ snapshot['display_description'])
snapshot = {"volume_id": "12",
- "force": "**&&^^%%$$##@@",
- "display_name": "Snapshot Test Name",
- "display_description": "Snapshot Test Desc"}
+ "force": "**&&^^%%$$##@@",
+ "display_name": "Snapshot Test Name",
+ "display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v1/snapshots')
self.assertRaises(exception.InvalidParameterValue,
self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
self.stubs.Set(volume.api.API, "update_snapshot",
fakes.stub_snapshot_update)
- updates = {
- "display_name": "Updated Test Name",
- }
+ updates = {"display_name": "Updated Test Name", }
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
res_dict = self.controller.update(req, UUID, body)
snapshot_id)
def test_snapshot_detail(self):
- self.stubs.Set(volume.api.API, "get_all_snapshots",
- stub_snapshot_get_all)
+ self.stubs.Set(volume.api.API,
+ "get_all_snapshots",
+ stub_snapshot_get_all)
req = fakes.HTTPRequest.blank('/v1/snapshots/detail')
resp_dict = self.controller.detail(req)
created_at=datetime.datetime.now(),
display_name='snap_name',
display_description='snap_desc',
- volume_id='vol_id',
- )
+ volume_id='vol_id', )
text = serializer.serialize(dict(snapshot=raw_snapshot))
print text
def test_snapshot_index_detail_serializer(self):
serializer = snapshots.SnapshotsTemplate()
- raw_snapshots = [dict(
- id='snap1_id',
- status='snap1_status',
- size=1024,
- created_at=datetime.datetime.now(),
- display_name='snap1_name',
- display_description='snap1_desc',
- volume_id='vol1_id',
- ),
- dict(
- id='snap2_id',
- status='snap2_status',
- size=1024,
- created_at=datetime.datetime.now(),
- display_name='snap2_name',
- display_description='snap2_desc',
- volume_id='vol2_id',
- )]
+ raw_snapshots = [dict(id='snap1_id',
+ status='snap1_status',
+ size=1024,
+ created_at=datetime.datetime.now(),
+ display_name='snap1_name',
+ display_description='snap1_desc',
+ volume_id='vol1_id', ),
+ dict(id='snap2_id',
+ status='snap2_status',
+ size=1024,
+ created_at=datetime.datetime.now(),
+ display_name='snap2_name',
+ display_description='snap2_desc',
+ volume_id='vol2_id', )]
text = serializer.serialize(dict(snapshots=raw_snapshots))
print text
if snapshot_id != TEST_SNAPSHOT_UUID:
raise exception.NotFound
- return {
- 'id': snapshot_id,
+ return {'id': snapshot_id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
- 'display_description': 'Default description',
- }
+ 'display_description': 'Default description', }
class VolumeApiTest(test.TestCase):
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
- 1, 1, 1),
+ 1, 1, 1),
'size': 100}}
self.assertEqual(res_dict, expected)
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
- "volume_type": db_vol_type['name'],
- }
+ "volume_type": db_vol_type['name'], }
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
def test_volume_create_with_image_id(self):
self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
+ test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77"
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "nova",
- "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'}
+ "imageRef": test_id}
expected = {'volume': {'status': 'fakestatus',
- 'display_description': 'Volume Test Desc',
- 'availability_zone': 'nova',
- 'display_name': 'Volume Test Name',
- 'attachments': [{'device': '/',
- 'server_id': 'fakeuuid',
- 'id': '1',
- 'volume_id': '1'}],
- 'bootable': 'false',
- 'volume_type': 'vol_type_name',
- 'image_id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
- 'snapshot_id': None,
- 'metadata': {},
- 'id': '1',
- 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'size': '1'}
- }
+ 'display_description': 'Volume Test Desc',
+ 'availability_zone': 'nova',
+ 'display_name': 'Volume Test Name',
+ 'attachments': [{'device': '/',
+ 'server_id': 'fakeuuid',
+ 'id': '1',
+ 'volume_id': '1'}],
+ 'bootable': 'false',
+ 'volume_type': 'vol_type_name',
+ 'image_id': test_id,
+ 'snapshot_id': None,
+ 'metadata': {},
+ 'id': '1',
+ 'created_at': datetime.datetime(1, 1, 1,
+ 1, 1, 1),
+ 'size': '1'}}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
self.stubs.Set(volume_api.API, "get_snapshot", stub_snapshot_get)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
- "display_name": "Volume Test Name",
- "display_description": "Volume Test Desc",
- "availability_zone": "cinder",
- "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
- "snapshot_id": TEST_SNAPSHOT_UUID}
+ "display_name": "Volume Test Name",
+ "display_description": "Volume Test Desc",
+ "availability_zone": "cinder",
+ "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
+ "snapshot_id": TEST_SNAPSHOT_UUID}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
- "display_name": "Volume Test Name",
- "display_description": "Volume Test Desc",
- "availability_zone": "cinder",
- "imageRef": 1234}
+ "display_name": "Volume Test Name",
+ "display_description": "Volume Test Desc",
+ "availability_zone": "cinder",
+ "imageRef": 1234}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
- "display_name": "Volume Test Name",
- "display_description": "Volume Test Desc",
- "availability_zone": "cinder",
- "imageRef": '12345'}
+ "display_name": "Volume Test Name",
+ "display_description": "Volume Test Desc",
+ "availability_zone": "cinder",
+ "imageRef": '12345'}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
- 1, 1, 1),
+ 1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
- 1, 1, 1),
+ 1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
- 1, 1, 1),
+ 1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
- 1, 1, 1),
+ 1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
- 1, 1, 1),
+ 1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
size=1024,
availability_zone='vol_availability',
created_at=datetime.datetime.now(),
- attachments=[dict(
- id='vol_id',
- volume_id='vol_id',
- server_id='instance_uuid',
- device='/foo')],
+ attachments=[dict(id='vol_id',
+ volume_id='vol_id',
+ server_id='instance_uuid',
+ device='/foo')],
display_name='vol_name',
display_description='vol_desc',
volume_type='vol_type',
snapshot_id='snap_id',
- metadata=dict(
- foo='bar',
- baz='quux',
- ),
- )
+ metadata=dict(foo='bar',
+ baz='quux', ), )
text = serializer.serialize(dict(volume=raw_volume))
print text
def test_volume_index_detail_serializer(self):
serializer = volumes.VolumesTemplate()
- raw_volumes = [dict(
- id='vol1_id',
- status='vol1_status',
- size=1024,
- availability_zone='vol1_availability',
- created_at=datetime.datetime.now(),
- attachments=[dict(
- id='vol1_id',
- volume_id='vol1_id',
- server_id='instance_uuid',
- device='/foo1')],
- display_name='vol1_name',
- display_description='vol1_desc',
- volume_type='vol1_type',
- snapshot_id='snap1_id',
- metadata=dict(
- foo='vol1_foo',
- bar='vol1_bar',
- ),
- ),
- dict(
- id='vol2_id',
- status='vol2_status',
- size=1024,
- availability_zone='vol2_availability',
- created_at=datetime.datetime.now(),
- attachments=[dict(
- id='vol2_id',
- volume_id='vol2_id',
- server_id='instance_uuid',
- device='/foo2')],
- display_name='vol2_name',
- display_description='vol2_desc',
- volume_type='vol2_type',
- snapshot_id='snap2_id',
- metadata=dict(
- foo='vol2_foo',
- bar='vol2_bar',
- ),
- )]
+ raw_volumes = [dict(id='vol1_id',
+ status='vol1_status',
+ size=1024,
+ availability_zone='vol1_availability',
+ created_at=datetime.datetime.now(),
+ attachments=[dict(id='vol1_id',
+ volume_id='vol1_id',
+ server_id='instance_uuid',
+ device='/foo1')],
+ display_name='vol1_name',
+ display_description='vol1_desc',
+ volume_type='vol1_type',
+ snapshot_id='snap1_id',
+ metadata=dict(foo='vol1_foo',
+ bar='vol1_bar', ), ),
+ dict(id='vol2_id',
+ status='vol2_status',
+ size=1024,
+ availability_zone='vol2_availability',
+ created_at=datetime.datetime.now(),
+ attachments=[dict(id='vol2_id',
+ volume_id='vol2_id',
+ server_id='instance_uuid',
+ device='/foo2')],
+ display_name='vol2_name',
+ display_description='vol2_desc',
+ volume_type='vol2_type',
+ snapshot_id='snap2_id',
+ metadata=dict(foo='vol2_foo',
+ bar='vol2_bar', ), )]
text = serializer.serialize(dict(volumes=raw_volumes))
print text
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"></volume>"""
request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "size": "1",
- },
- }
+ expected = {"volume": {"size": "1", }, }
self.assertEquals(request['body'], expected)
def test_display_name(self):
},
],
- "absolute": {
- "maxTotalVolumeGigabytes": 512,
- "maxTotalVolumes": 5,
- },
+ "absolute": {"maxTotalVolumeGigabytes": 512,
+ "maxTotalVolumes": 5, },
},
}
body = jsonutils.loads(response.body)
def test_invalid_methods(self):
"""Only POSTs should work."""
- requests = []
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
request = webob.Request.blank("/", method=method)
response = request.get_response(self.app)
"injected_file_content_bytes": 5}
def test_build_limits(self):
+ tdate = "2011-07-21T18:17:06Z"
expected_limits = {
- "limits": {
- "rate": [
- {
- "uri": "*",
- "regex": ".*",
- "limit": [
- {
- "value": 10,
- "verb": "POST",
- "remaining": 2,
- "unit": "MINUTE",
- "next-available": "2011-07-21T18:17:06Z"
- }
- ]
- },
- {
- "uri": "*/volumes",
- "regex": "^/volumes",
- "limit": [
- {
- "value": 50,
- "verb": "POST",
- "remaining": 10,
- "unit": "DAY",
- "next-available": "2011-07-21T18:17:06Z"
- }
- ]
- }
- ],
- "absolute": {
- "maxServerMeta": 1,
- "maxImageMeta": 1,
- "maxPersonality": 5,
- "maxPersonalitySize": 5
- }
- }
- }
+ "limits": {"rate": [{"uri": "*",
+ "regex": ".*",
+ "limit": [{"value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "next-available": tdate}]},
+ {"uri": "*/volumes",
+ "regex": "^/volumes",
+ "limit": [{"value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "next-available": tdate}]}],
+ "absolute": {"maxServerMeta": 1,
+ "maxImageMeta": 1,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 5}}}
output = self.view_builder.build(self.rate_limits,
self.absolute_limits)
self.assertTrue(has_dec)
def test_index(self):
+ tdate = "2011-12-15T22:42:45Z"
serializer = limits.LimitsTemplate()
- fixture = {
- "limits": {
- "rate": [{
- "uri": "*",
- "regex": ".*",
- "limit": [{
- "value": 10,
- "verb": "POST",
- "remaining": 2,
- "unit": "MINUTE",
- "next-available": "2011-12-15T22:42:45Z"}]},
- {"uri": "*/servers",
- "regex": "^/servers",
- "limit": [{
- "value": 50,
- "verb": "POST",
- "remaining": 10,
- "unit": "DAY",
- "next-available": "2011-12-15T22:42:45Z"}]}],
- "absolute": {"maxServerMeta": 1,
- "maxImageMeta": 1,
- "maxPersonality": 5,
- "maxPersonalitySize": 10240}}}
+ fixture = {"limits": {"rate": [{"uri": "*",
+ "regex": ".*",
+ "limit": [{"value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "next-available": tdate}]},
+ {"uri": "*/servers",
+ "regex": "^/servers",
+ "limit": [{"value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "next-available": tdate}]}],
+ "absolute": {"maxServerMeta": 1,
+ "maxImageMeta": 1,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 10240}}}
output = serializer.serialize(fixture)
root = etree.XML(output)
for j, limit in enumerate(rate_limits):
for key in ['verb', 'value', 'remaining', 'unit',
'next-available']:
- self.assertEqual(limit.get(key),
- str(fixture['limits']['rate'][i]['limit'][j][key]))
+ self.assertEqual(
+ limit.get(key),
+ str(fixture['limits']['rate'][i]['limit'][j][key]))
def test_index_no_limits(self):
serializer = limits.LimitsTemplate()
display_description='vol1_desc',
volume_type='vol1_type',
snapshot_id='snap1_id',
- metadata=dict(
- foo='vol1_foo',
- bar='vol1_bar',
- ),
- ),
+ metadata=dict(foo='vol1_foo',
+ bar='vol1_bar', ), ),
dict(
id='vol2_id',
status='vol2_status',
size=1024,
availability_zone='vol2_availability',
created_at=datetime.datetime.now(),
- attachments=[
- dict(
- id='vol2_id',
- volume_id='vol2_id',
- server_id='instance_uuid',
- device='/foo2')],
+ attachments=[dict(id='vol2_id',
+ volume_id='vol2_id',
+ server_id='instance_uuid',
+ device='/foo2')],
display_name='vol2_name',
display_description='vol2_desc',
volume_type='vol2_type',
snapshot_id='snap2_id',
- metadata=dict(
- foo='vol2_foo',
- bar='vol2_bar',
- ),
- )
- ]
+ metadata=dict(foo='vol2_foo',
+ bar='vol2_bar', ), )]
text = serializer.serialize(dict(volumes=raw_volumes))
print text
# License for the specific language governing permissions and limitations
# under the License.
-"""Stubouts, mocks and fixtures for the test suite"""
+"""Stubouts, mocks and fixtures for the test suite."""
from cinder import db
# License for the specific language governing permissions and limitations
# under the License.
-"""Implementation of a fake image service"""
+"""Implementation of a fake image service."""
import copy
import datetime
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
- 'name': 'fakeimage123456',
- 'created_at': timestamp,
- 'updated_at': timestamp,
- 'deleted_at': None,
- 'deleted': False,
- 'status': 'active',
- 'is_public': False,
- 'container_format': 'raw',
- 'disk_format': 'raw',
- 'properties': {'kernel_id': 'nokernel',
- 'ramdisk_id': 'nokernel',
- 'architecture': 'x86_64'}}
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'raw',
+ 'disk_format': 'raw',
+ 'properties': {'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel',
+ 'architecture': 'x86_64'}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
- 'name': 'fakeimage123456',
- 'created_at': timestamp,
- 'updated_at': timestamp,
- 'deleted_at': None,
- 'deleted': False,
- 'status': 'active',
- 'is_public': True,
- 'container_format': 'ami',
- 'disk_format': 'ami',
- 'properties': {'kernel_id': 'nokernel',
- 'ramdisk_id': 'nokernel'}}
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': True,
+ 'container_format': 'ami',
+ 'disk_format': 'ami',
+ 'properties': {'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel'}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- 'name': 'fakeimage123456',
- 'created_at': timestamp,
- 'updated_at': timestamp,
- 'deleted_at': None,
- 'deleted': False,
- 'status': 'active',
- 'is_public': True,
- 'container_format': None,
- 'disk_format': None,
- 'properties': {'kernel_id': 'nokernel',
- 'ramdisk_id': 'nokernel'}}
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': True,
+ 'container_format': None,
+ 'disk_format': None,
+ 'properties': {'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel'}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fakeimage123456',
- 'created_at': timestamp,
- 'updated_at': timestamp,
- 'deleted_at': None,
- 'deleted': False,
- 'status': 'active',
- 'is_public': True,
- 'container_format': 'ami',
- 'disk_format': 'ami',
- 'properties': {'kernel_id': 'nokernel',
- 'ramdisk_id': 'nokernel'}}
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': True,
+ 'container_format': 'ami',
+ 'disk_format': 'ami',
+ 'properties': {'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel'}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
- 'name': 'fakeimage123456',
- 'created_at': timestamp,
- 'updated_at': timestamp,
- 'deleted_at': None,
- 'deleted': False,
- 'status': 'active',
- 'is_public': True,
- 'container_format': 'ami',
- 'disk_format': 'ami',
- 'properties': {'kernel_id':
- '155d900f-4e14-4e4c-a73d-069cbf4541e6',
- 'ramdisk_id': None}}
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': True,
+ 'container_format': 'ami',
+ 'disk_format': 'ami',
+ 'properties': {
+ 'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'ramdisk_id': None}}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
- 'name': 'fakeimage6',
- 'created_at': timestamp,
- 'updated_at': timestamp,
- 'deleted_at': None,
- 'deleted': False,
- 'status': 'active',
- 'is_public': False,
- 'container_format': 'ova',
- 'disk_format': 'vhd',
- 'properties': {'kernel_id': 'nokernel',
- 'ramdisk_id': 'nokernel',
- 'architecture': 'x86_64',
- 'auto_disk_config': 'False'}}
+ 'name': 'fakeimage6',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'ova',
+ 'disk_format': 'vhd',
+ 'properties': {'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel',
+ 'architecture': 'x86_64',
+ 'auto_disk_config': 'False'}}
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
- 'name': 'fakeimage7',
- 'created_at': timestamp,
- 'updated_at': timestamp,
- 'deleted_at': None,
- 'deleted': False,
- 'status': 'active',
- 'is_public': False,
- 'container_format': 'ova',
- 'disk_format': 'vhd',
- 'properties': {'kernel_id': 'nokernel',
- 'ramdisk_id': 'nokernel',
- 'architecture': 'x86_64',
- 'auto_disk_config': 'True'}}
+ 'name': 'fakeimage7',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'ova',
+ 'disk_format': 'vhd',
+ 'properties': {'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel',
+ 'architecture': 'x86_64',
+ 'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
class NullWriter(object):
- """Used to test ImageService.get which takes a writer object"""
+ """Used to test ImageService.get which takes a writer object."""
def write(self, *arg, **kwargs):
pass
def _fake_create_glance_client(context, host, port, version):
return client
- self.stubs.Set(glance, '_create_glance_client',
- _fake_create_glance_client)
+ self.stubs.Set(glance,
+ '_create_glance_client',
+ _fake_create_glance_client)
- client_wrapper = glance.GlanceClientWrapper(
- 'fake', 'fake_host', 9292)
+ client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292)
return glance.GlanceImageService(client=client_wrapper)
@staticmethod
deleted_at=self.NOW_GLANCE_FORMAT)
def test_create_with_instance_id(self):
- """Ensure instance_id is persisted as an image-property"""
+ """Ensure instance_id is persisted as an image-property."""
fixture = {'name': 'test image',
'is_public': False,
'properties': {'instance_id': '42', 'user_id': 'fake'}}
# When retries are disabled, we should get an exception
self.flags(glance_num_retries=0)
self.assertRaises(exception.GlanceConnectionFailed,
- service.download, self.context, image_id, writer)
+ service.download,
+ self.context,
+ image_id,
+ writer)
# Now lets enable retries. No exception should happen now.
tries = [0]
def test_glance_client_image_id(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
- (service, same_id) = glance.get_remote_image_service(
- self.context, image_id)
+ (service, same_id) = glance.get_remote_image_service(self.context,
+ image_id)
self.assertEquals(same_id, image_id)
def test_glance_client_image_ref(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
image_url = 'http://something-less-likely/%s' % image_id
- (service, same_id) = glance.get_remote_image_service(
- self.context, image_url)
+ (service, same_id) = glance.get_remote_image_service(self.context,
+ image_url)
self.assertEquals(same_id, image_id)
self.assertEquals(service._client.host,
- 'something-less-likely')
+ 'something-less-likely')
def _create_failing_glance_client(info):
if not message:
message = _("Authorization error")
super(OpenStackApiAuthorizationException, self).__init__(message,
- response)
+ response)
class OpenStackApiNotFoundException(OpenStackApiException):
raise OpenStackApiAuthorizationException(response=response)
else:
raise OpenStackApiException(
- message=_("Unexpected status code"),
- response=response)
+ message=_("Unexpected status code"),
+ response=response)
return response
def example_decorator(name, function):
- """ decorator for notify which is used from utils.monkey_patch()
+ """Decorator for notify which is used from utils.monkey_patch().
:param name: name of the function
:param function: - object of the function
def test_update_service_capabilities(self):
self._test_scheduler_api('update_service_capabilities',
- rpc_method='fanout_cast', service_name='fake_name',
- host='fake_host', capabilities='fake_capabilities')
+ rpc_method='fanout_cast',
+ service_name='fake_name',
+ host='fake_host',
+ capabilities='fake_capabilities')
def test_create_volume(self):
self._test_scheduler_api('create_volume',
- rpc_method='cast', topic='topic', volume_id='volume_id',
- snapshot_id='snapshot_id', image_id='image_id',
- request_spec='fake_request_spec',
- filter_properties='filter_properties',
- version='1.2')
+ rpc_method='cast',
+ topic='topic',
+ volume_id='volume_id',
+ snapshot_id='snapshot_id',
+ image_id='image_id',
+ request_spec='fake_request_spec',
+ filter_properties='filter_properties',
+ version='1.2')
class SchedulerManagerTestCase(test.TestCase):
- """Test case for scheduler manager"""
+ """Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
host = 'fake_host'
self.mox.StubOutWithMock(self.manager.driver,
- 'update_service_capabilities')
+ 'update_service_capabilities')
# Test no capabilities passes empty dictionary
self.manager.driver.update_service_capabilities(service_name,
- host, {})
+ host, {})
self.mox.ReplayAll()
- result = self.manager.update_service_capabilities(self.context,
- service_name=service_name, host=host)
+ result = self.manager.update_service_capabilities(
+ self.context,
+ service_name=service_name,
+ host=host)
self.mox.VerifyAll()
self.mox.ResetAll()
# Test capabilities passes correctly
capabilities = {'fake_capability': 'fake_value'}
- self.manager.driver.update_service_capabilities(
- service_name, host, capabilities)
+ self.manager.driver.update_service_capabilities(service_name,
+ host,
+ capabilities)
self.mox.ReplayAll()
- result = self.manager.update_service_capabilities(self.context,
- service_name=service_name, host=host,
- capabilities=capabilities)
+ result = self.manager.update_service_capabilities(
+ self.context,
+ service_name=service_name, host=host,
+ capabilities=capabilities)
def test_create_volume_exception_puts_volume_in_error_state(self):
- """ Test that a NoValideHost exception for create_volume puts
- the volume in 'error' state and eats the exception.
+ """Test that a NoValidHost exception for create_volume.
+
+ Puts the volume in 'error' state and eats the exception.
"""
fake_volume_id = 1
self._mox_schedule_method_helper('schedule_create_volume')
volume_id = fake_volume_id
request_spec = {'volume_id': fake_volume_id}
- self.manager.driver.schedule_create_volume(self.context,
+ self.manager.driver.schedule_create_volume(
+ self.context,
request_spec, {}).AndRaise(exception.NoValidHost(reason=""))
db.volume_update(self.context, fake_volume_id, {'status': 'error'})
setattr(self.manager.driver, method_name, stub_method)
self.mox.StubOutWithMock(self.manager.driver,
- method_name)
+ method_name)
class SchedulerTestCase(test.TestCase):
- """Test case for base scheduler driver class"""
+ """Test case for base scheduler driver class."""
# So we can subclass this test and re-use tests if we need.
driver_cls = driver.Scheduler
host = 'fake_host'
self.mox.StubOutWithMock(self.driver.host_manager,
- 'update_service_capabilities')
+ 'update_service_capabilities')
capabilities = {'fake_capability': 'fake_value'}
- self.driver.host_manager.update_service_capabilities(
- service_name, host, capabilities)
+ self.driver.host_manager.update_service_capabilities(service_name,
+ host,
+ capabilities)
self.mox.ReplayAll()
result = self.driver.update_service_capabilities(service_name,
- host, capabilities)
+ host,
+ capabilities)
def test_hosts_up(self):
service1 = {'host': 'host1'}
self.mox.StubOutWithMock(utils, 'service_is_up')
db.service_get_all_by_topic(self.context,
- self.topic).AndReturn(services)
+ self.topic).AndReturn(services)
utils.service_is_up(service1).AndReturn(False)
utils.service_is_up(service2).AndReturn(True)
fake_kwargs = {'cat': 'meow'}
self.assertRaises(NotImplementedError, self.driver.schedule,
- self.context, self.topic, 'schedule_something',
- *fake_args, **fake_kwargs)
+ self.context, self.topic, 'schedule_something',
+ *fake_args, **fake_kwargs)
class SchedulerDriverModuleTestCase(test.TestCase):
- """Test case for scheduler driver module methods"""
+ """Test case for scheduler driver module methods."""
def setUp(self):
super(SchedulerDriverModuleTestCase, self).setUp()
timeutils.utcnow().AndReturn('fake-now')
db.volume_update(self.context, 31337,
- {'host': 'fake_host', 'scheduled_at': 'fake-now'})
+ {'host': 'fake_host',
+ 'scheduled_at': 'fake-now'})
self.mox.ReplayAll()
driver.volume_update_db(self.context, 31337, 'fake_host')
self.connector = {'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'host': 'fakehost'}
- self.properties = {'target_discoverd': True,
- 'target_portal': '10.0.1.6:3260',
- 'target_iqn':
- 'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
- 'volume_id': 1}
+ self.properties = {
+ 'target_discoverd': True,
+ 'target_portal': '10.0.1.6:3260',
+ 'target_iqn':
+ 'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
+ 'volume_id': 1}
def tearDown(self):
super(HpSanISCSITestCase, self).tearDown()
# License for the specific language governing permissions and limitations
# under the License.
-"""Unit tests for the API endpoint"""
+"""Unit tests for the API endpoint."""
import httplib
import StringIO
class FakeHttplibSocket(object):
- """a fake socket implementation for httplib.HTTPResponse, trivial"""
+ """A fake socket implementation for httplib.HTTPResponse, trivial."""
def __init__(self, response_string):
self.response_string = response_string
self._buffer = StringIO.StringIO(response_string)
def makefile(self, _mode, _other):
- """Returns the socket's internal buffer"""
+ """Returns the socket's internal buffer."""
return self._buffer
class FakeHttplibConnection(object):
- """A fake httplib.HTTPConnection for boto to use
+ """A fake httplib.HTTPConnection for boto.
requests made via this connection actually get translated and routed into
our WSGI app, we then wait for the response and turn it back into
return self.sock.response_string
def close(self):
- """Required for compatibility with boto/tornado"""
+ """Required for compatibility with boto/tornado."""
pass
filters.CommandFilter("/usr/bin/foo_bar_not_exist", "root"),
filters.RegExpFilter("/bin/cat", "root", 'cat', '/[a-z]+'),
filters.CommandFilter("/nonexistent/cat", "root"),
- filters.CommandFilter("/bin/cat", "root") # Keep this one last
- ]
+ filters.CommandFilter("/bin/cat", "root")] # Keep this one last
def test_RegExpFilter_match(self):
usercmd = ["ls", "/root"]
filtermatch = wrapper.match_filter(self.filters, usercmd)
self.assertFalse(filtermatch is None)
self.assertEqual(filtermatch.get_command(usercmd),
- ["/bin/ls", "/root"])
+ ["/bin/ls", "/root"])
def test_RegExpFilter_reject(self):
usercmd = ["ls", "root"]
self.assertTrue(f.match(usercmd) or f2.match(usercmd))
def test_KillFilter_no_raise(self):
- """Makes sure ValueError from bug 926412 is gone"""
+ """Makes sure ValueError from bug 926412 is gone."""
f = filters.KillFilter("root", "")
# Providing anything other than kill should be False
usercmd = ['notkill', 999999]
self.assertFalse(f.match(usercmd))
def test_KillFilter_deleted_exe(self):
- """Makes sure deleted exe's are killed correctly"""
+ """Makes sure deleted exe's are killed correctly."""
# See bug #967931.
def fake_readlink(blah):
return '/bin/commandddddd (deleted)'
self.stubs.Set(context.LOG, 'warn', fake_warn)
- c = context.RequestContext('user', 'project',
- extra_arg1='meow', extra_arg2='wuff')
+ c = context.RequestContext('user',
+ 'project',
+ extra_arg1='meow',
+ extra_arg2='wuff')
self.assertTrue(c)
self.assertIn("'extra_arg1': 'meow'", info['log_msg'])
self.assertIn("'extra_arg2': 'wuff'", info['log_msg'])
def test_long_vs_short_flags(self):
FLAGS.clear()
FLAGS.register_cli_opt(cfg.StrOpt('duplicate_answer_long',
- default='val',
- help='desc'))
+ default='val',
+ help='desc'))
argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
args = flags.parse_args(argv, default_config_files=[])
FLAGS.clear()
FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer',
- default=60,
- help='desc'))
+ default=60,
+ help='desc'))
args = flags.parse_args(argv, default_config_files=[])
self.assertEqual(FLAGS.duplicate_answer, 60)
self.assertEqual(FLAGS.duplicate_answer_long, 'val')
tgtadm = iscsi.get_target_admin()
tgtadm.set_execute(self.fake_execute)
tgtadm.create_iscsi_target(self.target_name, self.tid,
- self.lun, self.path)
+ self.lun, self.path)
tgtadm.show_target(self.tid, iqn=self.target_name)
tgtadm.remove_iscsi_target(self.tid, self.lun, self.vol_id)
self.flags(iscsi_helper='tgtadm')
self.flags(volumes_dir=self.persist_tempdir)
self.script_template = "\n".join([
- 'tgt-admin --update iqn.2011-09.org.foo.bar:blaa',
- 'tgt-admin --delete iqn.2010-10.org.openstack:volume-blaa'])
+ 'tgt-admin --update iqn.2011-09.org.foo.bar:blaa',
+ 'tgt-admin --delete iqn.2010-10.org.openstack:volume-blaa'])
def tearDown(self):
try:
TargetAdminTestCase.setUp(self)
self.flags(iscsi_helper='ietadm')
self.script_template = "\n".join([
- 'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s',
- 'ietadm --op new --tid=%(tid)s --lun=%(lun)s '
- '--params Path=%(path)s,Type=fileio',
- 'ietadm --op show --tid=%(tid)s',
- 'ietadm --op delete --tid=%(tid)s --lun=%(lun)s',
- 'ietadm --op delete --tid=%(tid)s'])
+ 'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s',
+ 'ietadm --op new --tid=%(tid)s --lun=%(lun)s '
+ '--params Path=%(path)s,Type=fileio',
+ 'ietadm --op show --tid=%(tid)s',
+ 'ietadm --op delete --tid=%(tid)s --lun=%(lun)s',
+ 'ietadm --op delete --tid=%(tid)s'])
class TestMigrations(test.TestCase):
- """Test sqlalchemy-migrate migrations"""
+ """Test sqlalchemy-migrate migrations."""
TEST_DATABASES = {}
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
DEFAULT_CONFIG_FILE)
MIGRATE_FILE = cinder.db.sqlalchemy.migrate_repo.__file__
REPOSITORY = repository.Repository(
- os.path.abspath(os.path.dirname(MIGRATE_FILE)))
+ os.path.abspath(os.path.dirname(MIGRATE_FILE)))
def setUp(self):
super(TestMigrations, self).setUp()
# upgrades successfully.
# Place the database under version control
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
+ migration_api.version_control(engine,
+ TestMigrations.REPOSITORY,
+ migration.INIT_VERSION)
self.assertEqual(migration.INIT_VERSION,
- migration_api.db_version(engine,
- TestMigrations.REPOSITORY))
+ migration_api.db_version(engine,
+ TestMigrations.REPOSITORY))
migration_api.upgrade(engine, TestMigrations.REPOSITORY,
migration.INIT_VERSION + 1)
LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)
for version in xrange(migration.INIT_VERSION + 2,
- TestMigrations.REPOSITORY.latest + 1):
+ TestMigrations.REPOSITORY.latest + 1):
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version)
if snake_walk:
TestMigrations.REPOSITORY,
version)
self.assertEqual(version,
- migration_api.db_version(engine,
- TestMigrations.REPOSITORY))
+ migration_api.db_version(engine,
+ TestMigrations.REPOSITORY))
RESPONSE_SUFFIX = """</env:Body></env:Envelope>"""
APIS = ['ApiProxy', 'DatasetListInfoIterStart', 'DatasetListInfoIterNext',
- 'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit',
- 'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout',
- 'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext',
- 'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart',
- 'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd',
- 'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd',
- 'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd',
- 'StorageServiceDatasetProvision']
+ 'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit',
+ 'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout',
+ 'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext',
+ 'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart',
+ 'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd',
+ 'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd',
+ 'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd',
+ 'StorageServiceDatasetProvision']
iter_count = 0
iter_table = {}
class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
- """HTTP handler that fakes enough stuff to allow the driver to run"""
+ """HTTP handler that fakes enough stuff to allow the driver to run."""
def do_GET(s):
"""Respond to a GET request."""
out.write('</portType>')
out.write('<binding name="DfmBinding" type="na:DfmInterface">')
out.write('<soap:binding style="document" ' +
- 'transport="http://schemas.xmlsoap.org/soap/http"/>')
+ 'transport="http://schemas.xmlsoap.org/soap/http"/>')
for api in APIS:
out.write('<operation name="%s">' % api)
out.write('<soap:operation soapAction="urn:%s"/>' % api)
request_xml = s.rfile.read(int(s.headers['Content-Length']))
ntap_ns = 'http://www.netapp.com/management/v1'
nsmap = {'env': 'http://schemas.xmlsoap.org/soap/envelope/',
- 'na': ntap_ns}
+ 'na': ntap_ns}
root = etree.fromstring(request_xml)
body = root.xpath('/env:Envelope/env:Body', namespaces=nsmap)[0]
self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID,
self.VOLUME_TYPE, self.VOLUME_SIZE)
volume = {'name': self.VOLUME_NAME, 'project_id': self.PROJECT_ID,
- 'id': 0, 'provider_auth': None}
+ 'id': 0, 'provider_auth': None}
updates = self.driver._get_export(volume)
self.assertTrue(updates['provider_location'])
volume['provider_location'] = updates['provider_location']
out.write('<binding name="CloudStorageBinding" '
'type="na:CloudStorage">')
out.write('<soap:binding style="document" ' +
- 'transport="http://schemas.xmlsoap.org/soap/http"/>')
+ 'transport="http://schemas.xmlsoap.org/soap/http"/>')
for api in CMODE_APIS:
out.write('<operation name="%s">' % api)
out.write('<soap:operation soapAction=""/>')
request_xml = s.rfile.read(int(s.headers['Content-Length']))
ntap_ns = 'http://cloud.netapp.com/'
nsmap = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
- 'na': ntap_ns}
+ 'na': ntap_ns}
root = etree.fromstring(request_xml)
body = root.xpath('/soapenv:Envelope/soapenv:Body',
class NetAppCmodeISCSIDriverTestCase(test.TestCase):
"""Test case for NetAppISCSIDriver"""
- volume = {
- 'name': 'lun1', 'size': 1, 'volume_name': 'lun1',
- 'os_type': 'linux', 'provider_location': 'lun1',
- 'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
- 'display_name': None, 'display_description': 'lun1',
- 'volume_type_id': None
- }
- snapshot = {
- 'name': 'lun2', 'size': 1, 'volume_name': 'lun1',
- 'volume_size': 1, 'project_id': 'project'
- }
- volume_sec = {
- 'name': 'vol_snapshot', 'size': 1, 'volume_name': 'lun1',
- 'os_type': 'linux', 'provider_location': 'lun1',
- 'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
- 'display_name': None, 'display_description': 'lun1',
- 'volume_type_id': None
- }
+ volume = {'name': 'lun1', 'size': 1, 'volume_name': 'lun1',
+ 'os_type': 'linux', 'provider_location': 'lun1',
+ 'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
+ 'display_name': None, 'display_description': 'lun1',
+ 'volume_type_id': None}
+ snapshot = {'name': 'lun2', 'size': 1, 'volume_name': 'lun1',
+ 'volume_size': 1, 'project_id': 'project'}
+ volume_sec = {'name': 'vol_snapshot', 'size': 1, 'volume_name': 'lun1',
+ 'os_type': 'linux', 'provider_location': 'lun1',
+ 'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
+ 'display_name': None, 'display_description': 'lun1',
+ 'volume_type_id': None}
def setUp(self):
super(NetAppCmodeISCSIDriverTestCase, self).setUp()
self.volume['provider_location'] = updates['provider_location']
connector = {'initiator': 'init1'}
connection_info = self.driver.initialize_connection(self.volume,
- connector)
+ connector)
self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
properties = connection_info['data']
self.driver.terminate_connection(self.volume, connector)
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)"""
+"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)."""
from cinder import context
from cinder import exception
class NetappNfsDriverTestCase(test.TestCase):
- """Test case for NetApp specific NFS clone driver"""
+ """Test case for NetApp specific NFS clone driver."""
def setUp(self):
self._driver = netapp_nfs.NetAppNFSDriver()
def test_check_for_setup_error(self):
mox = self._mox
drv = self._driver
- required_flags = [
- 'netapp_wsdl_url',
- 'netapp_login',
- 'netapp_password',
- 'netapp_server_hostname',
- 'netapp_server_port'
- ]
+ required_flags = ['netapp_wsdl_url',
+ 'netapp_login',
+ 'netapp_password',
+ 'netapp_server_hostname',
+ 'netapp_server_port']
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
mox.VerifyAll()
def test_create_snapshot(self):
- """Test snapshot can be created and deleted"""
+ """Test snapshot can be created and deleted."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
- """Tests volume creation from snapshot"""
+ """Tests volume creation from snapshot."""
drv = self._driver
mox = self._mox
volume = FakeVolume(1)
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
- drv._volume_not_present(IgnoreArg(), IgnoreArg())\
- .AndReturn(not snapshot_exists)
+ drv._volume_not_present(IgnoreArg(),
+ IgnoreArg()).AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
('iscsitarget', 'create_target', ({'target_name': 'iqn:volume1'},),
u'Unable to create iscsi target\n'
u' iSCSI target iqn.1986-03.com.sun:02:cinder-volume1 already'
- u' configured\n'
- u' itadm create-target failed with error 17\n',
- ),
+ u' configured\n'
+ u' itadm create-target failed with error 17\n', ),
('stmf', 'create_targetgroup', ('cinder/volume1',),
u'Unable to create targetgroup: stmfadm: cinder/volume1:'
- u' already exists\n',
- ),
+ u' already exists\n', ),
('stmf', 'add_targetgroup_member', ('cinder/volume1', 'iqn:volume1'),
u'Unable to add member to targetgroup: stmfadm:'
- u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n',
- ),
+ u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n', ),
('scsidisk', 'create_lu', ('cinder/volume1', {}),
u"Unable to create lu with zvol 'cinder/volume1':\n"
- u" sbdadm: filename /dev/zvol/rdsk/cinder/volume1: in use\n",
- ),
+ u" sbdadm: filename /dev/zvol/rdsk/cinder/volume1: in use\n", ),
('scsidisk', 'add_lun_mapping_entry', ('cinder/volume1', {
- 'target_group': 'cinder/volume1', 'lun': '0'}),
+ 'target_group': 'cinder/volume1', 'lun': '0'}),
u"Unable to add view to zvol 'cinder/volume1' (LUNs in use: ):\n"
- u" stmfadm: view entry exists\n",
- ),
+ u" stmfadm: view entry exists\n", ),
]
def _stub_export_method(self, module, method, args, error, fail=False):
self._stub_all_export_methods()
self.mox.ReplayAll()
retval = self.drv.create_export({}, self.TEST_VOLUME_REF)
- self.assertEquals(retval,
+ self.assertEquals(
+ retval,
{'provider_location':
'%s:%s,1 %s%s' % (FLAGS.nexenta_host,
FLAGS.nexenta_iscsi_target_portal_port,
fail=True)
self.mox.ReplayAll()
self.assertRaises(nexenta.NexentaException,
- self.drv.create_export, {}, self.TEST_VOLUME_REF)
+ self.drv.create_export,
+ {},
+ self.TEST_VOLUME_REF)
return _test_create_export_fail
for i in range(len(_CREATE_EXPORT_METHODS)):
def test_remove_export_fail_0(self):
self.nms_mock.scsidisk.delete_lu('cinder/volume1')
- self.nms_mock.stmf.destroy_targetgroup('cinder/volume1').AndRaise(
- nexenta.NexentaException())
+ self.nms_mock.stmf.destroy_targetgroup(
+ 'cinder/volume1').AndRaise(nexenta.NexentaException())
self.nms_mock.iscsitarget.delete_target('iqn:volume1')
self.mox.ReplayAll()
self.drv.remove_export({}, self.TEST_VOLUME_REF)
def test_remove_export_fail_1(self):
self.nms_mock.scsidisk.delete_lu('cinder/volume1')
self.nms_mock.stmf.destroy_targetgroup('cinder/volume1')
- self.nms_mock.iscsitarget.delete_target('iqn:volume1').AndRaise(
- nexenta.NexentaException())
+ self.nms_mock.iscsitarget.delete_target(
+ 'iqn:volume1').AndRaise(nexenta.NexentaException())
self.mox.ReplayAll()
self.drv.remove_export({}, self.TEST_VOLUME_REF)
URL_S = 'https://example.com/'
USER = 'user'
PASSWORD = 'password'
- HEADERS = {'Authorization': 'Basic %s' % (base64.b64encode(
- ':'.join((USER, PASSWORD))),),
- 'Content-Type': 'application/json'}
+ HEADERS = {'Authorization': 'Basic %s' % (
+ base64.b64encode(':'.join((USER, PASSWORD))),),
+ 'Content-Type': 'application/json'}
REQUEST = 'the request'
def setUp(self):
urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock)
def test_call(self):
- urllib2.Request(self.URL,
- '{"object": null, "params": ["arg1", "arg2"], "method": null}',
- self.HEADERS).AndReturn(self.REQUEST)
+ urllib2.Request(
+ self.URL,
+ '{"object": null, "params": ["arg1", "arg2"], "method": null}',
+ self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = ''
self.resp_mock.read().AndReturn(
- '{"error": null, "result": "the result"}')
+ '{"error": null, "result": "the result"}')
self.mox.ReplayAll()
result = self.proxy('arg1', 'arg2')
self.assertEquals("the result", result)
def test_call_deep(self):
- urllib2.Request(self.URL,
- '{"object": "obj1.subobj", "params": ["arg1", "arg2"],'
- ' "method": "meth"}',
- self.HEADERS).AndReturn(self.REQUEST)
+ urllib2.Request(
+ self.URL,
+ '{"object": "obj1.subobj", "params": ["arg1", "arg2"],'
+ ' "method": "meth"}',
+ self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = ''
self.resp_mock.read().AndReturn(
'{"error": null, "result": "the result"}')
self.assertEquals("the result", result)
def test_call_auto(self):
- urllib2.Request(self.URL,
- '{"object": null, "params": ["arg1", "arg2"], "method": null}',
- self.HEADERS).AndReturn(self.REQUEST)
- urllib2.Request(self.URL_S,
- '{"object": null, "params": ["arg1", "arg2"], "method": null}',
- self.HEADERS).AndReturn(self.REQUEST)
+ urllib2.Request(
+ self.URL,
+ '{"object": null, "params": ["arg1", "arg2"], "method": null}',
+ self.HEADERS).AndReturn(self.REQUEST)
+ urllib2.Request(
+ self.URL_S,
+ '{"object": null, "params": ["arg1", "arg2"], "method": null}',
+ self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = 'EOF in headers'
self.resp_mock.read().AndReturn(
'{"error": null, "result": "the result"}')
self.assertEquals("the result", result)
def test_call_error(self):
- urllib2.Request(self.URL,
- '{"object": null, "params": ["arg1", "arg2"], "method": null}',
- self.HEADERS).AndReturn(self.REQUEST)
+ urllib2.Request(
+ self.URL,
+ '{"object": null, "params": ["arg1", "arg2"], "method": null}',
+ self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = ''
self.resp_mock.read().AndReturn(
'{"error": {"message": "the error"}, "result": "the result"}')
self.proxy, 'arg1', 'arg2')
def test_call_fail(self):
- urllib2.Request(self.URL,
- '{"object": null, "params": ["arg1", "arg2"], "method": null}',
- self.HEADERS).AndReturn(self.REQUEST)
+ urllib2.Request(
+ self.URL,
+ '{"object": null, "params": ["arg1", "arg2"], "method": null}',
+ self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = 'EOF in headers'
self.proxy.auto = False
self.mox.ReplayAll()
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Unit tests for the NFS driver module"""
+"""Unit tests for the NFS driver module."""
import __builtin__
import errno
class NfsDriverTestCase(test.TestCase):
- """Test case for NFS driver"""
+ """Test case for NFS driver."""
TEST_NFS_EXPORT1 = 'nfs-host1:/export'
TEST_NFS_EXPORT2 = 'nfs-host2:/export'
self.stubs.Set(obj, attr_name, stub)
def test_path_exists_should_return_true(self):
- """_path_exists should return True if stat returns 0"""
+ """_path_exists should return True if stat returns 0."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_path_exists_should_return_false(self):
- """_path_exists should return True if stat doesn't return 0"""
+ """_path_exists should return False if stat doesn't return 0."""
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, '_execute')
- drv._execute('stat', self.TEST_FILE_NAME, run_as_root=True).\
+ drv._execute(
+ 'stat',
+ self.TEST_FILE_NAME, run_as_root=True).\
AndRaise(ProcessExecutionError(
- stderr="stat: cannot stat `test.txt': No such file or directory"))
+ stderr="stat: cannot stat `test.txt': No such file "
+ "or directory"))
mox.ReplayAll()
mox.VerifyAll()
def test_local_path(self):
- """local_path common use case"""
+ """local_path common use case."""
nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
drv = self._driver
drv.local_path(volume))
def test_mount_nfs_should_mount_correctly(self):
- """_mount_nfs common case usage"""
+ """_mount_nfs common case usage."""
mox = self._mox
drv = self._driver
drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
self.TEST_MNT_POINT, run_as_root=True).\
AndRaise(ProcessExecutionError(
- stderr='is busy or already mounted'))
+ stderr='is busy or already mounted'))
mox.ReplayAll()
drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
mox.StubOutWithMock(drv, '_execute')
- drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
- self.TEST_MNT_POINT, run_as_root=True).\
- AndRaise(ProcessExecutionError(stderr='is busy or already mounted'))
+ drv._execute(
+ 'mount',
+ '-t',
+ 'nfs',
+ self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, run_as_root=True).\
+ AndRaise(ProcessExecutionError(stderr='is busy or '
+ 'already mounted'))
mox.ReplayAll()
mox.VerifyAll()
def test_mount_nfs_should_create_mountpoint_if_not_yet(self):
- """_mount_nfs should create mountpoint if it doesn't exist"""
+ """_mount_nfs should create mountpoint if it doesn't exist."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_mount_nfs_should_not_create_mountpoint_if_already(self):
- """_mount_nfs should not create mountpoint if it already exists"""
+ """_mount_nfs should not create mountpoint if it already exists."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_get_hash_str(self):
- """_get_hash_str should calculation correct value"""
+ """_get_hash_str should calculate correct value."""
drv = self._driver
self.assertEqual('2f4f60214cf43c595666dd815f0360a4',
drv._get_hash_str(self.TEST_NFS_EXPORT1))
def test_get_mount_point_for_share(self):
- """_get_mount_point_for_share should calculate correct value"""
+ """_get_mount_point_for_share should calculate correct value."""
drv = self._driver
nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
def test_get_available_capacity_with_df(self):
- """_get_available_capacity should calculate correct value"""
+ """_get_available_capacity should calculate correct value."""
mox = self._mox
drv = self._driver
delattr(nfs.FLAGS, 'nfs_disk_util')
def test_get_available_capacity_with_du(self):
- """_get_available_capacity should calculate correct value"""
+ """_get_available_capacity should calculate correct value."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_ensure_share_mounted(self):
- """_ensure_share_mounted simple use case"""
+ """_ensure_share_mounted simple use case."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_ensure_shares_mounted_should_save_mounting_successfully(self):
- """_ensure_shares_mounted should save share if mounted with success"""
+ """_ensure_shares_mounted should save share if mounted with success."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_ensure_shares_mounted_should_not_save_mounting_with_error(self):
- """_ensure_shares_mounted should not save share if failed to mount"""
+ """_ensure_shares_mounted should not save share if failed to mount."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_setup_should_throw_error_if_shares_config_not_configured(self):
- """do_setup should throw error if shares config is not configured """
+ """do_setup should throw error if shares config is not configured."""
drv = self._driver
nfs.FLAGS.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
drv.do_setup, IsA(context.RequestContext))
def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
- """do_setup should throw error if nfs client is not installed """
+ """do_setup should throw error if nfs client is not installed."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self):
- """_find_share should throw error if there is no mounted shares"""
+ """_find_share should throw error if there is no mounted shares."""
drv = self._driver
drv._mounted_shares = []
self.TEST_SIZE_IN_GB)
def test_find_share(self):
- """_find_share simple use case"""
+ """_find_share simple use case."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_find_share_should_throw_error_if_there_is_no_enough_place(self):
- """_find_share should throw error if there is no share to host vol"""
+ """_find_share should throw error if there is no share to host vol."""
mox = self._mox
drv = self._driver
delattr(nfs.FLAGS, 'nfs_sparsed_volumes')
def test_create_volume_should_ensure_nfs_mounted(self):
- """create_volume should ensure shares provided in config are mounted"""
+ """create_volume ensures shares provided in config are mounted."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_create_volume_should_return_provider_location(self):
- """create_volume should return provider_location with found share """
+ """create_volume should return provider_location with found share."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_delete_volume(self):
- """delete_volume simple test case"""
+ """delete_volume simple test case."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_delete_should_ensure_share_mounted(self):
- """delete_volume should ensure that corresponding share is mounted"""
+ """delete_volume should ensure that corresponding share is mounted."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_delete_should_not_delete_if_provider_location_not_provided(self):
- """delete_volume shouldn't try to delete if provider_location missed"""
+ """delete_volume shouldn't delete if provider_location is missing."""
mox = self._mox
drv = self._driver
mox.VerifyAll()
def test_delete_should_not_delete_if_there_is_no_file(self):
- """delete_volume should not try to delete if file missed"""
+ """delete_volume should not try to delete if the file is missing."""
mox = self._mox
drv = self._driver
# License for the specific language governing permissions and limitations
# under the License.
-"""Test of Policy Engine For Cinder"""
+"""Test of Policy Engine For Cinder."""
import os.path
import StringIO
# NOTE(dprince) we mix case in the Admin role here to ensure
# case is ignored
admin_context = context.RequestContext('admin',
- 'fake',
- roles=['AdMiN'])
+ 'fake',
+ roles=['AdMiN'])
policy.enforce(admin_context, lowercase_action, self.target)
policy.enforce(admin_context, uppercase_action, self.target)
def test_policy_called(self):
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
- self.context, "example:exist", {})
+ self.context, "example:exist", {})
def test_not_found_policy_calls_default(self):
policy.enforce(self.context, "example:noexist", {})
def test_default_not_found(self):
self._set_brain("default_noexist")
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
- self.context, "example:noexist", {})
+ self.context, "example:noexist", {})
class ContextIsAdminPolicyTestCase(test.TestCase):
cinder.tests.image.fake.FakeImageService_reset()
def _create_volume(self, size=10):
- """Create a test volume"""
+ """Create a test volume."""
vol = {}
vol['user_id'] = self.user_id
vol['project_id'] = self.project_id
def test_quota_with_project_no_class(self):
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes')
- driver = FakeDriver(by_project=dict(
- test_project=dict(test_resource=15),
- ))
+ driver = FakeDriver(
+ by_project=dict(
+ test_project=dict(test_resource=15), ))
context = FakeContext('test_project', None)
quota_value = resource.quota(driver, context)
def test_quota_no_project_with_class(self):
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes')
- driver = FakeDriver(by_class=dict(
- test_class=dict(test_resource=20),
- ))
+ driver = FakeDriver(
+ by_class=dict(
+ test_class=dict(test_resource=20), ))
context = FakeContext(None, 'test_class')
quota_value = resource.quota(driver, context)
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes')
driver = FakeDriver(by_project=dict(
- test_project=dict(test_resource=15),
- ),
- by_class=dict(
- test_class=dict(test_resource=20),
- ))
+ test_project=dict(test_resource=15), ),
+ by_class=dict(test_class=dict(test_resource=20), ))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context)
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes')
driver = FakeDriver(by_project=dict(
- test_project=dict(test_resource=15),
- override_project=dict(test_resource=20),
- ))
+ test_project=dict(test_resource=15),
+ override_project=dict(test_resource=20), ))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
project_id='override_project')
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes')
driver = FakeDriver(by_class=dict(
- test_class=dict(test_resource=15),
- override_class=dict(test_resource=20),
- ))
+ test_class=dict(test_resource=15),
+ override_class=dict(test_resource=20), ))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
quota_class='override_class')
resources = [
quota.AbsoluteResource('test_resource1'),
quota.AbsoluteResource('test_resource2'),
- quota.AbsoluteResource('test_resource3'),
- ]
+ quota.AbsoluteResource('test_resource3'), ]
quota_obj.register_resources(resources)
- self.assertEqual(quota_obj._resources, dict(
- test_resource1=resources[0],
- test_resource2=resources[1],
- test_resource3=resources[2],
- ))
+ self.assertEqual(quota_obj._resources,
+ dict(test_resource1=resources[0],
+ test_resource2=resources[1],
+ test_resource3=resources[2], ))
def test_sync_predeclared(self):
quota_obj = quota.QuotaEngine()
quota.ReservableResource('test_resource1', spam),
quota.ReservableResource('test_resource2', spam),
quota.ReservableResource('test_resource3', spam),
- quota.ReservableResource('test_resource4', spam),
- ]
+ quota.ReservableResource('test_resource4', spam), ]
quota_obj.register_resources(resources[:2])
self.assertEqual(resources[0].sync, spam)
def test_get_by_project(self):
context = FakeContext('test_project', 'test_class')
- driver = FakeDriver(by_project=dict(
+ driver = FakeDriver(
+ by_project=dict(
test_project=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_project(context, 'test_project',
'test_resource')
- self.assertEqual(driver.called, [
- ('get_by_project', context, 'test_project', 'test_resource'),
- ])
+ self.assertEqual(driver.called,
+ [('get_by_project',
+ context,
+ 'test_project',
+ 'test_resource'), ])
self.assertEqual(result, 42)
def test_get_by_class(self):
context = FakeContext('test_project', 'test_class')
- driver = FakeDriver(by_class=dict(
+ driver = FakeDriver(
+ by_class=dict(
test_class=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_class(context, 'test_class', 'test_resource')
- self.assertEqual(driver.called, [
- ('get_by_class', context, 'test_class', 'test_resource'),
- ])
+ self.assertEqual(driver.called, [('get_by_class',
+ context,
+ 'test_class',
+ 'test_resource'), ])
self.assertEqual(result, 42)
def _make_quota_obj(self, driver):
quota.AbsoluteResource('test_resource4'),
quota.AbsoluteResource('test_resource3'),
quota.AbsoluteResource('test_resource2'),
- quota.AbsoluteResource('test_resource1'),
- ]
+ quota.AbsoluteResource('test_resource1'), ]
quota_obj.register_resources(resources)
return quota_obj
quota_obj = self._make_quota_obj(driver)
result = quota_obj.get_defaults(context)
- self.assertEqual(driver.called, [
- ('get_defaults', context, quota_obj._resources),
- ])
+ self.assertEqual(driver.called, [('get_defaults',
+ context,
+ quota_obj._resources), ])
self.assertEqual(result, quota_obj._resources)
def test_get_class_quotas(self):
result2 = quota_obj.get_class_quotas(context, 'test_class', False)
self.assertEqual(driver.called, [
- ('get_class_quotas', context, quota_obj._resources,
- 'test_class', True),
- ('get_class_quotas', context, quota_obj._resources,
- 'test_class', False),
- ])
+ ('get_class_quotas',
+ context,
+ quota_obj._resources,
+ 'test_class', True),
+ ('get_class_quotas',
+ context, quota_obj._resources,
+ 'test_class', False), ])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
usages=False)
self.assertEqual(driver.called, [
- ('get_project_quotas', context, quota_obj._resources,
- 'test_project', None, True, True),
- ('get_project_quotas', context, quota_obj._resources,
- 'test_project', 'test_class', False, False),
- ])
+ ('get_project_quotas',
+ context,
+ quota_obj._resources,
+ 'test_project',
+ None,
+ True,
+ True),
+ ('get_project_quotas',
+ context,
+ quota_obj._resources,
+ 'test_project',
+ 'test_class',
+ False,
+ False), ])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
test_resource3=2, test_resource4=1)
self.assertEqual(driver.called, [
- ('limit_check', context, quota_obj._resources, dict(
- test_resource1=4,
- test_resource2=3,
- test_resource3=2,
- test_resource4=1,
- )),
- ])
+ ('limit_check',
+ context,
+ quota_obj._resources,
+ dict(
+ test_resource1=4,
+ test_resource2=3,
+ test_resource3=2,
+ test_resource4=1,)), ])
def test_reserve(self):
context = FakeContext(None, None)
- driver = FakeDriver(reservations=[
- 'resv-01', 'resv-02', 'resv-03', 'resv-04',
- ])
+ driver = FakeDriver(reservations=['resv-01',
+ 'resv-02',
+ 'resv-03',
+ 'resv-04', ])
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.reserve(context, test_resource1=4,
test_resource2=3, test_resource3=2,
test_resource3=3, test_resource4=4)
self.assertEqual(driver.called, [
- ('reserve', context, quota_obj._resources, dict(
- test_resource1=4,
- test_resource2=3,
- test_resource3=2,
- test_resource4=1,
- ), None),
- ('reserve', context, quota_obj._resources, dict(
- test_resource1=1,
- test_resource2=2,
- test_resource3=3,
- test_resource4=4,
- ), 3600),
- ])
- self.assertEqual(result1, [
- 'resv-01', 'resv-02', 'resv-03', 'resv-04',
- ])
- self.assertEqual(result2, [
- 'resv-01', 'resv-02', 'resv-03', 'resv-04',
- ])
+ ('reserve',
+ context,
+ quota_obj._resources,
+ dict(
+ test_resource1=4,
+ test_resource2=3,
+ test_resource3=2,
+ test_resource4=1, ),
+ None),
+ ('reserve',
+ context,
+ quota_obj._resources,
+ dict(
+ test_resource1=1,
+ test_resource2=2,
+ test_resource3=3,
+ test_resource4=4, ),
+ 3600), ])
+ self.assertEqual(result1, ['resv-01',
+ 'resv-02',
+ 'resv-03',
+ 'resv-04', ])
+ self.assertEqual(result2, ['resv-01',
+ 'resv-02',
+ 'resv-03',
+ 'resv-04', ])
def test_commit(self):
context = FakeContext(None, None)
quota_obj = self._make_quota_obj(driver)
quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
- self.assertEqual(driver.called, [
- ('commit', context, ['resv-01', 'resv-02', 'resv-03']),
- ])
+ self.assertEqual(driver.called,
+ [('commit',
+ context,
+ ['resv-01',
+ 'resv-02',
+ 'resv-03']), ])
def test_rollback(self):
context = FakeContext(None, None)
quota_obj = self._make_quota_obj(driver)
quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
- self.assertEqual(driver.called, [
- ('rollback', context, ['resv-01', 'resv-02', 'resv-03']),
- ])
+ self.assertEqual(driver.called,
+ [('rollback',
+ context,
+ ['resv-01',
+ 'resv-02',
+ 'resv-03']), ])
def test_destroy_all_by_project(self):
context = FakeContext(None, None)
quota_obj = self._make_quota_obj(driver)
quota_obj.destroy_all_by_project(context, 'test_project')
- self.assertEqual(driver.called, [
- ('destroy_all_by_project', context, 'test_project'),
- ])
+ self.assertEqual(driver.called,
+ [('destroy_all_by_project',
+ context,
+ 'test_project'), ])
def test_expire(self):
context = FakeContext(None, None)
quota_obj = self._make_quota_obj(driver)
quota_obj.expire(context)
- self.assertEqual(driver.called, [
- ('expire', context),
- ])
+ self.assertEqual(driver.called, [('expire', context), ])
def test_resources(self):
quota_obj = self._make_quota_obj(None)
# Use our pre-defined resources
result = self.driver.get_defaults(None, quota.QUOTAS._resources)
- self.assertEqual(result, dict(
+ self.assertEqual(
+ result,
+ dict(
volumes=10,
- gigabytes=1000,
- ))
+ gigabytes=1000, ))
def _stub_quota_class_get_all_by_name(self):
# Stub out quota_class_get_all_by_name
def fake_qcgabn(context, quota_class):
self.calls.append('quota_class_get_all_by_name')
self.assertEqual(quota_class, 'test_class')
- return dict(
- gigabytes=500,
- volumes=10,
- )
+ return dict(gigabytes=500, volumes=10, )
self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
def test_get_class_quotas(self):
'test_class')
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
- self.assertEqual(result, dict(
- volumes=10,
- gigabytes=500,
- ))
+ self.assertEqual(result, dict(volumes=10, gigabytes=500, ))
def test_get_class_quotas_no_defaults(self):
self._stub_quota_class_get_all_by_name()
'test_class', False)
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
- self.assertEqual(result, dict(
- volumes=10,
- gigabytes=500,
- ))
+ self.assertEqual(result, dict(volumes=10, gigabytes=500, ))
def _stub_get_by_project(self):
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual(project_id, 'test_project')
- return dict(
- volumes=10,
- gigabytes=50,
- reserved=0
- )
+ return dict(volumes=10, gigabytes=50, reserved=0)
def fake_qugabp(context, project_id):
self.calls.append('quota_usage_get_all_by_project')
self.assertEqual(project_id, 'test_project')
- return dict(
- volumes=dict(in_use=2, reserved=0),
- gigabytes=dict(in_use=10, reserved=0),
- )
+ return dict(volumes=dict(in_use=2, reserved=0),
+ gigabytes=dict(in_use=10, reserved=0), )
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project')
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project',
- 'quota_usage_get_all_by_project',
- 'quota_class_get_all_by_name',
- ])
- self.assertEqual(result, dict(
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- ))
+ self.assertEqual(self.calls, ['quota_get_all_by_project',
+ 'quota_usage_get_all_by_project',
+ 'quota_class_get_all_by_name', ])
+ self.assertEqual(result, dict(volumes=dict(limit=10,
+ in_use=2,
+ reserved=0, ),
+ gigabytes=dict(limit=50,
+ in_use=10,
+ reserved=0, ), ))
def test_get_project_quotas_alt_context_no_class(self):
self._stub_get_by_project()
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project')
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project',
- 'quota_usage_get_all_by_project',
- ])
- self.assertEqual(result, dict(
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- ))
+ self.assertEqual(self.calls, ['quota_get_all_by_project',
+ 'quota_usage_get_all_by_project', ])
+ self.assertEqual(result, dict(volumes=dict(limit=10,
+ in_use=2,
+ reserved=0, ),
+ gigabytes=dict(limit=50,
+ in_use=10,
+ reserved=0, ), ))
def test_get_project_quotas_alt_context_with_class(self):
self._stub_get_by_project()
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project', quota_class='test_class')
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project',
- 'quota_usage_get_all_by_project',
- 'quota_class_get_all_by_name',
- ])
- self.assertEqual(result, dict(
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- ))
+ self.assertEqual(self.calls, ['quota_get_all_by_project',
+ 'quota_usage_get_all_by_project',
+ 'quota_class_get_all_by_name', ])
+ self.assertEqual(result, dict(volumes=dict(limit=10,
+ in_use=2,
+ reserved=0, ),
+ gigabytes=dict(limit=50,
+ in_use=10,
+ reserved=0, ), ))
def test_get_project_quotas_no_defaults(self):
self._stub_get_by_project()
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', defaults=False)
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project',
- 'quota_usage_get_all_by_project',
- 'quota_class_get_all_by_name',
- ])
- self.assertEqual(result, dict(
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- ))
+ self.assertEqual(self.calls, ['quota_get_all_by_project',
+ 'quota_usage_get_all_by_project',
+ 'quota_class_get_all_by_name', ])
+ self.assertEqual(result,
+ dict(gigabytes=dict(limit=50,
+ in_use=10,
+ reserved=0, ),
+ volumes=dict(limit=10,
+ in_use=2,
+ reserved=0, ), ))
def test_get_project_quotas_no_usages(self):
self._stub_get_by_project()
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', usages=False)
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project',
- 'quota_class_get_all_by_name',
- ])
- self.assertEqual(result, dict(
- volumes=dict(
- limit=10,
- ),
- gigabytes=dict(
- limit=50,
- ),
- ))
+ self.assertEqual(self.calls, ['quota_get_all_by_project',
+ 'quota_class_get_all_by_name', ])
+ self.assertEqual(result, dict(volumes=dict(limit=10, ),
+ gigabytes=dict(limit=50, ), ))
def _stub_get_project_quotas(self):
def fake_get_project_quotas(context, resources, project_id,
True)
self.assertEqual(self.calls, ['get_project_quotas'])
- self.assertEqual(result, dict(
- volumes=10,
- gigabytes=1000,
- ))
+ self.assertEqual(result, dict(volumes=10, gigabytes=1000, ))
def _stub_quota_reserve(self):
def fake_quota_reserve(context, resources, quotas, deltas, expire,
dict(volumes=2))
expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ('quota_reserve', expire, 0, 0),
- ])
+ self.assertEqual(self.calls, ['get_project_quotas',
+ ('quota_reserve', expire, 0, 0), ])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_int_expire(self):
dict(volumes=2), expire=3600)
expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ('quota_reserve', expire, 0, 0),
- ])
+ self.assertEqual(self.calls, ['get_project_quotas',
+ ('quota_reserve', expire, 0, 0), ])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_timedelta_expire(self):
dict(volumes=2), expire=expire_delta)
expire = timeutils.utcnow() + expire_delta
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ('quota_reserve', expire, 0, 0),
- ])
+ self.assertEqual(self.calls, ['get_project_quotas',
+ ('quota_reserve', expire, 0, 0), ])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_datetime_expire(self):
quota.QUOTAS._resources,
dict(volumes=2), expire=expire)
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ('quota_reserve', expire, 0, 0),
- ])
+ self.assertEqual(self.calls, ['get_project_quotas',
+ ('quota_reserve', expire, 0, 0), ])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_until_refresh(self):
quota.QUOTAS._resources,
dict(volumes=2), expire=expire)
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ('quota_reserve', expire, 500, 0),
- ])
+ self.assertEqual(self.calls, ['get_project_quotas',
+ ('quota_reserve', expire, 500, 0), ])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_max_age(self):
quota.QUOTAS._resources,
dict(volumes=2), expire=expire)
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ('quota_reserve', expire, 0, 86400),
- ])
+ self.assertEqual(self.calls, ['get_project_quotas',
+ ('quota_reserve', expire, 0, 86400), ])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_quota_reserve_create_usages(self):
context = FakeContext('test_project', 'test_class')
- quotas = dict(
- volumes=5,
- gigabytes=10 * 1024,
- )
- deltas = dict(
- volumes=2,
- gigabytes=2 * 1024,
- )
+ quotas = dict(volumes=5,
+ gigabytes=10 * 1024, )
+ deltas = dict(volumes=2,
+ gigabytes=2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set(['volumes', 'gigabytes']))
- self.compare_usage(self.usages_created, [
- dict(resource='volumes',
- project_id='test_project',
- in_use=0,
- reserved=2,
- until_refresh=None),
- dict(resource='gigabytes',
- project_id='test_project',
- in_use=0,
- reserved=2 * 1024,
- until_refresh=None),
- ])
- self.compare_reservation(result, [
- dict(resource='volumes',
- usage_id=self.usages_created['volumes'],
- project_id='test_project',
- delta=2),
- dict(resource='gigabytes',
- usage_id=self.usages_created['gigabytes'],
- delta=2 * 1024),
- ])
+ self.compare_usage(self.usages_created,
+ [dict(resource='volumes',
+ project_id='test_project',
+ in_use=0,
+ reserved=2,
+ until_refresh=None),
+ dict(resource='gigabytes',
+ project_id='test_project',
+ in_use=0,
+ reserved=2 * 1024,
+ until_refresh=None), ])
+ self.compare_reservation(
+ result,
+ [dict(resource='volumes',
+ usage_id=self.usages_created['volumes'],
+ project_id='test_project',
+ delta=2),
+ dict(resource='gigabytes',
+ usage_id=self.usages_created['gigabytes'],
+ delta=2 * 1024), ])
def test_quota_reserve_negative_in_use(self):
self.init_usage('test_project', 'volumes', -1, 0, until_refresh=1)
self.init_usage('test_project', 'gigabytes', -1, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
- quotas = dict(
- volumes=5,
- gigabytes=10 * 1024,
- )
- deltas = dict(
- volumes=2,
- gigabytes=2 * 1024,
- )
+ quotas = dict(volumes=5,
+ gigabytes=10 * 1024, )
+ deltas = dict(volumes=2,
+ gigabytes=2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
self.assertEqual(self.sync_called, set(['volumes', 'gigabytes']))
- self.compare_usage(self.usages, [
- dict(resource='volumes',
- project_id='test_project',
- in_use=2,
- reserved=2,
- until_refresh=5),
- dict(resource='gigabytes',
- project_id='test_project',
- in_use=2,
- reserved=2 * 1024,
- until_refresh=5),
- ])
+ self.compare_usage(self.usages, [dict(resource='volumes',
+ project_id='test_project',
+ in_use=2,
+ reserved=2,
+ until_refresh=5),
+ dict(resource='gigabytes',
+ project_id='test_project',
+ in_use=2,
+ reserved=2 * 1024,
+ until_refresh=5), ])
self.assertEqual(self.usages_created, {})
- self.compare_reservation(result, [
- dict(resource='volumes',
- usage_id=self.usages['volumes'],
- project_id='test_project',
- delta=2),
- dict(resource='gigabytes',
- usage_id=self.usages['gigabytes'],
- delta=2 * 1024),
- ])
+ self.compare_reservation(result,
+ [dict(resource='volumes',
+ usage_id=self.usages['volumes'],
+ project_id='test_project',
+ delta=2),
+ dict(resource='gigabytes',
+ usage_id=self.usages['gigabytes'],
+ delta=2 * 1024), ])
def test_quota_reserve_until_refresh(self):
self.init_usage('test_project', 'volumes', 3, 0, until_refresh=1)
self.init_usage('test_project', 'gigabytes', 3, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
- quotas = dict(
- volumes=5,
- gigabytes=10 * 1024,
- )
- deltas = dict(
- volumes=2,
- gigabytes=2 * 1024,
- )
+ quotas = dict(volumes=5, gigabytes=10 * 1024, )
+ deltas = dict(volumes=2, gigabytes=2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
self.assertEqual(self.sync_called, set(['volumes', 'gigabytes']))
- self.compare_usage(self.usages, [
- dict(resource='volumes',
- project_id='test_project',
- in_use=2,
- reserved=2,
- until_refresh=5),
- dict(resource='gigabytes',
- project_id='test_project',
- in_use=2,
- reserved=2 * 1024,
- until_refresh=5),
- ])
+ self.compare_usage(self.usages, [dict(resource='volumes',
+ project_id='test_project',
+ in_use=2,
+ reserved=2,
+ until_refresh=5),
+ dict(resource='gigabytes',
+ project_id='test_project',
+ in_use=2,
+ reserved=2 * 1024,
+ until_refresh=5), ])
self.assertEqual(self.usages_created, {})
- self.compare_reservation(result, [
- dict(resource='volumes',
- usage_id=self.usages['volumes'],
- project_id='test_project',
- delta=2),
- dict(resource='gigabytes',
- usage_id=self.usages['gigabytes'],
- delta=2 * 1024),
- ])
+ self.compare_reservation(result,
+ [dict(resource='volumes',
+ usage_id=self.usages['volumes'],
+ project_id='test_project',
+ delta=2),
+ dict(resource='gigabytes',
+ usage_id=self.usages['gigabytes'],
+ delta=2 * 1024), ])
def test_quota_reserve_max_age(self):
max_age = 3600
self.init_usage('test_project', 'gigabytes', 3, 0,
created_at=record_created, updated_at=record_created)
context = FakeContext('test_project', 'test_class')
- quotas = dict(
- volumes=5,
- gigabytes=10 * 1024,
- )
- deltas = dict(
- volumes=2,
- gigabytes=2 * 1024,
- )
+ quotas = dict(volumes=5, gigabytes=10 * 1024, )
+ deltas = dict(volumes=2, gigabytes=2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, max_age)
self.assertEqual(self.sync_called, set(['volumes', 'gigabytes']))
- self.compare_usage(self.usages, [
- dict(resource='volumes',
- project_id='test_project',
- in_use=2,
- reserved=2,
- until_refresh=None),
- dict(resource='gigabytes',
- project_id='test_project',
- in_use=2,
- reserved=2 * 1024,
- until_refresh=None),
- ])
+ self.compare_usage(self.usages, [dict(resource='volumes',
+ project_id='test_project',
+ in_use=2,
+ reserved=2,
+ until_refresh=None),
+ dict(resource='gigabytes',
+ project_id='test_project',
+ in_use=2,
+ reserved=2 * 1024,
+ until_refresh=None), ])
self.assertEqual(self.usages_created, {})
- self.compare_reservation(result, [
- dict(resource='volumes',
- usage_id=self.usages['volumes'],
- project_id='test_project',
- delta=2),
- dict(resource='gigabytes',
- usage_id=self.usages['gigabytes'],
- delta=2 * 1024),
- ])
+ self.compare_reservation(result,
+ [dict(resource='volumes',
+ usage_id=self.usages['volumes'],
+ project_id='test_project',
+ delta=2),
+ dict(resource='gigabytes',
+ usage_id=self.usages['gigabytes'],
+ delta=2 * 1024), ])
def test_quota_reserve_no_refresh(self):
self.init_usage('test_project', 'volumes', 3, 0)
self.init_usage('test_project', 'gigabytes', 3, 0)
context = FakeContext('test_project', 'test_class')
- quotas = dict(
- volumes=5,
- gigabytes=10 * 1024,
- )
- deltas = dict(
- volumes=2,
- gigabytes=2 * 1024,
- )
+ quotas = dict(volumes=5, gigabytes=10 * 1024, )
+ deltas = dict(volumes=2, gigabytes=2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
- self.compare_usage(self.usages, [
- dict(resource='volumes',
- project_id='test_project',
- in_use=3,
- reserved=2,
- until_refresh=None),
- dict(resource='gigabytes',
- project_id='test_project',
- in_use=3,
- reserved=2 * 1024,
- until_refresh=None),
- ])
+ self.compare_usage(self.usages, [dict(resource='volumes',
+ project_id='test_project',
+ in_use=3,
+ reserved=2,
+ until_refresh=None),
+ dict(resource='gigabytes',
+ project_id='test_project',
+ in_use=3,
+ reserved=2 * 1024,
+ until_refresh=None), ])
self.assertEqual(self.usages_created, {})
- self.compare_reservation(result, [
- dict(resource='volumes',
- usage_id=self.usages['volumes'],
- project_id='test_project',
- delta=2),
- dict(resource='gigabytes',
- usage_id=self.usages['gigabytes'],
- delta=2 * 1024),
- ])
+ self.compare_reservation(result,
+ [dict(resource='volumes',
+ usage_id=self.usages['volumes'],
+ project_id='test_project',
+ delta=2),
+ dict(resource='gigabytes',
+ usage_id=self.usages['gigabytes'],
+ delta=2 * 1024), ])
def test_quota_reserve_unders(self):
self.init_usage('test_project', 'volumes', 1, 0)
self.init_usage('test_project', 'gigabytes', 1 * 1024, 0)
context = FakeContext('test_project', 'test_class')
- quotas = dict(
- volumes=5,
- gigabytes=10 * 1024,
- )
- deltas = dict(
- volumes=-2,
- gigabytes=-2 * 1024,
- )
+ quotas = dict(volumes=5, gigabytes=10 * 1024, )
+ deltas = dict(volumes=-2, gigabytes=-2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
- self.compare_usage(self.usages, [
- dict(resource='volumes',
- project_id='test_project',
- in_use=1,
- reserved=0,
- until_refresh=None),
- dict(resource='gigabytes',
- project_id='test_project',
- in_use=1 * 1024,
- reserved=0,
- until_refresh=None),
- ])
+ self.compare_usage(self.usages, [dict(resource='volumes',
+ project_id='test_project',
+ in_use=1,
+ reserved=0,
+ until_refresh=None),
+ dict(resource='gigabytes',
+ project_id='test_project',
+ in_use=1 * 1024,
+ reserved=0,
+ until_refresh=None), ])
self.assertEqual(self.usages_created, {})
- self.compare_reservation(result, [
- dict(resource='volumes',
- usage_id=self.usages['volumes'],
- project_id='test_project',
- delta=-2),
- dict(resource='gigabytes',
- usage_id=self.usages['gigabytes'],
- delta=-2 * 1024),
- ])
+ self.compare_reservation(result,
+ [dict(resource='volumes',
+ usage_id=self.usages['volumes'],
+ project_id='test_project',
+ delta=-2),
+ dict(resource='gigabytes',
+ usage_id=self.usages['gigabytes'],
+ delta=-2 * 1024), ])
def test_quota_reserve_overs(self):
self.init_usage('test_project', 'volumes', 4, 0)
self.init_usage('test_project', 'gigabytes', 10 * 1024, 0)
context = FakeContext('test_project', 'test_class')
- quotas = dict(
- volumes=5,
- gigabytes=10 * 1024,
- )
- deltas = dict(
- volumes=2,
- gigabytes=2 * 1024,
- )
+ quotas = dict(volumes=5, gigabytes=10 * 1024, )
+ deltas = dict(volumes=2, gigabytes=2 * 1024, )
self.assertRaises(exception.OverQuota,
sqa_api.quota_reserve,
context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
- self.compare_usage(self.usages, [
- dict(resource='volumes',
- project_id='test_project',
- in_use=4,
- reserved=0,
- until_refresh=None),
- dict(resource='gigabytes',
- project_id='test_project',
- in_use=10 * 1024,
- reserved=0,
- until_refresh=None),
- ])
+ self.compare_usage(self.usages, [dict(resource='volumes',
+ project_id='test_project',
+ in_use=4,
+ reserved=0,
+ until_refresh=None),
+ dict(resource='gigabytes',
+ project_id='test_project',
+ in_use=10 * 1024,
+ reserved=0,
+ until_refresh=None), ])
self.assertEqual(self.usages_created, {})
self.assertEqual(self.reservations_created, {})
self.init_usage('test_project', 'volumes', 10, 0)
self.init_usage('test_project', 'gigabytes', 20 * 1024, 0)
context = FakeContext('test_project', 'test_class')
- quotas = dict(
- volumes=5,
- gigabytes=10 * 1024,
- )
- deltas = dict(
- volumes=-2,
- gigabytes=-2 * 1024,
- )
+ quotas = dict(volumes=5, gigabytes=10 * 1024, )
+ deltas = dict(volumes=-2, gigabytes=-2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
- self.compare_usage(self.usages, [
- dict(resource='volumes',
- project_id='test_project',
- in_use=10,
- reserved=0,
- until_refresh=None),
- dict(resource='gigabytes',
- project_id='test_project',
- in_use=20 * 1024,
- reserved=0,
- until_refresh=None),
- ])
+ self.compare_usage(self.usages, [dict(resource='volumes',
+ project_id='test_project',
+ in_use=10,
+ reserved=0,
+ until_refresh=None),
+ dict(resource='gigabytes',
+ project_id='test_project',
+ in_use=20 * 1024,
+ reserved=0,
+ until_refresh=None), ])
self.assertEqual(self.usages_created, {})
- self.compare_reservation(result, [
- dict(resource='volumes',
- usage_id=self.usages['volumes'],
- project_id='test_project',
- delta=-2),
- dict(resource='gigabytes',
- usage_id=self.usages['gigabytes'],
- project_id='test_project',
- delta=-2 * 1024),
- ])
+ self.compare_reservation(result,
+ [dict(resource='volumes',
+ usage_id=self.usages['volumes'],
+ project_id='test_project',
+ delta=-2),
+ dict(resource='gigabytes',
+ usage_id=self.usages['gigabytes'],
+ project_id='test_project',
+ delta=-2 * 1024), ])
self.driver = RBDDriver(execute=fake_execute)
def test_good_locations(self):
- locations = [
- 'rbd://fsid/pool/image/snap',
- 'rbd://%2F/%2F/%2F/%2F',
- ]
+ locations = ['rbd://fsid/pool/image/snap',
+ 'rbd://%2F/%2F/%2F/%2F', ]
map(self.driver._parse_location, locations)
def test_bad_locations(self):
- locations = [
- 'rbd://image',
- 'http://path/to/somewhere/else',
- 'rbd://image/extra',
- 'rbd://image/',
- 'rbd://fsid/pool/image/',
- 'rbd://fsid/pool/image/snap/',
- 'rbd://///',
- ]
+ locations = ['rbd://image',
+ 'http://path/to/somewhere/else',
+ 'rbd://image/extra',
+ 'rbd://image/',
+ 'rbd://fsid/pool/image/',
+ 'rbd://fsid/pool/image/snap/',
+ 'rbd://///', ]
for loc in locations:
self.assertRaises(exception.ImageUnacceptable,
self.driver._parse_location,
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
volume_id = 1
# creating volume testdata
- db.volume_create(self.context, {'id': volume_id,
- 'updated_at': timeutils.utcnow(),
- 'display_description': 'Test Desc',
- 'size': 20,
- 'status': 'creating',
- 'instance_uuid': None,
- 'host': 'dummy'})
+ db.volume_create(self.context,
+ {'id': volume_id,
+ 'updated_at': timeutils.utcnow(),
+ 'display_description': 'Test Desc',
+ 'size': 20,
+ 'status': 'creating',
+ 'instance_uuid': None,
+ 'host': 'dummy'})
try:
if clone_works:
self.volume.create_volume(self.context,
help="Host to bind test service to"),
cfg.IntOpt("test_service_listen_port",
default=0,
- help="Port number to bind test service to"),
- ]
+ help="Port number to bind test service to"), ]
flags.FLAGS.register_opts(test_service_opts)
'report_count': 0,
'availability_zone': 'nova'}
service_ref = {'host': host,
- 'binary': binary,
- 'topic': topic,
- 'report_count': 0,
- 'availability_zone': 'nova',
- 'id': 1}
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0,
+ 'availability_zone': 'nova',
+ 'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
- host,
- binary).AndRaise(exception.NotFound())
+ host,
+ binary).AndRaise(exception.NotFound())
service.db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
service.db.service_get(mox.IgnoreArg(),
'report_count': 0,
'availability_zone': 'nova'}
service_ref = {'host': host,
- 'binary': binary,
- 'topic': topic,
- 'report_count': 0,
- 'availability_zone': 'nova',
- 'id': 1}
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0,
+ 'availability_zone': 'nova',
+ 'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
- host,
- binary).AndRaise(exception.NotFound())
+ host,
+ binary).AndRaise(exception.NotFound())
service.db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
service.db.service_get(mox.IgnoreArg(),
rows.append(["IO_group_name", "io_grp0"])
rows.append(["status", "online"])
rows.append(["mdisk_grp_id", "0"])
- rows.append(["mdisk_grp_name",
- self._flags["storwize_svc_volpool_name"]])
+ rows.append([
+ "mdisk_grp_name",
+ self._flags["storwize_svc_volpool_name"]])
rows.append(["capacity", cap])
rows.append(["type", "striped"])
rows.append(["formatted", "no"])
LOG.debug(_('Run CLI command: %s') % cmd)
ret = self.fake_storage.execute_command(cmd, check_exit_code)
(stdout, stderr) = ret
- LOG.debug(_('CLI output:\n stdout: %(out)s\n stderr: %(err)s') %
- {'out': stdout, 'err': stderr})
+ LOG.debug(_('CLI output:\n stdout: %(out)s\n stderr: %(err)s') % {
+ 'out': stdout, 'err': stderr})
except exception.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.debug(_('CLI Exception output:\n stdout: %(out)s\n '
'stderr: %(err)s') % {'out': e.stdout,
- 'err': e.stderr})
+ 'err': e.stderr})
return ret
# Check for missing san_ip
self.flags(san_ip=None)
self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
+ self.driver._check_flags)
if self.USESIM != 1:
# Check for invalid ip
self.flags(san_ip="-1.-1.-1.-1")
self.assertRaises(socket.gaierror,
- self.driver.check_for_setup_error)
+ self.driver.check_for_setup_error)
# Check for unreachable IP
self.flags(san_ip="1.1.1.1")
self.assertRaises(socket.error,
- self.driver.check_for_setup_error)
+ self.driver.check_for_setup_error)
def test_storwize_svc_connectivity(self):
# Make sure we detect if the pool doesn't exist
no_exist_pool = "i-dont-exist-%s" % random.randint(10000, 99999)
self.flags(storwize_svc_volpool_name=no_exist_pool)
self.assertRaises(exception.InvalidInput,
- self.driver.check_for_setup_error)
+ self.driver.check_for_setup_error)
FLAGS.reset()
# Check the case where the user didn't configure IP addresses
if self.USESIM == 1:
self.sim.error_injection("lsnodecanister", "header_mismatch")
self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.check_for_setup_error)
+ self.driver.check_for_setup_error)
self.sim.error_injection("lsnodecanister", "remove_field")
self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.check_for_setup_error)
+ self.driver.check_for_setup_error)
self.sim.error_injection("lsportip", "ip_no_config")
self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.check_for_setup_error)
+ self.driver.check_for_setup_error)
self.sim.error_injection("lsportip", "header_mismatch")
self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.check_for_setup_error)
+ self.driver.check_for_setup_error)
self.sim.error_injection("lsportip", "remove_field")
self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.check_for_setup_error)
+ self.driver.check_for_setup_error)
# Check with bad parameters
self.flags(san_password=None)
self.flags(san_private_key=None)
self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
+ self.driver._check_flags)
FLAGS.reset()
self.flags(storwize_svc_vol_rsize="invalid")
self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
+ self.driver._check_flags)
FLAGS.reset()
self.flags(storwize_svc_vol_warning="invalid")
self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
+ self.driver._check_flags)
FLAGS.reset()
self.flags(storwize_svc_vol_autoexpand="invalid")
self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
+ self.driver._check_flags)
FLAGS.reset()
self.flags(storwize_svc_vol_grainsize=str(42))
self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
+ self.driver._check_flags)
FLAGS.reset()
self.flags(storwize_svc_flashcopy_timeout=str(601))
self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
+ self.driver._check_flags)
FLAGS.reset()
self.flags(storwize_svc_vol_compression=True)
self.flags(storwize_svc_vol_rsize="-1")
self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
+ self.driver._check_flags)
FLAGS.reset()
# Finally, check with good parameters
# Test timeout and volume cleanup
self.flags(storwize_svc_flashcopy_timeout=str(1))
self.assertRaises(exception.InvalidSnapshot,
- self.driver.create_snapshot, snapshot)
+ self.driver.create_snapshot, snapshot)
is_volume_defined = self.driver._is_volume_defined(snapshot["name"])
self.assertEqual(is_volume_defined, False)
FLAGS.reset()
if self.USESIM == 1:
self.sim.error_injection("lsfcmap", "bogus_prepare")
self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.create_snapshot, snapshot)
+ self.driver.create_snapshot, snapshot)
# Test prestartfcmap, startfcmap, and rmfcmap failing
if self.USESIM == 1:
self.sim.error_injection("prestartfcmap", "bad_id")
self.assertRaises(exception.ProcessExecutionError,
- self.driver.create_snapshot, snapshot)
+ self.driver.create_snapshot, snapshot)
self.sim.error_injection("lsfcmap", "speed_up")
self.sim.error_injection("startfcmap", "bad_id")
self.assertRaises(exception.ProcessExecutionError,
- self.driver.create_snapshot, snapshot)
+ self.driver.create_snapshot, snapshot)
self.sim.error_injection("prestartfcmap", "bad_id")
self.sim.error_injection("rmfcmap", "bad_id")
self.assertRaises(exception.ProcessExecutionError,
- self.driver.create_snapshot, snapshot)
+ self.driver.create_snapshot, snapshot)
# Test successful snapshot
self.driver.create_snapshot(snapshot)
if self.USESIM == 1:
self.sim.error_injection("prestartfcmap", "bad_id")
self.assertRaises(exception.ProcessExecutionError,
- self.driver.create_volume_from_snapshot, volume2, snapshot)
+ self.driver.create_volume_from_snapshot,
+ volume2,
+ snapshot)
# Succeed
if self.USESIM == 1:
self.driver.create_volume(volume3)
snapshot["name"] = volume3["name"]
self.assertRaises(exception.InvalidSnapshot,
- self.driver.create_snapshot, snapshot)
+ self.driver.create_snapshot,
+ snapshot)
self.driver._delete_volume(volume1, True)
self.driver._delete_volume(volume3, True)
snapshot["name"] = "snap_volume%s" % random.randint(10000, 99999)
snapshot["volume_name"] = "no_exist"
self.assertRaises(exception.VolumeNotFound,
- self.driver.create_snapshot, snapshot)
+ self.driver.create_snapshot,
+ snapshot)
def test_storwize_svc_volumes(self):
# Create a first volume
# Try to create the volume again (should fail)
self.assertRaises(exception.ProcessExecutionError,
- self.driver.create_volume, volume)
+ self.driver.create_volume,
+ volume)
# Try to delete a volume that doesn't exist (should not fail)
vol_no_exist = {"name": "i_dont_exist"}
if self.USESIM == 1:
self.sim.error_injection("mkvdisk", "no_compression")
self.assertRaises(exception.ProcessExecutionError,
- self._create_test_vol)
+ self._create_test_vol)
FLAGS.reset()
def test_storwize_svc_unicode_host_and_volume_names(self):
# Try to delete the 1st volume (should fail because it is mapped)
self.assertRaises(exception.ProcessExecutionError,
- self.driver.delete_volume, volume1)
+ self.driver.delete_volume,
+ volume1)
# Test no preferred node
self.driver.terminate_connection(volume1, conn)
# Try to remove connection from host that doesn't exist (should fail)
conn_no_exist = {"initiator": "i_dont_exist"}
self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.terminate_connection, volume1, conn_no_exist)
+ self.driver.terminate_connection,
+ volume1,
+ conn_no_exist)
# Try to remove connection from volume that isn't mapped (should print
# message but NOT fail)
class TestUtilsTestCase(test.TestCase):
def test_get_test_admin_context(self):
- """get_test_admin_context's return value behaves like admin context"""
+ """get_test_admin_context's return value behaves like admin context."""
ctxt = test_utils.get_test_admin_context()
# TODO(soren): This should verify the full interface context
self.assertEqual(reloaded_data, fake_contents)
self.reload_called = True
- data = utils.read_cached_file("/this/is/a/fake", cache_data,
- reload_func=test_reload)
+ data = utils.read_cached_file("/this/is/a/fake",
+ cache_data,
+ reload_func=test_reload)
self.assertEqual(data, fake_contents)
self.assertTrue(self.reload_called)
self.flags(
monkey_patch=True,
monkey_patch_modules=[self.example_package + 'example_a' + ':'
- + self.example_package + 'example_decorator'])
+ + self.example_package
+ + 'example_decorator'])
def test_monkey_patch(self):
utils.monkey_patch()
self.assertEqual(ret_b, 8)
package_a = self.example_package + 'example_a.'
self.assertTrue(package_a + 'example_function_a'
- in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
+ in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(package_a + 'ExampleClassA.example_method'
- in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
+ in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(package_a + 'ExampleClassA.example_method_add'
- in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
+ in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
package_b = self.example_package + 'example_b.'
self.assertFalse(package_b + 'example_function_b'
- in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
+ in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(package_b + 'ExampleClassB.example_method'
- in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
+ in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(package_b + 'ExampleClassB.example_method_add'
- in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
+ in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
class AuditPeriodTest(test.TestCase):
def test_hour(self):
begin, end = utils.last_completed_audit_period(unit='hour')
- self.assertEquals(begin, datetime.datetime(
- hour=7,
- day=5,
- month=3,
- year=2012))
- self.assertEquals(end, datetime.datetime(
- hour=8,
- day=5,
- month=3,
- year=2012))
+ self.assertEquals(begin,
+ datetime.datetime(hour=7,
+ day=5,
+ month=3,
+ year=2012))
+ self.assertEquals(end, datetime.datetime(hour=8,
+ day=5,
+ month=3,
+ year=2012))
def test_hour_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@10')
- self.assertEquals(begin, datetime.datetime(
- minute=10,
- hour=7,
- day=5,
- month=3,
- year=2012))
- self.assertEquals(end, datetime.datetime(
- minute=10,
- hour=8,
- day=5,
- month=3,
- year=2012))
+ self.assertEquals(begin, datetime.datetime(minute=10,
+ hour=7,
+ day=5,
+ month=3,
+ year=2012))
+ self.assertEquals(end, datetime.datetime(minute=10,
+ hour=8,
+ day=5,
+ month=3,
+ year=2012))
def test_hour_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@30')
- self.assertEquals(begin, datetime.datetime(
- minute=30,
- hour=6,
- day=5,
- month=3,
- year=2012))
- self.assertEquals(end, datetime.datetime(
- minute=30,
- hour=7,
- day=5,
- month=3,
- year=2012))
+ self.assertEquals(begin, datetime.datetime(minute=30,
+ hour=6,
+ day=5,
+ month=3,
+ year=2012))
+ self.assertEquals(end, datetime.datetime(minute=30,
+ hour=7,
+ day=5,
+ month=3,
+ year=2012))
def test_day(self):
begin, end = utils.last_completed_audit_period(unit='day')
- self.assertEquals(begin, datetime.datetime(
- day=4,
- month=3,
- year=2012))
- self.assertEquals(end, datetime.datetime(
- day=5,
- month=3,
- year=2012))
+ self.assertEquals(begin, datetime.datetime(day=4,
+ month=3,
+ year=2012))
+ self.assertEquals(end, datetime.datetime(day=5,
+ month=3,
+ year=2012))
def test_day_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='day@6')
- self.assertEquals(begin, datetime.datetime(
- hour=6,
- day=4,
- month=3,
- year=2012))
- self.assertEquals(end, datetime.datetime(
- hour=6,
- day=5,
- month=3,
- year=2012))
+ self.assertEquals(begin, datetime.datetime(hour=6,
+ day=4,
+ month=3,
+ year=2012))
+ self.assertEquals(end, datetime.datetime(hour=6,
+ day=5,
+ month=3,
+ year=2012))
def test_day_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='day@10')
- self.assertEquals(begin, datetime.datetime(
- hour=10,
- day=3,
- month=3,
- year=2012))
- self.assertEquals(end, datetime.datetime(
- hour=10,
- day=4,
- month=3,
- year=2012))
+ self.assertEquals(begin, datetime.datetime(hour=10,
+ day=3,
+ month=3,
+ year=2012))
+ self.assertEquals(end, datetime.datetime(hour=10,
+ day=4,
+ month=3,
+ year=2012))
def test_month(self):
begin, end = utils.last_completed_audit_period(unit='month')
- self.assertEquals(begin, datetime.datetime(
- day=1,
- month=2,
- year=2012))
- self.assertEquals(end, datetime.datetime(
- day=1,
- month=3,
- year=2012))
+ self.assertEquals(begin, datetime.datetime(day=1,
+ month=2,
+ year=2012))
+ self.assertEquals(end, datetime.datetime(day=1,
+ month=3,
+ year=2012))
def test_month_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='month@2')
- self.assertEquals(begin, datetime.datetime(
- day=2,
- month=2,
- year=2012))
- self.assertEquals(end, datetime.datetime(
- day=2,
- month=3,
- year=2012))
+ self.assertEquals(begin, datetime.datetime(day=2,
+ month=2,
+ year=2012))
+ self.assertEquals(end, datetime.datetime(day=2,
+ month=3,
+ year=2012))
def test_month_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='month@15')
- self.assertEquals(begin, datetime.datetime(
- day=15,
- month=1,
- year=2012))
- self.assertEquals(end, datetime.datetime(
- day=15,
- month=2,
- year=2012))
+ self.assertEquals(begin, datetime.datetime(day=15,
+ month=1,
+ year=2012))
+ self.assertEquals(end, datetime.datetime(day=15,
+ month=2,
+ year=2012))
def test_year(self):
begin, end = utils.last_completed_audit_period(unit='year')
- self.assertEquals(begin, datetime.datetime(
- day=1,
- month=1,
- year=2011))
- self.assertEquals(end, datetime.datetime(
- day=1,
- month=1,
- year=2012))
+ self.assertEquals(begin, datetime.datetime(day=1,
+ month=1,
+ year=2011))
+ self.assertEquals(end, datetime.datetime(day=1,
+ month=1,
+ year=2012))
def test_year_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='year@2')
- self.assertEquals(begin, datetime.datetime(
- day=1,
- month=2,
- year=2011))
- self.assertEquals(end, datetime.datetime(
- day=1,
- month=2,
- year=2012))
+ self.assertEquals(begin, datetime.datetime(day=1,
+ month=2,
+ year=2011))
+ self.assertEquals(end, datetime.datetime(day=1,
+ month=2,
+ year=2012))
def test_year_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='year@6')
- self.assertEquals(begin, datetime.datetime(
- day=1,
- month=6,
- year=2010))
- self.assertEquals(end, datetime.datetime(
- day=1,
- month=6,
- year=2011))
+ self.assertEquals(begin, datetime.datetime(day=1,
+ month=6,
+ year=2010))
+ self.assertEquals(end, datetime.datetime(day=1,
+ month=6,
+ year=2011))
class FakeSSHClient(object):
class VersionTestCase(test.TestCase):
- """Test cases for Versions code"""
+ """Test cases for Versions code."""
def setUp(self):
- """setup test with unchanging values"""
+ """Setup test with unchanging values."""
super(VersionTestCase, self).setUp()
self.version = version
self.version.FINAL = False
self.version.CINDER_VERSION = ['2012', '10']
self.version.YEAR, self.version.COUNT = self.version.CINDER_VERSION
self.version.version_info = {'branch_nick': u'LOCALBRANCH',
- 'revision_id': 'LOCALREVISION',
- 'revno': 0}
+ 'revision_id': 'LOCALREVISION',
+ 'revno': 0}
def test_version_string_is_good(self):
- """Ensure version string works"""
+ """Ensure version string works."""
self.assertEqual("2012.10-dev", self.version.version_string())
def test_canonical_version_string_is_good(self):
- """Ensure canonical version works"""
+ """Ensure canonical version works."""
self.assertEqual("2012.10", self.version.canonical_version_string())
def test_final_version_strings_are_identical(self):
- """Ensure final version strings match only at release"""
+ """Ensure final version strings match only at release."""
self.assertNotEqual(self.version.canonical_version_string(),
- self.version.version_string())
+ self.version.version_string())
self.version.FINAL = True
self.assertEqual(self.version.canonical_version_string(),
- self.version.version_string())
+ self.version.version_string())
def test_vcs_version_string_is_good(self):
- """Ensure uninstalled code generates local """
+ """Ensure uninstalled code generates local."""
self.assertEqual("LOCALBRANCH:LOCALREVISION",
- self.version.vcs_version_string())
+ self.version.vcs_version_string())
def test_version_string_with_vcs_is_good(self):
- """Ensure uninstalled code get version string"""
+ """Ensure uninstalled code get version string."""
self.assertEqual("2012.10-LOCALBRANCH:LOCALREVISION",
- self.version.version_string_with_vcs())
+ self.version.version_string_with_vcs())
self.volume.create_volume(self.context, volume_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
- self.volume.driver.delete_volume(mox.IgnoreArg()) \
- .AndRaise(exception.VolumeIsBusy)
+ self.volume.driver.delete_volume(
+ mox.IgnoreArg()).AndRaise(exception.VolumeIsBusy)
self.mox.ReplayAll()
res = self.volume.delete_volume(self.context, volume_id)
self.assertEqual(True, res)
db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
- self.assertEqual(snapshot_id, db.volume_get(
- context.get_admin_context(),
- volume_dst['id']).snapshot_id)
+ self.assertEqual(snapshot_id,
+ db.volume_get(context.get_admin_context(),
+ volume_dst['id']).snapshot_id)
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.create_snapshot(self.context, volume_id, snapshot_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
- self.volume.driver.delete_snapshot(mox.IgnoreArg()) \
- .AndRaise(exception.SnapshotIsBusy)
+ self.volume.driver.delete_snapshot(
+ mox.IgnoreArg()).AndRaise(exception.SnapshotIsBusy)
self.mox.ReplayAll()
self.volume.delete_snapshot(self.context, snapshot_id)
snapshot_ref = db.snapshot_get(self.context, snapshot_id)
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
volume_id = 1
# creating volume testdata
- db.volume_create(self.context, {'id': volume_id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'display_description': 'Test Desc',
- 'size': 20,
- 'status': 'creating',
- 'instance_uuid': None,
- 'host': 'dummy'})
+ db.volume_create(self.context,
+ {'id': volume_id,
+ 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'display_description': 'Test Desc',
+ 'size': 20,
+ 'status': 'creating',
+ 'instance_uuid': None,
+ 'host': 'dummy'})
try:
self.volume.create_volume(self.context,
volume_id,
image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
# creating volume testdata
volume_id = 1
- db.volume_create(self.context, {'id': volume_id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'display_description': 'Test Desc',
- 'size': 20,
- 'status': 'creating',
- 'host': 'dummy'})
+ db.volume_create(self.context,
+ {'id': volume_id,
+ 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'display_description': 'Test Desc',
+ 'size': 20,
+ 'status': 'creating',
+ 'host': 'dummy'})
self.assertRaises(exception.ImageNotFound,
self.volume.create_volume,
image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
# creating volume testdata
volume_id = 1
- db.volume_create(self.context, {'id': volume_id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'display_description': 'Test Desc',
- 'size': 20,
- 'status': 'uploading',
- 'instance_uuid': None,
- 'host': 'dummy'})
+ db.volume_create(self.context,
+ {'id': volume_id,
+ 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'display_description': 'Test Desc',
+ 'size': 20,
+ 'status': 'uploading',
+ 'instance_uuid': None,
+ 'host': 'dummy'})
try:
# start test
self.volume.copy_volume_to_image(self.context,
- volume_id,
- image_id)
+ volume_id,
+ image_id)
volume = db.volume_get(self.context, volume_id)
self.assertEqual(volume['status'], 'available')
image_id = 'a440c04b-79fa-479c-bed1-0b816eaec379'
# creating volume testdata
volume_id = 1
- db.volume_create(self.context,
- {'id': volume_id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'display_description': 'Test Desc',
- 'size': 20,
- 'status': 'uploading',
- 'instance_uuid':
- 'b21f957d-a72f-4b93-b5a5-45b1161abb02',
- 'host': 'dummy'})
+ db.volume_create(
+ self.context,
+ {'id': volume_id,
+ 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'display_description': 'Test Desc',
+ 'size': 20,
+ 'status': 'uploading',
+ 'instance_uuid': 'b21f957d-a72f-4b93-b5a5-45b1161abb02',
+ 'host': 'dummy'})
try:
# start test
self.volume.copy_volume_to_image(self.context,
- volume_id,
- image_id)
+ volume_id,
+ image_id)
volume = db.volume_get(self.context, volume_id)
self.assertEqual(volume['status'], 'in-use')
image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
# creating volume testdata
volume_id = 1
- db.volume_create(self.context, {'id': volume_id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'display_description': 'Test Desc',
- 'size': 20,
- 'status': 'in-use',
- 'host': 'dummy'})
+ db.volume_create(self.context,
+ {'id': volume_id,
+ 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'display_description': 'Test Desc',
+ 'size': 20,
+ 'status': 'in-use',
+ 'host': 'dummy'})
try:
# start test
try:
volume_id = None
volume_api = cinder.volume.api.API(
- image_service=_FakeImageService())
+ image_service=_FakeImageService())
volume = volume_api.create(self.context, 2, 'name', 'description',
image_id=1)
volume_id = volume['id']
'value1')
vol_metadata = db.volume_glance_metadata_create(ctxt, 2, 'key1',
'value1')
- vol_metadata = db.volume_glance_metadata_create(ctxt, 2, 'key2',
+ vol_metadata = db.volume_glance_metadata_create(ctxt, 2,
+ 'key2',
'value2')
expected_metadata_1 = {'volume_id': '1',
- 'key': 'key1',
- 'value': 'value1'}
+ 'key': 'key1',
+ 'value': 'value1'}
metadata = db.volume_glance_metadata_get(ctxt, 1)
self.assertEqual(len(metadata), 1)
db.volume_glance_metadata_copy_to_snapshot(ctxt, 100, 1)
expected_meta = {'snapshot_id': '100',
- 'key': 'key1',
- 'value': 'value1'}
+ 'key': 'key1',
+ 'value': 'value1'}
for meta in db.volume_snapshot_glance_metadata_get(ctxt, 100):
for (key, value) in expected_meta.items():
def test_create_volume(self):
self._test_volume_api('create_volume',
- rpc_method='cast',
- volume=self.fake_volume,
- host='fake_host1',
- snapshot_id='fake_snapshot_id',
- image_id='fake_image_id')
+ rpc_method='cast',
+ volume=self.fake_volume,
+ host='fake_host1',
+ snapshot_id='fake_snapshot_id',
+ image_id='fake_image_id')
def test_delete_volume(self):
self._test_volume_api('delete_volume',
- rpc_method='cast',
- volume=self.fake_volume)
+ rpc_method='cast',
+ volume=self.fake_volume)
def test_create_snapshot(self):
self._test_volume_api('create_snapshot',
- rpc_method='cast',
- volume=self.fake_volume,
- snapshot=self.fake_snapshot)
+ rpc_method='cast',
+ volume=self.fake_volume,
+ snapshot=self.fake_snapshot)
def test_delete_snapshot(self):
self._test_volume_api('delete_snapshot',
- rpc_method='cast',
- snapshot=self.fake_snapshot,
- host='fake_host')
+ rpc_method='cast',
+ snapshot=self.fake_snapshot,
+ host='fake_host')
def test_attach_volume(self):
self._test_volume_api('attach_volume',
- rpc_method='call',
- volume=self.fake_volume,
- instance_uuid='fake_uuid',
- mountpoint='fake_mountpoint')
+ rpc_method='call',
+ volume=self.fake_volume,
+ instance_uuid='fake_uuid',
+ mountpoint='fake_mountpoint')
def test_detach_volume(self):
self._test_volume_api('detach_volume',
- rpc_method='call',
- volume=self.fake_volume)
+ rpc_method='call',
+ volume=self.fake_volume)
def test_copy_volume_to_image(self):
self._test_volume_api('copy_volume_to_image',
- rpc_method='cast',
- volume=self.fake_volume,
- image_id='fake_image_id')
+ rpc_method='cast',
+ volume=self.fake_volume,
+ image_id='fake_image_id')
def test_initialize_connection(self):
self._test_volume_api('initialize_connection',
- rpc_method='call',
- volume=self.fake_volume,
- connector='fake_connector')
+ rpc_method='call',
+ volume=self.fake_volume,
+ connector='fake_connector')
def test_terminate_connection(self):
self._test_volume_api('terminate_connection',
- rpc_method='call',
- volume=self.fake_volume,
- connector='fake_connector',
- force=False)
+ rpc_method='call',
+ volume=self.fake_volume,
+ connector='fake_connector',
+ force=False)
class VolumeTypeTestCase(test.TestCase):
- """Test cases for volume type code"""
+ """Test cases for volume type code."""
def setUp(self):
super(VolumeTypeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.vol_type1_name = str(int(time.time()))
- self.vol_type1_specs = dict(
- type="physical drive",
- drive_type="SAS",
- size="300",
- rpm="7200",
- visible="True")
+ self.vol_type1_specs = dict(type="physical drive",
+ drive_type="SAS",
+ size="300",
+ rpm="7200",
+ visible="True")
def test_volume_type_create_then_destroy(self):
- """Ensure volume types can be created and deleted"""
+ """Ensure volume types can be created and deleted."""
prev_all_vtypes = volume_types.get_all_types(self.ctxt)
volume_types.create(self.ctxt,
'drive type was not deleted')
def test_get_all_volume_types(self):
- """Ensures that all volume types can be retrieved"""
+ """Ensures that all volume types can be retrieved."""
session = sql_session.get_session()
total_volume_types = session.query(models.VolumeTypes).count()
vol_types = volume_types.get_all_types(self.ctxt)
self.assertEqual(total_volume_types, len(vol_types))
def test_get_default_volume_type(self):
- """ Ensures default volume type can be retrieved """
+ """Ensures default volume type can be retrieved."""
volume_types.create(self.ctxt,
fake_flags.def_vol_type,
{})
fake_flags.def_vol_type)
def test_default_volume_type_missing_in_db(self):
- """ Ensures proper exception raised if default volume type
- is not in database. """
+ """Ensures proper exception raised if default volume type
+ is not in database."""
session = sql_session.get_session()
default_vol_type = volume_types.get_default_volume_type()
self.assertEqual(default_vol_type, {})
def test_non_existent_vol_type_shouldnt_delete(self):
- """Ensures that volume type creation fails with invalid args"""
+ """Ensures that volume type creation fails with invalid args."""
self.assertRaises(exception.VolumeTypeNotFoundByName,
volume_types.destroy, self.ctxt, "sfsfsdfdfs")
def test_repeated_vol_types_shouldnt_raise(self):
- """Ensures that volume duplicates don't raise"""
+ """Ensures that volume duplicates don't raise."""
new_name = self.vol_type1_name + "dup"
volume_types.create(self.ctxt, new_name)
volume_types.destroy(self.ctxt, new_name)
volume_types.create(self.ctxt, new_name)
def test_invalid_volume_types_params(self):
- """Ensures that volume type creation fails with invalid args"""
+ """Ensures that volume type creation fails with invalid args."""
self.assertRaises(exception.InvalidVolumeType,
volume_types.destroy, self.ctxt, None)
self.assertRaises(exception.InvalidVolumeType,
self.ctxt, None)
def test_volume_type_get_by_id_and_name(self):
- """Ensure volume types get returns same entry"""
+ """Ensure volume types get returns same entry."""
volume_types.create(self.ctxt,
self.vol_type1_name,
self.vol_type1_specs)
self.assertEqual(new, new2)
def test_volume_type_search_by_extra_spec(self):
- """Ensure volume types get by extra spec returns correct type"""
+ """Ensure volume types get by extra spec returns correct type."""
volume_types.create(self.ctxt, "type1", {"key1": "val1",
"key2": "val2"})
volume_types.create(self.ctxt, "type2", {"key2": "val2",
volume_types.create(self.ctxt, "type3", {"key3": "another_value",
"key4": "val4"})
- vol_types = volume_types.get_all_types(self.ctxt,
- search_opts={'extra_specs': {"key1": "val1"}})
+ vol_types = volume_types.get_all_types(
+ self.ctxt,
+ search_opts={'extra_specs': {"key1": "val1"}})
LOG.info("vol_types: %s" % vol_types)
self.assertEqual(len(vol_types), 1)
self.assertTrue("type1" in vol_types.keys())
self.assertEqual(vol_types['type1']['extra_specs'],
{"key1": "val1", "key2": "val2"})
- vol_types = volume_types.get_all_types(self.ctxt,
- search_opts={'extra_specs': {"key2": "val2"}})
+ vol_types = volume_types.get_all_types(
+ self.ctxt,
+ search_opts={'extra_specs': {"key2": "val2"}})
LOG.info("vol_types: %s" % vol_types)
self.assertEqual(len(vol_types), 2)
self.assertTrue("type1" in vol_types.keys())
self.assertTrue("type2" in vol_types.keys())
- vol_types = volume_types.get_all_types(self.ctxt,
- search_opts={'extra_specs': {"key3": "val3"}})
+ vol_types = volume_types.get_all_types(
+ self.ctxt,
+ search_opts={'extra_specs': {"key3": "val3"}})
LOG.info("vol_types: %s" % vol_types)
self.assertEqual(len(vol_types), 1)
self.assertTrue("type2" in vol_types.keys())
def test_volume_type_search_by_extra_spec_multiple(self):
- """Ensure volume types get by extra spec returns correct type"""
+ """Ensure volume types get by extra spec returns correct type."""
volume_types.create(self.ctxt, "type1", {"key1": "val1",
"key2": "val2",
"key3": "val3"})
"key3": "val3",
"key4": "val4"})
- vol_types = volume_types.get_all_types(self.ctxt,
- search_opts={'extra_specs': {"key1": "val1",
- "key3": "val3"}})
+ vol_types = volume_types.get_all_types(
+ self.ctxt,
+ search_opts={'extra_specs': {"key1": "val1",
+ "key3": "val3"}})
LOG.info("vol_types: %s" % vol_types)
self.assertEqual(len(vol_types), 2)
self.assertTrue("type1" in vol_types.keys())
self.context = context.get_admin_context()
self.vol_type1 = dict(name="TEST: Regular volume test")
self.vol_type1_specs = dict(vol_extra1="value1",
- vol_extra2="value2",
- vol_extra3=3)
+ vol_extra2="value2",
+ vol_extra3=3)
self.vol_type1['extra_specs'] = self.vol_type1_specs
ref = db.volume_type_create(self.context, self.vol_type1)
self.volume_type1_id = ref.id
def test_volume_type_specs_get(self):
expected_specs = self.vol_type1_specs.copy()
actual_specs = db.volume_type_extra_specs_get(
- context.get_admin_context(),
- self.volume_type1_id)
+ context.get_admin_context(),
+ self.volume_type1_id)
self.assertEquals(expected_specs, actual_specs)
def test_volume_type_extra_specs_delete(self):
expected_specs = self.vol_type1_specs.copy()
del expected_specs['vol_extra2']
db.volume_type_extra_specs_delete(context.get_admin_context(),
- self.volume_type1_id,
- 'vol_extra2')
+ self.volume_type1_id,
+ 'vol_extra2')
actual_specs = db.volume_type_extra_specs_get(
- context.get_admin_context(),
- self.volume_type1_id)
+ context.get_admin_context(),
+ self.volume_type1_id)
self.assertEquals(expected_specs, actual_specs)
def test_volume_type_extra_specs_update(self):
expected_specs = self.vol_type1_specs.copy()
expected_specs['vol_extra3'] = "4"
db.volume_type_extra_specs_update_or_create(
- context.get_admin_context(),
- self.volume_type1_id,
- dict(vol_extra3=4))
+ context.get_admin_context(),
+ self.volume_type1_id,
+ dict(vol_extra3=4))
actual_specs = db.volume_type_extra_specs_get(
- context.get_admin_context(),
- self.volume_type1_id)
+ context.get_admin_context(),
+ self.volume_type1_id)
self.assertEquals(expected_specs, actual_specs)
def test_volume_type_extra_specs_create(self):
expected_specs['vol_extra4'] = 'value4'
expected_specs['vol_extra5'] = 'value5'
db.volume_type_extra_specs_update_or_create(
- context.get_admin_context(),
- self.volume_type1_id,
- dict(vol_extra4="value4",
- vol_extra5="value5"))
+ context.get_admin_context(),
+ self.volume_type1_id,
+ dict(vol_extra4="value4",
+ vol_extra5="value5"))
actual_specs = db.volume_type_extra_specs_get(
- context.get_admin_context(),
- self.volume_type1_id)
+ context.get_admin_context(),
+ self.volume_type1_id)
self.assertEquals(expected_specs, actual_specs)
def test_volume_type_get_with_extra_specs(self):
volume_type = db.volume_type_get(
- context.get_admin_context(),
- self.volume_type1_id)
+ context.get_admin_context(),
+ self.volume_type1_id)
self.assertEquals(volume_type['extra_specs'],
self.vol_type1_specs)
volume_type = db.volume_type_get(
- context.get_admin_context(),
- self.vol_type2_id)
+ context.get_admin_context(),
+ self.vol_type2_id)
self.assertEquals(volume_type['extra_specs'], {})
def test_volume_type_get_by_name_with_extra_specs(self):
volume_type = db.volume_type_get_by_name(
- context.get_admin_context(),
- self.vol_type1['name'])
+ context.get_admin_context(),
+ self.vol_type1['name'])
self.assertEquals(volume_type['extra_specs'],
self.vol_type1_specs)
volume_type = db.volume_type_get_by_name(
- context.get_admin_context(),
- self.vol_type2_noextra['name'])
+ context.get_admin_context(),
+ self.vol_type2_noextra['name'])
self.assertEquals(volume_type['extra_specs'], {})
def test_volume_type_get_all(self):
super(UsageInfoTestCase, self).tearDown()
def _create_volume(self, params={}):
- """Create a test volume"""
+ """Create a test volume."""
vol = {}
vol['snapshot_id'] = self.snapshot_id
vol['user_id'] = self.user_id
def tearDown(self):
try:
- if self._volume_data_2 and \
- self._wutils.volume_exists(
- self._volume_data_2['name']):
+ if (self._volume_data_2 and
+ self._wutils.volume_exists(self._volume_data_2['name'])):
self._wutils.delete_volume(self._volume_data_2['name'])
- if self._volume_data and \
- self._wutils.volume_exists(
- self._volume_data['name']):
+
+ if (self._volume_data and
+ self._wutils.volume_exists(
+ self._volume_data['name'])):
self._wutils.delete_volume(self._volume_data['name'])
- if self._snapshot_data and \
- self._wutils.snapshot_exists(
- self._snapshot_data['name']):
+ if (self._snapshot_data and
+ self._wutils.snapshot_exists(
+ self._snapshot_data['name'])):
self._wutils.delete_snapshot(self._snapshot_data['name'])
- if self._connector_data and \
- self._wutils.initiator_id_exists(
- "%s%s" % (FLAGS.iscsi_target_prefix,
- self._volume_data['name']),
- self._connector_data['initiator']):
+ if (self._connector_data and
+ self._wutils.initiator_id_exists(
+ "%s%s" % (FLAGS.iscsi_target_prefix,
+ self._volume_data['name']),
+ self._connector_data['initiator'])):
target_name = "%s%s" % (FLAGS.iscsi_target_prefix,
self._volume_data['name'])
initiator_name = self._connector_data['initiator']
self._wutils.delete_initiator_id(target_name, initiator_name)
- if self._volume_data and \
- self._wutils.export_exists("%s%s" % (FLAGS.iscsi_target_prefix,
- self._volume_data['name'])):
- self._wutils.delete_export("%s%s" % (FLAGS.iscsi_target_prefix,
- self._volume_data['name']))
+ if (self._volume_data and
+ self._wutils.export_exists("%s%s" %
+ (FLAGS.iscsi_target_prefix,
+ self._volume_data['name']))):
+ self._wutils.delete_export(
+ "%s%s" % (FLAGS.iscsi_target_prefix,
+ self._volume_data['name']))
finally:
super(TestWindowsDriver, self).tearDown()
retval = self._drv.create_export({}, self._volume_data)
volume_name = self._volume_data['name']
- self.assertEquals(retval,
- {'provider_location':
- "%s%s" % (FLAGS.iscsi_target_prefix, volume_name)})
+ self.assertEquals(
+ retval,
+ {'provider_location': "%s%s" % (FLAGS.iscsi_target_prefix,
+ volume_name)})
def test_initialize_connection(self):
#Create a volume
size=1, display_name='name', display_description='desc'))
mock.VerifyAll()
- self.assertEquals(dict(
- provider_location='sr_uuid/vdi_uuid'
- ), result)
+ self.assertEquals(dict(provider_location='sr_uuid/vdi_uuid'), result)
def test_delete_volume(self):
mock = mox.Mox()
FLAGS = flags.FLAGS
FAKE = "fake"
-VOLUME = {
- 'size': 16,
- 'name': FAKE,
- 'id': 1
- }
+VOLUME = {'size': 16,
+ 'name': FAKE,
+ 'id': 1}
-CONNECTOR = {
- 'initiator': "iqn.2012-07.org.fake:01:948f189c4695",
- }
+CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", }
class XIVFakeProxyDriver(object):
self.volumes[volume['name']]['attached'] = connector
- return {
- 'driver_volume_type': 'iscsi',
- 'data': {
- 'target_discovered': True,
- 'target_portal': self.xiv_portal,
- 'target_iqn': self.xiv_iqn,
- 'target_lun': lun_id,
- 'volume_id': volume['id'],
- 'multipath': True,
- # part of a patch to nova-compute to enable iscsi multipath
- 'provider_location': "%s,1 %s %s" % (
- self.xiv_portal,
- self.xiv_iqn,
- lun_id),
- },
+        return {'driver_volume_type': 'iscsi',
+                'data': {'target_discovered': True,
+                         'target_portal': self.xiv_portal,
+                         'target_iqn': self.xiv_iqn,
+                         'target_lun': lun_id,
+                         'volume_id': volume['id'],
+                         'multipath': True,
+                         # enables iscsi multipath (nova-compute patch)
+                         'provider_location': "%s,1 %s %s" % (
+                             self.xiv_portal,
+                             self.xiv_iqn,
+                             lun_id), },
}
def terminate_connection(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound()
- return self.volumes[volume['name']].get('attached', None) \
- == connector
+ return (self.volumes[volume['name']].get('attached', None)
+ == connector)
class XIVVolumeDriverTest(test.TestCase):
def test_initialized_should_set_xiv_info(self):
"""Test that the san flags are passed to the XIV proxy."""
- self.assertEquals(
- self.driver.xiv_proxy.xiv_info['xiv_user'],
- FLAGS.san_login)
- self.assertEquals(
- self.driver.xiv_proxy.xiv_info['xiv_pass'],
- FLAGS.san_password)
- self.assertEquals(
- self.driver.xiv_proxy.xiv_info['xiv_address'],
- FLAGS.san_ip)
- self.assertEquals(
- self.driver.xiv_proxy.xiv_info['xiv_vol_pool'],
- FLAGS.san_clustername)
+ self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_user'],
+ FLAGS.san_login)
+ self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_pass'],
+ FLAGS.san_password)
+ self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_address'],
+ FLAGS.san_ip)
+ self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_vol_pool'],
+ FLAGS.san_clustername)
def test_setup_should_fail_if_credentials_are_invalid(self):
"""Test that the xiv_proxy validates credentials."""
self.driver.do_setup(None)
self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.create_volume,
- {'name': FAKE, 'id': 1, 'size': 12000})
+ self.driver.create_volume,
+ {'name': FAKE,
+ 'id': 1,
+ 'size': 12000})
def test_initialize_connection(self):
"""Test that inititialize connection attaches volume to host."""
self.driver.initialize_connection(VOLUME, CONNECTOR)
self.assertTrue(
- self.driver.xiv_proxy.is_volume_attached(VOLUME, CONNECTOR))
+ self.driver.xiv_proxy.is_volume_attached(VOLUME, CONNECTOR))
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.driver.delete_volume(VOLUME)
self.driver.do_setup(None)
self.assertRaises(exception.VolumeNotFound,
- self.driver.initialize_connection, VOLUME, CONNECTOR)
+ self.driver.initialize_connection,
+ VOLUME,
+ CONNECTOR)
def test_terminate_connection(self):
"""Test terminating a connection."""
self.driver.initialize_connection(VOLUME, CONNECTOR)
self.driver.terminate_connection(VOLUME, CONNECTOR)
- self.assertFalse(
- self.driver.xiv_proxy.is_volume_attached(
- VOLUME,
- CONNECTOR))
+ self.assertFalse(self.driver.xiv_proxy.is_volume_attached(VOLUME,
+ CONNECTOR))
self.driver.delete_volume(VOLUME)
self.driver.do_setup(None)
self.assertRaises(exception.VolumeNotFound,
- self.driver.terminate_connection, VOLUME, CONNECTOR)
+ self.driver.terminate_connection,
+ VOLUME,
+ CONNECTOR)
def test_terminate_connection_should_fail_on_non_attached_volume(self):
"""Test that terminate won't work for volumes that are not attached."""
self.driver.create_volume(VOLUME)
self.assertRaises(exception.VolumeNotFoundForInstance,
- self.driver.terminate_connection, VOLUME, CONNECTOR)
+ self.driver.terminate_connection,
+ VOLUME,
+ CONNECTOR)
self.driver.delete_volume(VOLUME)
('/api/vcontrollers.xml', self._list_controllers),
('/api/servers.xml', self._list_servers),
('/api/volumes/*/servers.xml',
- self._list_vol_attachments)]
+ self._list_vol_attachments)]
}
ops_list = ops[self.method]
def _login(self):
params = self._get_parameters(self.body)
- if params['user'] == RUNTIME_VARS['user'] and\
- params['password'] == RUNTIME_VARS['password']:
+ if (params['user'] == RUNTIME_VARS['user'] and
+ params['password'] == RUNTIME_VARS['password']):
return RUNTIME_VARS['login'] % RUNTIME_VARS['access_key']
else:
return RUNTIME_VARS['bad_login']
<created-at type='datetime'>2012-01-28...</created-at>
<modified-at type='datetime'>2012-01-28...</modified-at>
</volume>"""
- return self._generate_list_resp(header, footer, body,
- RUNTIME_VARS['volumes'])
+ return self._generate_list_resp(header,
+ footer,
+ body,
+ RUNTIME_VARS['volumes'])
def _list_controllers(self):
header = """<show-vcontrollers-response>
<chap-username>test_chap_user</chap-username>
<chap-target-secret>test_chap_secret</chap-target-secret>
</vcontroller>"""
- return self._generate_list_resp(header, footer, body,
- RUNTIME_VARS['controllers'])
+ return self._generate_list_resp(header,
+ footer,
+ body,
+ RUNTIME_VARS['controllers'])
def _list_servers(self):
header = """<show-servers-response>
for server in attachments:
srv_params = self._get_server_obj(server)
resp += body % (server,
- srv_params['display_name'], srv_params['iqn'])
+ srv_params['display_name'],
+ srv_params['iqn'])
resp += footer
return resp
class ZadaraVPSADriverTestCase(test.TestCase):
- """Test case for Zadara VPSA volume driver"""
+ """Test case for Zadara VPSA volume driver."""
def setUp(self):
LOG.debug('Enter: setUp')
self.driver.check_for_setup_error()
def test_volume_attach_detach(self):
- """Test volume attachment and detach"""
+ """Test volume attachment and detach."""
volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
connector = dict(initiator='test_iqn.1')
self.driver.delete_volume(volume)
def test_volume_attach_multiple_detach(self):
- """Test multiple volume attachment and detach"""
+ """Test multiple volume attachment and detach."""
volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
connector1 = dict(initiator='test_iqn.1')
connector2 = dict(initiator='test_iqn.2')
self.driver.delete_volume(volume)
def test_wrong_attach_params(self):
- """Test different wrong attach scenarios"""
+ """Test different wrong attach scenarios."""
volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102}
volume3 = {'name': 'test_volume_03', 'size': 1, 'id': 103}
volume1, connector1)
def test_wrong_detach_params(self):
- """Test different wrong detachment scenarios"""
+ """Test different wrong detachment scenarios."""
volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102}
volume1, connector2)
def test_wrong_login_reply(self):
- """Test wrong login reply"""
+ """Test wrong login reply."""
RUNTIME_VARS['login'] = """<hash>
<access-key>%s</access-key>
self.driver.do_setup, None)
def test_ssl_use(self):
- """Coverage test for SSL connection"""
+ """Coverage test for SSL connection."""
self.flags(zadara_vpsa_use_ssl=True)
self.driver.do_setup(None)
self.flags(zadara_vpsa_use_ssl=False)
def test_bad_http_response(self):
- """Coverage test for non-good HTTP response"""
+ """Coverage test for non-good HTTP response."""
RUNTIME_VARS['status'] = 400
volume = {'name': 'test_volume_01', 'size': 1}
self.driver.create_volume, volume)
def test_delete_without_detach(self):
- """Test volume deletion without detach"""
+ """Test volume deletion without detach."""
volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
connector1 = dict(initiator='test_iqn.1')
super(BaseTestCase, self).tearDown()
has_errors = len([test for (test, msgs) in self._currentResult.errors
- if test.id() == self.id()]) > 0
+ if test.id() == self.id()]) > 0
failed = len([test for (test, msgs) in self._currentResult.failures
- if test.id() == self.id()]) > 0
+ if test.id() == self.id()]) > 0
if not has_errors and not failed:
self._save_mock_proxies()
test_name = test_name[len(prefix):]
file_name = '{0}_{1}.p.gz'.format(test_name, mock_name)
return os.path.join(os.path.dirname(mockproxy.__file__),
- "stubs", file_name)
+ "stubs", file_name)
def _load_mock(self, name):
path = self._get_stub_file_path(self.id(), name)
def _load_mock_or_create_proxy(self, module_name):
m = None
- if not gen_test_mocks_key in os.environ or \
- os.environ[gen_test_mocks_key].lower() \
- not in ['true', 'yes', '1']:
+ if (not gen_test_mocks_key in os.environ or
+ os.environ[gen_test_mocks_key].lower()
+ not in ['true', 'yes', '1']):
m = self._load_mock(module_name)
else:
module = __import__(module_name)
def get_fake_volume_info(name):
- return {
- 'name': name,
- 'size': 1,
- 'provider_location': 'iqn.2010-10.org.openstack:' + name,
- 'id': 1,
- 'provider_auth': None
- }
+ return {'name': name,
+ 'size': 1,
+ 'provider_location': 'iqn.2010-10.org.openstack:' + name,
+ 'id': 1,
+ 'provider_auth': None}
def get_fake_snapshot_info(volume_name, snapshot_name):
- return {
- 'name': snapshot_name,
- 'volume_name': volume_name,
- }
+ return {'name': snapshot_name,
+ 'volume_name': volume_name, }
def get_fake_connector_info(initiator):
- return {
- 'initiator': initiator,
- }
+ return {'initiator': initiator, }
def serialize_args(*args, **kwargs):
- """Workaround for float string conversion issues in Python 2.6"""
+ """Workaround for float string conversion issues in Python 2.6."""
return serialize_obj((args, kwargs))
self._recorded_values = {}
def _get_proxy_object(self, obj):
- if hasattr(obj, '__dict__') or isinstance(obj, tuple) or \
- isinstance(obj, list) or isinstance(obj, dict):
+ if (hasattr(obj, '__dict__') or
+ isinstance(obj, tuple) or
+ isinstance(obj, list) or
+ isinstance(obj, dict)):
p = MockProxy(obj)
else:
p = obj
return object.__getattribute__(self, name)
else:
attr = getattr(self._wrapped, name)
- if inspect.isfunction(attr) or inspect.ismethod(attr) or \
- inspect.isbuiltin(attr):
+ if (inspect.isfunction(attr) or
+ inspect.ismethod(attr) or
+ inspect.isbuiltin(attr)):
def newfunc(*args, **kwargs):
result = attr(*args, **kwargs)
p = self._get_proxy_object(result)
self._add_recorded_ret_value(name, params, p)
return p
return newfunc
- elif hasattr(attr, '__dict__') or (hasattr(attr, '__getitem__')
- and not (isinstance(attr, str) or isinstance(attr, unicode))):
+ elif (hasattr(attr, '__dict__') or
+ (hasattr(attr, '__getitem__') and not
+ (isinstance(attr, str) or isinstance(attr, unicode)))):
p = MockProxy(attr)
else:
p = attr
return self.__conn_wmi
def find_vhd_by_name(self, name):
- ''' Finds a volume by its name.'''
+ '''Finds a volume by its name.'''
wt_disks = self._conn_wmi.WT_Disk(Description=name)
return wt_disks
def volume_exists(self, name):
- ''' Checks if a volume exists.'''
+ '''Checks if a volume exists.'''
wt_disks = self.find_vhd_by_name(name)
if len(wt_disks) > 0:
return False
def snapshot_exists(self, name):
- ''' Checks if a snapshot exists.'''
+ '''Checks if a snapshot exists.'''
wt_snapshots = self.find_snapshot_by_name(name)
if len(wt_snapshots) > 0:
return False
def find_snapshot_by_name(self, name):
- ''' Finds a snapshot by its name.'''
+ '''Finds a snapshot by its name.'''
wt_snapshots = self._conn_wmi.WT_Snapshot(Description=name)
return wt_snapshots
def delete_volume(self, name):
- ''' Deletes a volume.'''
+ '''Deletes a volume.'''
wt_disk = self._conn_wmi.WT_Disk(Description=name)[0]
wt_disk.Delete_()
vhdfiles = self._conn_cimv2.query(
- "Select * from CIM_DataFile where Name = '" +
- self._get_vhd_path(name) + "'")
+ "Select * from CIM_DataFile where Name = '" +
+ self._get_vhd_path(name) + "'")
if len(vhdfiles) > 0:
vhdfiles[0].Delete()
def _get_vhd_path(self, volume_name):
- ''' Gets the path disk of the volume'''
+ '''Gets the path disk of the volume.'''
base_vhd_folder = FLAGS.windows_iscsi_lun_path
return os.path.join(base_vhd_folder, volume_name + ".vhd")
def delete_snapshot(self, name):
- ''' Deletes a snapshot.'''
+ '''Deletes a snapshot.'''
wt_snapshot = self._conn_wmi.WT_Snapshot(Description=name)[0]
wt_snapshot.Delete_()
vhdfile = self._conn_cimv2.query(
- "Select * from CIM_DataFile where Name = '" +
- self._get_vhd_path(name) + "'")[0]
+ "Select * from CIM_DataFile where Name = '" +
+ self._get_vhd_path(name) + "'")[0]
vhdfile.Delete()
def find_initiator_ids(self, target_name, initiator_name):
- ''' Finds a initiator id by its name.'''
+ '''Finds a initiator id by its name.'''
wt_idmethod = self._conn_wmi.WT_IDMethod(HostName=target_name,
Method=4,
Value=initiator_name)
return wt_idmethod
def initiator_id_exists(self, target_name, initiator_name):
- ''' Checks if a initiatorId exists.'''
+ '''Checks if a initiatorId exists.'''
wt_idmethod = self.find_initiator_ids(target_name, initiator_name)
if len(wt_idmethod) > 0:
return False
def find_exports(self, target_name):
- ''' Finds a export id by its name.'''
+ '''Finds a export id by its name.'''
wt_host = self._conn_wmi.WT_Host(HostName=target_name)
return wt_host
def export_exists(self, target_name):
- ''' Checks if a export exists.'''
+ '''Checks if a export exists.'''
wt_host = self.find_exports(target_name)
if len(wt_host) > 0:
return False
def delete_initiator_id(self, target_name, initiator_name):
- ''' Deletes a initiatorId.'''
+ '''Deletes a initiatorId.'''
wt_init_id = self.find_initiator_ids(target_name, initiator_name)[0]
wt_init_id.Delete_()
def delete_export(self, target_name):
- ''' Deletes an export.'''
+ '''Deletes an export.'''
wt_host = self.find_exports(target_name)[0]
wt_host.RemoveAllWTDisks()
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise exception.ProcessExecutionError(
- exit_code=_returncode,
- stdout=stdout,
- stderr=stderr,
- cmd=' '.join(cmd))
+ exit_code=_returncode,
+ stdout=stdout,
+ stderr=stderr,
+ cmd=' '.join(cmd))
return result
except exception.ProcessExecutionError:
if not attempts:
elif unit == 'day':
end = datetime.datetime(hour=offset,
- day=rightnow.day,
- month=rightnow.month,
- year=rightnow.year)
+ day=rightnow.day,
+ month=rightnow.month,
+ year=rightnow.year)
if end >= rightnow:
end = end - datetime.timedelta(days=1)
begin = end - datetime.timedelta(days=1)
def is_valid_boolstr(val):
"""Check if the provided string is a valid bool string or not. """
val = str(val).lower()
- return val == 'true' or val == 'false' or \
- val == 'yes' or val == 'no' or \
- val == 'y' or val == 'n' or \
- val == '1' or val == '0'
+ return (val == 'true' or val == 'false' or
+ val == 'yes' or val == 'no' or
+ val == 'y' or val == 'n' or
+ val == '1' or val == '0')
def is_valid_ipv4(address):
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
for method, func in inspect.getmembers(clz, inspect.ismethod):
- setattr(clz, method,
+ setattr(
+ clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
- decorator("%s.%s" % (module, key), func))
+ decorator("%s.%s" % (module, key), func))
def convert_to_list_dict(lst, label):
from cinder.volume import volume_types
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
- default=True,
- help='Create volume from snapshot at the host where snapshot resides')
+ default=True,
+ help='Create volume from snapshot at the host '
+ 'where snapshot resides')
FLAGS = flags.FLAGS
FLAGS.register_opt(volume_host_opt)
super(API, self).__init__(db_driver)
def create(self, context, size, name, description, snapshot=None,
- image_id=None, volume_type=None, metadata=None,
- availability_zone=None):
+ image_id=None, volume_type=None, metadata=None,
+ availability_zone=None):
check_policy(context, 'create')
if snapshot is not None:
if snapshot['status'] != "available":
elif 'volumes' in overs:
consumed = _consumed('volumes')
LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
- "volume (%(consumed)d volumes already consumed)")
- % locals())
+ "volume (%(consumed)d volumes "
+ "already consumed)") % locals())
raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
if availability_zone is None:
volume_type_id = volume_type.get('id')
- options = {
- 'size': size,
- 'user_id': context.user_id,
- 'project_id': context.project_id,
- 'snapshot_id': snapshot_id,
- 'availability_zone': availability_zone,
- 'status': "creating",
- 'attach_status': "detached",
- 'display_name': name,
- 'display_description': description,
- 'volume_type_id': volume_type_id,
- 'metadata': metadata,
- }
+ options = {'size': size,
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'snapshot_id': snapshot_id,
+ 'availability_zone': availability_zone,
+ 'status': "creating",
+ 'attach_status': "detached",
+ 'display_name': name,
+ 'display_description': description,
+ 'volume_type_id': volume_type_id,
+ 'metadata': metadata, }
try:
volume = self.db.volume_create(context, options)
finally:
QUOTAS.rollback(context, reservations)
- request_spec = {
- 'volume_properties': options,
- 'volume_type': volume_type,
- 'volume_id': volume['id'],
- 'snapshot_id': volume['snapshot_id'],
- 'image_id': image_id
- }
+ request_spec = {'volume_properties': options,
+ 'volume_type': volume_type,
+ 'volume_id': volume['id'],
+ 'snapshot_id': volume['snapshot_id'],
+ 'image_id': image_id}
filter_properties = {}
# bypass scheduler and send request directly to volume
self.volume_rpcapi.create_volume(context,
- volume_ref,
- volume_ref['host'],
- snapshot_id,
- image_id)
+ volume_ref,
+ volume_ref['host'],
+ snapshot_id,
+ image_id)
else:
- self.scheduler_rpcapi.create_volume(context,
- FLAGS.volume_topic,
- volume_id,
- snapshot_id,
- image_id,
- request_spec=request_spec,
- filter_properties=filter_properties)
+ self.scheduler_rpcapi.create_volume(
+ context,
+ FLAGS.volume_topic,
+ volume_id,
+ snapshot_id,
+ image_id,
+ request_spec=request_spec,
+ filter_properties=filter_properties)
@wrap_check_policy
def delete(self, context, volume, force=False):
for k, v in searchdict.iteritems():
if (k not in volume_metadata.keys() or
- volume_metadata[k] != v):
+ volume_metadata[k] != v):
return False
return True
@wrap_check_policy
def attach(self, context, volume, instance_uuid, mountpoint):
return self.volume_rpcapi.attach_volume(context,
- volume,
- instance_uuid,
- mountpoint)
+ volume,
+ instance_uuid,
+ mountpoint)
@wrap_check_policy
def detach(self, context, volume):
@wrap_check_policy
def initialize_connection(self, context, volume, connector):
return self.volume_rpcapi.initialize_connection(context,
- volume,
- connector)
+ volume,
+ connector)
@wrap_check_policy
def terminate_connection(self, context, volume, connector, force=False):
self.unreserve_volume(context, volume)
return self.volume_rpcapi.terminate_connection(context,
- volume,
- connector,
- force)
+ volume,
+ connector,
+ force)
def _create_snapshot(self, context, volume, name, description,
force=False):
msg = _("must be available")
raise exception.InvalidVolume(reason=msg)
- options = {
- 'volume_id': volume['id'],
- 'user_id': context.user_id,
- 'project_id': context.project_id,
- 'status': "creating",
- 'progress': '0%',
- 'volume_size': volume['size'],
- 'display_name': name,
- 'display_description': description}
+ options = {'volume_id': volume['id'],
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'status': "creating",
+ 'progress': '0%',
+ 'volume_size': volume['size'],
+ 'display_name': name,
+ 'display_description': description}
snapshot = self.db.snapshot_create(context, options)
self.volume_rpcapi.create_snapshot(context, volume, snapshot)
recv_metadata = self.image_service.create(context, metadata)
self.update(context, volume, {'status': 'uploading'})
- self.volume_rpcapi.copy_volume_to_image(context, volume,
- recv_metadata['id'])
+ self.volume_rpcapi.copy_volume_to_image(context,
+ volume,
+ recv_metadata['id'])
response = {"id": volume['id'],
- "updated_at": volume['updated_at'],
- "status": 'uploading',
- "display_description": volume['display_description'],
- "size": volume['size'],
- "volume_type": volume['volume_type'],
- "image_id": recv_metadata['id'],
- "container_format": recv_metadata['container_format'],
- "disk_format": recv_metadata['disk_format'],
- "image_name": recv_metadata.get('name', None)
- }
+ "updated_at": volume['updated_at'],
+ "status": 'uploading',
+ "display_description": volume['display_description'],
+ "size": volume['size'],
+ "volume_type": volume['volume_type'],
+ "image_id": recv_metadata['id'],
+ "container_format": recv_metadata['container_format'],
+ "disk_format": recv_metadata['disk_format'],
+ "image_name": recv_metadata.get('name', None)}
return response
help='use this ip for iscsi'),
cfg.IntOpt('iscsi_port',
default=3260,
- help='The port that the iSCSI daemon is listening on'),
- ]
+ help='The port that the iSCSI daemon is listening on'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(volume_opts)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
out, err = self._execute('vgs', '--noheadings', '-o', 'name',
- run_as_root=True)
+ run_as_root=True)
volume_groups = out.split()
if not FLAGS.volume_group in volume_groups:
exception_message = (_("volume group %s doesn't exist")
- % FLAGS.volume_group)
+ % FLAGS.volume_group)
raise exception.VolumeBackendAPIException(data=exception_message)
def _create_volume(self, volume_name, sizestr):
# cooresponding target admin class
if not isinstance(self.tgtadm, iscsi.TgtAdm):
try:
- iscsi_target = self.db.volume_get_iscsi_target_num(context,
- volume['id'])
+ iscsi_target = self.db.volume_get_iscsi_target_num(
+ context,
+ volume['id'])
except exception.NotFound:
LOG.info(_("Skipping ensure_export. No iscsi_target "
"provisioned for volume: %s"), volume['id'])
old_name = None
volume_name = volume['name']
if (volume['provider_location'] is not None and
- volume['name'] not in volume['provider_location']):
+ volume['name'] not in volume['provider_location']):
msg = _('Detected inconsistency in provider_location id')
LOG.debug(msg)
# cooresponding target admin class
if not isinstance(self.tgtadm, iscsi.TgtAdm):
try:
- iscsi_target = self.db.volume_get_iscsi_target_num(context,
- volume['id'])
+ iscsi_target = self.db.volume_get_iscsi_target_num(
+ context,
+ volume['id'])
except exception.NotFound:
LOG.info(_("Skipping remove_export. No iscsi_target "
"provisioned for volume: %s"), volume['id'])
cfg.StrOpt('netapp_storage_service',
default=None,
help=('Storage service to use for provisioning '
- '(when volume_type=None)')),
+ '(when volume_type=None)')),
cfg.StrOpt('netapp_storage_service_prefix',
default=None,
help=('Prefix of storage service name to use for '
- 'provisioning (volume_type name will be appended)')),
+ 'provisioning (volume_type name will be appended)')),
cfg.StrOpt('netapp_vfiler',
default=None,
- help='Vfiler to use for provisioning'),
- ]
+ help='Vfiler to use for provisioning'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(netapp_opts)
def _check_flags(self):
"""Ensure that the flags we care about are set."""
required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
- 'netapp_server_hostname', 'netapp_server_port']
+ 'netapp_server_hostname', 'netapp_server_port']
for flag in required_flags:
if not getattr(FLAGS, flag, None):
raise exception.InvalidInput(reason=_('%s is not set') % flag)
if not (FLAGS.netapp_storage_service or
FLAGS.netapp_storage_service_prefix):
- raise exception.InvalidInput(reason=_('Either '
- 'netapp_storage_service or netapp_storage_service_prefix must '
- 'be set'))
+ raise exception.InvalidInput(
+ reason=_('Either '
+ 'netapp_storage_service or '
+ 'netapp_storage_service_prefix must '
+ 'be set'))
def do_setup(self, context):
"""Setup the NetApp Volume driver.
client.
"""
self._check_flags()
- self._create_client(wsdl_url=FLAGS.netapp_wsdl_url,
+ self._create_client(
+ wsdl_url=FLAGS.netapp_wsdl_url,
login=FLAGS.netapp_login, password=FLAGS.netapp_password,
hostname=FLAGS.netapp_server_hostname,
port=FLAGS.netapp_server_port, cache=True)
"""Discover all of the LUNs in a dataset."""
server = self.client.service
res = server.DatasetMemberListInfoIterStart(
- DatasetNameOrId=dataset.id,
- IncludeExportsInfo=True,
- IncludeIndirect=True,
- MemberType='lun_path')
+ DatasetNameOrId=dataset.id,
+ IncludeExportsInfo=True,
+ IncludeIndirect=True,
+ MemberType='lun_path')
tag = res.Tag
suffix = None
if volume:
res = server.DatasetMemberListInfoIterNext(Tag=tag,
Maximum=100)
if (not hasattr(res, 'DatasetMembers') or
- not res.DatasetMembers):
+ not res.DatasetMembers):
break
for member in res.DatasetMembers.DatasetMemberInfo:
if suffix and not member.MemberName.endswith(suffix):
"""
if ss_type and not self.storage_service_prefix:
msg = _('Attempt to use volume_type without specifying '
- 'netapp_storage_service_prefix flag.')
+ 'netapp_storage_service_prefix flag.')
raise exception.VolumeBackendAPIException(data=msg)
if not (ss_type or self.storage_service):
msg = _('You must set the netapp_storage_service flag in order to '
- 'create volumes with no volume_type.')
+ 'create volumes with no volume_type.')
raise exception.VolumeBackendAPIException(data=msg)
storage_service = self.storage_service
if ss_type:
metadata.DfmMetadataField = [field1, field2]
res = self.client.service.StorageServiceDatasetProvision(
- StorageServiceNameOrId=storage_service,
- DatasetName=dataset_name,
- AssumeConfirmation=True,
- StorageSetDetails=details,
- DatasetMetadata=metadata)
+ StorageServiceNameOrId=storage_service,
+ DatasetName=dataset_name,
+ AssumeConfirmation=True,
+ StorageSetDetails=details,
+ DatasetMetadata=metadata)
ds = DfmDataset(res.DatasetId, dataset_name, project, ss_type)
self.discovered_datasets.append(ds)
igroup_infos = igroups[0]['initiator-group-info']
for igroup_info in igroup_infos:
if ('iscsi' != igroup_info['initiator-group-type'][0] or
- 'linux' != igroup_info['initiator-group-os-type'][0]):
+ 'linux' != igroup_info['initiator-group-os-type'][0]):
continue
igroup_name = igroup_info['initiator-group-name'][0]
if not igroup_name.startswith(self.IGROUP_PREFIX):
request.Name = 'lun-map-list-info'
request.Args = text.Raw('<path>%s</path>' % (lunpath))
response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
+ Request=request)
self._check_fail(request, response)
igroups = response.Results['initiator-groups']
if self._api_elem_is_empty(igroups):
'<volume-uuid>%s</volume-uuid>'
'</clone-id-info></clone-id>')
request.Args = text.Raw(clone_list_status_xml % (clone_op_id,
- volume_uuid))
+ volume_uuid))
response = self.client.service.ApiProxy(Target=host_id,
Request=request)
self._check_fail(request, response)
else:
no_snap = 'true'
request.Args = text.Raw(clone_start_xml % (src_path, no_snap,
- dest_path))
+ dest_path))
response = self.client.service.ApiProxy(Target=host_id,
Request=request)
self._check_fail(request, response)
snap_size = snapshot['volume_size']
if vol_size != snap_size:
msg = _('Cannot create volume of size %(vol_size)s from '
- 'snapshot of size %(snap_size)s')
+ 'snapshot of size %(snap_size)s')
raise exception.VolumeBackendAPIException(data=msg % locals())
vol_name = snapshot['volume_name']
snapshot_name = snapshot['name']
new_type = self._get_ss_type(volume)
if new_type != old_type:
msg = _('Cannot create volume of type %(new_type)s from '
- 'snapshot of type %(old_type)s')
+ 'snapshot of type %(old_type)s')
raise exception.VolumeBackendAPIException(data=msg % locals())
lun = self._get_lun_details(lun_id)
extra_gb = vol_size
def _check_flags(self):
"""Ensure that the flags we care about are set."""
required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
- 'netapp_server_hostname', 'netapp_server_port']
+ 'netapp_server_hostname', 'netapp_server_port']
for flag in required_flags:
if not getattr(FLAGS, flag, None):
msg = _('%s is not set') % flag
client.
"""
self._check_flags()
- self._create_client(wsdl_url=FLAGS.netapp_wsdl_url,
+ self._create_client(
+ wsdl_url=FLAGS.netapp_wsdl_url,
login=FLAGS.netapp_login, password=FLAGS.netapp_password,
hostname=FLAGS.netapp_server_hostname,
port=FLAGS.netapp_server_port, cache=True)
meta_dict = {}
if hasattr(lun, 'Metadata'):
meta_dict = self._create_dict_from_meta(lun.Metadata)
- discovered_lun = NetAppLun(lun.Handle, lun.Name, lun.Size,
- meta_dict)
+ discovered_lun = NetAppLun(lun.Handle,
+ lun.Name,
+ lun.Size,
+ meta_dict)
self._add_lun_to_table(discovered_lun)
LOG.debug(_("Success getting LUN list from server"))
lun = server.ProvisionLun(Name=name, Size=size,
Metadata=metadata)
LOG.debug(_("Created LUN with name %s") % name)
- self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name,
- lun.Size, self._create_dict_from_meta(lun.Metadata)))
+ self._add_lun_to_table(
+ NetAppLun(lun.Handle,
+ lun.Name,
+ lun.Size,
+ self._create_dict_from_meta(lun.Metadata)))
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
msg = _("Mapped LUN %(handle)s to the initiator %(initiator_name)s")
LOG.debug(msg % locals())
- target_details_list = server.GetLunTargetDetails(Handle=handle,
- InitiatorType="iscsi", InitiatorName=initiator_name)
+ target_details_list = server.GetLunTargetDetails(
+ Handle=handle,
+ InitiatorType="iscsi",
+ InitiatorName=initiator_name)
msg = _("Succesfully fetched target details for LUN %(handle)s and "
"initiator %(initiator_name)s")
LOG.debug(msg % locals())
lun = server.CloneLun(Handle=handle, NewName=new_name,
Metadata=metadata)
LOG.debug(_("Cloned LUN with new name %s") % new_name)
- self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name,
- lun.Size, self._create_dict_from_meta(lun.Metadata)))
+ self._add_lun_to_table(
+ NetAppLun(lun.Handle,
+ lun.Name,
+ lun.Size,
+ self._create_dict_from_meta(lun.Metadata)))
def _create_metadata_list(self, extra_args):
"""Creates metadata from kwargs."""
netapp_nfs_opts = [
cfg.IntOpt('synchronous_snapshot_create',
default=0,
- help='Does snapshot creation call returns immediately')
- ]
+ help='Does snapshot creation call returns immediately')]
FLAGS = flags.FLAGS
FLAGS.register_opts(netapp_opts)
if vol_size != snap_size:
msg = _('Cannot create volume of size %(vol_size)s from '
- 'snapshot of size %(snap_size)s')
+ 'snapshot of size %(snap_size)s')
raise exception.CinderException(msg % locals())
self._clone_volume(snapshot.name, volume.name, snapshot.volume_id)
client = suds.client.Client(FLAGS.netapp_wsdl_url,
username=FLAGS.netapp_login,
password=FLAGS.netapp_password)
- soap_url = 'http://%s:%s/apis/soap/v1' % (
- FLAGS.netapp_server_hostname,
- FLAGS.netapp_server_port)
+ soap_url = 'http://%s:%s/apis/soap/v1' % (FLAGS.netapp_server_hostname,
+ FLAGS.netapp_server_port)
client.set_options(location=soap_url)
return client
clone_name))
resp = self._client.service.ApiProxy(Target=host_id,
- Request=request)
+ Request=request)
if resp.Status == 'passed' and FLAGS.synchronous_snapshot_create:
clone_id = resp.Results['clone-id'][0]
:param clone_operation_id: Identifier of ONTAP clone operation
"""
clone_list_options = ('<clone-id>'
- '<clone-id-info>'
- '<clone-op-id>%d</clone-op-id>'
- '<volume-uuid></volume-uuid>'
- '</clone-id>'
+ '<clone-id-info>'
+ '<clone-op-id>%d</clone-op-id>'
+ '<volume-uuid></volume-uuid>'
+ '</clone-id>'
'</clone-id-info>')
request = self._client.factory.create('Request')
while resp.Status != 'passed':
time.sleep(1)
resp = self._client.service.ApiProxy(Target=host_id,
- Request=request)
+ Request=request)
def _get_provider_location(self, volume_id):
"""
request.Args = text.Raw(command_args % export_path)
resp = self._client.service.ApiProxy(Target=host_id,
- Request=request)
+ Request=request)
if resp.Status == 'passed':
return resp.Results['actual-pathname'][0]
def __call__(self, *args):
data = jsonutils.dumps({'object': self.obj,
- 'method': self.method,
- 'params': args})
+ 'method': self.method,
+ 'params': args})
auth = ('%s:%s' % (self.user, self.password)).encode('base64')[:-1]
headers = {'Content-Type': 'application/json',
'Authorization': 'Basic %s' % (auth,)}
if response_obj.info().status == 'EOF in headers':
if self.auto and self.url.startswith('http://'):
LOG.info(_('Auto switching to HTTPS connection to %s'),
- self.url)
+ self.url)
self.url = 'https' + self.url[4:]
request = urllib2.Request(self.url, data, headers)
response_obj = urllib2.urlopen(request)
nexenta_opts = [
cfg.StrOpt('nexenta_host',
- default='',
- help='IP address of Nexenta SA'),
+ default='',
+ help='IP address of Nexenta SA'),
cfg.IntOpt('nexenta_rest_port',
default=2000,
help='HTTP port to connect to Nexenta REST API server'),
"""
if not self.nms.volume.object_exists(FLAGS.nexenta_volume):
raise LookupError(_("Volume %s does not exist in Nexenta SA"),
- FLAGS.nexenta_volume)
+ FLAGS.nexenta_volume)
@staticmethod
def _get_zvol_name(volume_name):
raise
else:
LOG.info(_('Ignored target creation error "%s"'
- ' while ensuring export'), exc)
+ ' while ensuring export'), exc)
try:
self.nms.stmf.create_targetgroup(target_group_name)
except nexenta.NexentaException as exc:
raise
else:
LOG.info(_('Ignored target group creation error "%s"'
- ' while ensuring export'), exc)
+ ' while ensuring export'), exc)
try:
self.nms.stmf.add_targetgroup_member(target_group_name,
target_name)
raise
else:
LOG.info(_('Ignored target group member addition error "%s"'
- ' while ensuring export'), exc)
+ ' while ensuring export'), exc)
try:
self.nms.scsidisk.create_lu(zvol_name, {})
except nexenta.NexentaException as exc:
raise
else:
LOG.info(_('Ignored LU creation error "%s"'
- ' while ensuring export'), exc)
+ ' while ensuring export'), exc)
try:
self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
'target_group': target_group_name,
raise
else:
LOG.info(_('Ignored LUN mapping entry addition error "%s"'
- ' while ensuring export'), exc)
+ ' while ensuring export'), exc)
return '%s:%s,1 %s' % (FLAGS.nexenta_host,
FLAGS.nexenta_iscsi_target_portal_port,
target_name)
except nexenta.NexentaException as exc:
# We assume that target group is already gone
LOG.warn(_('Got error trying to destroy target group'
- ' %(target_group)s, assuming it is already gone: %(exc)s'),
- {'target_group': target_group_name, 'exc': exc})
+ ' %(target_group)s, assuming it is '
+ 'already gone: %(exc)s'),
+ {'target_group': target_group_name, 'exc': exc})
try:
self.nms.iscsitarget.delete_target(target_name)
except nexenta.NexentaException as exc:
# We assume that target is gone as well
LOG.warn(_('Got error trying to delete target %(target)s,'
- ' assuming it is already gone: %(exc)s'),
- {'target': target_name, 'exc': exc})
+ ' assuming it is already gone: %(exc)s'),
+ {'target': target_name, 'exc': exc})
volume_opts = [
cfg.StrOpt('nfs_shares_config',
- default=None,
- help='File with the list of available nfs shares'),
+ default=None,
+ help='File with the list of available nfs shares'),
cfg.StrOpt('nfs_mount_point_base',
default='$state_path/mnt',
help='Base dir where nfs expected to be mounted'),
default=True,
help=('Create volumes as sparsed files which take no space.'
'If set to False volume is created as regular file.'
- 'In such case volume creation takes a lot of time.'))
-]
+ 'In such case volume creation takes a lot of time.'))]
FLAGS = flags.FLAGS
FLAGS.register_opts(volume_opts)
if volume_size_for * 1024 * 1024 * 1024 > greatest_size:
raise exception.NfsNoSuitableShareFound(
- volume_size=volume_size_for)
+ volume_size=volume_size_for)
return greatest_share
def _get_mount_point_for_share(self, nfs_share):
cfg.StrOpt('volume_tmp_dir',
default=None,
help='where to store temporary image files if the volume '
- 'driver does not write them directly to the volume'),
- ]
+ 'driver does not write them directly to the volume'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(rbd_opts)
'auth_enabled': FLAGS.rbd_secret_uuid is not None,
'auth_username': FLAGS.rbd_user,
'secret_type': 'ceph',
- 'secret_uuid': FLAGS.rbd_secret_uuid,
- }
+ 'secret_uuid': FLAGS.rbd_secret_uuid, }
}
def terminate_connection(self, volume, connector, **kwargs):
while attempts > 0:
attempts -= 1
try:
- return utils.ssh_execute(ssh, command,
- check_exit_code=check_exit_code)
+ return utils.ssh_execute(
+ ssh,
+ command,
+ check_exit_code=check_exit_code)
except Exception as e:
LOG.error(e)
greenthread.sleep(random.randint(20, 500) / 100.0)
solaris_opts = [
cfg.StrOpt('san_zfs_volume_base',
default='rpool/',
- help='The ZFS path under which to create zvols for volumes.'),
- ]
+ help='The ZFS path under which to create zvols for volumes.'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(solaris_opts)
help='Password for SF Cluster Admin'),
cfg.BoolOpt('sf_allow_tenant_qos',
- default=True,
- help='Allow tenants to specify QOS on create'), ]
+ default=True,
+ help='Allow tenants to specify QOS on create'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(sf_opts)
default='0',
help='Storage system threshold for volume capacity warnings'),
cfg.BoolOpt('storwize_svc_vol_autoexpand',
- default=True,
- help='Storage system autoexpand parameter for volumes '
- '(True/False)'),
+ default=True,
+ help='Storage system autoexpand parameter for volumes '
+ '(True/False)'),
cfg.StrOpt('storwize_svc_vol_grainsize',
default='256',
help='Storage system grain size parameter for volumes '
'(32/64/128/256)'),
cfg.BoolOpt('storwize_svc_vol_compression',
- default=False,
- help='Storage system compression option for volumes'),
+ default=False,
+ help='Storage system compression option for volumes'),
cfg.BoolOpt('storwize_svc_vol_easytier',
- default=True,
- help='Enable Easy Tier for volumes'),
+ default=True,
+ help='Enable Easy Tier for volumes'),
cfg.StrOpt('storwize_svc_flashcopy_timeout',
default='120',
help='Maximum number of seconds to wait for FlashCopy to be '
- 'prepared. Maximum value is 600 seconds (10 minutes).'),
-]
+ 'prepared. Maximum value is 600 seconds (10 minutes).'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(storwize_svc_opts)
if ((not ch.isalnum()) and (ch != ' ') and (ch != '.')
and (ch != '-') and (ch != '_')):
invalid_ch_in_host = invalid_ch_in_host + ch
- self._string_host_name_filter = string.maketrans(invalid_ch_in_host,
- '-' * len(invalid_ch_in_host))
+ self._string_host_name_filter = string.maketrans(
+ invalid_ch_in_host, '-' * len(invalid_ch_in_host))
self._unicode_host_name_filter = dict((ord(unicode(char)), u'-')
- for char in invalid_ch_in_host)
+ for char in invalid_ch_in_host)
def _get_hdr_dic(self, header, row, delim):
"""Return CLI row data as a dictionary indexed by names from header.
attributes = header.split(delim)
values = row.split(delim)
- self._driver_assert(len(values) == len(attributes),
+ self._driver_assert(
+ len(values) ==
+ len(attributes),
_('_get_hdr_dic: attribute headers and values do not match.\n '
'Headers: %(header)s\n Values: %(row)s')
- % {'header': str(header),
- 'row': str(row)})
+ % {'header': str(header),
+ 'row': str(row)})
dic = {}
for attribute, value in map(None, attributes, values):
dic[attribute] = value
# Validate that the pool exists
ssh_cmd = 'lsmdiskgrp -delim ! -nohdr'
out, err = self._run_ssh(ssh_cmd)
- self._driver_assert(len(out) > 0,
+ self._driver_assert(
+ len(out) > 0,
_('check_for_setup_error: failed with unexpected CLI output.\n '
'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
+ % {'cmd': ssh_cmd,
+ 'out': str(out),
+ 'err': str(err)})
search_text = '!%s!' % FLAGS.storwize_svc_volpool_name
if search_text not in out:
raise exception.InvalidInput(
- reason=(_('pool %s doesn\'t exist')
+ reason=(_('pool %s doesn\'t exist')
% FLAGS.storwize_svc_volpool_name))
storage_nodes = {}
# Get the iSCSI names of the Storwize/SVC nodes
ssh_cmd = 'svcinfo lsnode -delim !'
out, err = self._run_ssh(ssh_cmd)
- self._driver_assert(len(out) > 0,
+ self._driver_assert(
+ len(out) > 0,
_('check_for_setup_error: failed with unexpected CLI output.\n '
'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
+ % {'cmd': ssh_cmd,
+ 'out': str(out),
+ 'err': str(err)})
nodes = out.strip().split('\n')
- self._driver_assert(len(nodes) > 0,
+ self._driver_assert(
+ len(nodes) > 0,
_('check_for_setup_error: failed with unexpected CLI output.\n '
'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
+ % {'cmd': ssh_cmd,
+ 'out': str(out),
+ 'err': str(err)})
header = nodes.pop(0)
for node_line in nodes:
try:
'Details: %(msg)s\n'
'Command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s')
- % {'msg': str(e),
- 'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
+ % {'msg': str(e),
+ 'cmd': ssh_cmd,
+ 'out': str(out),
+ 'err': str(err)})
raise exception.VolumeBackendAPIException(
- data=exception_message)
+ data=exception_message)
# Get the iSCSI IP addresses of the Storwize/SVC nodes
ssh_cmd = 'lsportip -delim !'
out, err = self._run_ssh(ssh_cmd)
- self._driver_assert(len(out) > 0,
+ self._driver_assert(
+ len(out) > 0,
_('check_for_setup_error: failed with unexpected CLI output.\n '
'Command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
+ % {'cmd': ssh_cmd,
+ 'out': str(out),
+ 'err': str(err)})
portips = out.strip().split('\n')
- self._driver_assert(len(portips) > 0,
+ self._driver_assert(
+ len(portips) > 0,
_('check_for_setup_error: failed with unexpected CLI output.\n '
'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
+ % {'cmd': ssh_cmd,
+ 'out': str(out),
+ 'err': str(err)})
header = portips.pop(0)
for portip_line in portips:
try:
'Details: %(msg)s\n'
'Command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s')
- % {'msg': str(e),
- 'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
+ % {'msg': str(e),
+ 'cmd': ssh_cmd,
+ 'out': str(out),
+ 'err': str(err)})
raise exception.VolumeBackendAPIException(
- data=exception_message)
+ data=exception_message)
if port_node_id in storage_nodes:
node = storage_nodes[port_node_id]
node['ipv6'].append(port_ipv6)
else:
raise exception.VolumeBackendAPIException(
- data=_('check_for_setup_error: '
- 'fail to storage configuration: unknown '
- 'storage node %(node_id)s from CLI output.\n '
- 'stdout: %(out)s\n stderr: %(err)s\n')
- % {'node_id': port_node_id,
- 'out': str(out),
- 'err': str(err)})
+ data=_('check_for_setup_error: '
+ 'fail to storage configuration: unknown '
+ 'storage node %(node_id)s from CLI output.\n '
+ 'stdout: %(out)s\n stderr: %(err)s\n') % {
+ 'node_id': port_node_id,
+ 'out': str(out),
+ 'err': str(err)})
iscsi_ipv4_conf = []
iscsi_ipv6_conf = []
'node_id': node['id']})
if (len(node['ipv4']) == 0) and (len(node['ipv6']) == 0):
raise exception.VolumeBackendAPIException(
- data=_('check_for_setup_error: '
- 'fail to storage configuration: storage '
- 'node %s has no IP addresses configured')
- % node['id'])
+ data=_('check_for_setup_error: '
+ 'fail to storage configuration: storage '
+ 'node %s has no IP addresses configured') %
+ node['id'])
# Make sure we have at least one IPv4 address with a iSCSI name
# TODO(ronenkat) need to expand this to support IPv6
- self._driver_assert(len(iscsi_ipv4_conf) > 0,
+ self._driver_assert(
+ len(iscsi_ipv4_conf) > 0,
_('could not obtain IP address and iSCSI name from the storage. '
'Please verify that the storage is configured for iSCSI.\n '
'Storage nodes: %(nodes)s\n portips: %(portips)s')
- % {'nodes': nodes, 'portips': portips})
+ % {'nodes': nodes, 'portips': portips})
self.iscsi_ipv4_conf = iscsi_ipv4_conf
self.iscsi_ipv6_conf = iscsi_ipv6_conf
'storwize_svc_volpool_name']
for flag in required_flags:
if not getattr(FLAGS, flag, None):
- raise exception.InvalidInput(
- reason=_('%s is not set') % flag)
+ raise exception.InvalidInput(reason=_('%s is not set') % flag)
# Ensure that either password or keyfile were set
if not (FLAGS.san_password or FLAGS.san_private_key):
# Check that flashcopy_timeout is numeric and 32/64/128/256
flashcopy_timeout = FLAGS.storwize_svc_flashcopy_timeout
if not (flashcopy_timeout.isdigit() and int(flashcopy_timeout) > 0 and
- int(flashcopy_timeout) <= 600):
+ int(flashcopy_timeout) <= 600):
raise exception.InvalidInput(
reason=_('Illegal value %s specified for '
'storwize_svc_flashcopy_timeout: '
'valid values are between 0 and 600')
- % flashcopy_timeout)
+ % flashcopy_timeout)
# Check that rsize is set
volume_compression = FLAGS.storwize_svc_vol_compression
- if ((volume_compression == True) and
+ if ((volume_compression is True) and
(FLAGS.storwize_svc_vol_rsize == '-1')):
raise exception.InvalidInput(
reason=_('If compression is set to True, rsize must '
size = int(volume['size'])
- if FLAGS.storwize_svc_vol_autoexpand == True:
+ if FLAGS.storwize_svc_vol_autoexpand is True:
autoex = '-autoexpand'
else:
autoex = ''
- if FLAGS.storwize_svc_vol_easytier == True:
+ if FLAGS.storwize_svc_vol_easytier is True:
easytier = '-easytier on'
else:
easytier = '-easytier off'
if FLAGS.storwize_svc_vol_rsize.strip() == '-1':
ssh_cmd_se_opt = ''
else:
- ssh_cmd_se_opt = ('-rsize %(rsize)s %(autoex)s -warning %(warn)s' %
- {'rsize': FLAGS.storwize_svc_vol_rsize,
- 'autoex': autoex,
- 'warn': FLAGS.storwize_svc_vol_warning})
+ ssh_cmd_se_opt = (
+ '-rsize %(rsize)s %(autoex)s -warning %(warn)s' %
+ {'rsize': FLAGS.storwize_svc_vol_rsize,
+ 'autoex': autoex,
+ 'warn': FLAGS.storwize_svc_vol_warning})
if FLAGS.storwize_svc_vol_compression:
ssh_cmd_se_opt = ssh_cmd_se_opt + ' -compressed'
else:
- ssh_cmd_se_opt = ssh_cmd_se_opt + (' -grainsize %(grain)s' %
- {'grain': FLAGS.storwize_svc_vol_grainsize})
+ ssh_cmd_se_opt = ssh_cmd_se_opt + (
+ ' -grainsize %(grain)s' %
+ {'grain': FLAGS.storwize_svc_vol_grainsize})
ssh_cmd = ('mkvdisk -name %(name)s -mdiskgrp %(mdiskgrp)s '
- '-iogrp 0 -size %(size)s -unit '
- '%(unit)s %(easytier)s %(ssh_cmd_se_opt)s'
- % {'name': name,
- 'mdiskgrp': FLAGS.storwize_svc_volpool_name,
- 'size': size, 'unit': units, 'easytier': easytier,
- 'ssh_cmd_se_opt': ssh_cmd_se_opt})
+ '-iogrp 0 -size %(size)s -unit '
+ '%(unit)s %(easytier)s %(ssh_cmd_se_opt)s'
+ % {'name': name,
+ 'mdiskgrp': FLAGS.storwize_svc_volpool_name,
+ 'size': size, 'unit': units, 'easytier': easytier,
+ 'ssh_cmd_se_opt': ssh_cmd_se_opt})
out, err = self._run_ssh(ssh_cmd)
- self._driver_assert(len(out.strip()) > 0,
+ self._driver_assert(
+ len(out.strip()) > 0,
_('create volume %(name)s - did not find '
'success message in CLI output.\n '
'stdout: %(out)s\n stderr: %(err)s')
- % {'name': name, 'out': str(out), 'err': str(err)})
+ % {'name': name, 'out': str(out), 'err': str(err)})
# Ensure that the output is as expected
match_obj = re.search('Virtual Disk, id \[([0-9]+)\], '
- 'successfully created', out)
+ 'successfully created', out)
# Make sure we got a "successfully created" message with vdisk id
- self._driver_assert(match_obj is not None,
+ self._driver_assert(
+ match_obj is not None,
_('create volume %(name)s - did not find '
'success message in CLI output.\n '
'stdout: %(out)s\n stderr: %(err)s')
- % {'name': name, 'out': str(out), 'err': str(err)})
+ % {'name': name, 'out': str(out), 'err': str(err)})
LOG.debug(_('leave: create_volume: volume %(name)s ') % {'name': name})
volume_defined = self._is_volume_defined(name)
# Try to delete volume only if found on the storage
if volume_defined:
- out, err = self._run_ssh('rmvdisk %(force)s %(name)s'
- % {'force': force_flag,
- 'name': name})
+ out, err = self._run_ssh(
+ 'rmvdisk %(force)s %(name)s'
+ % {'force': force_flag,
+ 'name': name})
# No output should be returned from rmvdisk
- self._driver_assert(len(out.strip()) == 0,
+ self._driver_assert(
+ len(out.strip()) == 0,
_('delete volume %(name)s - non empty output from CLI.\n '
'stdout: %(out)s\n stderr: %(err)s')
- % {'name': name,
- 'out': str(out),
- 'err': str(err)})
+ % {'name': name,
+ 'out': str(out),
+ 'err': str(err)})
else:
# Log that volume does not exist
LOG.info(_('warning: tried to delete volume %(name)s but '
volume_defined = self._is_volume_defined(volume['name'])
if not volume_defined:
LOG.error(_('ensure_export: volume %s not found on storage')
- % volume['name'])
+ % volume['name'])
def create_export(self, context, volume):
model_update = None
"""
LOG.debug(_('enter: initialize_connection: volume %(vol)s with '
'connector %(conn)s') % {'vol': str(volume),
- 'conn': str(connector)})
+ 'conn': str(connector)})
initiator_name = connector['initiator']
volume_name = volume['name']
if host_name is None:
# Host does not exist - add a new host to Storwize/SVC
host_name = self._create_new_host('host%s' % initiator_name,
- initiator_name)
+ initiator_name)
# Verify that create_new_host succeeded
- self._driver_assert(host_name is not None,
+ self._driver_assert(
+ host_name is not None,
_('_create_new_host failed to return the host name.'))
lun_id = self._map_vol_to_host(volume_name, host_name)
# TODO(ronenkat): Add support for IPv6
volume_attributes = self._get_volume_attributes(volume_name)
if (volume_attributes is not None and
- 'preferred_node_id' in volume_attributes):
+ 'preferred_node_id' in volume_attributes):
preferred_node = volume_attributes['preferred_node_id']
preferred_node_entry = None
for node in self.iscsi_ipv4_conf:
LOG.error(_('initialize_connection: did not find preferred '
'node %(node)s for volume %(vol)s in iSCSI '
'configuration') % {'node': preferred_node,
- 'vol': volume_name})
+ 'vol': volume_name})
else:
# Get 1st node
preferred_node_entry = self.iscsi_ipv4_conf[0]
properties['target_discovered'] = False
# We take the first IP address for now. Ideally, OpenStack will
# support multipath for improved performance.
- properties['target_portal'] = ('%s:%s' %
- (preferred_node_entry['ip'][0], '3260'))
+ properties['target_portal'] = (
+ '%s:%s' % (preferred_node_entry['ip'][0], '3260'))
properties['target_iqn'] = preferred_node_entry['iscsi_name']
properties['target_lun'] = lun_id
properties['volume_id'] = volume['id']
LOG.debug(_('leave: initialize_connection:\n volume: %(vol)s\n '
'connector %(conn)s\n properties: %(prop)s')
% {'vol': str(volume),
- 'conn': str(connector),
- 'prop': str(properties)})
+ 'conn': str(connector),
+ 'prop': str(properties)})
return {'driver_volume_type': 'iscsi', 'data': properties, }
"""
LOG.debug(_('enter: terminate_connection: volume %(vol)s with '
'connector %(conn)s') % {'vol': str(volume),
- 'conn': str(connector)})
+ 'conn': str(connector)})
vol_name = volume['name']
initiator_name = connector['initiator']
host_name = self._get_host_from_iscsiname(initiator_name)
# Verify that _get_host_from_iscsiname returned the host.
# This should always succeed as we terminate an existing connection.
- self._driver_assert(host_name is not None,
+ self._driver_assert(
+ host_name is not None,
_('_get_host_from_iscsiname failed to return the host name '
'for iscsi name %s') % initiator_name)
% (host_name, vol_name))
# Verify CLI behaviour - no output is returned from
# rmvdiskhostmap
- self._driver_assert(len(out.strip()) == 0,
+ self._driver_assert(
+ len(out.strip()) == 0,
_('delete mapping of volume %(vol)s to host %(host)s '
'- non empty output from CLI.\n '
'stdout: %(out)s\n stderr: %(err)s')
- % {'vol': vol_name,
- 'host': host_name,
- 'out': str(out),
- 'err': str(err)})
+ % {'vol': vol_name,
+ 'host': host_name,
+ 'out': str(out),
+ 'err': str(err)})
del mapping_data[vol_name]
else:
LOG.error(_('terminate_connection: no mapping of volume '
'%(vol)s to host %(host)s found') %
- {'vol': vol_name, 'host': host_name})
+ {'vol': vol_name, 'host': host_name})
# If this host has no more mappings, delete it
if not mapping_data:
LOG.debug(_('leave: terminate_connection: volume %(vol)s with '
'connector %(conn)s') % {'vol': str(volume),
- 'conn': str(connector)})
+ 'conn': str(connector)})
def _flashcopy_cleanup(self, fc_map_id, source, target):
"""Clean up a failed FlashCopy operation."""
'mapping %(fc_map_id)% '
'from %(source)s to %(target)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
- % {'fc_map_id': fc_map_id,
- 'source': source,
- 'target': target,
- 'out': e.stdout,
- 'err': e.stderr})
+ % {'fc_map_id': fc_map_id,
+ 'source': source,
+ 'target': target,
+ 'out': e.stdout,
+ 'err': e.stderr})
def _run_flashcopy(self, source, target):
"""Create a FlashCopy mapping from the source to the target."""
LOG.debug(
_('enter: _run_flashcopy: execute FlashCopy from source '
'%(source)s to target %(target)s') % {'source': source,
- 'target': target})
+ 'target': target})
fc_map_cli_cmd = ('mkfcmap -source %s -target %s -autodelete '
- '-cleanrate 0' % (source, target))
+ '-cleanrate 0' % (source, target))
out, err = self._run_ssh(fc_map_cli_cmd)
- self._driver_assert(len(out.strip()) > 0,
+ self._driver_assert(
+ len(out.strip()) > 0,
_('create FC mapping from %(source)s to %(target)s - '
'did not find success message in CLI output.\n'
' stdout: %(out)s\n stderr: %(err)s\n')
- % {'source': source,
- 'target': target,
- 'out': str(out),
- 'err': str(err)})
+ % {'source': source,
+ 'target': target,
+ 'out': str(out),
+ 'err': str(err)})
# Ensure that the output is as expected
match_obj = re.search('FlashCopy Mapping, id \[([0-9]+)\], '
- 'successfully created', out)
+ 'successfully created', out)
# Make sure we got a "successfully created" message with vdisk id
- self._driver_assert(match_obj is not None,
+ self._driver_assert(
+ match_obj is not None,
_('create FC mapping from %(source)s to %(target)s - '
'did not find success message in CLI output.\n'
' stdout: %(out)s\n stderr: %(err)s\n')
- % {'source': source,
- 'target': target,
- 'out': str(out),
- 'err': str(err)})
+ % {'source': source,
+ 'target': target,
+ 'out': str(out),
+ 'err': str(err)})
try:
fc_map_id = match_obj.group(1)
- self._driver_assert(fc_map_id is not None,
+ self._driver_assert(
+ fc_map_id is not None,
_('create FC mapping from %(source)s to %(target)s - '
'did not find mapping id in CLI output.\n'
' stdout: %(out)s\n stderr: %(err)s\n')
- % {'source': source,
- 'target': target,
- 'out': str(out),
- 'err': str(err)})
+ % {'source': source,
+ 'target': target,
+ 'out': str(out),
+ 'err': str(err)})
except IndexError:
- self._driver_assert(False,
+ self._driver_assert(
+ False,
_('create FC mapping from %(source)s to %(target)s - '
'did not find mapping id in CLI output.\n'
' stdout: %(out)s\n stderr: %(err)s\n')
- % {'source': source,
- 'target': target,
- 'out': str(out),
- 'err': str(err)})
+ % {'source': source,
+ 'target': target,
+ 'out': str(out),
+ 'err': str(err)})
try:
out, err = self._run_ssh('prestartfcmap %s' % fc_map_id)
except exception.ProcessExecutionError as e:
LOG.error(_('_run_flashcopy: fail to prepare FlashCopy '
'from %(source)s to %(target)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
- % {'source': source,
- 'target': target,
- 'out': e.stdout,
- 'err': e.stderr})
+ % {'source': source,
+ 'target': target,
+ 'out': e.stdout,
+ 'err': e.stderr})
self._flashcopy_cleanup(fc_map_id, source, target)
mapping_ready = False
wait_time = 5
# Allow waiting of up to timeout (set as parameter)
max_retries = (int(FLAGS.storwize_svc_flashcopy_timeout)
- / wait_time) + 1
+ / wait_time) + 1
for try_number in range(1, max_retries):
mapping_attributes = self._get_flashcopy_mapping_attributes(
- fc_map_id)
+ fc_map_id)
if (mapping_attributes is None or
'status' not in mapping_attributes):
break
% {'status': mapping_attributes['status'],
'id': fc_map_id,
'attr': mapping_attributes})
- raise exception.VolumeBackendAPIException(
- data=exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
# Need to wait for mapping to be prepared, wait a few seconds
time.sleep(wait_time)
if not mapping_ready:
exception_msg = (_('mapping %(id)s prepare failed to complete '
'within the alloted %(to)s seconds timeout. '
- 'Terminating') % {'id': fc_map_id,
- 'to': FLAGS.storwize_svc_flashcopy_timeout})
+ 'Terminating')
+ % {'id': fc_map_id,
+ 'to': FLAGS.storwize_svc_flashcopy_timeout})
LOG.error(_('_run_flashcopy: fail to start FlashCopy '
'from %(source)s to %(target)s with '
'exception %(ex)s')
- % {'source': source,
- 'target': target,
- 'ex': exception_msg})
+ % {'source': source,
+ 'target': target,
+ 'ex': exception_msg})
self._flashcopy_cleanup(fc_map_id, source, target)
raise exception.InvalidSnapshot(
reason=_('_run_flashcopy: %s') % exception_msg)
LOG.error(_('_run_flashcopy: fail to start FlashCopy '
'from %(source)s to %(target)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
- % {'source': source,
- 'target': target,
- 'out': e.stdout,
- 'err': e.stderr})
+ % {'source': source,
+ 'target': target,
+ 'out': e.stdout,
+ 'err': e.stderr})
self._flashcopy_cleanup(fc_map_id, source, target)
LOG.debug(_('leave: _run_flashcopy: FlashCopy started from '
- '%(source)s to %(target)s') % {'source': source,
- 'target': target})
+ '%(source)s to %(target)s')
+ % {'source': source,
+ 'target': target})
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a new snapshot from volume."""
tgt_volume = volume['name']
LOG.debug(_('enter: create_volume_from_snapshot: snapshot %(tgt)s '
- 'from volume %(src)s') % {'tgt': tgt_volume,
- 'src': source_volume})
+ 'from volume %(src)s')
+ % {'tgt': tgt_volume,
+ 'src': source_volume})
src_volume_attributes = self._get_volume_attributes(source_volume)
if src_volume_attributes is None:
'does not exist') % source_volume)
LOG.error(exception_msg)
raise exception.SnapshotNotFound(exception_msg,
- volume_id=source_volume)
-
- self._driver_assert('capacity' in src_volume_attributes,
- _('create_volume_from_snapshot: cannot get source '
- 'volume %(src)s capacity from volume attributes '
- '%(attr)s') % {'src': source_volume,
- 'attr': src_volume_attributes})
+ volume_id=source_volume)
+
+ self._driver_assert(
+ 'capacity' in src_volume_attributes,
+ _('create_volume_from_snapshot: cannot get source '
+ 'volume %(src)s capacity from volume attributes '
+ '%(attr)s')
+ % {'src': source_volume,
+ 'attr': src_volume_attributes})
src_volume_size = src_volume_attributes['capacity']
tgt_volume_attributes = self._get_volume_attributes(tgt_volume)
tgt_volume_created = False
LOG.debug(_('enter: create_snapshot: snapshot %(tgt)s from '
- 'volume %(src)s') % {'tgt': tgt_volume,
- 'src': src_volume})
+ 'volume %(src)s')
+ % {'tgt': tgt_volume,
+ 'src': src_volume})
src_volume_attributes = self._get_volume_attributes(src_volume)
if src_volume_attributes is None:
raise exception.VolumeNotFound(exception_msg,
volume_id=src_volume)
- self._driver_assert('capacity' in src_volume_attributes,
- _('create_volume_from_snapshot: cannot get source '
- 'volume %(src)s capacity from volume attributes '
- '%(attr)s') % {'src': src_volume,
- 'attr': src_volume_attributes})
+ self._driver_assert(
+ 'capacity' in src_volume_attributes,
+ _('create_volume_from_snapshot: cannot get source '
+ 'volume %(src)s capacity from volume attributes '
+ '%(attr)s')
+ % {'src': src_volume,
+ 'attr': src_volume_attributes})
source_volume_size = src_volume_attributes['capacity']
tgt_volume_created = True
else:
# Yes, target exists, verify exact same size as source
- self._driver_assert('capacity' in tgt_volume_attributes,
- _('create_volume_from_snapshot: cannot get source '
- 'volume %(src)s capacity from volume attributes '
- '%(attr)s') % {'src': tgt_volume,
- 'attr': tgt_volume_attributes})
+ self._driver_assert(
+ 'capacity' in tgt_volume_attributes,
+ _('create_volume_from_snapshot: cannot get source '
+ 'volume %(src)s capacity from volume attributes '
+ '%(attr)s')
+ % {'src': tgt_volume,
+ 'attr': tgt_volume_attributes})
target_volume_size = tgt_volume_attributes['capacity']
if target_volume_size != source_volume_size:
exception_msg = (
_('create_snapshot: source %(src)s and target '
'volume %(tgt)s have different capacities '
- '(source:%(ssize)s target:%(tsize)s)') %
- {'src': src_volume,
- 'tgt': tgt_volume,
- 'ssize': source_volume_size,
- 'tsize': target_volume_size})
+ '(source:%(ssize)s target:%(tsize)s)')
+ % {'src': src_volume,
+ 'tgt': tgt_volume,
+ 'ssize': source_volume_size,
+ 'tsize': target_volume_size})
LOG.error(exception_msg)
raise exception.InvalidSnapshot(reason=exception_msg)
"""
LOG.debug(_('enter: _get_host_from_iscsiname: iSCSI initiator %s')
- % iscsi_name)
+ % iscsi_name)
# Get list of host in the storage
ssh_cmd = 'lshost -delim !'
if (len(out.strip()) == 0):
return None
- err_msg = _('_get_host_from_iscsiname: '
- 'failed with unexpected CLI output.\n'
- ' command: %(cmd)s\n stdout: %(out)s\n '
- 'stderr: %(err)s') % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)}
+ err_msg = _(
+ '_get_host_from_iscsiname: '
+ 'failed with unexpected CLI output.\n'
+ ' command: %(cmd)s\n stdout: %(out)s\n '
+ 'stderr: %(err)s') % {'cmd': ssh_cmd,
+ 'out': str(out),
+ 'err': str(err)}
host_lines = out.strip().split('\n')
self._driver_assert(len(host_lines) > 0, err_msg)
header = host_lines.pop(0).split('!')
for host in hosts:
ssh_cmd = 'lshost -delim ! %s' % host
out, err = self._run_ssh(ssh_cmd)
- self._driver_assert(len(out) > 0,
- _('_get_host_from_iscsiname: '
- 'Unexpected response from CLI output. '
- 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
+ self._driver_assert(
+ len(out) > 0,
+ _('_get_host_from_iscsiname: '
+ 'Unexpected response from CLI output. '
+ 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
+ % {'cmd': ssh_cmd,
+ 'out': str(out),
+ 'err': str(err)})
for attrib_line in out.split('\n'):
# If '!' not found, return the string and two empty strings
attrib_name, foo, attrib_value = attrib_line.partition('!')
break
LOG.debug(_('leave: _get_host_from_iscsiname: iSCSI initiator %s')
- % iscsi_name)
+ % iscsi_name)
return hostname
LOG.debug(_('enter: _create_new_host: host %(name)s with iSCSI '
'initiator %(init)s') % {'name': host_name,
- 'init': initiator_name})
+ 'init': initiator_name})
if isinstance(host_name, unicode):
host_name = host_name.translate(self._unicode_host_name_filter)
host_name = '%s_%s' % (host_name, random.randint(10000, 99999))
out, err = self._run_ssh('mkhost -name "%s" -iscsiname "%s"'
% (host_name, initiator_name))
- self._driver_assert(len(out.strip()) > 0 and
- 'successfully created' in out,
- _('create host %(name)s with iSCSI initiator %(init)s - '
- 'did not find success message in CLI output.\n '
- 'stdout: %(out)s\n stderr: %(err)s\n')
- % {'name': host_name,
- 'init': initiator_name,
- 'out': str(out),
- 'err': str(err)})
+ self._driver_assert(
+ len(out.strip()) > 0 and
+ 'successfully created' in out,
+ _('create host %(name)s with iSCSI initiator %(init)s - '
+ 'did not find success message in CLI output.\n '
+ 'stdout: %(out)s\n stderr: %(err)s\n')
+ % {'name': host_name,
+ 'init': initiator_name,
+ 'out': str(out),
+ 'err': str(err)})
LOG.debug(_('leave: _create_new_host: host %(host)s with iSCSI '
'initiator %(init)s') % {'host': host_name,
- 'init': initiator_name})
+ 'init': initiator_name})
return host_name
LOG.debug(_('enter: _is_volume_defined: volume %s ') % volume_name)
volume_attributes = self._get_volume_attributes(volume_name)
LOG.debug(_('leave: _is_volume_defined: volume %(vol)s with %(str)s ')
- % {'vol': volume_name,
- 'str': volume_attributes is not None})
+ % {'vol': volume_name,
+ 'str': volume_attributes is not None})
if volume_attributes is None:
return False
else:
# We expect zero or one line if host does not exist,
# two lines if it does exist, otherwise error
out, err = self._run_ssh('lshost -filtervalue name=%s -delim !'
- % host_name)
+ % host_name)
if len(out.strip()) == 0:
return False
lines = out.strip().split('\n')
- self._driver_assert(len(lines) <= 2,
- _('_is_host_defined: Unexpected response from CLI output.\n '
- 'stdout: %(out)s\n stderr: %(err)s\n')
- % {'out': str(out),
- 'err': str(err)})
+ self._driver_assert(
+ len(lines) <= 2,
+ _('_is_host_defined: Unexpected response from CLI output.\n '
+ 'stdout: %(out)s\n stderr: %(err)s\n')
+ % {'out': str(out),
+ 'err': str(err)})
if len(lines) == 2:
host_info = self._get_hdr_dic(lines[0], lines[1], '!')
host_name_from_storage = host_info['name']
# Make sure we got the data for the right host
- self._driver_assert(host_name_from_storage == host_name,
- _('Data received for host %(host1)s instead of host '
- '%(host2)s.\n '
- 'stdout: %(out)s\n stderr: %(err)s\n')
- % {'host1': host_name_from_storage,
- 'host2': host_name,
- 'out': str(out),
- 'err': str(err)})
+ self._driver_assert(
+ host_name_from_storage == host_name,
+ _('Data received for host %(host1)s instead of host '
+ '%(host2)s.\n '
+ 'stdout: %(out)s\n stderr: %(err)s\n')
+ % {'host1': host_name_from_storage,
+ 'host2': host_name,
+ 'out': str(out),
+ 'err': str(err)})
else: # 0 or 1 lines
host_name_from_storage = None
LOG.debug(_('leave: _is_host_defined: host %(host)s with %(str)s ') % {
- 'host': host_name,
- 'str': host_name_from_storage is not None})
+ 'host': host_name,
+ 'str': host_name_from_storage is not None})
if host_name_from_storage is None:
return False
"""Create a mapping between a volume to a host."""
LOG.debug(_('enter: _map_vol_to_host: volume %(vol)s to '
- 'host %(host)s') % {'vol': volume_name,
- 'host': host_name})
+ 'host %(host)s')
+ % {'vol': volume_name,
+ 'host': host_name})
# Check if this volume is already mapped to this host
mapping_data = self._get_hostvdisk_mappings(host_name)
# Volume is not mapped to host, create a new LUN
if not mapped_flag:
out, err = self._run_ssh('mkvdiskhostmap -host %s -scsi %s %s'
- % (host_name, result_lun, volume_name))
- self._driver_assert(len(out.strip()) > 0 and
- 'successfully created' in out,
- _('_map_vol_to_host: mapping host %(host)s to '
- 'volume %(vol)s with LUN '
- '%(lun)s - did not find success message in CLI output. '
- 'stdout: %(out)s\n stderr: %(err)s\n')
- % {'host': host_name,
- 'vol': volume_name,
- 'lun': result_lun,
- 'out': str(out),
- 'err': str(err)})
+ % (host_name, result_lun, volume_name))
+ self._driver_assert(
+ len(out.strip()) > 0 and
+ 'successfully created' in out,
+ _('_map_vol_to_host: mapping host %(host)s to '
+ 'volume %(vol)s with LUN '
+ '%(lun)s - did not find success message in CLI output. '
+ 'stdout: %(out)s\n stderr: %(err)s\n')
+ % {'host': host_name,
+ 'vol': volume_name,
+ 'lun': result_lun,
+ 'out': str(out),
+ 'err': str(err)})
LOG.debug(_('leave: _map_vol_to_host: LUN %(lun)s, volume %(vol)s, '
- 'host %(host)s') % {'lun': result_lun, 'vol': volume_name,
- 'host': host_name})
+ 'host %(host)s')
+ % {'lun': result_lun,
+ 'vol': volume_name,
+ 'host': host_name})
return result_lun
"""
LOG.debug(_('enter: _get_flashcopy_mapping_attributes: mapping %s')
- % fc_map_id)
+ % fc_map_id)
# Get the lunid to be used
fc_ls_map_cmd = ('lsfcmap -filtervalue id=%s -delim !' % fc_map_id)
out, err = self._run_ssh(fc_ls_map_cmd)
- self._driver_assert(len(out) > 0,
+ self._driver_assert(
+ len(out) > 0,
_('_get_flashcopy_mapping_attributes: '
'Unexpected response from CLI output. '
'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': fc_ls_map_cmd,
- 'out': str(out),
- 'err': str(err)})
+ % {'cmd': fc_ls_map_cmd,
+ 'out': str(out),
+ 'err': str(err)})
# Get list of FlashCopy mappings
# We expect zero or one line if mapping does not exist,
# two lines if it does exist, otherwise error
lines = out.strip().split('\n')
- self._driver_assert(len(lines) <= 2,
- _('_get_flashcopy_mapping_attributes: '
- 'Unexpected response from CLI output. '
- 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': fc_ls_map_cmd,
- 'out': str(out),
- 'err': str(err)})
+ self._driver_assert(
+ len(lines) <= 2,
+ _('_get_flashcopy_mapping_attributes: '
+ 'Unexpected response from CLI output. '
+ 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
+ % {'cmd': fc_ls_map_cmd,
+ 'out': str(out),
+ 'err': str(err)})
if len(lines) == 2:
attributes = self._get_hdr_dic(lines[0], lines[1], '!')
attributes = None
LOG.debug(_('leave: _get_flashcopy_mapping_attributes: mapping '
- '%(id)s, attributes %(attr)s') %
- {'id': fc_map_id,
- 'attr': attributes})
+ '%(id)s, attributes %(attr)s')
+ % {'id': fc_map_id,
+ 'attr': attributes})
return attributes
"""
LOG.debug(_('enter: _get_volume_attributes: volume %s')
- % volume_name)
+ % volume_name)
# Get the lunid to be used
try:
'err': e.stderr})
return None
- self._driver_assert(len(out) > 0,
- ('_get_volume_attributes: '
- 'Unexpected response from CLI output. '
- 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
+ self._driver_assert(
+ len(out) > 0,
+ ('_get_volume_attributes: '
+ 'Unexpected response from CLI output. '
+ 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
+ % {'cmd': ssh_cmd,
+ 'out': str(out),
+ 'err': str(err)})
attributes = {}
for attrib_line in out.split('\n'):
# If '!' not found, return the string and two empty strings
windows_opts = [
cfg.StrOpt('windows_iscsi_lun_path',
- default='C:\iSCSIVirtualDisks',
- help='Path to store VHD backed volumes'),
+ default='C:\iSCSIVirtualDisks',
+ help='Path to store VHD backed volumes'),
]
FLAGS.register_opts(windows_opts)
wt_disk = self._conn_wmi.WT_Disk(Description=vol_name)[0]
wt_disk.Delete_()
vhdfiles = self._conn_cimv2.query(
- "Select * from CIM_DataFile where Name = '" +
- self._get_vhd_path(volume) + "'")
+ "Select * from CIM_DataFile where Name = '" +
+ self._get_vhd_path(volume) + "'")
if len(vhdfiles) > 0:
vhdfiles[0].Delete()
raise
else:
LOG.info(_('Ignored target creation error "%s"'
- ' while ensuring export'), exc)
+ ' while ensuring export'), exc)
#Get the disk to add
vol_name = volume['name']
wt_disk = self._conn_wmi.WT_Disk(Description=vol_name)[0]
self.call_xenapi('SR.scan', sr_ref)
def create(self, host_ref, device_config, name_label, name_description,
- sr_type, physical_size=None, content_type=None,
- shared=False, sm_config=None):
+ sr_type, physical_size=None, content_type=None,
+ shared=False, sm_config=None):
return self.call_xenapi(
'SR.create',
host_ref,
)
def introduce(self, sr_uuid, name_label, name_description, sr_type,
- content_type=None, shared=False, sm_config=None):
+ content_type=None, shared=False, sm_config=None):
return self.call_xenapi(
'SR.introduce',
sr_uuid,
return self.get_record(vdi_ref)['uuid']
def create(self, sr_ref, size, vdi_type,
- sharable=False, read_only=False, other_config=None):
+ sharable=False, read_only=False, other_config=None):
return self.call_xenapi('VDI.create',
- dict(
- SR=sr_ref,
- virtual_size=str(size),
- type=vdi_type,
- sharable=sharable,
- read_only=read_only,
- other_config=other_config or dict()
- )
- )
+ dict(SR=sr_ref,
+ virtual_size=str(size),
+ type=vdi_type,
+ sharable=sharable,
+ read_only=read_only,
+ other_config=other_config or dict()))
def destroy(self, vdi_ref):
self.call_xenapi('VDI.destroy', vdi_ref)
self.SR.forget(sr_ref)
def create_new_vdi(self, sr_ref, size_in_gigabytes):
- return self.VDI.create(
- sr_ref,
- to_bytes(size_in_gigabytes),
- 'User',
- )
+ return self.VDI.create(sr_ref,
+ to_bytes(size_in_gigabytes),
+ 'User', )
def to_bytes(size_in_gigs):
proxy = importutils.import_class(FLAGS.xiv_proxy)
- self.xiv_proxy = proxy({
- "xiv_user": FLAGS.san_login,
- "xiv_pass": FLAGS.san_password,
- "xiv_address": FLAGS.san_ip,
- "xiv_vol_pool": FLAGS.san_clustername
- },
- LOG,
- exception)
+ self.xiv_proxy = proxy({"xiv_user": FLAGS.san_login,
+ "xiv_pass": FLAGS.san_password,
+ "xiv_address": FLAGS.san_ip,
+ "xiv_vol_pool": FLAGS.san_clustername},
+ LOG,
+ exception)
san.SanISCSIDriver.__init__(self, *args, **kwargs)
def do_setup(self, context):
def initialize_connection(self, volume, connector):
"""Map the created volume."""
- return self.xiv_proxy.initialize_connection(
- volume,
- connector)
+ return self.xiv_proxy.initialize_connection(volume, connector)
def terminate_connection(self, volume, connector, **kwargs):
"""Terminate a connection to a volume."""
- return self.xiv_proxy.terminate_connection(
- volume,
- connector)
+ return self.xiv_proxy.terminate_connection(volume, connector)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot."""
- return self.xiv_proxy.create_volume_from_snapshot(
- volume,
- snapshot)
+ return self.xiv_proxy.create_volume_from_snapshot(volume,
+ snapshot)
def create_snapshot(self, snapshot):
"""Create a snapshot."""
default=None,
help='Zadara VPSA port number'),
cfg.BoolOpt('zadara_vpsa_use_ssl',
- default=False,
- help='Use SSL connection'),
+ default=False,
+ help='Use SSL connection'),
cfg.StrOpt('zadara_user',
default=None,
help='User name for the VPSA'),
default='OS_%s',
help='Default template for VPSA volume names'),
cfg.BoolOpt('zadara_vpsa_auto_detach_on_delete',
- default=True,
- help="Automatically detach from servers on volume delete"),
+ default=True,
+ help="Automatically detach from servers on volume delete"),
cfg.BoolOpt('zadara_vpsa_allow_nonexistent_delete',
- default=True,
- help="Don't halt on deletion of non-existing volumes"),
- ]
+ default=True,
+ help="Don't halt on deletion of non-existing volumes"), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(zadara_opts)
# Attach/Detach operations
'attach_volume': ('POST',
'/api/servers/%s/volumes.xml'
- % kwargs.get('vpsa_srv'),
+ % kwargs.get('vpsa_srv'),
{'volume_name[]': kwargs.get('vpsa_vol'),
'force': 'NO'}),
'detach_volume': ('POST',
'/api/volumes/%s/detach.xml'
- % kwargs.get('vpsa_vol'),
+ % kwargs.get('vpsa_vol'),
{'server_name[]': kwargs.get('vpsa_srv'),
'force': 'NO'}),
{}),
'list_vol_attachments': ('GET',
'/api/volumes/%s/servers.xml'
- % kwargs.get('vpsa_vol'),
- {}),
- }
+ % kwargs.get('vpsa_vol'),
+ {}), }
if cmd not in vpsa_commands.keys():
raise exception.UnknownCmd(cmd=cmd)
user = xml_tree.find('user')
if user is None:
raise exception.MalformedResponse(cmd=cmd,
- reason='no "user" field')
+ reason='no "user" field')
access_key = user.findtext('access-key')
if access_key is None:
raise exception.MalformedResponse(cmd=cmd,
- reason='no "access-key" field')
+ reason='no "access-key" field')
self.access_key = access_key
(method, url, body) = self._generate_vpsa_cmd(cmd, **kwargs)
LOG.debug(_('Sending %(method)s to %(url)s. Body "%(body)s"')
- % locals())
+ % locals())
if self.use_ssl:
connection = httplib.HTTPSConnection(self.host, self.port)
"""Return details of VPSA's active controller."""
xml_tree = self.vpsa.send_cmd('list_controllers')
ctrl = self._xml_parse_helper(xml_tree, 'vcontrollers',
- ('state', 'active'))
+ ('state', 'active'))
if ctrl is not None:
return dict(target=ctrl.findtext('target'),
ip=ctrl.findtext('iscsi-ip'),
def create_volume(self, volume):
"""Create volume."""
- self.vpsa.send_cmd('create_volume',
- name=FLAGS.zadara_vol_name_template % volume['name'],
- size=volume['size'])
+ self.vpsa.send_cmd(
+ 'create_volume',
+ name=FLAGS.zadara_vol_name_template % volume['name'],
+ size=volume['size'])
def delete_volume(self, volume):
"""
vpsa_vol = self._get_vpsa_volume_name(name)
if not vpsa_vol:
msg = _('Volume %(name)s could not be found. '
- 'It might be already deleted') % locals()
+ 'It might be already deleted') % locals()
LOG.warning(msg)
if FLAGS.zadara_vpsa_allow_nonexistent_delete:
return
xml_tree = self.vpsa.send_cmd('list_vol_attachments',
vpsa_vol=vpsa_vol)
servers = self._xml_parse_helper(xml_tree, 'servers',
- ('iqn', None), first=False)
+ ('iqn', None), first=False)
if servers:
if not FLAGS.zadara_vpsa_auto_detach_on_delete:
raise exception.VolumeAttached(volume_id=name)
vpsa_srv = server.findtext('name')
if vpsa_srv:
self.vpsa.send_cmd('detach_volume',
- vpsa_srv=vpsa_srv, vpsa_vol=vpsa_vol)
+ vpsa_srv=vpsa_srv,
+ vpsa_vol=vpsa_vol)
# Delete volume
self.vpsa.send_cmd('delete_volume', vpsa_vol=vpsa_vol)
# Attach volume to server
self.vpsa.send_cmd('attach_volume',
- vpsa_srv=vpsa_srv, vpsa_vol=vpsa_vol)
+ vpsa_srv=vpsa_srv,
+ vpsa_vol=vpsa_vol)
# Get connection info
xml_tree = self.vpsa.send_cmd('list_vol_attachments',
target = server.findtext('target')
lun = server.findtext('lun')
if target is None or lun is None:
- raise exception.ZadaraInvalidAttachmentInfo(name=name,
- reason='target=%s, lun=%s' % (target, lun))
+ raise exception.ZadaraInvalidAttachmentInfo(
+ name=name,
+ reason='target=%s, lun=%s' % (target, lun))
properties = {}
properties['target_discovered'] = False
# Detach volume from server
self.vpsa.send_cmd('detach_volume',
- vpsa_srv=vpsa_srv, vpsa_vol=vpsa_vol)
+ vpsa_srv=vpsa_srv,
+ vpsa_vol=vpsa_vol)
def create_volume_from_snapshot(self, volume, snapshot):
raise NotImplementedError()
LOG = logging.getLogger(__name__)
-iscsi_helper_opt = [
- cfg.StrOpt('iscsi_helper',
- default='tgtadm',
- help='iscsi target user-land tool to use'),
- cfg.StrOpt('volumes_dir',
- default='$state_path/volumes',
- help='Volume configuration file storage directory'),
- cfg.StrOpt('iet_conf',
- default='/etc/iet/ietd.conf',
- help='IET configuration file'),
-]
+iscsi_helper_opt = [cfg.StrOpt('iscsi_helper',
+ default='tgtadm',
+ help='iscsi target user-land tool to use'),
+ cfg.StrOpt('volumes_dir',
+ default='$state_path/volumes',
+ help='Volume configuration file storage '
+ 'directory'),
+ cfg.StrOpt('iet_conf',
+ default='/etc/iet/ietd.conf',
+ help='IET configuration file'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(iscsi_helper_opt)
except exception.ProcessExecutionError, e:
vol_id = name.split(':')[1]
LOG.error(_("Failed to create iscsi target for volume "
- "id:%(vol_id)s.") % locals())
+ "id:%(vol_id)s.") % locals())
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
return tid
help='Driver to use for volume creation'),
cfg.BoolOpt('volume_force_update_capabilities',
default=False,
- help='if True will force update capabilities on each check'),
- ]
+ help='if True will force update capabilities on each check'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.driver.RBDDriver': 'cinder.volume.drivers.rbd.RBDDriver',
'cinder.volume.driver.SheepdogDriver':
- 'cinder.volume.drivers.sheepdog.SheepdogDriver',
+ 'cinder.volume.drivers.sheepdog.SheepdogDriver',
'cinder.volume.nexenta.volume.NexentaDriver':
- 'cinder.volume.drivers.nexenta.volume.NexentaDriver',
+ 'cinder.volume.drivers.nexenta.volume.NexentaDriver',
'cinder.volume.san.SanISCSIDriver':
- 'cinder.volume.drivers.san.san.SanISCSIDriver',
+ 'cinder.volume.drivers.san.san.SanISCSIDriver',
'cinder.volume.san.SolarisISCSIDriver':
- 'cinder.volume.drivers.san.solaris.SolarisISCSIDriver',
+ 'cinder.volume.drivers.san.solaris.SolarisISCSIDriver',
'cinder.volume.san.HpSanISCSIDriver':
- 'cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver',
+ 'cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver',
'cinder.volume.netapp.NetAppISCSIDriver':
- 'cinder.volume.drivers.netapp.NetAppISCSIDriver',
+ 'cinder.volume.drivers.netapp.NetAppISCSIDriver',
'cinder.volume.netapp.NetAppCmodeISCSIDriver':
- 'cinder.volume.drivers.netapp.NetAppCmodeISCSIDriver',
+ 'cinder.volume.drivers.netapp.NetAppCmodeISCSIDriver',
'cinder.volume.netapp_nfs.NetAppNFSDriver':
- 'cinder.volume.drivers.netapp_nfs.NetAppNFSDriver',
+ 'cinder.volume.drivers.netapp_nfs.NetAppNFSDriver',
'cinder.volume.nfs.NfsDriver':
- 'cinder.volume.drivers.nfs.NfsDriver',
+ 'cinder.volume.drivers.nfs.NfsDriver',
'cinder.volume.solidfire.SolidFire':
- 'cinder.volume.drivers.solidfire.SolidFire',
+ 'cinder.volume.drivers.solidfire.SolidFire',
'cinder.volume.storwize_svc.StorwizeSVCDriver':
- 'cinder.volume.drivers.storwize_svc.StorwizeSVCDriver',
+ 'cinder.volume.drivers.storwize_svc.StorwizeSVCDriver',
'cinder.volume.windows.WindowsDriver':
- 'cinder.volume.drivers.windows.WindowsDriver',
+ 'cinder.volume.drivers.windows.WindowsDriver',
'cinder.volume.xiv.XIVDriver':
- 'cinder.volume.drivers.xiv.XIVDriver',
+ 'cinder.volume.drivers.xiv.XIVDriver',
'cinder.volume.zadara.ZadaraVPSAISCSIDriver':
- 'cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver'
- }
+ 'cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver'}
class VolumeManager(manager.SchedulerDependentManager):
else:
self.driver = importutils.import_object(volume_driver)
super(VolumeManager, self).__init__(service_name='volume',
- *args, **kwargs)
+ *args, **kwargs)
# NOTE(vish): Implementation specific db handling is done
# by the driver.
self.driver.db = self.db
vol_name = volume_ref['name']
vol_size = volume_ref['size']
LOG.debug(_("volume %(vol_name)s: creating lv of"
- " size %(vol_size)sG") % locals())
+ " size %(vol_size)sG") % locals())
if snapshot_id is None and image_id is None:
model_update = self.driver.create_volume(volume_ref)
elif snapshot_id is not None:
else:
# create the volume from an image
image_service, image_id = \
- glance.get_remote_image_service(context,
- image_id)
+ glance.get_remote_image_service(context,
+ image_id)
image_location = image_service.get_location(context, image_id)
image_meta = image_service.show(context, image_id)
cloned = self.driver.clone_image(volume_ref, image_location)
if snapshot_id:
# Copy any Glance metadata from the original volume
self.db.volume_glance_metadata_copy_to_volume(context,
- volume_ref['id'], snapshot_id)
+ volume_ref['id'],
+ snapshot_id)
now = timeutils.utcnow()
self.db.volume_update(context,
raise exception.VolumeAttached(volume_id=volume_id)
if volume_ref['host'] != self.host:
raise exception.InvalidVolume(
- reason=_("Volume is not local to this node"))
+ reason=_("Volume is not local to this node"))
self._notify_about_volume_usage(context, volume_ref, "delete.start")
self._reset_stats()
snapshot_ref['id'], {'status': 'available',
'progress': '100%'})
self.db.volume_glance_metadata_copy_to_snapshot(context,
- snapshot_ref['id'], volume_id)
+ snapshot_ref['id'],
+ volume_id)
LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
return snapshot_id
LOG.info(_("Notification {%s} received"), event)
self._reset_stats()
- def _notify_about_volume_usage(self, context, volume, event_suffix,
- extra_usage_info=None):
+ def _notify_about_volume_usage(self,
+ context,
+ volume,
+ event_suffix,
+ extra_usage_info=None):
volume_utils.notify_about_volume_usage(
- context, volume, event_suffix,
- extra_usage_info=extra_usage_info, host=self.host)
+ context, volume, event_suffix,
+ extra_usage_info=extra_usage_info, host=self.host)
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
- super(VolumeAPI, self).__init__(topic=FLAGS.volume_topic,
+ super(VolumeAPI, self).__init__(
+ topic=FLAGS.volume_topic,
default_version=self.BASE_RPC_API_VERSION)
def create_volume(self, ctxt, volume, host,
snapshot_id=None, image_id=None):
- self.cast(ctxt, self.make_msg('create_volume',
- volume_id=volume['id'],
- snapshot_id=snapshot_id,
- image_id=image_id),
- topic=rpc.queue_get_for(ctxt, self.topic, host))
+ self.cast(ctxt,
+ self.make_msg('create_volume',
+ volume_id=volume['id'],
+ snapshot_id=snapshot_id,
+ image_id=image_id),
+ topic=rpc.queue_get_for(ctxt,
+ self.topic,
+ host))
def delete_volume(self, ctxt, volume):
- self.cast(ctxt, self.make_msg('delete_volume',
- volume_id=volume['id']),
- topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+ self.cast(ctxt,
+ self.make_msg('delete_volume',
+ volume_id=volume['id']),
+ topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
def create_snapshot(self, ctxt, volume, snapshot):
self.cast(ctxt, self.make_msg('create_snapshot',
volume_id=volume['id'],
snapshot_id=snapshot['id']),
- topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+ topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
def delete_snapshot(self, ctxt, snapshot, host):
self.cast(ctxt, self.make_msg('delete_snapshot',
snapshot_id=snapshot['id']),
- topic=rpc.queue_get_for(ctxt, self.topic, host))
+ topic=rpc.queue_get_for(ctxt, self.topic, host))
def attach_volume(self, ctxt, volume, instance_uuid, mountpoint):
return self.call(ctxt, self.make_msg('attach_volume',
- volume_id=volume['id'],
- instance_uuid=instance_uuid,
- mountpoint=mountpoint),
- topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+ volume_id=volume['id'],
+ instance_uuid=instance_uuid,
+ mountpoint=mountpoint),
+ topic=rpc.queue_get_for(ctxt,
+ self.topic,
+ volume['host']))
def detach_volume(self, ctxt, volume):
return self.call(ctxt, self.make_msg('detach_volume',
- volume_id=volume['id']),
- topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+ volume_id=volume['id']),
+ topic=rpc.queue_get_for(ctxt,
+ self.topic,
+ volume['host']))
def copy_volume_to_image(self, ctxt, volume, image_id):
self.cast(ctxt, self.make_msg('copy_volume_to_image',
volume_id=volume['id'],
image_id=image_id),
- topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+ topic=rpc.queue_get_for(ctxt,
+ self.topic,
+ volume['host']))
def initialize_connection(self, ctxt, volume, connector):
return self.call(ctxt, self.make_msg('initialize_connection',
- volume_id=volume['id'],
- connector=connector),
- topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+ volume_id=volume['id'],
+ connector=connector),
+ topic=rpc.queue_get_for(ctxt,
+ self.topic,
+ volume['host']))
def terminate_connection(self, ctxt, volume, connector, force=False):
return self.call(ctxt, self.make_msg('terminate_connection',
- volume_id=volume['id'],
- connector=connector,
- force=force),
- topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+ volume_id=volume['id'],
+ connector=connector,
+ force=force),
+ topic=rpc.queue_get_for(ctxt,
+ self.topic,
+ volume['host']))
extra_usage_info = dict(audit_period_beginning=str(audit_start),
audit_period_ending=str(audit_end))
- notify_about_volume_usage(
- context, volume_ref, 'exists', extra_usage_info=extra_usage_info)
+ notify_about_volume_usage(context, volume_ref,
+ 'exists', extra_usage_info=extra_usage_info)
def _usage_from_volume(context, volume_ref, **kw):
def null_safe_str(s):
return str(s) if s else ''
- usage_info = dict(
- tenant_id=volume_ref['project_id'],
- user_id=volume_ref['user_id'],
- volume_id=volume_ref['id'],
- volume_type=volume_ref['volume_type_id'],
- display_name=volume_ref['display_name'],
- launched_at=null_safe_str(volume_ref['launched_at']),
- created_at=null_safe_str(volume_ref['created_at']),
- status=volume_ref['status'],
- snapshot_id=volume_ref['snapshot_id'],
- size=volume_ref['size'])
+ usage_info = dict(tenant_id=volume_ref['project_id'],
+ user_id=volume_ref['user_id'],
+ volume_id=volume_ref['id'],
+ volume_type=volume_ref['volume_type_id'],
+ display_name=volume_ref['display_name'],
+ launched_at=null_safe_str(volume_ref['launched_at']),
+ created_at=null_safe_str(volume_ref['created_at']),
+ status=volume_ref['status'],
+ snapshot_id=volume_ref['snapshot_id'],
+ size=volume_ref['size'])
usage_info.update(kw)
return usage_info
def notify_about_volume_usage(context, volume, event_suffix,
- extra_usage_info=None, host=None):
+ extra_usage_info=None, host=None):
if not host:
host = FLAGS.host
if not extra_usage_info:
extra_usage_info = {}
- usage_info = _usage_from_volume(
- context, volume, **extra_usage_info)
+ usage_info = _usage_from_volume(context, volume, **extra_usage_info)
notifier_api.notify(context, 'volume.%s' % host,
'volume.%s' % event_suffix,
default_pool_size = 1000
def __init__(self, name, app, host=None, port=None, pool_size=None,
- protocol=eventlet.wsgi.HttpProtocol):
+ protocol=eventlet.wsgi.HttpProtocol):
"""Initialize, but do not start, a WSGI server.
:param name: Pretty name for logging.
"""
if backlog < 1:
raise exception.InvalidInput(
- reason='The backlog must be more than 1')
+ reason='The backlog must be more than 1')
self._socket = eventlet.listen((self.host, self.port), backlog=backlog)
self._server = eventlet.spawn(self._start)
(self.host, self.port) = self._socket.getsockname()
#
# Until all these issues get fixed, ignore.
- ignore='--ignore=N4,E12,E711,E712,E721,E502'
+ ignore='--ignore=N4,E125,E126,E711,E712'
${wrapper} python tools/hacking.py ${ignore} ${srcfiles}
}
requires = common_setup.parse_requirements()
-setuptools.setup(name='cinder',
- version=version.canonical_version_string(),
- description='block storage service',
- author='OpenStack',
- author_email='cinder@lists.launchpad.net',
- url='http://www.openstack.org/',
- classifiers=[
- 'Environment :: OpenStack',
- 'Intended Audience :: Information Technology',
- 'Intended Audience :: System Administrators',
- 'License :: OSI Approved :: Apache Software License',
- 'Operating System :: POSIX :: Linux',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
- ],
- cmdclass=common_setup.get_cmdclass(),
- packages=setuptools.find_packages(exclude=['bin', 'smoketests']),
- install_requires=requires,
- include_package_data=True,
- test_suite='nose.collector',
- setup_requires=['setuptools_git>=0.4'],
- scripts=['bin/cinder-all',
- 'bin/cinder-api',
- 'bin/cinder-clear-rabbit-queues',
- 'bin/cinder-manage',
- 'bin/cinder-rootwrap',
- 'bin/cinder-scheduler',
- 'bin/cinder-volume',
- 'bin/cinder-volume-usage-audit',
- ],
- py_modules=[])
+setuptools.setup(
+ name='cinder',
+ version=version.canonical_version_string(),
+ description='block storage service',
+ author='OpenStack',
+ author_email='cinder@lists.launchpad.net',
+ url='http://www.openstack.org/',
+ classifiers=[
+ 'Environment :: OpenStack',
+ 'Intended Audience :: Information Technology',
+ 'Intended Audience :: System Administrators',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: POSIX :: Linux',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7',
+ ],
+ cmdclass=common_setup.get_cmdclass(),
+ packages=setuptools.find_packages(exclude=['bin', 'smoketests']),
+ install_requires=requires,
+ include_package_data=True,
+ test_suite='nose.collector',
+ setup_requires=['setuptools_git>=0.4'],
+ scripts=['bin/cinder-all',
+ 'bin/cinder-api',
+ 'bin/cinder-clear-rabbit-queues',
+ 'bin/cinder-manage',
+ 'bin/cinder-rootwrap',
+ 'bin/cinder-scheduler',
+ 'bin/cinder-volume',
+ 'bin/cinder-volume-usage-audit'],
+ py_modules=[])
# handle "from x import y as z" to "import x.y as z"
split_line = line.split()
if (line.startswith("from ") and "," not in line and
- split_line[2] == "import" and split_line[3] != "*" and
- split_line[1] != "__future__" and
- (len(split_line) == 4 or
- (len(split_line) == 6 and split_line[4] == "as"))):
+ split_line[2] == "import" and split_line[3] != "*" and
+ split_line[1] != "__future__" and
+ (len(split_line) == 4 or
+ (len(split_line) == 6 and split_line[4] == "as"))):
return "import %s.%s" % (split_line[1], split_line[3])
else:
return line
"""
pos = logical_line.find(',')
parts = logical_line.split()
- if pos > -1 and (parts[0] == "import" or
- parts[0] == "from" and parts[2] == "import") and \
- not is_import_exception(parts[1]):
+ if (pos > -1 and (parts[0] == "import" or
+ parts[0] == "from" and parts[2] == "import") and
+ not is_import_exception(parts[1])):
yield pos, "CINDER N301: one import per line"
_missingImport = set([])
if parent:
if is_import_exception(parent):
return
- parent_mod = __import__(parent, globals(), locals(),
- [mod], -1)
+ parent_mod = __import__(parent,
+ globals(),
+ locals(),
+ [mod],
+ -1)
valid = inspect.ismodule(getattr(parent_mod, mod))
else:
__import__(mod, globals(), locals(), [], -1)
if added:
sys.path.pop()
added = False
- return logical_line.find(mod), ("CINDER N304: No "
- "relative imports. '%s' is a relative import"
- % logical_line)
- return logical_line.find(mod), ("CINDER N302: import only "
- "modules. '%s' does not import a module"
- % logical_line)
+ return (logical_line.find(mod),
+ ("CINDER N304: No "
+ "relative imports. '%s' is a relative import"
+ % logical_line))
+ return (logical_line.find(mod),
+ ("CINDER N302: import only "
+ "modules. '%s' does not import a module"
+ % logical_line))
except (ImportError, NameError) as exc:
if not added:
if name not in _missingImport:
if VERBOSE_MISSING_IMPORT:
print >> sys.stderr, ("ERROR: import '%s' failed: %s" %
- (name, exc))
+ (name, exc))
_missingImport.add(name)
added = False
sys.path.pop()
except AttributeError:
# Invalid import
return logical_line.find(mod), ("CINDER N303: Invalid import, "
- "AttributeError raised")
+ "AttributeError raised")
# convert "from x import y" to " import x.y"
# convert "from x import y as z" to " import x.y"
import_normalize(logical_line)
split_line = logical_line.split()
- if (logical_line.startswith("import ") and "," not in logical_line and
+ if (logical_line.startswith("import ") and
+ "," not in logical_line and
(len(split_line) == 2 or
- (len(split_line) == 4 and split_line[2] == "as"))):
+ (len(split_line) == 4 and split_line[2] == "as"))):
mod = split_line[1]
rval = importModuleCheck(mod)
- if rval != None:
+ if rval is not None:
yield rval
# TODO(jogo) handle "from x import *"
# handle import x
# use .lower since capitalization shouldn't dictate order
split_line = import_normalize(physical_line.strip()).lower().split()
- split_previous = import_normalize(lines[line_number - 2]
- ).strip().lower().split()
+ split_previous = import_normalize(
+ lines[line_number - 2]).strip().lower().split()
# with or without "as y"
length = [2, 4]
if (len(split_line) in length and len(split_previous) in length and
- split_line[0] == "import" and split_previous[0] == "import"):
+ split_line[0] == "import" and split_previous[0] == "import"):
if split_line[1] < split_previous[1]:
return (0,
"CINDER N306: imports not in alphabetical order (%s, %s)"
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
if (pos != -1 and end and len(physical_line) > pos + 4):
- if (physical_line[-5] != '.'):
+ if (physical_line[-5] != '.' and physical_line):
return pos, "CINDER N402: one line docstring needs a period"
FORMAT_RE = re.compile("%(?:"
- "%|" # Ignore plain percents
- "(\(\w+\))?" # mapping key
- "([#0 +-]?" # flag
- "(?:\d+|\*)?" # width
- "(?:\.\d+)?" # precision
- "[hlL]?" # length mod
- "\w))") # type
+ "%|" # Ignore plain percents
+ "(\(\w+\))?" # mapping key
+ "([#0 +-]?" # flag
+ "(?:\d+|\*)?" # width
+ "(?:\.\d+)?" # precision
+ "[hlL]?" # length mod
+ "\w))") # type
class LocalizationError(Exception):
break
if not format_string:
- raise LocalizationError(start,
+ raise LocalizationError(
+ start,
"CINDER N701: Empty localization string")
if token_type != tokenize.OP:
- raise LocalizationError(start,
+ raise LocalizationError(
+ start,
"CINDER N701: Invalid localization call")
if text != ")":
if text == "%":
- raise LocalizationError(start,
+ raise LocalizationError(
+ start,
"CINDER N702: Formatting operation should be outside"
" of localization method call")
elif text == "+":
- raise LocalizationError(start,
+ raise LocalizationError(
+ start,
"CINDER N702: Use bare string concatenation instead"
" of +")
else:
- raise LocalizationError(start,
+ raise LocalizationError(
+ start,
"CINDER N702: Argument to _ must be just a string")
format_specs = FORMAT_RE.findall(format_string)
positional_specs = [(key, spec) for key, spec in format_specs
- if not key and spec]
+ if not key and spec]
# not spec means %%, key means %(smth)s
if len(positional_specs) > 1:
- raise LocalizationError(start,
+ raise LocalizationError(
+ start,
"CINDER N703: Multiple positional placeholders")
finally:
if len(_missingImport) > 0:
print >> sys.stderr, ("%i imports missing in this test environment"
- % len(_missingImport))
+ % len(_missingImport))
def get_distro():
- if os.path.exists('/etc/fedora-release') or \
- os.path.exists('/etc/redhat-release'):
+ if (os.path.exists('/etc/fedora-release') or
+ os.path.exists('/etc/redhat-release')):
return Fedora()
else:
return Distro()
pip_install('-r', TEST_REQUIRES)
# Tell the virtual env how to "import cinder"
- pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
- "cinder.pth")
+ pthfile = os.path.join(venv, "lib",
+ PY_VERSION, "site-packages",
+ "cinder.pth")
f = open(pthfile, 'w')
f.write("%s\n" % ROOT)
"""Parses command-line arguments."""
parser = optparse.OptionParser()
parser.add_option("-n", "--no-site-packages", dest="no_site_packages",
- default=False, action="store_true",
- help="Do not inherit packages from global Python install")
+ default=False, action="store_true",
+ help="Do not inherit packages from "
+ "global Python install")
return parser.parse_args()
[testenv:pep8]
deps = pep8==1.3.3
commands =
- python tools/hacking.py --ignore=N4,E12,E711,E712,E721,E502 --repeat --show-source \
+ python tools/hacking.py --ignore=N4,E125,E126,E711,E712 --repeat --show-source \
--exclude=.venv,.tox,dist,doc,openstack,*egg .
- python tools/hacking.py --ignore=N4,E12,E711,E712,E721,E502 --repeat --show-source \
+ python tools/hacking.py --ignore=N4,E125,E126,E711,E712 --repeat --show-source \
--filename=cinder* bin
[testenv:venv]