]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Make pep8 checks a bit stricter.
authorJohn Griffith <john.griffith@solidfire.com>
Sun, 25 Nov 2012 03:17:32 +0000 (20:17 -0700)
committerJohn Griffith <john.griffith@solidfire.com>
Mon, 26 Nov 2012 23:57:15 +0000 (16:57 -0700)
Along with moving to pep8 1.3.3, we also want to standardize
on what we ignore. This patch gets us most of the way there
by setting the ignore list to:
N4, E125, E126, E711, E712.

Almost all changes made here are white-space/indentation changes.

The removal of Hacking N4 errors from the ignore list will
be handled in a separate patch.

Change-Id: If45f156600485d23769449018590f60b4f69b0c5

127 files changed:
bin/cinder-manage
cinder/api/common.py
cinder/api/contrib/extended_snapshot_attributes.py
cinder/api/contrib/quota_classes.py
cinder/api/contrib/quotas.py
cinder/api/contrib/types_extra_specs.py
cinder/api/contrib/types_manage.py
cinder/api/contrib/volume_actions.py
cinder/api/middleware/auth.py
cinder/api/middleware/fault.py
cinder/api/openstack/__init__.py
cinder/api/openstack/volume/contrib/hosts.py
cinder/api/openstack/wsgi.py
cinder/api/urlmap.py
cinder/api/v1/router.py
cinder/api/v1/snapshots.py
cinder/api/v1/types.py
cinder/api/v1/volumes.py
cinder/api/v2/router.py
cinder/api/v2/snapshots.py
cinder/api/v2/types.py
cinder/api/v2/volumes.py
cinder/api/views/types.py
cinder/api/views/versions.py
cinder/common/deprecated.py
cinder/context.py
cinder/db/api.py
cinder/db/sqlalchemy/api.py
cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py
cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py
cinder/db/sqlalchemy/models.py
cinder/db/sqlalchemy/session.py
cinder/flags.py
cinder/image/glance.py
cinder/manager.py
cinder/policy.py
cinder/quota.py
cinder/scheduler/chance.py
cinder/scheduler/driver.py
cinder/scheduler/manager.py
cinder/scheduler/rpcapi.py
cinder/scheduler/simple.py
cinder/service.py
cinder/test.py
cinder/testing/runner.py
cinder/tests/api/contrib/test_extended_snapshot_attributes.py
cinder/tests/api/contrib/test_volume_actions.py
cinder/tests/api/contrib/test_volume_tenant_attribute.py
cinder/tests/api/extensions/foxinsocks.py
cinder/tests/api/middleware/test_faults.py
cinder/tests/api/openstack/fakes.py
cinder/tests/api/openstack/test_wsgi.py
cinder/tests/api/openstack/volume/contrib/test_hosts.py
cinder/tests/api/test_extensions.py
cinder/tests/api/test_xmlutil.py
cinder/tests/api/v1/test_limits.py
cinder/tests/api/v1/test_snapshots.py
cinder/tests/api/v1/test_volumes.py
cinder/tests/api/v2/test_limits.py
cinder/tests/api/v2/test_volumes.py
cinder/tests/db/fakes.py
cinder/tests/image/fake.py
cinder/tests/image/test_glance.py
cinder/tests/integrated/api/client.py
cinder/tests/monkey_patch_example/__init__.py
cinder/tests/scheduler/test_rpcapi.py
cinder/tests/scheduler/test_scheduler.py
cinder/tests/test_HpSanISCSIDriver.py
cinder/tests/test_api.py
cinder/tests/test_cinder_rootwrap.py
cinder/tests/test_context.py
cinder/tests/test_flags.py
cinder/tests/test_iscsi.py
cinder/tests/test_migrations.py
cinder/tests/test_netapp.py
cinder/tests/test_netapp_nfs.py
cinder/tests/test_nexenta.py
cinder/tests/test_nfs.py
cinder/tests/test_policy.py
cinder/tests/test_quota.py
cinder/tests/test_rbd.py
cinder/tests/test_service.py
cinder/tests/test_storwize_svc.py
cinder/tests/test_test_utils.py
cinder/tests/test_utils.py
cinder/tests/test_versions.py
cinder/tests/test_volume.py
cinder/tests/test_volume_glance_metadata.py
cinder/tests/test_volume_rpcapi.py
cinder/tests/test_volume_types.py
cinder/tests/test_volume_types_extra_specs.py
cinder/tests/test_volume_utils.py
cinder/tests/test_windows.py
cinder/tests/test_xenapi_sm.py
cinder/tests/test_xiv.py
cinder/tests/test_zadara.py
cinder/tests/windows/basetestcase.py
cinder/tests/windows/db_fakes.py
cinder/tests/windows/mockproxy.py
cinder/tests/windows/windowsutils.py
cinder/utils.py
cinder/volume/api.py
cinder/volume/driver.py
cinder/volume/drivers/netapp.py
cinder/volume/drivers/netapp_nfs.py
cinder/volume/drivers/nexenta/jsonrpc.py
cinder/volume/drivers/nexenta/volume.py
cinder/volume/drivers/nfs.py
cinder/volume/drivers/rbd.py
cinder/volume/drivers/san/san.py
cinder/volume/drivers/san/solaris.py
cinder/volume/drivers/solidfire.py
cinder/volume/drivers/storwize_svc.py
cinder/volume/drivers/windows.py
cinder/volume/drivers/xenapi/lib.py
cinder/volume/drivers/xiv.py
cinder/volume/drivers/zadara.py
cinder/volume/iscsi.py
cinder/volume/manager.py
cinder/volume/rpcapi.py
cinder/volume/utils.py
cinder/wsgi.py
run_tests.sh
setup.py
tools/hacking.py
tools/install_venv.py
tox.ini

index cfd0c762125cfb563098e819b7f67f7f5c5471ac..a9077e67971e48652b0d49be33d6a9e882499e26 100755 (executable)
@@ -127,8 +127,9 @@ class ShellCommands(object):
         Falls back to Python shell if unavailable"""
         self.run('python')
 
-    @args('--shell', dest="shell", metavar='<bpython|ipython|python >',
-            help='Python shell')
+    @args('--shell', dest="shell",
+          metavar='<bpython|ipython|python >',
+          help='Python shell')
     def run(self, shell=None):
         """Runs a Python interactive interpreter."""
         if not shell:
@@ -180,7 +181,7 @@ def _db_error(caught_exception):
 
 
 class HostCommands(object):
-    """List hosts"""
+    """List hosts."""
 
     def list(self, zone=None):
         """Show a list of all physical hosts. Filter by zone.
@@ -206,8 +207,9 @@ class DbCommands(object):
     def __init__(self):
         pass
 
-    @args('--version', dest='version', metavar='<version>',
-            help='Database version')
+    @args('--version', dest='version',
+          metavar='<version>',
+          help='Database version')
     def sync(self, version=None):
         """Sync the database up to the most recent version."""
         return migration.db_sync(version)
@@ -224,9 +226,10 @@ class VersionCommands(object):
         pass
 
     def list(self):
-        print _("%(version)s (%(vcs)s)") % \
-                {'version': version.version_string(),
-                 'vcs': version.version_string_with_vcs()}
+        print(
+            _("%(version)s (%(vcs)s)") %
+            {'version': version.version_string(),
+             'vcs': version.version_string_with_vcs()})
 
     def __call__(self):
         self.list()
@@ -313,7 +316,7 @@ class ImportCommands(object):
             columns = table.columns.keys()
             for row in src.query(table).all():
                 data = dict([(str(column), getattr(row, column))
-                    for column in columns])
+                             for column in columns])
                 dest.add(new_row(**data))
             dest.commit()
 
@@ -325,7 +328,7 @@ class ImportCommands(object):
             for row in src.query(table).all():
                 if row.resource == 'gigabytes' or row.resource == 'volumes':
                     data = dict([(str(column), getattr(row, column))
-                        for column in columns])
+                                 for column in columns])
                     dest.add(new_row(**data))
                 dest.commit()
 
@@ -352,10 +355,14 @@ class ImportCommands(object):
         dest_db = '%s/cinder' % dest_db
         self._import_db(src_db, dest_db, backup_db)
 
-    @args('--src', dest='src_tgts', metavar='<src tgts>',
-        help='[login@src_host:]/opt/stack/nova/volumes/')
-    @args('--dest', dest='dest_tgts', metavar='<dest tgts>',
-        help='[login@src_host:/opt/stack/cinder/volumes/]')
+    @args('--src',
+          dest='src_tgts',
+          metavar='<src tgts>',
+          help='[login@src_host:]/opt/stack/nova/volumes/')
+    @args('--dest',
+          dest='dest_tgts',
+          metavar='<dest tgts>',
+          help='[login@src_host:/opt/stack/cinder/volumes/]')
     def copy_ptgt_files(self, src_tgts, dest_tgts=None):
         """Copy persistent scsi tgt files from nova to cinder.
 
@@ -380,10 +387,12 @@ class ImportCommands(object):
 
 
 class VolumeCommands(object):
-    """Methods for dealing with a cloud in an odd state"""
+    """Methods for dealing with a cloud in an odd state."""
 
-    @args('--volume', dest='volume_id', metavar='<volume id>',
-            help='Volume ID')
+    @args('--volume',
+          dest='volume_id',
+          metavar='<volume id>',
+          help='Volume ID')
     def delete(self, volume_id):
         """Delete a volume, bypassing the check that it
         must be available."""
@@ -407,8 +416,10 @@ class VolumeCommands(object):
                  {"method": "delete_volume",
                   "args": {"volume_id": volume['id']}})
 
-    @args('--volume', dest='volume_id', metavar='<volume id>',
-            help='Volume ID')
+    @args('--volume',
+          dest='volume_id',
+          metavar='<volume id>',
+          help='Volume ID')
     def reattach(self, volume_id):
         """Re-attach a volume that has previously been attached
         to an instance.  Typically called after a compute host
@@ -429,7 +440,7 @@ class VolumeCommands(object):
 
 
 class StorageManagerCommands(object):
-    """Class for mangaging Storage Backends and Flavors"""
+    """Class for mangaging Storage Backends and Flavors."""
 
     def flavor_list(self, flavor=None):
         ctxt = context.get_admin_context()
@@ -449,9 +460,9 @@ class StorageManagerCommands(object):
 
         for flav in flavors:
             print "%-18s\t%-20s\t%s" % (
-                    flav['id'],
-                    flav['label'],
-                    flav['description'])
+                flav['id'],
+                flav['label'],
+                flav['description'])
 
     def flavor_create(self, label, desc):
         # TODO(renukaapte) flavor name must be unique
@@ -487,10 +498,10 @@ class StorageManagerCommands(object):
             sys.exit(2)
 
         print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'),
-                                                      _('Flavor id'),
-                                                      _('SR UUID'),
-                                                      _('SR Type'),
-                                                      _('Config Parameters'),)
+                                                 _('Flavor id'),
+                                                 _('SR UUID'),
+                                                 _('SR Type'),
+                                                 _('Config Parameters'),)
 
         for b in backends:
             print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'],
@@ -516,8 +527,8 @@ class StorageManagerCommands(object):
             print "error: %s" % ex
             sys.exit(2)
 
-        config_params = " ".join(['%s=%s' %
-                               (key, params[key]) for key in params])
+        config_params = " ".join(
+            ['%s=%s' % (key, params[key]) for key in params])
 
         if 'sr_uuid' in params:
             sr_uuid = params['sr_uuid']
@@ -532,11 +543,12 @@ class StorageManagerCommands(object):
                 c = raw_input('Proceed? (y/n) ')
                 if c == 'y' or c == 'Y':
                     try:
-                        db.sm_backend_conf_update(ctxt, backend['id'],
-                                        dict(created=False,
-                                             flavor_id=flavors['id'],
-                                             sr_type=sr_type,
-                                             config_params=config_params))
+                        db.sm_backend_conf_update(
+                            ctxt, backend['id'],
+                            dict(created=False,
+                                 flavor_id=flavors['id'],
+                                 sr_type=sr_type,
+                                 config_params=config_params))
                     except exception.DBError, e:
                         _db_error(e)
                 return
@@ -578,10 +590,10 @@ class ConfigCommands(object):
 
 
 class GetLogCommands(object):
-    """Get logging information"""
+    """Get logging information."""
 
     def errors(self):
-        """Get all of the errors from the log files"""
+        """Get all of the errors from the log files."""
         error_found = 0
         if FLAGS.logdir:
             logs = [x for x in os.listdir(FLAGS.logdir) if x.endswith('.log')]
@@ -601,7 +613,7 @@ class GetLogCommands(object):
             print "No errors in logfiles!"
 
     def syslog(self, num_entries=10):
-        """Get <num_entries> of the cinder syslog events"""
+        """Get <num_entries> of the cinder syslog events."""
         entries = int(num_entries)
         count = 0
         log_file = ''
@@ -692,8 +704,8 @@ def main():
     script_name = argv.pop(0)
     if len(argv) < 1:
         print _("\nOpenStack Cinder version: %(version)s (%(vcs)s)\n") % \
-                {'version': version.version_string(),
-                 'vcs': version.version_string_with_vcs()}
+            {'version': version.version_string(),
+             'vcs': version.version_string_with_vcs()}
         print script_name + " category action [<args>]"
         print _("Available categories:")
         for k, _v in CATEGORIES:
index 392bbb551fe6ee63f862c5a3599ea9c03436bfe9..53b1c87f2711eff5bbc04ed6ed1f31ca8a3a3cca 100644 (file)
@@ -181,14 +181,10 @@ class ViewBuilder(object):
     _collection_name = None
 
     def _get_links(self, request, identifier):
-        return [{
-            "rel": "self",
-            "href": self._get_href_link(request, identifier),
-        },
-        {
-            "rel": "bookmark",
-            "href": self._get_bookmark_link(request, identifier),
-        }]
+        return [{"rel": "self",
+                 "href": self._get_href_link(request, identifier), },
+                {"rel": "bookmark",
+                 "href": self._get_bookmark_link(request, identifier), }]
 
     def _get_next_link(self, request, identifier):
         """Return href string with proper limit and marker params."""
index a14e11b764fcf6f66415837b019122862455f887..f55fa53ac2579be57ee878a8b3fdc67c53ad19e2 100644 (file)
@@ -27,14 +27,15 @@ from cinder import volume
 
 FLAGS = flags.FLAGS
 LOG = logging.getLogger(__name__)
-authorize = extensions.soft_extension_authorizer('volume',
-                                                'extended_snapshot_attributes')
+authorize = extensions.soft_extension_authorizer(
+    'volume',
+    'extended_snapshot_attributes')
 
 
 class ExtendedSnapshotAttributesController(wsgi.Controller):
     def __init__(self, *args, **kwargs):
         super(ExtendedSnapshotAttributesController, self).__init__(*args,
-                                                                 **kwargs)
+                                                                   **kwargs)
         self.volume_api = volume.API()
 
     def _get_snapshots(self, context):
index 897fd31043b763536d622217ba0f0ee6d4dcdc26..597ccd004c540df511397a4a44de6c18d1126a9d 100644 (file)
@@ -63,10 +63,8 @@ class QuotaClassSetsController(object):
         except exception.NotAuthorized:
             raise webob.exc.HTTPForbidden()
 
-        return self._format_quota_set(
-            id,
-            QUOTAS.get_class_quotas(context, id)
-            )
+        return self._format_quota_set(id,
+                                      QUOTAS.get_class_quotas(context, id))
 
     @wsgi.serializers(xml=QuotaClassTemplate)
     def update(self, req, id, body):
index 1daa133186580a6e5d6a20e0738b6f63c78adf8b..374eed1803cc64d5862aa6b82222eeafc5b446dd 100644 (file)
@@ -118,8 +118,8 @@ class Quotas(extensions.ExtensionDescriptor):
         resources = []
 
         res = extensions.ResourceExtension('os-quota-sets',
-                                            QuotaSetsController(),
-                                            member_actions={'defaults': 'GET'})
+                                           QuotaSetsController(),
+                                           member_actions={'defaults': 'GET'})
         resources.append(res)
 
         return resources
index 80243eacaca4db694b8d319afd4afde687df972d..40e43d7371c6d04f227890ecf1e011f4ff61891d 100644 (file)
@@ -140,10 +140,10 @@ class Types_extra_specs(extensions.ExtensionDescriptor):
     def get_resources(self):
         resources = []
         res = extensions.ResourceExtension('extra_specs',
-                            VolumeTypeExtraSpecsController(),
-                            parent=dict(
-                                member_name='type',
-                                collection_name='types'))
+                                           VolumeTypeExtraSpecsController(),
+                                           parent=dict(member_name='type',
+                                                       collection_name='types')
+                                           )
         resources.append(res)
 
         return resources
index 2e67c3539b7dcdbe13446406eac135bbcc1af26b..801d686e85c9891a0c5125dfb14ae7138445988c 100644 (file)
@@ -31,7 +31,7 @@ authorize = extensions.extension_authorizer('volume', 'types_manage')
 
 
 class VolumeTypesManageController(wsgi.Controller):
-    """ The volume types API controller for the OpenStack API """
+    """The volume types API controller for the OpenStack API."""
 
     _view_builder_class = views_types.ViewBuilder
 
@@ -64,7 +64,7 @@ class VolumeTypesManageController(wsgi.Controller):
 
     @wsgi.action("delete")
     def _delete(self, req, id):
-        """ Deletes an existing volume type """
+        """Deletes an existing volume type."""
         context = req.environ['cinder.context']
         authorize(context)
 
@@ -78,7 +78,7 @@ class VolumeTypesManageController(wsgi.Controller):
 
 
 class Types_manage(extensions.ExtensionDescriptor):
-    """Types manage support"""
+    """Types manage support."""
 
     name = "TypesManage"
     alias = "os-types-manage"
index 2f054215e4c4ccffa98b1d3a3f9e880e9cd25243..ae75885ff65962264831131037eee9d9f48429ca 100644 (file)
@@ -52,7 +52,7 @@ class VolumeToImageSerializer(xmlutil.TemplateBuilder):
 
 
 class VolumeToImageDeserializer(wsgi.XMLDeserializer):
-    """Deserializer to handle xml-formatted requests"""
+    """Deserializer to handle xml-formatted requests."""
     def default(self, string):
         dom = minidom.parseString(string)
         action_node = dom.childNodes[0]
index e806576d8e4bb86fc8d03347df1b2963e66176fa..cd93866fbbb54935f3eb7e782581ffc680149ae4 100644 (file)
@@ -31,10 +31,11 @@ from cinder.openstack.common import log as logging
 from cinder import wsgi as base_wsgi
 
 
-use_forwarded_for_opt = cfg.BoolOpt('use_forwarded_for',
-        default=False,
-        help='Treat X-Forwarded-For as the canonical remote address. '
-             'Only enable this if you have a sanitizing proxy.')
+use_forwarded_for_opt = cfg.BoolOpt(
+    'use_forwarded_for',
+    default=False,
+    help='Treat X-Forwarded-For as the canonical remote address. '
+         'Only enable this if you have a sanitizing proxy.')
 
 FLAGS = flags.FLAGS
 FLAGS.register_opt(use_forwarded_for_opt)
index 15892b4e3cd41bd4e05b03e228a4e887c297334f..dddd166ac509c71238c8dee5cbe085ad2c7ba266 100644 (file)
@@ -39,7 +39,7 @@ class FaultWrapper(base_wsgi.Middleware):
             for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
                 FaultWrapper._status_to_type[clazz.code] = clazz
         return FaultWrapper._status_to_type.get(
-                                  status, webob.exc.HTTPInternalServerError)()
+            status, webob.exc.HTTPInternalServerError)()
 
     def _error(self, inner, req):
         LOG.exception(_("Caught error: %s"), unicode(inner))
index d68639860caa469eacb97d956803731b5e2dd921..ae2dfeb07179b578723a273e0a5237d9258e63e8 100644 (file)
@@ -49,9 +49,10 @@ class ProjectMapper(APIMapper):
             p_member = parent_resource['member_name']
             kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
                                                                 p_member)
-        routes.Mapper.resource(self, member_name,
-                                     collection_name,
-                                     **kwargs)
+        routes.Mapper.resource(self,
+                               member_name,
+                               collection_name,
+                               **kwargs)
 
 
 class APIRouter(base_wsgi.Router):
index cb83f57b5af91cab8d584b7a5a081bb9168eeef4..7ba7c24a60edd099d70dd776c976765f03fba211 100644 (file)
 """The hosts admin extension."""
 
 import webob.exc
-from xml.dom import minidom
-from xml.parsers import expat
 
 from cinder.api.openstack import extensions
 from cinder.api.openstack import wsgi
 from cinder.api.openstack import xmlutil
-from cinder.volume import api as volume_api
 from cinder import db
 from cinder import exception
 from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 from cinder import utils
+from cinder.volume import api as volume_api
+from xml.dom import minidom
+from xml.parsers import expat
 
 FLAGS = flags.FLAGS
 LOG = logging.getLogger(__name__)
@@ -176,8 +176,9 @@ class HostController(object):
         context = req.environ['cinder.context']
         state = "enabled" if enabled else "disabled"
         LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
-        result = self.api.set_host_enabled(context, host=host,
-                enabled=enabled)
+        result = self.api.set_host_enabled(context,
+                                           host=host,
+                                           enabled=enabled)
         if result not in ("enabled", "disabled"):
             # An error message was returned
             raise webob.exc.HTTPBadRequest(explanation=result)
@@ -230,13 +231,14 @@ class HostController(object):
             (snap_count, snap_sum) = db.snapshot_data_get_for_project(
                 context,
                 project_id)
-            resources.append({'resource':
-                                 {'host': host,
-                                  'project': project_id,
-                                  'volume_count': str(count),
-                                  'total_volume_gb': str(sum),
-                                  'snapshot_count': str(snap_count),
-                                  'total_snapshot_gb': str(snap_sum)}})
+            resources.append(
+                {'resource':
+                    {'host': host,
+                     'project': project_id,
+                     'volume_count': str(count),
+                     'total_volume_gb': str(sum),
+                     'snapshot_count': str(snap_count),
+                     'total_snapshot_gb': str(snap_sum)}})
             snap_count_total += int(snap_count)
             snap_sum_total += int(snap_sum)
         resources[0]['resource']['snapshot_count'] = str(snap_count_total)
@@ -254,8 +256,11 @@ class Hosts(extensions.ExtensionDescriptor):
 
     def get_resources(self):
         resources = [extensions.ResourceExtension('os-hosts',
-                HostController(),
-                collection_actions={'update': 'PUT'},
-                member_actions={"startup": "GET", "shutdown": "GET",
-                        "reboot": "GET"})]
+                                                  HostController(),
+                                                  collection_actions={
+                                                      'update': 'PUT'},
+                                                  member_actions={
+                                                      'startup': 'GET',
+                                                      'shutdown': 'GET',
+                                                      'reboot': 'GET'})]
         return resources
index e38a85cd4ffee8df320ffca66d6712e195d06902..70b9e36a9c44983332e650ecbd9e80547110febb 100644 (file)
@@ -79,7 +79,7 @@ class Request(webob.Request):
                 content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
 
             self.environ['cinder.best_content_type'] = (content_type or
-                                                      'application/json')
+                                                        'application/json')
 
         return self.environ['cinder.best_content_type']
 
@@ -577,8 +577,9 @@ class ResourceExceptionHandler(object):
                 code=ex_value.code, explanation=unicode(ex_value)))
         elif isinstance(ex_value, TypeError):
             exc_info = (ex_type, ex_value, ex_traceback)
-            LOG.error(_('Exception handling resource: %s') % ex_value,
-                    exc_info=exc_info)
+            LOG.error(_(
+                'Exception handling resource: %s') %
+                ex_value, exc_info=exc_info)
             raise Fault(webob.exc.HTTPBadRequest())
         elif isinstance(ex_value, Fault):
             LOG.info(_("Fault thrown: %s"), unicode(ex_value))
@@ -901,7 +902,7 @@ class Resource(wsgi.Application):
                 meth = getattr(self.controller, action)
         except AttributeError:
             if (not self.wsgi_actions or
-                action not in ['action', 'create', 'delete']):
+                    action not in ['action', 'create', 'delete']):
                 # Propagate the error
                 raise
         else:
@@ -1038,17 +1039,16 @@ class Controller(object):
 class Fault(webob.exc.HTTPException):
     """Wrap webob.exc.HTTPException to provide API friendly response."""
 
-    _fault_names = {
-            400: "badRequest",
-            401: "unauthorized",
-            403: "forbidden",
-            404: "itemNotFound",
-            405: "badMethod",
-            409: "conflictingRequest",
-            413: "overLimit",
-            415: "badMediaType",
-            501: "notImplemented",
-            503: "serviceUnavailable"}
+    _fault_names = {400: "badRequest",
+                    401: "unauthorized",
+                    403: "forbidden",
+                    404: "itemNotFound",
+                    405: "badMethod",
+                    409: "conflictingRequest",
+                    413: "overLimit",
+                    415: "badMediaType",
+                    501: "notImplemented",
+                    503: "serviceUnavailable"}
 
     def __init__(self, exception):
         """Create a Fault for the given webob.exc.exception."""
index 894d36cb8ef2fca732df5e19625e6d657e366e83..18ec2020d0df2af04bef806b1031f78527cd51a1 100644 (file)
@@ -24,8 +24,9 @@ from cinder.openstack.common import log as logging
 
 
 _quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
-_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*'
-                                     r'(?:=\s*([^;]+|%s))?\s*' %
+_option_header_piece_re = re.compile(
+    r';\s*([^\s;=]+|%s)\s*'
+    r'(?:=\s*([^;]+|%s))?\s*' %
     (_quoted_string_re, _quoted_string_re))
 
 LOG = logging.getLogger(__name__)
@@ -171,8 +172,7 @@ class URLMap(paste.urlmap.URLMap):
         for (domain, app_url), app in self.applications:
             if domain and domain != host and domain != host + ':' + port:
                 continue
-            if (path_info == app_url
-                or path_info.startswith(app_url + '/')):
+            if (path_info == app_url or path_info.startswith(app_url + '/')):
                 return app, app_url
 
         return None, None
@@ -274,7 +274,7 @@ class URLMap(paste.urlmap.URLMap):
 
         if not mime_type or not app:
             possible_mime_type, possible_app = self._accept_strategy(
-                    host, port, environ, supported_content_types)
+                host, port, environ, supported_content_types)
             if possible_mime_type and not mime_type:
                 mime_type = possible_mime_type
             if possible_app and not app:
index bd56fb2f6498b4b19302795d84a138d7a9dbb8f4..87969a784945428db66cc57a9bd25ecbe3f4c49c 100644 (file)
@@ -44,8 +44,8 @@ class APIRouter(cinder.api.openstack.APIRouter):
     def _setup_routes(self, mapper, ext_mgr):
         self.resources['versions'] = versions.create_resource()
         mapper.connect("versions", "/",
-                    controller=self.resources['versions'],
-                    action='show')
+                       controller=self.resources['versions'],
+                       action='show')
 
         mapper.redirect("", "/")
 
index c44d63718305e0aaf79166a3e8dd27e038128589..9685a095d23c39f64f3e0a9be853e067d6e3f8e1 100644 (file)
@@ -164,15 +164,17 @@ class SnapshotsController(wsgi.Controller):
             raise exception.InvalidParameterValue(err=msg)
 
         if utils.bool_from_str(force):
-            new_snapshot = self.volume_api.create_snapshot_force(context,
-                                        volume,
-                                        snapshot.get('display_name'),
-                                        snapshot.get('display_description'))
+            new_snapshot = self.volume_api.create_snapshot_force(
+                context,
+                volume,
+                snapshot.get('display_name'),
+                snapshot.get('display_description'))
         else:
-            new_snapshot = self.volume_api.create_snapshot(context,
-                                        volume,
-                                        snapshot.get('display_name'),
-                                        snapshot.get('display_description'))
+            new_snapshot = self.volume_api.create_snapshot(
+                context,
+                volume,
+                snapshot.get('display_name'),
+                snapshot.get('display_description'))
 
         retval = _translate_snapshot_detail_view(context, new_snapshot)
 
index 87583681c733d6244debcb5ef3e2a882fd082e69..1513b8dcfa4ccd608495739dc791cdf04c8faaec 100644 (file)
@@ -15,7 +15,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-""" The volume type & volume types extra specs extension"""
+"""The volume type & volume types extra specs extension."""
 
 from webob import exc
 
@@ -50,20 +50,20 @@ class VolumeTypesTemplate(xmlutil.TemplateBuilder):
 
 
 class VolumeTypesController(wsgi.Controller):
-    """ The volume types API controller for the OpenStack API """
+    """The volume types API controller for the OpenStack API."""
 
     _view_builder_class = views_types.ViewBuilder
 
     @wsgi.serializers(xml=VolumeTypesTemplate)
     def index(self, req):
-        """ Returns the list of volume types """
+        """Returns the list of volume types."""
         context = req.environ['cinder.context']
         vol_types = volume_types.get_all_types(context).values()
         return self._view_builder.index(req, vol_types)
 
     @wsgi.serializers(xml=VolumeTypeTemplate)
     def show(self, req, id):
-        """ Return a single volume type item """
+        """Return a single volume type item."""
         context = req.environ['cinder.context']
 
         try:
index 065e4d5a663ba5e39495bb940f3a19b007a4794b..40b3af40d74d91e9867c21526cb676630eb9ba4e 100644 (file)
@@ -296,7 +296,7 @@ class VolumeController(wsgi.Controller):
         if req_volume_type:
             try:
                 kwargs['volume_type'] = volume_types.get_volume_type_by_name(
-                        context, req_volume_type)
+                    context, req_volume_type)
             except exception.VolumeTypeNotFound:
                 explanation = 'Volume type not found.'
                 raise exc.HTTPNotFound(explanation=explanation)
@@ -394,7 +394,7 @@ def remove_invalid_options(context, search_options, allowed_search_options):
         return
     # Otherwise, strip out all unknown options
     unknown_options = [opt for opt in search_options
-            if opt not in allowed_search_options]
+                       if opt not in allowed_search_options]
     bad_options = ", ".join(unknown_options)
     log_msg = _("Removing options '%(bad_options)s' from query") % locals()
     LOG.debug(log_msg)
index d2039ca56ccb12448b2f5c57f3b7266fc609d2b6..e3e51399f1f6328c34b5fc612978935f0714b714 100644 (file)
@@ -44,8 +44,8 @@ class APIRouter(cinder.api.openstack.APIRouter):
     def _setup_routes(self, mapper, ext_mgr):
         self.resources['versions'] = versions.create_resource()
         mapper.connect("versions", "/",
-                    controller=self.resources['versions'],
-                    action='show')
+                       controller=self.resources['versions'],
+                       action='show')
 
         mapper.redirect("", "/")
 
index 7fea38c06aa08fbfbccda81fc8067eea36da4b80..5853a0180c8a176af6b14948f9ad7a55faf7ca0d 100644 (file)
@@ -164,15 +164,17 @@ class SnapshotsController(wsgi.Controller):
             raise exception.InvalidParameterValue(err=msg)
 
         if utils.bool_from_str(force):
-            new_snapshot = self.volume_api.create_snapshot_force(context,
-                                        volume,
-                                        snapshot.get('display_name'),
-                                        snapshot.get('display_description'))
+            new_snapshot = self.volume_api.create_snapshot_force(
+                context,
+                volume,
+                snapshot.get('display_name'),
+                snapshot.get('display_description'))
         else:
-            new_snapshot = self.volume_api.create_snapshot(context,
-                                        volume,
-                                        snapshot.get('display_name'),
-                                        snapshot.get('display_description'))
+            new_snapshot = self.volume_api.create_snapshot(
+                context,
+                volume,
+                snapshot.get('display_name'),
+                snapshot.get('display_description'))
 
         retval = _translate_snapshot_detail_view(context, new_snapshot)
 
index 87583681c733d6244debcb5ef3e2a882fd082e69..1513b8dcfa4ccd608495739dc791cdf04c8faaec 100644 (file)
@@ -15,7 +15,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-""" The volume type & volume types extra specs extension"""
+"""The volume type & volume types extra specs extension."""
 
 from webob import exc
 
@@ -50,20 +50,20 @@ class VolumeTypesTemplate(xmlutil.TemplateBuilder):
 
 
 class VolumeTypesController(wsgi.Controller):
-    """ The volume types API controller for the OpenStack API """
+    """The volume types API controller for the OpenStack API."""
 
     _view_builder_class = views_types.ViewBuilder
 
     @wsgi.serializers(xml=VolumeTypesTemplate)
     def index(self, req):
-        """ Returns the list of volume types """
+        """Returns the list of volume types."""
         context = req.environ['cinder.context']
         vol_types = volume_types.get_all_types(context).values()
         return self._view_builder.index(req, vol_types)
 
     @wsgi.serializers(xml=VolumeTypeTemplate)
     def show(self, req, id):
-        """ Return a single volume type item """
+        """Return a single volume type item."""
         context = req.environ['cinder.context']
 
         try:
index 7e73040da625e930bd89913637909b48c8c3eb6d..f304f143bb8f6ed3fca44f64795ab0c747c93cc8 100644 (file)
@@ -291,7 +291,7 @@ class VolumeController(wsgi.Controller):
         if req_volume_type:
             try:
                 kwargs['volume_type'] = volume_types.get_volume_type_by_name(
-                        context, req_volume_type)
+                    context, req_volume_type)
             except exception.VolumeTypeNotFound:
                 explanation = 'Volume type not found.'
                 raise exc.HTTPNotFound(explanation=explanation)
@@ -389,7 +389,7 @@ def remove_invalid_options(context, search_options, allowed_search_options):
         return
     # Otherwise, strip out all unknown options
     unknown_options = [opt for opt in search_options
-            if opt not in allowed_search_options]
+                       if opt not in allowed_search_options]
     bad_options = ", ".join(unknown_options)
     log_msg = _("Removing options '%(bad_options)s' from query") % locals()
     LOG.debug(log_msg)
index 4b89167c8d5585499289880ce8584187b1e39e66..675ec01fe55756a6abe08b6a53bb681f6b816697 100644 (file)
@@ -23,8 +23,8 @@ class ViewBuilder(common.ViewBuilder):
     def show(self, request, volume_type, brief=False):
         """Trim away extraneous volume type attributes."""
         trimmed = dict(id=volume_type.get('id'),
-                          name=volume_type.get('name'),
-                          extra_specs=volume_type.get('extra_specs'))
+                       name=volume_type.get('name'),
+                       extra_specs=volume_type.get('extra_specs'))
         return trimmed if brief else dict(volume_type=trimmed)
 
     def index(self, request, volume_types):
index f551382b930e0935f2c3b53a7eb3b2415e05ea21..371033ded61d1c1607619f662d24ff8f8682c56a 100644 (file)
@@ -38,14 +38,9 @@ class ViewBuilder(object):
             version_objs.append({
                 "id": version['id'],
                 "status": version['status'],
-                "links": [
-                        {
-                        "rel": "self",
-                        "href": self.generate_href(req.path),
-                        },
-                ],
-                "media-types": version['media-types'],
-                })
+                "links": [{"rel": "self",
+                           "href": self.generate_href(req.path), }, ],
+                "media-types": version['media-types'], })
 
         return dict(choices=version_objs)
 
@@ -57,8 +52,7 @@ class ViewBuilder(object):
                 "id": version['id'],
                 "status": version['status'],
                 "updated": version['updated'],
-                "links": self._build_links(version),
-                })
+                "links": self._build_links(version), })
 
         return dict(versions=version_objs)
 
@@ -66,20 +60,15 @@ class ViewBuilder(object):
         reval = copy.deepcopy(version)
         reval['links'].insert(0, {
             "rel": "self",
-            "href": self.base_url.rstrip('/') + '/',
-            })
+            "href": self.base_url.rstrip('/') + '/', })
         return dict(version=reval)
 
     def _build_links(self, version_data):
         """Generate a container of links that refer to the provided version."""
         href = self.generate_href()
 
-        links = [
-                {
-                "rel": "self",
-                "href": href,
-                },
-        ]
+        links = [{'rel': 'self',
+                  'href': href, }, ]
 
         return links
 
index b06c6f2c8527ee2e693f320594c76c87ab3e9e5f..165b5308180370eaf0e67096cb9d6aa7c163ae76 100644 (file)
@@ -26,8 +26,7 @@ LOG = logging.getLogger(__name__)
 deprecate_opts = [
     cfg.BoolOpt('fatal_deprecations',
                 default=False,
-                help='make deprecations fatal')
-    ]
+                help='make deprecations fatal')]
 FLAGS = flags.FLAGS
 FLAGS.register_opts(deprecate_opts)
 
index f8ca839f4ca631bff04a72eaeee42171101cb9a2..9da642982098962eccf8f263af0026b6e641f3b6 100644 (file)
@@ -59,7 +59,7 @@ class RequestContext(object):
         """
         if kwargs:
             LOG.warn(_('Arguments dropped when creating context: %s') %
-                    str(kwargs))
+                     str(kwargs))
 
         self.user_id = user_id
         self.project_id = project_id
index 8136eab2688eef3c7db2fcd634401a0b45f6eb0a..8c4a4caceecff8840184a292ac0ffb060ad3e551 100644 (file)
@@ -61,8 +61,7 @@ db_opts = [
                help='Template string to be used to generate volume names'),
     cfg.StrOpt('snapshot_name_template',
                default='snapshot-%s',
-               help='Template string to be used to generate snapshot names'),
-    ]
+               help='Template string to be used to generate snapshot names'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(db_opts)
@@ -155,8 +154,9 @@ def migration_get(context, migration_id):
 
 def migration_get_by_instance_and_status(context, instance_uuid, status):
     """Finds a migration by the instance uuid its migrating."""
-    return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
-            status)
+    return IMPL.migration_get_by_instance_and_status(context,
+                                                     instance_uuid,
+                                                     status)
 
 
 def migration_get_all_unconfirmed(context, confirm_window):
@@ -378,12 +378,14 @@ def volume_type_extra_specs_delete(context, volume_type_id, key):
     IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
 
 
-def volume_type_extra_specs_update_or_create(context, volume_type_id,
-                                               extra_specs):
+def volume_type_extra_specs_update_or_create(context,
+                                             volume_type_id,
+                                             extra_specs):
     """Create or update volume type extra specs. This adds or modifies the
     key/value pairs specified in the extra specs dict argument"""
-    IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
-                                                    extra_specs)
+    IMPL.volume_type_extra_specs_update_or_create(context,
+                                                  volume_type_id,
+                                                  extra_specs)
 
 
 ###################
@@ -391,8 +393,10 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id,
 
 def volume_glance_metadata_create(context, volume_id, key, value):
     """Update the Glance metadata for the specified volume."""
-    return IMPL.volume_glance_metadata_create(context, volume_id,
-                                              key, value)
+    return IMPL.volume_glance_metadata_create(context,
+                                              volume_id,
+                                              key,
+                                              value)
 
 
 def volume_glance_metadata_get(context, volume_id):
index 7f107402f395713e6f62970e7a48dd7654333dfd..c2272902217e2f40ad79ff36a41d9d5260c4880c 100644 (file)
 """Implementation of SQLAlchemy backend."""
 
 import datetime
-import functools
 import uuid
 import warnings
 
 from sqlalchemy.exc import IntegrityError
 from sqlalchemy import or_
 from sqlalchemy.orm import joinedload
-from sqlalchemy.orm import joinedload_all
-from sqlalchemy.sql.expression import asc
-from sqlalchemy.sql.expression import desc
-from sqlalchemy.sql.expression import literal_column
 from sqlalchemy.sql.expression import literal_column
 from sqlalchemy.sql import func
 
@@ -179,7 +174,7 @@ def model_query(context, *args, **kwargs):
         query = query.filter_by(deleted=True)
     else:
         raise Exception(
-                _("Unrecognized read_deleted value '%s'") % read_deleted)
+            _("Unrecognized read_deleted value '%s'") % read_deleted)
 
     if project_only and is_user_context(context):
         query = query.filter_by(project_id=context.project_id)
@@ -242,9 +237,12 @@ def service_destroy(context, service_id):
 
 @require_admin_context
 def service_get(context, service_id, session=None):
-    result = model_query(context, models.Service, session=session).\
-                     filter_by(id=service_id).\
-                     first()
+    result = model_query(
+        context,
+        models.Service,
+        session=session).\
+        filter_by(id=service_id).\
+        first()
     if not result:
         raise exception.ServiceNotFound(service_id=service_id)
 
@@ -263,19 +261,21 @@ def service_get_all(context, disabled=None):
 
 @require_admin_context
 def service_get_all_by_topic(context, topic):
-    return model_query(context, models.Service, read_deleted="no").\
-                filter_by(disabled=False).\
-                filter_by(topic=topic).\
-                all()
+    return model_query(
+        context, models.Service, read_deleted="no").\
+        filter_by(disabled=False).\
+        filter_by(topic=topic).\
+        all()
 
 
 @require_admin_context
 def service_get_by_host_and_topic(context, host, topic):
-    result = model_query(context, models.Service, read_deleted="no").\
-                filter_by(disabled=False).\
-                filter_by(host=host).\
-                filter_by(topic=topic).\
-                first()
+    result = model_query(
+        context, models.Service, read_deleted="no").\
+        filter_by(disabled=False).\
+        filter_by(host=host).\
+        filter_by(topic=topic).\
+        first()
     if not result:
         raise exception.ServiceNotFound(host=host, topic=topic)
     return result
@@ -283,9 +283,10 @@ def service_get_by_host_and_topic(context, host, topic):
 
 @require_admin_context
 def service_get_all_by_host(context, host):
-    return model_query(context, models.Service, read_deleted="no").\
-                filter_by(host=host).\
-                all()
+    return model_query(
+        context, models.Service, read_deleted="no").\
+        filter_by(host=host).\
+        all()
 
 
 @require_admin_context
@@ -294,11 +295,11 @@ def _service_get_all_topic_subquery(context, session, topic, subq, label):
     return model_query(context, models.Service,
                        func.coalesce(sort_value, 0),
                        session=session, read_deleted="no").\
-                filter_by(topic=topic).\
-                filter_by(disabled=False).\
-                outerjoin((subq, models.Service.host == subq.c.host)).\
-                order_by(sort_value).\
-                all()
+        filter_by(topic=topic).\
+        filter_by(disabled=False).\
+        outerjoin((subq, models.Service.host == subq.c.host)).\
+        order_by(sort_value).\
+        all()
 
 
 @require_admin_context
@@ -310,8 +311,8 @@ def service_get_all_volume_sorted(context):
         subq = model_query(context, models.Volume.host,
                            func.sum(models.Volume.size).label(label),
                            session=session, read_deleted="no").\
-                       group_by(models.Volume.host).\
-                       subquery()
+            group_by(models.Volume.host).\
+            subquery()
         return _service_get_all_topic_subquery(context,
                                                session,
                                                topic,
@@ -322,9 +323,9 @@ def service_get_all_volume_sorted(context):
 @require_admin_context
 def service_get_by_args(context, host, binary):
     result = model_query(context, models.Service).\
-                     filter_by(host=host).\
-                     filter_by(binary=binary).\
-                     first()
+        filter_by(host=host).\
+        filter_by(binary=binary).\
+        first()
 
     if not result:
         raise exception.HostBinaryNotFound(host=host, binary=binary)
@@ -390,8 +391,8 @@ def _dict_with_extra_specs(inst_type_query):
 @require_admin_context
 def iscsi_target_count_by_host(context, host):
     return model_query(context, models.IscsiTarget).\
-                   filter_by(host=host).\
-                   count()
+        filter_by(host=host).\
+        count()
 
 
 @require_admin_context
@@ -414,9 +415,9 @@ def iscsi_target_create_safe(context, values):
 def quota_get(context, project_id, resource, session=None):
     result = model_query(context, models.Quota, session=session,
                          read_deleted="no").\
-                     filter_by(project_id=project_id).\
-                     filter_by(resource=resource).\
-                     first()
+        filter_by(project_id=project_id).\
+        filter_by(resource=resource).\
+        first()
 
     if not result:
         raise exception.ProjectQuotaNotFound(project_id=project_id)
@@ -429,8 +430,8 @@ def quota_get_all_by_project(context, project_id):
     authorize_project_context(context, project_id)
 
     rows = model_query(context, models.Quota, read_deleted="no").\
-                   filter_by(project_id=project_id).\
-                   all()
+        filter_by(project_id=project_id).\
+        all()
 
     result = {'project_id': project_id}
     for row in rows:
@@ -473,9 +474,9 @@ def quota_destroy(context, project_id, resource):
 def quota_class_get(context, class_name, resource, session=None):
     result = model_query(context, models.QuotaClass, session=session,
                          read_deleted="no").\
-                     filter_by(class_name=class_name).\
-                     filter_by(resource=resource).\
-                     first()
+        filter_by(class_name=class_name).\
+        filter_by(resource=resource).\
+        first()
 
     if not result:
         raise exception.QuotaClassNotFound(class_name=class_name)
@@ -488,8 +489,8 @@ def quota_class_get_all_by_name(context, class_name):
     authorize_quota_class_context(context, class_name)
 
     rows = model_query(context, models.QuotaClass, read_deleted="no").\
-                   filter_by(class_name=class_name).\
-                   all()
+        filter_by(class_name=class_name).\
+        all()
 
     result = {'class_name': class_name}
     for row in rows:
@@ -533,8 +534,8 @@ def quota_class_destroy_all_by_name(context, class_name):
     with session.begin():
         quota_classes = model_query(context, models.QuotaClass,
                                     session=session, read_deleted="no").\
-                                filter_by(class_name=class_name).\
-                                all()
+            filter_by(class_name=class_name).\
+            all()
 
         for quota_class_ref in quota_classes:
             quota_class_ref.delete(session=session)
@@ -547,9 +548,9 @@ def quota_class_destroy_all_by_name(context, class_name):
 def quota_usage_get(context, project_id, resource, session=None):
     result = model_query(context, models.QuotaUsage, session=session,
                          read_deleted="no").\
-                     filter_by(project_id=project_id).\
-                     filter_by(resource=resource).\
-                     first()
+        filter_by(project_id=project_id).\
+        filter_by(resource=resource).\
+        first()
 
     if not result:
         raise exception.QuotaUsageNotFound(project_id=project_id)
@@ -562,8 +563,8 @@ def quota_usage_get_all_by_project(context, project_id):
     authorize_project_context(context, project_id)
 
     rows = model_query(context, models.QuotaUsage, read_deleted="no").\
-                   filter_by(project_id=project_id).\
-                   all()
+        filter_by(project_id=project_id).\
+        all()
 
     result = {'project_id': project_id}
     for row in rows:
@@ -593,8 +594,7 @@ def quota_usage_create(context, project_id, resource, in_use, reserved,
 def reservation_get(context, uuid, session=None):
     result = model_query(context, models.Reservation, session=session,
                          read_deleted="no").\
-                     filter_by(uuid=uuid).\
-                     first()
+        filter_by(uuid=uuid).first()
 
     if not result:
         raise exception.ReservationNotFound(uuid=uuid)
@@ -607,8 +607,7 @@ def reservation_get_all_by_project(context, project_id):
     authorize_project_context(context, project_id)
 
     rows = model_query(context, models.QuotaUsage, read_deleted="no").\
-                   filter_by(project_id=project_id).\
-                   all()
+        filter_by(project_id=project_id).all()
 
     result = {'project_id': project_id}
     for row in rows:
@@ -653,9 +652,9 @@ def _get_quota_usages(context, session):
     rows = model_query(context, models.QuotaUsage,
                        read_deleted="no",
                        session=session).\
-                   filter_by(project_id=context.project_id).\
-                   with_lockmode('update').\
-                   all()
+        filter_by(project_id=context.project_id).\
+        with_lockmode('update').\
+        all()
     return dict((row.resource, row) for row in rows)
 
 
@@ -798,9 +797,9 @@ def _quota_reservations(session, context, reservations):
     return model_query(context, models.Reservation,
                        read_deleted="no",
                        session=session).\
-                   filter(models.Reservation.uuid.in_(reservations)).\
-                   with_lockmode('update').\
-                   all()
+        filter(models.Reservation.uuid.in_(reservations)).\
+        with_lockmode('update').\
+        all()
 
 
 @require_context
@@ -844,24 +843,24 @@ def quota_destroy_all_by_project(context, project_id):
     with session.begin():
         quotas = model_query(context, models.Quota, session=session,
                              read_deleted="no").\
-                         filter_by(project_id=project_id).\
-                         all()
+            filter_by(project_id=project_id).\
+            all()
 
         for quota_ref in quotas:
             quota_ref.delete(session=session)
 
         quota_usages = model_query(context, models.QuotaUsage,
                                    session=session, read_deleted="no").\
-                               filter_by(project_id=project_id).\
-                               all()
+            filter_by(project_id=project_id).\
+            all()
 
         for quota_usage_ref in quota_usages:
             quota_usage_ref.delete(session=session)
 
         reservations = model_query(context, models.Reservation,
                                    session=session, read_deleted="no").\
-                               filter_by(project_id=project_id).\
-                               all()
+            filter_by(project_id=project_id).\
+            all()
 
         for reservation_ref in reservations:
             reservation_ref.delete(session=session)
@@ -874,8 +873,8 @@ def reservation_expire(context):
         current_time = timeutils.utcnow()
         results = model_query(context, models.Reservation, session=session,
                               read_deleted="no").\
-                          filter(models.Reservation.expire < current_time).\
-                          all()
+            filter(models.Reservation.expire < current_time).\
+            all()
 
         if results:
             for reservation in results:
@@ -895,10 +894,10 @@ def volume_allocate_iscsi_target(context, volume_id, host):
     with session.begin():
         iscsi_target_ref = model_query(context, models.IscsiTarget,
                                        session=session, read_deleted="no").\
-                                filter_by(volume=None).\
-                                filter_by(host=host).\
-                                with_lockmode('update').\
-                                first()
+            filter_by(volume=None).\
+            filter_by(host=host).\
+            with_lockmode('update').\
+            first()
 
         # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
         #             then this has concurrency issues
@@ -949,8 +948,8 @@ def volume_data_get_for_host(context, host, session=None):
                          func.sum(models.Volume.size),
                          read_deleted="no",
                          session=session).\
-                     filter_by(host=host).\
-                     first()
+        filter_by(host=host).\
+        first()
 
     # NOTE(vish): convert None to 0
     return (result[0] or 0, result[1] or 0)
@@ -963,8 +962,8 @@ def volume_data_get_for_project(context, project_id, session=None):
                          func.sum(models.Volume.size),
                          read_deleted="no",
                          session=session).\
-                     filter_by(project_id=project_id).\
-                     first()
+        filter_by(project_id=project_id).\
+        first()
 
     # NOTE(vish): convert None to 0
     return (result[0] or 0, result[1] or 0)
@@ -975,19 +974,19 @@ def volume_destroy(context, volume_id):
     session = get_session()
     with session.begin():
         session.query(models.Volume).\
-                filter_by(id=volume_id).\
-                update({'status': 'deleted',
-                        'deleted': True,
-                        'deleted_at': timeutils.utcnow(),
-                        'updated_at': literal_column('updated_at')})
+            filter_by(id=volume_id).\
+            update({'status': 'deleted',
+                    'deleted': True,
+                    'deleted_at': timeutils.utcnow(),
+                    'updated_at': literal_column('updated_at')})
         session.query(models.IscsiTarget).\
-                filter_by(volume_id=volume_id).\
-                update({'volume_id': None})
+            filter_by(volume_id=volume_id).\
+            update({'volume_id': None})
         session.query(models.VolumeMetadata).\
-                filter_by(volume_id=volume_id).\
-                update({'deleted': True,
-                        'deleted_at': timeutils.utcnow(),
-                        'updated_at': literal_column('updated_at')})
+            filter_by(volume_id=volume_id).\
+            update({'deleted': True,
+                    'deleted_at': timeutils.utcnow(),
+                    'updated_at': literal_column('updated_at')})
 
 
 @require_admin_context
@@ -1006,15 +1005,15 @@ def volume_detached(context, volume_id):
 def _volume_get_query(context, session=None, project_only=False):
     return model_query(context, models.Volume, session=session,
                        project_only=project_only).\
-                       options(joinedload('volume_metadata')).\
-                       options(joinedload('volume_type'))
+        options(joinedload('volume_metadata')).\
+        options(joinedload('volume_type'))
 
 
 @require_context
 def volume_get(context, volume_id, session=None):
     result = _volume_get_query(context, session=session, project_only=True).\
-                    filter_by(id=volume_id).\
-                    first()
+        filter_by(id=volume_id).\
+        first()
 
     if not result:
         raise exception.VolumeNotFound(volume_id=volume_id)
@@ -1035,10 +1034,10 @@ def volume_get_all_by_host(context, host):
 @require_admin_context
 def volume_get_all_by_instance_uuid(context, instance_uuid):
     result = model_query(context, models.Volume, read_deleted="no").\
-                     options(joinedload('volume_metadata')).\
-                     options(joinedload('volume_type')).\
-                     filter_by(instance_uuid=instance_uuid).\
-                     all()
+        options(joinedload('volume_metadata')).\
+        options(joinedload('volume_type')).\
+        filter_by(instance_uuid=instance_uuid).\
+        all()
 
     if not result:
         return []
@@ -1055,8 +1054,8 @@ def volume_get_all_by_project(context, project_id):
 @require_admin_context
 def volume_get_iscsi_target_num(context, volume_id):
     result = model_query(context, models.IscsiTarget, read_deleted="yes").\
-                     filter_by(volume_id=volume_id).\
-                     first()
+        filter_by(volume_id=volume_id).\
+        first()
 
     if not result:
         raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
@@ -1085,7 +1084,7 @@ def volume_update(context, volume_id, values):
 def _volume_metadata_get_query(context, volume_id, session=None):
     return model_query(context, models.VolumeMetadata,
                        session=session, read_deleted="no").\
-                    filter_by(volume_id=volume_id)
+        filter_by(volume_id=volume_id)
 
 
 @require_context
@@ -1113,8 +1112,8 @@ def volume_metadata_delete(context, volume_id, key):
 @require_volume_exists
 def volume_metadata_get_item(context, volume_id, key, session=None):
     result = _volume_metadata_get_query(context, volume_id, session=session).\
-                    filter_by(key=key).\
-                    first()
+        filter_by(key=key).\
+        first()
 
     if not result:
         raise exception.VolumeMetadataNotFound(metadata_key=key,
@@ -1179,19 +1178,19 @@ def snapshot_destroy(context, snapshot_id):
     session = get_session()
     with session.begin():
         session.query(models.Snapshot).\
-                filter_by(id=snapshot_id).\
-                update({'status': 'deleted',
-                        'deleted': True,
-                        'deleted_at': timeutils.utcnow(),
-                        'updated_at': literal_column('updated_at')})
+            filter_by(id=snapshot_id).\
+            update({'status': 'deleted',
+                    'deleted': True,
+                    'deleted_at': timeutils.utcnow(),
+                    'updated_at': literal_column('updated_at')})
 
 
 @require_context
 def snapshot_get(context, snapshot_id, session=None):
     result = model_query(context, models.Snapshot, session=session,
                          project_only=True).\
-                filter_by(id=snapshot_id).\
-                first()
+        filter_by(id=snapshot_id).\
+        first()
 
     if not result:
         raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
@@ -1208,15 +1207,15 @@ def snapshot_get_all(context):
 def snapshot_get_all_for_volume(context, volume_id):
     return model_query(context, models.Snapshot, read_deleted='no',
                        project_only=True).\
-              filter_by(volume_id=volume_id).all()
+        filter_by(volume_id=volume_id).all()
 
 
 @require_context
 def snapshot_get_all_by_project(context, project_id):
     authorize_project_context(context, project_id)
     return model_query(context, models.Snapshot).\
-                   filter_by(project_id=project_id).\
-                   all()
+        filter_by(project_id=project_id).\
+        all()
 
 
 @require_context
@@ -1227,8 +1226,8 @@ def snapshot_data_get_for_project(context, project_id, session=None):
                          func.sum(models.Snapshot.volume_size),
                          read_deleted="no",
                          session=session).\
-                     filter_by(project_id=project_id).\
-                     first()
+        filter_by(project_id=project_id).\
+        first()
 
     # NOTE(vish): convert None to 0
     return (result[0] or 0, result[1] or 0)
@@ -1268,8 +1267,8 @@ def migration_update(context, id, values):
 def migration_get(context, id, session=None):
     result = model_query(context, models.Migration, session=session,
                          read_deleted="yes").\
-                     filter_by(id=id).\
-                     first()
+        filter_by(id=id).\
+        first()
 
     if not result:
         raise exception.MigrationNotFound(migration_id=id)
@@ -1280,9 +1279,9 @@ def migration_get(context, id, session=None):
 @require_admin_context
 def migration_get_by_instance_and_status(context, instance_uuid, status):
     result = model_query(context, models.Migration, read_deleted="yes").\
-                     filter_by(instance_uuid=instance_uuid).\
-                     filter_by(status=status).\
-                     first()
+        filter_by(instance_uuid=instance_uuid).\
+        filter_by(status=status).\
+        first()
 
     if not result:
         raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
@@ -1294,13 +1293,13 @@ def migration_get_by_instance_and_status(context, instance_uuid, status):
 @require_admin_context
 def migration_get_all_unconfirmed(context, confirm_window, session=None):
     confirm_window = timeutils.utcnow() - datetime.timedelta(
-            seconds=confirm_window)
+        seconds=confirm_window)
 
     return model_query(context, models.Migration, session=session,
                        read_deleted="yes").\
-            filter(models.Migration.updated_at <= confirm_window).\
-            filter_by(status="finished").\
-            all()
+        filter(models.Migration.updated_at <= confirm_window).\
+        filter_by(status="finished").\
+        all()
 
 
 ##################
@@ -1342,9 +1341,9 @@ def volume_type_get_all(context, inactive=False, filters=None):
     read_deleted = "yes" if inactive else "no"
     rows = model_query(context, models.VolumeTypes,
                        read_deleted=read_deleted).\
-                        options(joinedload('extra_specs')).\
-                        order_by("name").\
-                        all()
+        options(joinedload('extra_specs')).\
+        order_by("name").\
+        all()
 
     # TODO(sirp): this patern of converting rows to a result with extra_specs
     # is repeated quite a bit, might be worth creating a method for it
@@ -1359,9 +1358,9 @@ def volume_type_get_all(context, inactive=False, filters=None):
 def volume_type_get(context, id, session=None):
     """Returns a dict describing specific volume_type"""
     result = model_query(context, models.VolumeTypes, session=session).\
-                    options(joinedload('extra_specs')).\
-                    filter_by(id=id).\
-                    first()
+        options(joinedload('extra_specs')).\
+        filter_by(id=id).\
+        first()
 
     if not result:
         raise exception.VolumeTypeNotFound(volume_type_id=id)
@@ -1373,9 +1372,9 @@ def volume_type_get(context, id, session=None):
 def volume_type_get_by_name(context, name, session=None):
     """Returns a dict describing specific volume_type"""
     result = model_query(context, models.VolumeTypes, session=session).\
-                    options(joinedload('extra_specs')).\
-                    filter_by(name=name).\
-                    first()
+        options(joinedload('extra_specs')).\
+        filter_by(name=name).\
+        first()
 
     if not result:
         raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
@@ -1391,25 +1390,27 @@ def volume_type_destroy(context, name):
                                                   session=session)
         volume_type_id = volume_type_ref['id']
         session.query(models.VolumeTypes).\
-                filter_by(id=volume_type_id).\
-                update({'deleted': True,
-                        'deleted_at': timeutils.utcnow(),
-                        'updated_at': literal_column('updated_at')})
+            filter_by(id=volume_type_id).\
+            update({'deleted': True,
+                    'deleted_at': timeutils.utcnow(),
+                    'updated_at': literal_column('updated_at')})
         session.query(models.VolumeTypeExtraSpecs).\
-                filter_by(volume_type_id=volume_type_id).\
-                update({'deleted': True,
-                        'deleted_at': timeutils.utcnow(),
-                        'updated_at': literal_column('updated_at')})
+            filter_by(volume_type_id=volume_type_id).\
+            update({'deleted': True,
+                    'deleted_at': timeutils.utcnow(),
+                    'updated_at': literal_column('updated_at')})
 
 
 @require_context
-def volume_get_active_by_window(context, begin, end=None,
-                                         project_id=None):
+def volume_get_active_by_window(context,
+                                begin,
+                                end=None,
+                                project_id=None):
     """Return volumes that were active during window."""
     session = get_session()
     query = session.query(models.Volume)
 
-    query = query.filter(or_(models.Volume.deleted_at == None,
+    # NOTE: must stay '== None' (not 'is None') so SQLAlchemy emits IS NULL;
+    # E711 is in the pep8 ignore list for exactly this reason.
+    query = query.filter(or_(models.Volume.deleted_at == None,
                              models.Volume.deleted_at > begin))
     if end:
         query = query.filter(models.Volume.created_at < end)
@@ -1425,13 +1426,13 @@ def volume_get_active_by_window(context, begin, end=None,
 def _volume_type_extra_specs_query(context, volume_type_id, session=None):
     return model_query(context, models.VolumeTypeExtraSpecs, session=session,
                        read_deleted="no").\
-                    filter_by(volume_type_id=volume_type_id)
+        filter_by(volume_type_id=volume_type_id)
 
 
 @require_context
 def volume_type_extra_specs_get(context, volume_type_id):
     rows = _volume_type_extra_specs_query(context, volume_type_id).\
-                    all()
+        all()
 
     result = {}
     for row in rows:
@@ -1453,13 +1454,14 @@ def volume_type_extra_specs_delete(context, volume_type_id, key):
 def volume_type_extra_specs_get_item(context, volume_type_id, key,
                                      session=None):
     result = _volume_type_extra_specs_query(
-                                    context, volume_type_id, session=session).\
-                    filter_by(key=key).\
-                    first()
+        context, volume_type_id, session=session).\
+        filter_by(key=key).\
+        first()
 
     if not result:
         raise exception.VolumeTypeExtraSpecsNotFound(
-                   extra_specs_key=key, volume_type_id=volume_type_id)
+            extra_specs_key=key,
+            volume_type_id=volume_type_id)
 
     return result
 
@@ -1493,8 +1495,8 @@ def volume_glance_metadata_get(context, volume_id, session=None):
         session = get_session()
 
     return session.query(models.VolumeGlanceMetadata).\
-                         filter_by(volume_id=volume_id).\
-                         filter_by(deleted=False).all()
+        filter_by(volume_id=volume_id).\
+        filter_by(deleted=False).all()
 
 
 @require_context
@@ -1505,8 +1507,8 @@ def volume_snapshot_glance_metadata_get(context, snapshot_id, session=None):
         session = get_session()
 
     return session.query(models.VolumeGlanceMetadata).\
-                         filter_by(snapshot_id=snapshot_id).\
-                         filter_by(deleted=False).all()
+        filter_by(snapshot_id=snapshot_id).\
+        filter_by(deleted=False).all()
 
 
 @require_context
@@ -1523,9 +1525,9 @@ def volume_glance_metadata_create(context, volume_id, key, value,
 
     with session.begin():
         rows = session.query(models.VolumeGlanceMetadata).\
-                filter_by(volume_id=volume_id).\
-                filter_by(key=key).\
-                filter_by(deleted=False).all()
+            filter_by(volume_id=volume_id).\
+            filter_by(key=key).\
+            filter_by(deleted=False).all()
 
         if len(rows) > 0:
             raise exception.GlanceMetadataExists(key=key,
@@ -1577,7 +1579,7 @@ def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id,
         session = get_session()
 
     metadata = volume_snapshot_glance_metadata_get(context, snapshot_id,
-                                                session=session)
+                                                   session=session)
     with session.begin():
         for meta in metadata:
             vol_glance_metadata = models.VolumeGlanceMetadata()
@@ -1628,8 +1630,8 @@ def sm_backend_conf_update(context, sm_backend_id, values):
         backend_conf = model_query(context, models.SMBackendConf,
                                    session=session,
                                    read_deleted="yes").\
-                           filter_by(id=sm_backend_id).\
-                           first()
+            filter_by(id=sm_backend_id).\
+            first()
 
         if not backend_conf:
             raise exception.NotFound(
@@ -1648,15 +1650,15 @@ def sm_backend_conf_delete(context, sm_backend_id):
     with session.begin():
         model_query(context, models.SMBackendConf, session=session,
                     read_deleted="yes").\
-                filter_by(id=sm_backend_id).\
-                delete()
+            filter_by(id=sm_backend_id).\
+            delete()
 
 
 @require_admin_context
 def sm_backend_conf_get(context, sm_backend_id):
     result = model_query(context, models.SMBackendConf, read_deleted="yes").\
-                     filter_by(id=sm_backend_id).\
-                     first()
+        filter_by(id=sm_backend_id).\
+        first()
 
     if not result:
         raise exception.NotFound(_("No backend config with id "
@@ -1668,14 +1670,14 @@ def sm_backend_conf_get(context, sm_backend_id):
 @require_admin_context
 def sm_backend_conf_get_by_sr(context, sr_uuid):
     return model_query(context, models.SMBackendConf, read_deleted="yes").\
-                    filter_by(sr_uuid=sr_uuid).\
-                    first()
+        filter_by(sr_uuid=sr_uuid).\
+        first()
 
 
 @require_admin_context
 def sm_backend_conf_get_all(context):
     return model_query(context, models.SMBackendConf, read_deleted="yes").\
-                    all()
+        all()
 
 
 ####################
@@ -1684,7 +1686,7 @@ def sm_backend_conf_get_all(context):
 def _sm_flavor_get_query(context, sm_flavor_label, session=None):
     return model_query(context, models.SMFlavors, session=session,
                        read_deleted="yes").\
-                        filter_by(label=sm_flavor_label)
+        filter_by(label=sm_flavor_label)
 
 
 @require_admin_context
@@ -1716,7 +1718,7 @@ def sm_flavor_get(context, sm_flavor_label):
 
     if not result:
         raise exception.NotFound(
-                _("No sm_flavor called %(sm_flavor)s") % locals())
+            _("No sm_flavor called %(sm_flavor)s") % locals())
 
     return result
 
@@ -1732,7 +1734,7 @@ def sm_flavor_get_all(context):
 def _sm_volume_get_query(context, volume_id, session=None):
     return model_query(context, models.SMVolume, session=session,
                        read_deleted="yes").\
-                        filter_by(id=volume_id)
+        filter_by(id=volume_id)
 
 
 def sm_volume_create(context, values):
@@ -1760,7 +1762,7 @@ def sm_volume_get(context, volume_id):
 
     if not result:
         raise exception.NotFound(
-                _("No sm_volume with id %(volume_id)s") % locals())
+            _("No sm_volume with id %(volume_id)s") % locals())
 
     return result
 
index 3491734d9b3b1b2f56b0f63c16fca98d72bb6b27..9f5b643c52fe7d9966fdad4447d28d4fd4e5d72c 100644 (file)
@@ -28,23 +28,29 @@ def upgrade(migrate_engine):
 
     # New table
     quota_classes = Table('quota_classes', meta,
-            Column('created_at', DateTime(timezone=False)),
-            Column('updated_at', DateTime(timezone=False)),
-            Column('deleted_at', DateTime(timezone=False)),
-            Column('deleted', Boolean(create_constraint=True, name=None)),
-            Column('id', Integer(), primary_key=True),
-            Column('class_name',
-                   String(length=255, convert_unicode=True,
-                          assert_unicode=None, unicode_error=None,
-                          _warn_on_bytestring=False), index=True),
-            Column('resource',
-                   String(length=255, convert_unicode=True,
-                          assert_unicode=None, unicode_error=None,
-                          _warn_on_bytestring=False)),
-            Column('hard_limit', Integer(), nullable=True),
-            mysql_engine='InnoDB',
-            mysql_charset='utf8',
-            )
+                          Column('created_at', DateTime(timezone=False)),
+                          Column('updated_at', DateTime(timezone=False)),
+                          Column('deleted_at', DateTime(timezone=False)),
+                          Column('deleted', Boolean(create_constraint=True,
+                                                    name=None)),
+                          Column('id', Integer(), primary_key=True),
+                          Column('class_name',
+                                 String(length=255,
+                                        convert_unicode=True,
+                                        assert_unicode=None,
+                                        unicode_error=None,
+                                        _warn_on_bytestring=False),
+                                 index=True),
+                          Column('resource',
+                                 String(length=255,
+                                        convert_unicode=True,
+                                        assert_unicode=None,
+                                        unicode_error=None,
+                                        _warn_on_bytestring=False)),
+                          Column('hard_limit', Integer(), nullable=True),
+                          mysql_engine='InnoDB',
+                          mysql_charset='utf8',
+                          )
 
     try:
         quota_classes.create()
@@ -53,26 +59,27 @@ def upgrade(migrate_engine):
         raise
 
     quota_usages = Table('quota_usages', meta,
-            Column('created_at', DateTime(timezone=False)),
-            Column('updated_at', DateTime(timezone=False)),
-            Column('deleted_at', DateTime(timezone=False)),
-            Column('deleted', Boolean(create_constraint=True, name=None)),
-            Column('id', Integer(), primary_key=True),
-            Column('project_id',
-                   String(length=255, convert_unicode=True,
-                          assert_unicode=None, unicode_error=None,
-                          _warn_on_bytestring=False),
-                   index=True),
-            Column('resource',
-                   String(length=255, convert_unicode=True,
-                          assert_unicode=None, unicode_error=None,
-                          _warn_on_bytestring=False)),
-            Column('in_use', Integer(), nullable=False),
-            Column('reserved', Integer(), nullable=False),
-            Column('until_refresh', Integer(), nullable=True),
-            mysql_engine='InnoDB',
-            mysql_charset='utf8',
-            )
+                         Column('created_at', DateTime(timezone=False)),
+                         Column('updated_at', DateTime(timezone=False)),
+                         Column('deleted_at', DateTime(timezone=False)),
+                         Column('deleted', Boolean(create_constraint=True,
+                                                   name=None)),
+                         Column('id', Integer(), primary_key=True),
+                         Column('project_id',
+                                String(length=255, convert_unicode=True,
+                                       assert_unicode=None, unicode_error=None,
+                                       _warn_on_bytestring=False),
+                                index=True),
+                         Column('resource',
+                                String(length=255, convert_unicode=True,
+                                       assert_unicode=None, unicode_error=None,
+                                       _warn_on_bytestring=False)),
+                         Column('in_use', Integer(), nullable=False),
+                         Column('reserved', Integer(), nullable=False),
+                         Column('until_refresh', Integer(), nullable=True),
+                         mysql_engine='InnoDB',
+                         mysql_charset='utf8',
+                         )
 
     try:
         quota_usages.create()
@@ -81,31 +88,37 @@ def upgrade(migrate_engine):
         raise
 
     reservations = Table('reservations', meta,
-            Column('created_at', DateTime(timezone=False)),
-            Column('updated_at', DateTime(timezone=False)),
-            Column('deleted_at', DateTime(timezone=False)),
-            Column('deleted', Boolean(create_constraint=True, name=None)),
-            Column('id', Integer(), primary_key=True),
-            Column('uuid',
-                   String(length=36, convert_unicode=True,
-                          assert_unicode=None, unicode_error=None,
-                          _warn_on_bytestring=False), nullable=False),
-            Column('usage_id', Integer(), ForeignKey('quota_usages.id'),
-                   nullable=False),
-            Column('project_id',
-                   String(length=255, convert_unicode=True,
-                          assert_unicode=None, unicode_error=None,
-                          _warn_on_bytestring=False),
-                   index=True),
-            Column('resource',
-                   String(length=255, convert_unicode=True,
-                          assert_unicode=None, unicode_error=None,
-                          _warn_on_bytestring=False)),
-            Column('delta', Integer(), nullable=False),
-            Column('expire', DateTime(timezone=False)),
-            mysql_engine='InnoDB',
-            mysql_charset='utf8',
-            )
+                         Column('created_at', DateTime(timezone=False)),
+                         Column('updated_at', DateTime(timezone=False)),
+                         Column('deleted_at', DateTime(timezone=False)),
+                         Column('deleted', Boolean(create_constraint=True,
+                                                   name=None)),
+                         Column('id', Integer(), primary_key=True),
+                         Column('uuid',
+                                String(length=36,
+                                       convert_unicode=True,
+                                       assert_unicode=None,
+                                       unicode_error=None,
+                                       _warn_on_bytestring=False),
+                                nullable=False),
+                         Column('usage_id',
+                                Integer(),
+                                ForeignKey('quota_usages.id'),
+                                nullable=False),
+                         Column('project_id',
+                                String(length=255, convert_unicode=True,
+                                       assert_unicode=None, unicode_error=None,
+                                       _warn_on_bytestring=False),
+                                index=True),
+                         Column('resource',
+                                String(length=255, convert_unicode=True,
+                                       assert_unicode=None, unicode_error=None,
+                                       _warn_on_bytestring=False)),
+                         Column('delta', Integer(), nullable=False),
+                         Column('expire', DateTime(timezone=False)),
+                         mysql_engine='InnoDB',
+                         mysql_charset='utf8',
+                         )
 
     try:
         reservations.create()
index ff990a9e548f8f4d58c8a2291d915a512442172f..768a13d86b37c3505b95e4f4cdd5030341cd7161 100644 (file)
@@ -29,27 +29,31 @@ def upgrade(migrate_engine):
     # Just for the ForeignKey and column creation to succeed, these are not the
     # actual definitions of tables .
     #
-    volumes = Table('volumes', meta,
-           Column('id', Integer(), primary_key=True, nullable=False),
-           mysql_engine='InnoDB'
-           )
-    snapshots = Table('snapshots', meta,
-           Column('id', Integer(), primary_key=True, nullable=False),
-           mysql_engine='InnoDB'
-           )
+    volumes = Table('volumes',
+                    meta,
+                    Column('id', Integer(),
+                           primary_key=True, nullable=False),
+                    mysql_engine='InnoDB')
+    snapshots = Table('snapshots',
+                      meta,
+                      Column('id', Integer(),
+                             primary_key=True, nullable=False),
+                      mysql_engine='InnoDB')
     # Create new table
-    volume_glance_metadata = Table('volume_glance_metadata', meta,
-            Column('created_at', DateTime(timezone=False)),
-            Column('updated_at', DateTime(timezone=False)),
-            Column('deleted_at', DateTime(timezone=False)),
-            Column('deleted', Boolean(create_constraint=True, name=None)),
-            Column('id', Integer(), primary_key=True, nullable=False),
-            Column('volume_id', String(length=36), ForeignKey('volumes.id')),
-            Column('snapshot_id', String(length=36),
-                   ForeignKey('snapshots.id')),
-            Column('key', String(255)),
-            Column('value', Text),
-            mysql_engine='InnoDB'
+    volume_glance_metadata = Table(
+        'volume_glance_metadata',
+        meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('volume_id', String(length=36), ForeignKey('volumes.id')),
+        Column('snapshot_id', String(length=36),
+               ForeignKey('snapshots.id')),
+        Column('key', String(255)),
+        Column('value', Text),
+        mysql_engine='InnoDB'
     )
 
     try:
index aae7af6a716c84d470cbf05f7e033dfc015bd40d..6207e214da7eaf33c8df717b95a2000b932bab91 100644 (file)
@@ -85,7 +85,7 @@ class CinderBase(object):
         return n, getattr(self, n)
 
     def update(self, values):
-        """Make the model object behave like a dict"""
+        """Make the model object behave like a dict."""
         for k, v in values.iteritems():
             setattr(self, k, v)
 
@@ -159,7 +159,7 @@ class Volume(BASE, CinderBase):
 
 
 class VolumeMetadata(BASE, CinderBase):
-    """Represents a metadata key/value pair for a volume"""
+    """Represents a metadata key/value pair for a volume."""
     __tablename__ = 'volume_metadata'
     id = Column(Integer, primary_key=True)
     key = Column(String(255))
@@ -173,7 +173,7 @@ class VolumeMetadata(BASE, CinderBase):
 
 
 class VolumeTypes(BASE, CinderBase):
-    """Represent possible volume_types of volumes offered"""
+    """Represent possible volume_types of volumes offered."""
     __tablename__ = "volume_types"
     id = Column(Integer, primary_key=True)
     name = Column(String(255))
@@ -187,7 +187,7 @@ class VolumeTypes(BASE, CinderBase):
 
 
 class VolumeTypeExtraSpecs(BASE, CinderBase):
-    """Represents additional specs as key/value pairs for a volume_type"""
+    """Represents additional specs as key/value pairs for a volume_type."""
     __tablename__ = 'volume_type_extra_specs'
     id = Column(Integer, primary_key=True)
     key = Column(String(255))
@@ -206,7 +206,7 @@ class VolumeTypeExtraSpecs(BASE, CinderBase):
 
 
 class VolumeGlanceMetadata(BASE, CinderBase):
-    """Glance metadata for a bootable volume"""
+    """Glance metadata for a bootable volume."""
     __tablename__ = 'volume_glance_metadata'
     id = Column(Integer, primary_key=True, nullable=False)
     volume_id = Column(String(36), ForeignKey('volumes.id'))
@@ -317,7 +317,7 @@ class Snapshot(BASE, CinderBase):
 
 
 class IscsiTarget(BASE, CinderBase):
-    """Represents an iscsi target for a given host"""
+    """Represents an iscsi target for a given host."""
     __tablename__ = 'iscsi_targets'
     __table_args__ = (schema.UniqueConstraint("target_num", "host"),
                       {'mysql_engine': 'InnoDB'})
index 26b02299d36f72f50fb7f12c71e985e80b63851e..766ce0c1dffcc74b381df407e5077f8402927949 100644 (file)
@@ -138,8 +138,8 @@ def get_engine():
                     _ENGINE.connect()
                     break
                 except OperationalError, e:
-                    if (remaining != 'infinite' and remaining == 0) or \
-                       not is_db_connection_error(e.args[0]):
+                    if ((remaining != 'infinite' and remaining == 0) or
+                            not is_db_connection_error(e.args[0])):
                         raise
     return _ENGINE
 
index 6687da723aebf0f948a2bed2719cb7bb59ab30a8..10476d78fa04ecfd3ecd6ad508b2c2220b4da0cd 100644 (file)
@@ -98,8 +98,7 @@ core_opts = [
                help='Directory where cinder binaries are installed'),
     cfg.StrOpt('state_path',
                default='$pybasedir',
-               help="Top-level directory for maintaining cinder's state"),
-    ]
+               help="Top-level directory for maintaining cinder's state"), ]
 
 debug_opts = [
 ]
@@ -122,8 +121,8 @@ global_opts = [
                 help='A list of the glance api servers available to cinder '
                      '([hostname|ip]:port)'),
     cfg.IntOpt('glance_num_retries',
-                default=0,
-                help='Number retries when downloading an image from glance'),
+               default=0,
+               help='Number retries when downloading an image from glance'),
     cfg.StrOpt('scheduler_topic',
                default='cinder-scheduler',
                help='the topic scheduler nodes listen on'),
@@ -217,8 +216,8 @@ global_opts = [
                default=60,
                help='maximum time since last check-in for up service'),
     cfg.StrOpt('volume_api_class',
-                default='cinder.volume.api.API',
-                help='The full class name of the volume API class to use'),
+               default='cinder.volume.api.API',
+               help='The full class name of the volume API class to use'),
     cfg.StrOpt('auth_strategy',
                default='noauth',
                help='The strategy to use for auth. Supports noauth, keystone, '
@@ -228,7 +227,6 @@ global_opts = [
                help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
     cfg.BoolOpt('secure_delete',
                 default=True,
-                help='Whether to perform secure delete'),
-]
+                help='Whether to perform secure delete'), ]
 
 FLAGS.register_opts(global_opts)
index c81be9e40460a9e1ae2051d4493536784d551cd5..890a4b4963fd7ada98b7eb8447f62d430aa994c1 100644 (file)
@@ -111,8 +111,8 @@ class GlanceClientWrapper(object):
         retry the request according to FLAGS.glance_num_retries.
         """
         retry_excs = (glanceclient.exc.ServiceUnavailable,
-                glanceclient.exc.InvalidEndpoint,
-                glanceclient.exc.CommunicationError)
+                      glanceclient.exc.InvalidEndpoint,
+                      glanceclient.exc.CommunicationError)
         num_attempts = 1 + FLAGS.glance_num_retries
 
         for attempt in xrange(1, num_attempts + 1):
@@ -125,12 +125,14 @@ class GlanceClientWrapper(object):
                 port = self.port
                 extra = "retrying"
                 error_msg = _("Error contacting glance server "
-                        "'%(host)s:%(port)s' for '%(method)s', %(extra)s.")
+                              "'%(host)s:%(port)s' for '%(method)s', "
+                              "%(extra)s.")
                 if attempt == num_attempts:
                     extra = 'done trying'
                     LOG.exception(error_msg, locals())
-                    raise exception.GlanceConnectionFailed(
-                            host=host, port=port, reason=str(e))
+                    raise exception.GlanceConnectionFailed(host=host,
+                                                           port=port,
+                                                           reason=str(e))
                 LOG.exception(error_msg, locals())
                 time.sleep(1)
 
@@ -220,8 +222,8 @@ class GlanceImageService(object):
 
         return self._translate_from_glance(recv_service_image_meta)
 
-    def update(self, context, image_id, image_meta, data=None,
-            purge_props=True):
+    def update(self, context, image_id,
+               image_meta, data=None, purge_props=True):
         """Modify the given image with the new data."""
         image_meta = self._translate_to_glance(image_meta)
         image_meta['purge_props'] = purge_props
@@ -378,7 +380,7 @@ def _reraise_translated_exception():
 
 def _translate_image_exception(image_id, exc_value):
     if isinstance(exc_value, (glanceclient.exc.Forbidden,
-                    glanceclient.exc.Unauthorized)):
+                              glanceclient.exc.Unauthorized)):
         return exception.ImageNotAuthorized(image_id=image_id)
     if isinstance(exc_value, glanceclient.exc.NotFound):
         return exception.ImageNotFound(image_id=image_id)
@@ -389,7 +391,7 @@ def _translate_image_exception(image_id, exc_value):
 
 def _translate_plain_exception(exc_value):
     if isinstance(exc_value, (glanceclient.exc.Forbidden,
-                    glanceclient.exc.Unauthorized)):
+                              glanceclient.exc.Unauthorized)):
         return exception.NotAuthorized(exc_value)
     if isinstance(exc_value, glanceclient.exc.NotFound):
         return exception.NotFound(exc_value)
@@ -419,7 +421,8 @@ def get_remote_image_service(context, image_href):
     try:
         (image_id, glance_host, glance_port) = _parse_image_ref(image_href)
         glance_client = GlanceClientWrapper(context=context,
-                host=glance_host, port=glance_port)
+                                            host=glance_host,
+                                            port=glance_port)
     except ValueError:
         raise exception.InvalidImageRef(image_href=image_href)
 
index 68f3609e51b3a30e6ac5573fd92f06ba79a97ecd..5f52e56865a2f7e0b5c04e90a1862378d8a888a8 100644 (file)
@@ -214,5 +214,8 @@ class SchedulerDependentManager(Manager):
         """Pass data back to the scheduler at a periodic interval."""
         if self.last_capabilities:
             LOG.debug(_('Notifying Schedulers of capabilities ...'))
-            self.scheduler_rpcapi.update_service_capabilities(context,
-                    self.service_name, self.host, self.last_capabilities)
+            self.scheduler_rpcapi.update_service_capabilities(
+                context,
+                self.service_name,
+                self.host,
+                self.last_capabilities)
index 95cfd4c25bafd62acf33bf21cee42dfa1bc6225d..a427c3cffe948db7fdea02f69d7f07ed659c84a3 100644 (file)
@@ -30,8 +30,7 @@ policy_opts = [
                help=_('JSON file representing policy')),
     cfg.StrOpt('policy_default_rule',
                default='default',
-               help=_('Rule checked when requested rule is not found')),
-    ]
+               help=_('Rule checked when requested rule is not found')), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(policy_opts)
index 8c6057d42ddabb7e02ed3254b99c0478a79d3f6c..60d66bdf8575617782bc8b59f73ca50a8f79e736 100644 (file)
@@ -49,8 +49,7 @@ quota_opts = [
                help='number of seconds between subsequent usage refreshes'),
     cfg.StrOpt('quota_driver',
                default='cinder.quota.DbQuotaDriver',
-               help='default driver to use for quota checks'),
-    ]
+               help='default driver to use for quota checks'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(quota_opts)
@@ -156,9 +155,9 @@ class DbQuotaDriver(object):
                 continue
 
             quotas[resource.name] = dict(
-                limit=project_quotas.get(resource.name, class_quotas.get(
-                        resource.name, resource.default)),
-                )
+                limit=project_quotas.get(resource.name,
+                                         class_quotas.get(resource.name,
+                                                          resource.default)), )
 
             # Include usages if desired.  This is optional because one
             # internal consumer of this interface wants to access the
@@ -167,8 +166,7 @@ class DbQuotaDriver(object):
                 usage = project_usages.get(resource.name, {})
                 quotas[resource.name].update(
                     in_use=usage.get('in_use', 0),
-                    reserved=usage.get('reserved', 0),
-                    )
+                    reserved=usage.get('reserved', 0), )
 
         return quotas
 
@@ -577,10 +575,10 @@ class QuotaEngine(object):
         """
 
         return self._driver.get_project_quotas(context, self._resources,
-                                              project_id,
-                                              quota_class=quota_class,
-                                              defaults=defaults,
-                                              usages=usages)
+                                               project_id,
+                                               quota_class=quota_class,
+                                               defaults=defaults,
+                                               usages=usages)
 
     def count(self, context, resource, *args, **kwargs):
         """Count a resource.
@@ -729,14 +727,16 @@ class QuotaEngine(object):
 
 def _sync_instances(context, project_id, session):
     return dict(zip(('instances', 'cores', 'ram'),
-                    db.instance_data_get_for_project(
-                context, project_id, session=session)))
+                    db.instance_data_get_for_project(context,
+                                                     project_id,
+                                                     session=session)))
 
 
 def _sync_volumes(context, project_id, session):
     return dict(zip(('volumes', 'gigabytes'),
-                    db.volume_data_get_for_project(
-                context, project_id, session=session)))
+                db.volume_data_get_for_project(context,
+                                               project_id,
+                                               session=session)))
 
 
 QUOTAS = QuotaEngine()
@@ -744,8 +744,7 @@ QUOTAS = QuotaEngine()
 
 resources = [
     ReservableResource('volumes', _sync_volumes, 'quota_volumes'),
-    ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'),
-    ]
+    ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'), ]
 
 
 QUOTAS.register_resources(resources)
index 363d73857f80cbd49405608a8121c980ac8eead9..876a12d8f5b5e3be90f1acb1a6e60821afc500be 100644 (file)
@@ -69,4 +69,4 @@ class ChanceScheduler(driver.Scheduler):
 
         updated_volume = driver.volume_update_db(context, volume_id, host)
         self.volume_rpcapi.create_volume(context, updated_volume, host,
-                                    snapshot_id, image_id)
+                                         snapshot_id, image_id)
index be1c3f320943ddf2111b6d3a6650406625e1da77..f7d72983fef48a54dbd5b2bd433ca26b262439d6 100644 (file)
@@ -33,8 +33,7 @@ from cinder.volume import rpcapi as volume_rpcapi
 scheduler_driver_opts = [
     cfg.StrOpt('scheduler_host_manager',
                default='cinder.scheduler.host_manager.HostManager',
-               help='The scheduler host manager class to use'),
-    ]
+               help='The scheduler host manager class to use'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(scheduler_driver_opts)
@@ -55,7 +54,7 @@ class Scheduler(object):
 
     def __init__(self):
         self.host_manager = importutils.import_object(
-                FLAGS.scheduler_host_manager)
+            FLAGS.scheduler_host_manager)
         self.volume_rpcapi = volume_rpcapi.VolumeAPI()
 
     def get_host_list(self):
@@ -70,7 +69,8 @@ class Scheduler(object):
     def update_service_capabilities(self, service_name, host, capabilities):
         """Process a capability update from a service node."""
         self.host_manager.update_service_capabilities(service_name,
-                host, capabilities)
+                                                      host,
+                                                      capabilities)
 
     def hosts_up(self, context, topic):
         """Return the list of hosts that have a running service for topic."""
index e3b3a01c1d580eadb6b1e691a25bfe145df923d7..a423e8dc719f14854ef99ed0fef17dbf925d3202 100644 (file)
@@ -36,16 +36,17 @@ from cinder.openstack.common.notifier import api as notifier
 
 LOG = logging.getLogger(__name__)
 
-scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
-        default='cinder.scheduler.simple.SimpleScheduler',
-        help='Default driver to use for the scheduler')
+scheduler_driver_opt = cfg.StrOpt(
+    'scheduler_driver',
+    default='cinder.scheduler.simple.SimpleScheduler',
+    help='Default driver to use for the scheduler')
 
 FLAGS = flags.FLAGS
 FLAGS.register_opt(scheduler_driver_opt)
 
 
 class SchedulerManager(manager.Manager):
-    """Chooses a host to create volumes"""
+    """Chooses a host to create volumes."""
 
     RPC_API_VERSION = '1.2'
 
@@ -64,12 +65,13 @@ class SchedulerManager(manager.Manager):
         return self.driver.get_service_capabilities()
 
     def update_service_capabilities(self, context, service_name=None,
-            host=None, capabilities=None, **kwargs):
+                                    host=None, capabilities=None, **kwargs):
         """Process a capability update from a service node."""
         if capabilities is None:
             capabilities = {}
-        self.driver.update_service_capabilities(service_name, host,
-                capabilities)
+        self.driver.update_service_capabilities(service_name,
+                                                host,
+                                                capabilities)
 
     def create_volume(self, context, topic, volume_id, snapshot_id=None,
                       image_id=None, request_spec=None,
@@ -86,11 +88,12 @@ class SchedulerManager(manager.Manager):
                 volume_properties = {'size': size,
                                      'availability_zone': availability_zone,
                                      'volume_type_id': volume_type_id}
-                request_spec.update({'volume_id': volume_id,
-                                 'snapshot_id': snapshot_id,
-                                 'image_id': image_id,
-                                 'volume_properties': volume_properties,
-                                 'volume_type': dict(vol_type).iteritems()})
+                request_spec.update(
+                    {'volume_id': volume_id,
+                     'snapshot_id': snapshot_id,
+                     'image_id': image_id,
+                     'volume_properties': volume_properties,
+                     'volume_type': dict(vol_type).iteritems()})
 
             self.driver.schedule_create_volume(context, request_spec,
                                                filter_properties)
index 5966423373b430232321328472629b197a346da9..93bdb6e3e195a417d796effb1852d1c91836a8c5 100644 (file)
@@ -39,23 +39,26 @@ class SchedulerAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
     RPC_API_VERSION = '1.0'
 
     def __init__(self):
-        super(SchedulerAPI, self).__init__(topic=FLAGS.scheduler_topic,
-                default_version=self.RPC_API_VERSION)
+        super(SchedulerAPI, self).__init__(
+            topic=FLAGS.scheduler_topic,
+            default_version=self.RPC_API_VERSION)
 
     def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
                       image_id=None, request_spec=None,
                       filter_properties=None):
-        return self.cast(ctxt, self.make_msg('create_volume',
-                                         topic=topic,
-                                         volume_id=volume_id,
-                                         snapshot_id=snapshot_id,
-                                         image_id=image_id,
-                                         request_spec=request_spec,
-                                         filter_properties=filter_properties),
-                         version='1.2')
-
-    def update_service_capabilities(self, ctxt, service_name, host,
-            capabilities):
+        return self.cast(ctxt, self.make_msg(
+            'create_volume',
+            topic=topic,
+            volume_id=volume_id,
+            snapshot_id=snapshot_id,
+            image_id=image_id,
+            request_spec=request_spec,
+            filter_properties=filter_properties),
+            version='1.2')
+
+    def update_service_capabilities(self, ctxt,
+                                    service_name, host,
+                                    capabilities):
         self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
-                service_name=service_name, host=host,
-                capabilities=capabilities))
+                         service_name=service_name, host=host,
+                         capabilities=capabilities))
index 9e138a79f2a774bcc1d4323aea10c0fa6fb23dce..6f93972e3c62fe0120fdc57f2fda5dc848fff4a0 100644 (file)
@@ -33,8 +33,7 @@ from cinder import utils
 simple_scheduler_opts = [
     cfg.IntOpt("max_gigabytes",
                default=10000,
-               help="maximum number of volume gigabytes to allow per host"),
-    ]
+               help="maximum number of volume gigabytes to allow per host"), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(simple_scheduler_opts)
@@ -63,10 +62,11 @@ class SimpleScheduler(chance.ChanceScheduler):
             if not utils.service_is_up(service):
                 raise exception.WillNotSchedule(host=host)
             updated_volume = driver.volume_update_db(context, volume_id, host)
-            self.volume_rpcapi.create_volume(context, updated_volume,
-                    host,
-                    snapshot_id,
-                    image_id)
+            self.volume_rpcapi.create_volume(context,
+                                             updated_volume,
+                                             host,
+                                             snapshot_id,
+                                             image_id)
             return None
 
         results = db.service_get_all_volume_sorted(elevated)
@@ -81,10 +81,11 @@ class SimpleScheduler(chance.ChanceScheduler):
             if utils.service_is_up(service) and not service['disabled']:
                 updated_volume = driver.volume_update_db(context, volume_id,
                                                          service['host'])
-                self.volume_rpcapi.create_volume(context, updated_volume,
-                                            service['host'],
-                                            snapshot_id,
-                                            image_id)
+                self.volume_rpcapi.create_volume(context,
+                                                 updated_volume,
+                                                 service['host'],
+                                                 snapshot_id,
+                                                 image_id)
                 return None
         msg = _("Is the appropriate service running?")
         raise exception.NoValidHost(reason=msg)
index 6f0ac4b82ee735128b46f8434413c102f7641968..d4fbfc2027cda366e94a4e7c418e4b0ca99a1552 100644 (file)
@@ -59,8 +59,7 @@ service_opts = [
                help='IP address for OpenStack Volume API to listen'),
     cfg.IntOpt('osapi_volume_listen_port',
                default=8776,
-               help='port for os volume api to listen'),
-    ]
+               help='port for os volume api to listen'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(service_opts)
@@ -305,7 +304,7 @@ class Service(object):
                 state_catalog['availability_zone'] = zone
 
             db.service_update(ctxt,
-                             self.service_id, state_catalog)
+                              self.service_id, state_catalog)
 
             # TODO(termie): make this pattern be more elegant.
             if getattr(self, 'model_disconnected', False):
index e6a81bbe53435afd0c555b876df96726b0506c07..29be96467548e85b4a8767918b67aa006ccc8e71 100644 (file)
@@ -46,8 +46,7 @@ test_opts = [
                help='File name of clean sqlite db'),
     cfg.BoolOpt('fake_tests',
                 default=True,
-                help='should we use everything for testing'),
-    ]
+                help='should we use everything for testing'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(test_opts)
index 23661ec2dcc460a4f19da48bc8df18ea7d095f76..c6658d95177e5cf5fab1e6d01921a0e86e8590f1 100755 (executable)
@@ -131,8 +131,10 @@ class _Win32Colorizer(object):
     """
     def __init__(self, stream):
         import win32console as win
-        red, green, blue, bold = (win.FOREGROUND_RED, win.FOREGROUND_GREEN,
-                                 win.FOREGROUND_BLUE, win.FOREGROUND_INTENSITY)
+        red, green, blue, bold = (win.FOREGROUND_RED,
+                                  win.FOREGROUND_GREEN,
+                                  win.FOREGROUND_BLUE,
+                                  win.FOREGROUND_INTENSITY)
         self.stream = stream
         self.screenBuffer = win.GetStdHandle(win.STD_OUT_HANDLE)
         self._colors = {
@@ -143,8 +145,7 @@ class _Win32Colorizer(object):
             'yellow': red | green | bold,
             'magenta': red | blue | bold,
             'cyan': green | blue | bold,
-            'white': red | green | blue | bold
-            }
+            'white': red | green | blue | bold}
 
     def supported(cls, stream=sys.stdout):
         try:
@@ -314,10 +315,10 @@ class CinderTestRunner(core.TextTestRunner):
 
     def _makeResult(self):
         return CinderTestResult(self.stream,
-                              self.descriptions,
-                              self.verbosity,
-                              self.config,
-                              show_elapsed=self.show_elapsed)
+                                self.descriptions,
+                                self.verbosity,
+                                self.config,
+                                show_elapsed=self.show_elapsed)
 
     def _writeSlowTests(self, result_):
         # Pare out 'fast' tests
@@ -359,9 +360,9 @@ def run():
                       plugins=core.DefaultPluginManager())
 
     runner = CinderTestRunner(stream=c.stream,
-                            verbosity=c.verbosity,
-                            config=c,
-                            show_elapsed=not hide_elapsed)
+                              verbosity=c.verbosity,
+                              config=c,
+                              show_elapsed=not hide_elapsed)
     sys.exit(not core.run(config=c, testRunner=runner, argv=argv))
 
 
index 9ba1f53f32b8947b9fc4147a5a7041c87954b0b0..fdac9d709332875b7866fd29487ca6d077e1d18e 100644 (file)
@@ -33,17 +33,15 @@ UUID2 = '00000000-0000-0000-0000-000000000002'
 
 
 def _get_default_snapshot_param():
-    return {
-        'id': UUID1,
-        'volume_id': 12,
-        'status': 'available',
-        'volume_size': 100,
-        'created_at': None,
-        'display_name': 'Default name',
-        'display_description': 'Default description',
-        'project_id': 'fake',
-        'progress': '0%'
-        }
+    return {'id': UUID1,
+            'volume_id': 12,
+            'status': 'available',
+            'volume_size': 100,
+            'created_at': None,
+            'display_name': 'Default name',
+            'display_description': 'Default description',
+            'project_id': 'fake',
+            'progress': '0%'}
 
 
 def fake_snapshot_get(self, context, snapshot_id):
@@ -80,7 +78,7 @@ class ExtendedSnapshotAttributesTest(test.TestCase):
 
     def assertSnapshotAttributes(self, snapshot, project_id, progress):
         self.assertEqual(snapshot.get('%sproject_id' % self.prefix),
-                                      project_id)
+                         project_id)
         self.assertEqual(snapshot.get('%sprogress' % self.prefix), progress)
 
     def test_show(self):
@@ -89,8 +87,8 @@ class ExtendedSnapshotAttributesTest(test.TestCase):
 
         self.assertEqual(res.status_int, 200)
         self.assertSnapshotAttributes(self._get_snapshot(res.body),
-                                project_id='fake',
-                                progress='0%')
+                                      project_id='fake',
+                                      progress='0%')
 
     def test_detail(self):
         url = '/v1/fake/snapshots/detail'
@@ -99,8 +97,8 @@ class ExtendedSnapshotAttributesTest(test.TestCase):
         self.assertEqual(res.status_int, 200)
         for i, snapshot in enumerate(self._get_snapshots(res.body)):
             self.assertSnapshotAttributes(snapshot,
-                                    project_id='fake',
-                                    progress='0%')
+                                          project_id='fake',
+                                          progress='0%')
 
     def test_no_instance_passthrough_404(self):
 
index a2f984e16ed0507efe102563a8de3f8cd28a2641..3e1b9c613f0be13fef7af1a83301bcbc8be0852b 100644 (file)
@@ -59,7 +59,7 @@ class VolumeActionsTest(test.TestCase):
         app = fakes.wsgi_app()
         for _action in self._actions:
             req = webob.Request.blank('/v1/fake/volumes/%s/action' %
-                    self.UUID)
+                                      self.UUID)
             req.method = 'POST'
             req.body = jsonutils.dumps({_action: None})
             req.content_type = 'application/json'
@@ -153,15 +153,15 @@ class VolumeImageActionsTest(test.TestCase):
         req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id)
         res_dict = self.controller._volume_upload_image(req, id, body)
         expected = {'os-volume_upload_image': {'id': id,
-                           'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
-                           'status': 'uploading',
-                           'display_description': 'displaydesc',
-                           'size': 1,
-                           'volume_type': {'name': 'vol_type_name'},
-                           'image_id': 1,
-                           'container_format': 'bare',
-                           'disk_format': 'raw',
-                           'image_name': 'image_name'}}
+                    'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+                    'status': 'uploading',
+                    'display_description': 'displaydesc',
+                    'size': 1,
+                    'volume_type': {'name': 'vol_type_name'},
+                    'image_id': 1,
+                    'container_format': 'bare',
+                    'disk_format': 'raw',
+                    'image_name': 'image_name'}}
         self.assertDictMatch(res_dict, expected)
 
     def test_copy_volume_to_image_volumenotfound(self):
@@ -185,7 +185,7 @@ class VolumeImageActionsTest(test.TestCase):
 
     def test_copy_volume_to_image_invalidvolume(self):
         def stub_upload_volume_to_image_service_raise(self, context, volume,
-                                               metadata, force):
+                                                      metadata, force):
             raise exception.InvalidVolume
         self.stubs.Set(volume_api.API,
                        "copy_volume_to_image",
@@ -206,7 +206,7 @@ class VolumeImageActionsTest(test.TestCase):
 
     def test_copy_volume_to_image_valueerror(self):
         def stub_upload_volume_to_image_service_raise(self, context, volume,
-                                               metadata, force):
+                                                      metadata, force):
             raise ValueError
         self.stubs.Set(volume_api.API,
                        "copy_volume_to_image",
@@ -227,7 +227,7 @@ class VolumeImageActionsTest(test.TestCase):
 
     def test_copy_volume_to_image_remoteerror(self):
         def stub_upload_volume_to_image_service_raise(self, context, volume,
-                                               metadata, force):
+                                                      metadata, force):
             raise rpc_common.RemoteError
         self.stubs.Set(volume_api.API,
                        "copy_volume_to_image",
index 300ccaf3a2a595b0f3945a3eef1306e65fec48f0..2904fcc93593a7d70bdb1453ba4f44fd1d9e22c3 100644 (file)
@@ -121,7 +121,7 @@ class VolumeTenantAttributeTest(test.TestCase):
         res = req.get_response(app())
         vol = etree.XML(res.body)
         tenant_key = ('{http://docs.openstack.org/volume/ext/'
-                    'volume_tenant_attribute/api/v1}tenant_id')
+                      'volume_tenant_attribute/api/v1}tenant_id')
         self.assertEqual(vol.get(tenant_key), PROJECT_ID)
 
     def test_list_volumes_detail_xml(self):
@@ -133,5 +133,5 @@ class VolumeTenantAttributeTest(test.TestCase):
         res = req.get_response(app())
         vol = list(etree.XML(res.body))[0]
         tenant_key = ('{http://docs.openstack.org/volume/ext/'
-                       'volume_tenant_attribute/api/v1}tenant_id')
+                      'volume_tenant_attribute/api/v1}tenant_id')
         self.assertEqual(vol.get(tenant_key), PROJECT_ID)
index f01244d77cda3be45e3a74027e0fc7aacea98c1b..45fc106b884fc24da505fd8cf86525f46e490a3f 100644 (file)
@@ -74,7 +74,7 @@ class Foxinsocks(extensions.ExtensionDescriptor):
     def get_resources(self):
         resources = []
         resource = extensions.ResourceExtension('foxnsocks',
-                                               FoxInSocksController())
+                                                FoxInSocksController())
         resources.append(resource)
         return resources
 
@@ -84,8 +84,7 @@ class Foxinsocks(extensions.ExtensionDescriptor):
         extension_set = [
             (FoxInSocksServerControllerExtension, 'servers'),
             (FoxInSocksFlavorGooseControllerExtension, 'flavors'),
-            (FoxInSocksFlavorBandsControllerExtension, 'flavors'),
-            ]
+            (FoxInSocksFlavorBandsControllerExtension, 'flavors'), ]
         for klass, collection in extension_set:
             controller = klass()
             ext = extensions.ControllerExtension(self, collection, controller)
index 4ca375e08f6cb2f6d4761c1381ecc9712d320277..aff1dfbcc96d9ff760bb3ccb3ae08a74cbc99659 100644 (file)
@@ -69,7 +69,7 @@ class TestFaults(test.TestCase):
         for request in requests:
             exc = webob.exc.HTTPRequestEntityTooLarge
             fault = wsgi.Fault(exc(explanation='sorry',
-                        headers={'Retry-After': 4}))
+                                   headers={'Retry-After': 4}))
             response = request.get_response(fault)
 
             expected = {
index f415c70cc7bdcaed06260dc33829d81a0046dd78..3cabca28d4138fda3aa6facc33a566fb354ac550 100644 (file)
@@ -62,7 +62,7 @@ def fake_wsgi(self, req):
 
 
 def wsgi_app(inner_app_v1=None, fake_auth=True, fake_auth_context=None,
-        use_no_auth=False, ext_mgr=None):
+             use_no_auth=False, ext_mgr=None):
     if not inner_app_v1:
         inner_app_v1 = router.APIRouter(ext_mgr)
 
@@ -72,13 +72,13 @@ def wsgi_app(inner_app_v1=None, fake_auth=True, fake_auth_context=None,
         else:
             ctxt = context.RequestContext('fake', 'fake', auth_token=True)
         api_v1 = fault.FaultWrapper(auth.InjectContext(ctxt,
-              inner_app_v1))
+                                                       inner_app_v1))
     elif use_no_auth:
         api_v1 = fault.FaultWrapper(auth.NoAuthMiddleware(
-              limits.RateLimitingMiddleware(inner_app_v1)))
+            limits.RateLimitingMiddleware(inner_app_v1)))
     else:
         api_v1 = fault.FaultWrapper(auth.AuthMiddleware(
-              limits.RateLimitingMiddleware(inner_app_v1)))
+            limits.RateLimitingMiddleware(inner_app_v1)))
 
     mapper = urlmap.URLMap()
     mapper['/v1'] = api_v1
@@ -125,8 +125,10 @@ class HTTPRequest(webob.Request):
         kwargs['base_url'] = 'http://localhost/v1'
         use_admin_context = kwargs.pop('use_admin_context', False)
         out = webob.Request.blank(*args, **kwargs)
-        out.environ['cinder.context'] = FakeRequestContext('fake_user', 'fake',
-                is_admin=use_admin_context)
+        out.environ['cinder.context'] = FakeRequestContext(
+            'fake_user',
+            'fake',
+            is_admin=use_admin_context)
         return out
 
 
@@ -254,16 +256,14 @@ def stub_volume_get_all_by_project(self, context, search_opts=None):
 
 
 def stub_snapshot(id, **kwargs):
-    snapshot = {
-        'id': id,
-        'volume_id': 12,
-        'status': 'available',
-        'volume_size': 100,
-        'created_at': None,
-        'display_name': 'Default name',
-        'display_description': 'Default description',
-        'project_id': 'fake'
-        }
+    snapshot = {'id': id,
+                'volume_id': 12,
+                'status': 'available',
+                'volume_size': 100,
+                'created_at': None,
+                'display_name': 'Default name',
+                'display_description': 'Default description',
+                'project_id': 'fake'}
 
     snapshot.update(kwargs)
     return snapshot
index 83248de943067a0467e46e8ffa2f486217e5b32d..152a35793ea06a3bbbec93087c3661e4f517fcf4 100644 (file)
@@ -441,10 +441,9 @@ class ResourceTest(test.TestCase):
 
         extended = ControllerExtended()
         resource.register_actions(extended)
-        self.assertEqual({
-                'fooAction': extended._action_foo,
-                'barAction': extended._action_bar,
-                }, resource.wsgi_actions)
+        self.assertEqual({'fooAction': extended._action_foo,
+                          'barAction': extended._action_bar, },
+                         resource.wsgi_actions)
 
     def test_register_extensions(self):
         class Controller(object):
index fc71a1be172a6d417ca4e6e6884ad455e5c67b59..fbf68c56b50a70e4ab6366c3e761db85ce34fec6 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from lxml import etree
+import datetime
 import webob.exc
 
 from cinder.api.openstack.volume.contrib import hosts as os_hosts
 from cinder import context
-import datetime
 from cinder import db
 from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 from cinder import test
+from lxml import etree
 
 
 FLAGS = flags.FLAGS
@@ -34,18 +34,18 @@ created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099)
 curr_time = timeutils.utcnow()
 
 SERVICE_LIST = [
-        {'created_at': created_time, 'updated_at': curr_time,
-         'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
-         'availability_zone': 'cinder'},
-        {'created_at': created_time, 'updated_at': curr_time,
-         'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
-         'availability_zone': 'cinder'},
-        {'created_at': created_time, 'updated_at': curr_time,
-         'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
-         'availability_zone': 'cinder'},
-        {'created_at': created_time, 'updated_at': curr_time,
-         'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
-         'availability_zone': 'cinder'}]
+    {'created_at': created_time, 'updated_at': curr_time,
+     'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
+     'availability_zone': 'cinder'},
+    {'created_at': created_time, 'updated_at': curr_time,
+     'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
+     'availability_zone': 'cinder'},
+    {'created_at': created_time, 'updated_at': curr_time,
+     'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
+     'availability_zone': 'cinder'},
+    {'created_at': created_time, 'updated_at': curr_time,
+     'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
+     'availability_zone': 'cinder'}]
 
 LIST_RESPONSE = [{'service-status': 'available', 'service': 'cinder-volume',
                   'zone': 'cinder', 'service-state': 'enabled',
@@ -97,7 +97,7 @@ class HostTestCase(test.TestCase):
 
         cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume')
         expected = [host for host in LIST_RESPONSE
-                if host['service'] == 'cinder-volume']
+                    if host['service'] == 'cinder-volume']
         self.assertEqual(cinder_hosts, expected)
 
     def test_list_hosts_with_zone(self):
@@ -107,19 +107,22 @@ class HostTestCase(test.TestCase):
 
     def test_bad_status_value(self):
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
-                self.req, 'test.host.1', body={'status': 'bad'})
-        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
-                self.req, 'test.host.1', body={'status': 'disablabc'})
+                          self.req, 'test.host.1', body={'status': 'bad'})
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.update,
+                          self.req,
+                          'test.host.1',
+                          body={'status': 'disablabc'})
 
     def test_bad_update_key(self):
         bad_body = {'crazy': 'bad'}
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
-                self.req, 'test.host.1', body=bad_body)
+                          self.req, 'test.host.1', body=bad_body)
 
     def test_bad_update_key_and_correct_udpate_key(self):
         bad_body = {'status': 'disable', 'crazy': 'bad'}
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
-                self.req, 'test.host.1', body=bad_body)
+                          self.req, 'test.host.1', body=bad_body)
 
     def test_good_udpate_keys(self):
         body = {'status': 'disable'}
@@ -127,8 +130,11 @@ class HostTestCase(test.TestCase):
                           self.req, 'test.host.1', body=body)
 
     def test_bad_host(self):
-        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
-                self.req, 'bogus_host_name', body={'disabled': 0})
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.controller.update,
+                          self.req,
+                          'bogus_host_name',
+                          body={'disabled': 0})
 
     def test_show_forbidden(self):
         self.req.environ['cinder.context'].is_admin = False
index caad79db32eda8c32e9b9c71008fef6068d4fd60..9ad19f18eda6ff4aa5c5ac07aa94cc6c4e687736 100644 (file)
@@ -44,10 +44,7 @@ class ExtensionControllerTest(ExtensionTestCase):
 
     def setUp(self):
         super(ExtensionControllerTest, self).setUp()
-        self.ext_list = [
-            "TypesManage",
-            "TypesExtraSpecs",
-            ]
+        self.ext_list = ["TypesManage", "TypesExtraSpecs", ]
         self.ext_list.sort()
 
     def test_list_extensions_json(self):
@@ -70,15 +67,13 @@ class ExtensionControllerTest(ExtensionTestCase):
         # Make sure that at least Fox in Sox is correct.
         (fox_ext, ) = [
             x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
-        self.assertEqual(fox_ext, {
-                'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
-                'name': 'Fox In Socks',
-                'updated': '2011-01-22T13:25:27-06:00',
-                'description': 'The Fox In Socks Extension',
-                'alias': 'FOXNSOX',
-                'links': []
-            },
-        )
+        self.assertEqual(
+            fox_ext, {'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
+                      'name': 'Fox In Socks',
+                      'updated': '2011-01-22T13:25:27-06:00',
+                      'description': 'The Fox In Socks Extension',
+                      'alias': 'FOXNSOX',
+                      'links': []}, )
 
         for ext in data['extensions']:
             url = '/fake/extensions/%s' % ext['alias']
@@ -94,13 +89,14 @@ class ExtensionControllerTest(ExtensionTestCase):
         self.assertEqual(200, response.status_int)
 
         data = jsonutils.loads(response.body)
-        self.assertEqual(data['extension'], {
-                "namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
-                "name": "Fox In Socks",
-                "updated": "2011-01-22T13:25:27-06:00",
-                "description": "The Fox In Socks Extension",
-                "alias": "FOXNSOX",
-                "links": []})
+        self.assertEqual(
+            data['extension'],
+            {"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
+             "name": "Fox In Socks",
+             "updated": "2011-01-22T13:25:27-06:00",
+             "description": "The Fox In Socks Extension",
+             "alias": "FOXNSOX",
+             "links": []})
 
     def test_get_non_existing_extension_json(self):
         app = router.APIRouter()
@@ -125,10 +121,12 @@ class ExtensionControllerTest(ExtensionTestCase):
         # Make sure that at least Fox in Sox is correct.
         (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
         self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
-        self.assertEqual(fox_ext.get('namespace'),
+        self.assertEqual(
+            fox_ext.get('namespace'),
             'http://www.fox.in.socks/api/ext/pie/v1.0')
         self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00')
-        self.assertEqual(fox_ext.findtext('{0}description'.format(NS)),
+        self.assertEqual(
+            fox_ext.findtext('{0}description'.format(NS)),
             'The Fox In Socks Extension')
 
         xmlutil.validate_schema(root, 'extensions')
@@ -145,10 +143,12 @@ class ExtensionControllerTest(ExtensionTestCase):
         self.assertEqual(root.tag.split('extension')[0], NS)
         self.assertEqual(root.get('alias'), 'FOXNSOX')
         self.assertEqual(root.get('name'), 'Fox In Socks')
-        self.assertEqual(root.get('namespace'),
+        self.assertEqual(
+            root.get('namespace'),
             'http://www.fox.in.socks/api/ext/pie/v1.0')
         self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00')
-        self.assertEqual(root.findtext('{0}description'.format(NS)),
+        self.assertEqual(
+            root.findtext('{0}description'.format(NS)),
             'The Fox In Socks Extension')
 
         xmlutil.validate_schema(root, 'extension')
index 1799c45ff12fd65b293fd312674fb66af16af510..4e145e741d9f87affbc2f9d5152b6e9d5079fc0d 100644 (file)
@@ -22,17 +22,11 @@ from cinder import test
 
 
 class SelectorTest(test.TestCase):
-    obj_for_test = {
-        'test': {
-            'name': 'test',
-            'values': [1, 2, 3],
-            'attrs': {
-                'foo': 1,
-                'bar': 2,
-                'baz': 3,
-                },
-            },
-        }
+    obj_for_test = {'test': {'name': 'test',
+                             'values': [1, 2, 3],
+                             'attrs': {'foo': 1,
+                                       'bar': 2,
+                                       'baz': 3, }, }, }
 
     def test_empty_selector(self):
         sel = xmlutil.Selector()
@@ -217,11 +211,9 @@ class TemplateElementTest(test.TestCase):
         self.assertEqual(len(elem), 0)
 
         # Create a few children
-        children = [
-            xmlutil.TemplateElement('child1'),
-            xmlutil.TemplateElement('child2'),
-            xmlutil.TemplateElement('child3'),
-            ]
+        children = [xmlutil.TemplateElement('child1'),
+                    xmlutil.TemplateElement('child2'),
+                    xmlutil.TemplateElement('child3'), ]
 
         # Extend the parent by those children
         elem.extend(children)
@@ -234,10 +226,8 @@ class TemplateElementTest(test.TestCase):
             self.assertEqual(elem[children[idx].tag], children[idx])
 
         # Ensure that multiple children of the same name are rejected
-        children2 = [
-            xmlutil.TemplateElement('child4'),
-            xmlutil.TemplateElement('child1'),
-            ]
+        children2 = [xmlutil.TemplateElement('child4'),
+                     xmlutil.TemplateElement('child1'), ]
         self.assertRaises(KeyError, elem.extend, children2)
 
         # Also ensure that child4 was not added
@@ -252,11 +242,9 @@ class TemplateElementTest(test.TestCase):
         self.assertEqual(len(elem), 0)
 
         # Create a few children
-        children = [
-            xmlutil.TemplateElement('child1'),
-            xmlutil.TemplateElement('child2'),
-            xmlutil.TemplateElement('child3'),
-            ]
+        children = [xmlutil.TemplateElement('child1'),
+                    xmlutil.TemplateElement('child2'),
+                    xmlutil.TemplateElement('child3'), ]
 
         # Extend the parent by those children
         elem.extend(children)
@@ -287,11 +275,9 @@ class TemplateElementTest(test.TestCase):
         self.assertEqual(len(elem), 0)
 
         # Create a few children
-        children = [
-            xmlutil.TemplateElement('child1'),
-            xmlutil.TemplateElement('child2'),
-            xmlutil.TemplateElement('child3'),
-            ]
+        children = [xmlutil.TemplateElement('child1'),
+                    xmlutil.TemplateElement('child2'),
+                    xmlutil.TemplateElement('child3'), ]
 
         # Extend the parent by those children
         elem.extend(children)
@@ -384,10 +370,8 @@ class TemplateElementTest(test.TestCase):
         master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
 
         # Create a couple of slave template element
-        slave_elems = [
-            xmlutil.TemplateElement('test', attr2=attrs['attr2']),
-            xmlutil.TemplateElement('test', attr3=attrs['attr3']),
-            ]
+        slave_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']),
+                       xmlutil.TemplateElement('test', attr3=attrs['attr3']), ]
 
         # Try the render
         elem = master_elem._render(None, None, slave_elems, None)
@@ -589,22 +573,13 @@ class TemplateTest(test.TestCase):
 
     def test__serialize(self):
         # Our test object to serialize
-        obj = {
-            'test': {
-                'name': 'foobar',
-                'values': [1, 2, 3, 4],
-                'attrs': {
-                    'a': 1,
-                    'b': 2,
-                    'c': 3,
-                    'd': 4,
-                    },
-                'image': {
-                    'name': 'image_foobar',
-                    'id': 42,
-                    },
-                },
-            }
+        obj = {'test': {'name': 'foobar',
+                        'values': [1, 2, 3, 4],
+                        'attrs': {'a': 1,
+                                  'b': 2,
+                                  'c': 3,
+                                  'd': 4, },
+                        'image': {'name': 'image_foobar', 'id': 42, }, }, }
 
         # Set up our master template
         root = xmlutil.TemplateElement('test', selector='test',
index 23dffd0d9f43ec41a37be2975fab899aebc48448..74110bb872d7d0cff0f36c1290f827610269bb19 100644 (file)
@@ -159,10 +159,8 @@ class LimitsControllerTest(BaseLimitTestSuite):
                     },
 
                 ],
-                "absolute": {
-                    "maxTotalVolumeGigabytes": 512,
-                    "maxTotalVolumes": 5,
-                    },
+                "absolute": {"maxTotalVolumeGigabytes": 512,
+                             "maxTotalVolumes": 5, },
             },
         }
         body = jsonutils.loads(response.body)
@@ -776,26 +774,26 @@ class LimitsViewBuilderTest(test.TestCase):
                                 "injected_file_content_bytes": 5}
 
     def test_build_limits(self):
-        expected_limits = {"limits": {
-                "rate": [{
-                      "uri": "*",
-                      "regex": ".*",
-                      "limit": [{"value": 10,
-                                 "verb": "POST",
-                                 "remaining": 2,
-                                 "unit": "MINUTE",
-                                 "next-available": "2011-07-21T18:17:06Z"}]},
-                   {"uri": "*/volumes",
-                    "regex": "^/volumes",
-                    "limit": [{"value": 50,
-                               "verb": "POST",
-                               "remaining": 10,
-                               "unit": "DAY",
-                               "next-available": "2011-07-21T18:17:06Z"}]}],
-                "absolute": {"maxServerMeta": 1,
-                             "maxImageMeta": 1,
-                             "maxPersonality": 5,
-                             "maxPersonalitySize": 5}}}
+        tdate = "2011-07-21T18:17:06Z"
+        expected_limits = \
+            {"limits": {"rate": [{"uri": "*",
+                                  "regex": ".*",
+                                  "limit": [{"value": 10,
+                                             "verb": "POST",
+                                             "remaining": 2,
+                                             "unit": "MINUTE",
+                                             "next-available": tdate}]},
+                                 {"uri": "*/volumes",
+                                  "regex": "^/volumes",
+                                  "limit": [{"value": 50,
+                                             "verb": "POST",
+                                             "remaining": 10,
+                                             "unit": "DAY",
+                                             "next-available": tdate}]}],
+                        "absolute": {"maxServerMeta": 1,
+                                     "maxImageMeta": 1,
+                                     "maxPersonality": 5,
+                                     "maxPersonalitySize": 5}}}
 
         output = self.view_builder.build(self.rate_limits,
                                          self.absolute_limits)
@@ -827,27 +825,27 @@ class LimitsXMLSerializationTest(test.TestCase):
         serializer = limits.LimitsTemplate()
         fixture = {
             "limits": {
-                   "rate": [{
-                         "uri": "*",
-                         "regex": ".*",
-                         "limit": [{
-                              "value": 10,
-                              "verb": "POST",
-                              "remaining": 2,
-                              "unit": "MINUTE",
-                              "next-available": "2011-12-15T22:42:45Z"}]},
-                          {"uri": "*/servers",
-                           "regex": "^/servers",
-                           "limit": [{
-                              "value": 50,
-                              "verb": "POST",
-                              "remaining": 10,
-                              "unit": "DAY",
-                              "next-available": "2011-12-15T22:42:45Z"}]}],
-                    "absolute": {"maxServerMeta": 1,
-                                 "maxImageMeta": 1,
-                                 "maxPersonality": 5,
-                                 "maxPersonalitySize": 10240}}}
+                "rate": [{
+                    "uri": "*",
+                    "regex": ".*",
+                    "limit": [{
+                        "value": 10,
+                        "verb": "POST",
+                        "remaining": 2,
+                        "unit": "MINUTE",
+                        "next-available": "2011-12-15T22:42:45Z"}]},
+                    {"uri": "*/servers",
+                     "regex": "^/servers",
+                     "limit": [{
+                         "value": 50,
+                         "verb": "POST",
+                         "remaining": 10,
+                         "unit": "DAY",
+                         "next-available": "2011-12-15T22:42:45Z"}]}],
+                "absolute": {"maxServerMeta": 1,
+                             "maxImageMeta": 1,
+                             "maxPersonality": 5,
+                             "maxPersonalitySize": 10240}}}
 
         output = serializer.serialize(fixture)
         root = etree.XML(output)
@@ -873,8 +871,9 @@ class LimitsXMLSerializationTest(test.TestCase):
             for j, limit in enumerate(rate_limits):
                 for key in ['verb', 'value', 'remaining', 'unit',
                             'next-available']:
-                    self.assertEqual(limit.get(key),
-                         str(fixture['limits']['rate'][i]['limit'][j][key]))
+                    self.assertEqual(
+                        limit.get(key),
+                        str(fixture['limits']['rate'][i]['limit'][j][key]))
 
     def test_index_no_limits(self):
         serializer = limits.LimitsTemplate()
index 4582ce4a05e448ead3f3da956b81f5530ffd00d5..235d41f2abc900b3ffa80b37a61d0804f1edbaab 100644 (file)
@@ -36,15 +36,13 @@ INVALID_UUID = '00000000-0000-0000-0000-000000000002'
 
 
 def _get_default_snapshot_param():
-    return {
-        'id': UUID,
-        'volume_id': 12,
-        'status': 'available',
-        'volume_size': 100,
-        'created_at': None,
-        'display_name': 'Default name',
-        'display_description': 'Default description',
-        }
+    return {'id': UUID,
+            'volume_id': 12,
+            'status': 'available',
+            'volume_size': 100,
+            'created_at': None,
+            'display_name': 'Default name',
+            'display_description': 'Default description', }
 
 
 def stub_snapshot_create(self, context, volume_id, name, description):
@@ -81,47 +79,48 @@ class SnapshotApiTest(test.TestCase):
         self.stubs.Set(db, 'snapshot_get_all_by_project',
                        fakes.stub_snapshot_get_all_by_project)
         self.stubs.Set(db, 'snapshot_get_all',
-                      fakes.stub_snapshot_get_all)
+                       fakes.stub_snapshot_get_all)
 
     def test_snapshot_create(self):
         self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create)
         self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
         snapshot = {"volume_id": '12',
-                "force": False,
-                "display_name": "Snapshot Test Name",
-                "display_description": "Snapshot Test Desc"}
+                    "force": False,
+                    "display_name": "Snapshot Test Name",
+                    "display_description": "Snapshot Test Desc"}
         body = dict(snapshot=snapshot)
         req = fakes.HTTPRequest.blank('/v1/snapshots')
         resp_dict = self.controller.create(req, body)
 
         self.assertTrue('snapshot' in resp_dict)
         self.assertEqual(resp_dict['snapshot']['display_name'],
-                        snapshot['display_name'])
+                         snapshot['display_name'])
         self.assertEqual(resp_dict['snapshot']['display_description'],
-                        snapshot['display_description'])
+                         snapshot['display_description'])
 
     def test_snapshot_create_force(self):
-        self.stubs.Set(volume.api.API, "create_snapshot_force",
-            stub_snapshot_create)
+        self.stubs.Set(volume.api.API,
+                       "create_snapshot_force",
+                       stub_snapshot_create)
         self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
         snapshot = {"volume_id": '12',
-                "force": True,
-                "display_name": "Snapshot Test Name",
-                "display_description": "Snapshot Test Desc"}
+                    "force": True,
+                    "display_name": "Snapshot Test Name",
+                    "display_description": "Snapshot Test Desc"}
         body = dict(snapshot=snapshot)
         req = fakes.HTTPRequest.blank('/v1/snapshots')
         resp_dict = self.controller.create(req, body)
 
         self.assertTrue('snapshot' in resp_dict)
         self.assertEqual(resp_dict['snapshot']['display_name'],
-                        snapshot['display_name'])
+                         snapshot['display_name'])
         self.assertEqual(resp_dict['snapshot']['display_description'],
-                        snapshot['display_description'])
+                         snapshot['display_description'])
 
         snapshot = {"volume_id": "12",
-                "force": "**&&^^%%$$##@@",
-                "display_name": "Snapshot Test Name",
-                "display_description": "Snapshot Test Desc"}
+                    "force": "**&&^^%%$$##@@",
+                    "display_name": "Snapshot Test Name",
+                    "display_description": "Snapshot Test Desc"}
         body = dict(snapshot=snapshot)
         req = fakes.HTTPRequest.blank('/v1/snapshots')
         self.assertRaises(exception.InvalidParameterValue,
@@ -133,9 +132,7 @@ class SnapshotApiTest(test.TestCase):
         self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
         self.stubs.Set(volume.api.API, "update_snapshot",
                        fakes.stub_snapshot_update)
-        updates = {
-            "display_name": "Updated Test Name",
-        }
+        updates = {"display_name": "Updated Test Name", }
         body = {"snapshot": updates}
         req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
         res_dict = self.controller.update(req, UUID, body)
@@ -207,8 +204,9 @@ class SnapshotApiTest(test.TestCase):
                           snapshot_id)
 
     def test_snapshot_detail(self):
-        self.stubs.Set(volume.api.API, "get_all_snapshots",
-            stub_snapshot_get_all)
+        self.stubs.Set(volume.api.API,
+                       "get_all_snapshots",
+                       stub_snapshot_get_all)
         req = fakes.HTTPRequest.blank('/v1/snapshots/detail')
         resp_dict = self.controller.detail(req)
 
@@ -350,8 +348,7 @@ class SnapshotSerializerTest(test.TestCase):
             created_at=datetime.datetime.now(),
             display_name='snap_name',
             display_description='snap_desc',
-            volume_id='vol_id',
-            )
+            volume_id='vol_id', )
         text = serializer.serialize(dict(snapshot=raw_snapshot))
 
         print text
@@ -361,24 +358,20 @@ class SnapshotSerializerTest(test.TestCase):
 
     def test_snapshot_index_detail_serializer(self):
         serializer = snapshots.SnapshotsTemplate()
-        raw_snapshots = [dict(
-                id='snap1_id',
-                status='snap1_status',
-                size=1024,
-                created_at=datetime.datetime.now(),
-                display_name='snap1_name',
-                display_description='snap1_desc',
-                volume_id='vol1_id',
-                ),
-                       dict(
-                id='snap2_id',
-                status='snap2_status',
-                size=1024,
-                created_at=datetime.datetime.now(),
-                display_name='snap2_name',
-                display_description='snap2_desc',
-                volume_id='vol2_id',
-                )]
+        raw_snapshots = [dict(id='snap1_id',
+                              status='snap1_status',
+                              size=1024,
+                              created_at=datetime.datetime.now(),
+                              display_name='snap1_name',
+                              display_description='snap1_desc',
+                              volume_id='vol1_id', ),
+                         dict(id='snap2_id',
+                              status='snap2_status',
+                              size=1024,
+                              created_at=datetime.datetime.now(),
+                              display_name='snap2_name',
+                              display_description='snap2_desc',
+                              volume_id='vol2_id', )]
         text = serializer.serialize(dict(snapshots=raw_snapshots))
 
         print text
index ebf5e1df82982122155da5aac903255e123107ca..cbce5c078808afac0e79281ad8a0ff36782617a2 100644 (file)
@@ -40,15 +40,13 @@ def stub_snapshot_get(self, context, snapshot_id):
     if snapshot_id != TEST_SNAPSHOT_UUID:
         raise exception.NotFound
 
-    return {
-            'id': snapshot_id,
+    return {'id': snapshot_id,
             'volume_id': 12,
             'status': 'available',
             'volume_size': 100,
             'created_at': None,
             'display_name': 'Default name',
-            'display_description': 'Default description',
-            }
+            'display_description': 'Default description', }
 
 
 class VolumeApiTest(test.TestCase):
@@ -89,7 +87,7 @@ class VolumeApiTest(test.TestCase):
                                'metadata': {},
                                'id': '1',
                                'created_at': datetime.datetime(1, 1, 1,
-                                                              1, 1, 1),
+                                                               1, 1, 1),
                                'size': 100}}
         self.assertEqual(res_dict, expected)
 
@@ -105,8 +103,7 @@ class VolumeApiTest(test.TestCase):
                "display_name": "Volume Test Name",
                "display_description": "Volume Test Desc",
                "availability_zone": "zone1:host1",
-               "volume_type": db_vol_type['name'],
-              }
+               "volume_type": db_vol_type['name'], }
         body = {"volume": vol}
         req = fakes.HTTPRequest.blank('/v1/volumes')
         res_dict = self.controller.create(req, body)
@@ -128,28 +125,29 @@ class VolumeApiTest(test.TestCase):
     def test_volume_create_with_image_id(self):
         self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
         self.ext_mgr.extensions = {'os-image-create': 'fake'}
+        test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77"
         vol = {"size": '1',
                "display_name": "Volume Test Name",
                "display_description": "Volume Test Desc",
                "availability_zone": "nova",
-               "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'}
+               "imageRef": test_id}
         expected = {'volume': {'status': 'fakestatus',
-                           'display_description': 'Volume Test Desc',
-                           'availability_zone': 'nova',
-                           'display_name': 'Volume Test Name',
-                           'attachments': [{'device': '/',
-                                            'server_id': 'fakeuuid',
-                                            'id': '1',
-                                            'volume_id': '1'}],
-                            'bootable': 'false',
-                            'volume_type': 'vol_type_name',
-                            'image_id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
-                            'snapshot_id': None,
-                            'metadata': {},
-                            'id': '1',
-                            'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
-                            'size': '1'}
-                    }
+                               'display_description': 'Volume Test Desc',
+                               'availability_zone': 'nova',
+                               'display_name': 'Volume Test Name',
+                               'attachments': [{'device': '/',
+                                                'server_id': 'fakeuuid',
+                                                'id': '1',
+                                                'volume_id': '1'}],
+                               'bootable': 'false',
+                               'volume_type': 'vol_type_name',
+                               'image_id': test_id,
+                               'snapshot_id': None,
+                               'metadata': {},
+                               'id': '1',
+                               'created_at': datetime.datetime(1, 1, 1,
+                                                               1, 1, 1),
+                               'size': '1'}}
         body = {"volume": vol}
         req = fakes.HTTPRequest.blank('/v1/volumes')
         res_dict = self.controller.create(req, body)
@@ -160,11 +158,11 @@ class VolumeApiTest(test.TestCase):
         self.stubs.Set(volume_api.API, "get_snapshot", stub_snapshot_get)
         self.ext_mgr.extensions = {'os-image-create': 'fake'}
         vol = {"size": '1',
-                "display_name": "Volume Test Name",
-                "display_description": "Volume Test Desc",
-                "availability_zone": "cinder",
-                "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
-                "snapshot_id": TEST_SNAPSHOT_UUID}
+               "display_name": "Volume Test Name",
+               "display_description": "Volume Test Desc",
+               "availability_zone": "cinder",
+               "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
+               "snapshot_id": TEST_SNAPSHOT_UUID}
         body = {"volume": vol}
         req = fakes.HTTPRequest.blank('/v1/volumes')
         self.assertRaises(webob.exc.HTTPBadRequest,
@@ -176,10 +174,10 @@ class VolumeApiTest(test.TestCase):
         self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
         self.ext_mgr.extensions = {'os-image-create': 'fake'}
         vol = {"size": '1',
-                "display_name": "Volume Test Name",
-                "display_description": "Volume Test Desc",
-                "availability_zone": "cinder",
-                "imageRef": 1234}
+               "display_name": "Volume Test Name",
+               "display_description": "Volume Test Desc",
+               "availability_zone": "cinder",
+               "imageRef": 1234}
         body = {"volume": vol}
         req = fakes.HTTPRequest.blank('/v1/volumes')
         self.assertRaises(webob.exc.HTTPBadRequest,
@@ -191,10 +189,10 @@ class VolumeApiTest(test.TestCase):
         self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
         self.ext_mgr.extensions = {'os-image-create': 'fake'}
         vol = {"size": '1',
-                "display_name": "Volume Test Name",
-                "display_description": "Volume Test Desc",
-                "availability_zone": "cinder",
-                "imageRef": '12345'}
+               "display_name": "Volume Test Name",
+               "display_description": "Volume Test Desc",
+               "availability_zone": "cinder",
+               "imageRef": '12345'}
         body = {"volume": vol}
         req = fakes.HTTPRequest.blank('/v1/volumes')
         self.assertRaises(webob.exc.HTTPBadRequest,
@@ -305,7 +303,7 @@ class VolumeApiTest(test.TestCase):
                                  'metadata': {},
                                  'id': '1',
                                  'created_at': datetime.datetime(1, 1, 1,
-                                                                1, 1, 1),
+                                                                 1, 1, 1),
                                  'size': 1}]}
         self.assertEqual(res_dict, expected)
 
@@ -328,7 +326,7 @@ class VolumeApiTest(test.TestCase):
                                  'metadata': {},
                                  'id': '1',
                                  'created_at': datetime.datetime(1, 1, 1,
-                                                                1, 1, 1),
+                                                                 1, 1, 1),
                                  'size': 1}]}
         self.assertEqual(res_dict, expected)
 
@@ -410,7 +408,7 @@ class VolumeApiTest(test.TestCase):
                                'metadata': {},
                                'id': '1',
                                'created_at': datetime.datetime(1, 1, 1,
-                                                              1, 1, 1),
+                                                               1, 1, 1),
                                'size': 1}}
         self.assertEqual(res_dict, expected)
 
@@ -433,7 +431,7 @@ class VolumeApiTest(test.TestCase):
                                'metadata': {},
                                'id': '1',
                                'created_at': datetime.datetime(1, 1, 1,
-                                                              1, 1, 1),
+                                                               1, 1, 1),
                                'size': 1}}
         self.assertEqual(res_dict, expected)
 
@@ -460,7 +458,7 @@ class VolumeApiTest(test.TestCase):
                                'metadata': {},
                                'id': '1',
                                'created_at': datetime.datetime(1, 1, 1,
-                                                              1, 1, 1),
+                                                               1, 1, 1),
                                'size': 1}}
         self.assertEqual(res_dict, expected)
 
@@ -552,20 +550,16 @@ class VolumeSerializerTest(test.TestCase):
             size=1024,
             availability_zone='vol_availability',
             created_at=datetime.datetime.now(),
-            attachments=[dict(
-                    id='vol_id',
-                    volume_id='vol_id',
-                    server_id='instance_uuid',
-                    device='/foo')],
+            attachments=[dict(id='vol_id',
+                              volume_id='vol_id',
+                              server_id='instance_uuid',
+                              device='/foo')],
             display_name='vol_name',
             display_description='vol_desc',
             volume_type='vol_type',
             snapshot_id='snap_id',
-            metadata=dict(
-                foo='bar',
-                baz='quux',
-                ),
-            )
+            metadata=dict(foo='bar',
+                          baz='quux', ), )
         text = serializer.serialize(dict(volume=raw_volume))
 
         print text
@@ -575,46 +569,36 @@ class VolumeSerializerTest(test.TestCase):
 
     def test_volume_index_detail_serializer(self):
         serializer = volumes.VolumesTemplate()
-        raw_volumes = [dict(
-                id='vol1_id',
-                status='vol1_status',
-                size=1024,
-                availability_zone='vol1_availability',
-                created_at=datetime.datetime.now(),
-                attachments=[dict(
-                        id='vol1_id',
-                        volume_id='vol1_id',
-                        server_id='instance_uuid',
-                        device='/foo1')],
-                display_name='vol1_name',
-                display_description='vol1_desc',
-                volume_type='vol1_type',
-                snapshot_id='snap1_id',
-                metadata=dict(
-                    foo='vol1_foo',
-                    bar='vol1_bar',
-                    ),
-                ),
-                       dict(
-                id='vol2_id',
-                status='vol2_status',
-                size=1024,
-                availability_zone='vol2_availability',
-                created_at=datetime.datetime.now(),
-                attachments=[dict(
-                        id='vol2_id',
-                        volume_id='vol2_id',
-                        server_id='instance_uuid',
-                        device='/foo2')],
-                display_name='vol2_name',
-                display_description='vol2_desc',
-                volume_type='vol2_type',
-                snapshot_id='snap2_id',
-                metadata=dict(
-                    foo='vol2_foo',
-                    bar='vol2_bar',
-                    ),
-                )]
+        raw_volumes = [dict(id='vol1_id',
+                            status='vol1_status',
+                            size=1024,
+                            availability_zone='vol1_availability',
+                            created_at=datetime.datetime.now(),
+                            attachments=[dict(id='vol1_id',
+                                              volume_id='vol1_id',
+                                              server_id='instance_uuid',
+                                              device='/foo1')],
+                            display_name='vol1_name',
+                            display_description='vol1_desc',
+                            volume_type='vol1_type',
+                            snapshot_id='snap1_id',
+                            metadata=dict(foo='vol1_foo',
+                                          bar='vol1_bar', ), ),
+                       dict(id='vol2_id',
+                            status='vol2_status',
+                            size=1024,
+                            availability_zone='vol2_availability',
+                            created_at=datetime.datetime.now(),
+                            attachments=[dict(id='vol2_id',
+                                              volume_id='vol2_id',
+                                              server_id='instance_uuid',
+                                              device='/foo2')],
+                            display_name='vol2_name',
+                            display_description='vol2_desc',
+                            volume_type='vol2_type',
+                            snapshot_id='snap2_id',
+                            metadata=dict(foo='vol2_foo',
+                                          bar='vol2_bar', ), )]
         text = serializer.serialize(dict(volumes=raw_volumes))
 
         print text
@@ -637,11 +621,7 @@ class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
 <volume xmlns="http://docs.openstack.org/compute/api/v1.1"
         size="1"></volume>"""
         request = self.deserializer.deserialize(self_request)
-        expected = {
-            "volume": {
-                    "size": "1",
-            },
-        }
+        expected = {"volume": {"size": "1", }, }
         self.assertEquals(request['body'], expected)
 
     def test_display_name(self):
index cde90226ce0cd7dccd8706ebee5f421bed01c68f..bfbff2249e8f399ea0b8021161d4fec01c58a5c7 100644 (file)
@@ -159,10 +159,8 @@ class LimitsControllerTest(BaseLimitTestSuite):
                     },
 
                 ],
-                "absolute": {
-                    "maxTotalVolumeGigabytes": 512,
-                    "maxTotalVolumes": 5,
-                    },
+                "absolute": {"maxTotalVolumeGigabytes": 512,
+                             "maxTotalVolumes": 5, },
             },
         }
         body = jsonutils.loads(response.body)
@@ -590,7 +588,6 @@ class WsgiLimiterTest(BaseLimitTestSuite):
 
     def test_invalid_methods(self):
         """Only POSTs should work."""
-        requests = []
         for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
             request = webob.Request.blank("/", method=method)
             response = request.get_response(self.app)
@@ -776,44 +773,26 @@ class LimitsViewBuilderTest(test.TestCase):
                                 "injected_file_content_bytes": 5}
 
     def test_build_limits(self):
+        tdate = "2011-07-21T18:17:06Z"
         expected_limits = {
-            "limits": {
-                "rate": [
-                    {
-                        "uri": "*",
-                        "regex": ".*",
-                        "limit": [
-                            {
-                                "value": 10,
-                                 "verb": "POST",
-                                 "remaining": 2,
-                                 "unit": "MINUTE",
-                                 "next-available": "2011-07-21T18:17:06Z"
-                            }
-                        ]
-                    },
-                    {
-                        "uri": "*/volumes",
-                        "regex": "^/volumes",
-                        "limit": [
-                            {
-                                "value": 50,
-                                "verb": "POST",
-                                "remaining": 10,
-                                "unit": "DAY",
-                                "next-available": "2011-07-21T18:17:06Z"
-                            }
-                        ]
-                    }
-                ],
-                "absolute": {
-                    "maxServerMeta": 1,
-                    "maxImageMeta": 1,
-                    "maxPersonality": 5,
-                    "maxPersonalitySize": 5
-                }
-            }
-        }
+            "limits": {"rate": [{"uri": "*",
+                                 "regex": ".*",
+                                 "limit": [{"value": 10,
+                                            "verb": "POST",
+                                            "remaining": 2,
+                                            "unit": "MINUTE",
+                                            "next-available": tdate}]},
+                                {"uri": "*/volumes",
+                                 "regex": "^/volumes",
+                                 "limit": [{"value": 50,
+                                            "verb": "POST",
+                                            "remaining": 10,
+                                            "unit": "DAY",
+                                            "next-available": tdate}]}],
+                       "absolute": {"maxServerMeta": 1,
+                                    "maxImageMeta": 1,
+                                    "maxPersonality": 5,
+                                    "maxPersonalitySize": 5}}}
 
         output = self.view_builder.build(self.rate_limits,
                                          self.absolute_limits)
@@ -842,30 +821,26 @@ class LimitsXMLSerializationTest(test.TestCase):
         self.assertTrue(has_dec)
 
     def test_index(self):
+        tdate = "2011-12-15T22:42:45Z"
         serializer = limits.LimitsTemplate()
-        fixture = {
-            "limits": {
-                   "rate": [{
-                         "uri": "*",
-                         "regex": ".*",
-                         "limit": [{
-                              "value": 10,
-                              "verb": "POST",
-                              "remaining": 2,
-                              "unit": "MINUTE",
-                              "next-available": "2011-12-15T22:42:45Z"}]},
-                          {"uri": "*/servers",
-                           "regex": "^/servers",
-                           "limit": [{
-                              "value": 50,
-                              "verb": "POST",
-                              "remaining": 10,
-                              "unit": "DAY",
-                              "next-available": "2011-12-15T22:42:45Z"}]}],
-                    "absolute": {"maxServerMeta": 1,
-                                 "maxImageMeta": 1,
-                                 "maxPersonality": 5,
-                                 "maxPersonalitySize": 10240}}}
+        fixture = {"limits": {"rate": [{"uri": "*",
+                                        "regex": ".*",
+                                        "limit": [{"value": 10,
+                                                   "verb": "POST",
+                                                   "remaining": 2,
+                                                   "unit": "MINUTE",
+                                                   "next-available": tdate}]},
+                                       {"uri": "*/servers",
+                                        "regex": "^/servers",
+                                        "limit": [{"value": 50,
+                                                   "verb": "POST",
+                                                   "remaining": 10,
+                                                   "unit": "DAY",
+                                                   "next-available": tdate}]}],
+                              "absolute": {"maxServerMeta": 1,
+                                           "maxImageMeta": 1,
+                                           "maxPersonality": 5,
+                                           "maxPersonalitySize": 10240}}}
 
         output = serializer.serialize(fixture)
         root = etree.XML(output)
@@ -891,8 +866,9 @@ class LimitsXMLSerializationTest(test.TestCase):
             for j, limit in enumerate(rate_limits):
                 for key in ['verb', 'value', 'remaining', 'unit',
                             'next-available']:
-                    self.assertEqual(limit.get(key),
-                         str(fixture['limits']['rate'][i]['limit'][j][key]))
+                    self.assertEqual(
+                        limit.get(key),
+                        str(fixture['limits']['rate'][i]['limit'][j][key]))
 
     def test_index_no_limits(self):
         serializer = limits.LimitsTemplate()
index 5d72b1a277684a48a2fbec63c39a5b28b4f8f4cb..b4539ebda30334050c64fc1c69fb4f7139b30bc1 100644 (file)
@@ -614,33 +614,24 @@ class VolumeSerializerTest(test.TestCase):
                 display_description='vol1_desc',
                 volume_type='vol1_type',
                 snapshot_id='snap1_id',
-                metadata=dict(
-                            foo='vol1_foo',
-                            bar='vol1_bar',
-                ),
-            ),
+                metadata=dict(foo='vol1_foo',
+                              bar='vol1_bar', ), ),
             dict(
                 id='vol2_id',
                 status='vol2_status',
                 size=1024,
                 availability_zone='vol2_availability',
                 created_at=datetime.datetime.now(),
-                attachments=[
-                    dict(
-                        id='vol2_id',
-                        volume_id='vol2_id',
-                        server_id='instance_uuid',
-                        device='/foo2')],
+                attachments=[dict(id='vol2_id',
+                                  volume_id='vol2_id',
+                                  server_id='instance_uuid',
+                                  device='/foo2')],
                 display_name='vol2_name',
                 display_description='vol2_desc',
                 volume_type='vol2_type',
                 snapshot_id='snap2_id',
-                metadata=dict(
-                            foo='vol2_foo',
-                            bar='vol2_bar',
-                ),
-            )
-        ]
+                metadata=dict(foo='vol2_foo',
+                              bar='vol2_bar', ), )]
         text = serializer.serialize(dict(volumes=raw_volumes))
 
         print text
index 99056ea8884454fbe6b5eb175c2da71136325cd0..c831fdba2c5f593f68d444e4a1f6c67effd1cca7 100644 (file)
@@ -16,7 +16,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-"""Stubouts, mocks and fixtures for the test suite"""
+"""Stubouts, mocks and fixtures for the test suite."""
 
 from cinder import db
 
index b6b5eb5bb0115c422366a76bd48f41a178170f09..e8feb2433d718f116fdee346b5c3e877b27ef31f 100644 (file)
@@ -16,7 +16,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-"""Implementation of a fake image service"""
+"""Implementation of a fake image service."""
 
 import copy
 import datetime
@@ -44,101 +44,101 @@ class _FakeImageService(object):
         timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
 
         image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
-                 'name': 'fakeimage123456',
-                 'created_at': timestamp,
-                 'updated_at': timestamp,
-                 'deleted_at': None,
-                 'deleted': False,
-                 'status': 'active',
-                 'is_public': False,
-                 'container_format': 'raw',
-                 'disk_format': 'raw',
-                 'properties': {'kernel_id': 'nokernel',
-                                'ramdisk_id': 'nokernel',
-                                'architecture': 'x86_64'}}
+                  'name': 'fakeimage123456',
+                  'created_at': timestamp,
+                  'updated_at': timestamp,
+                  'deleted_at': None,
+                  'deleted': False,
+                  'status': 'active',
+                  'is_public': False,
+                  'container_format': 'raw',
+                  'disk_format': 'raw',
+                  'properties': {'kernel_id': 'nokernel',
+                                 'ramdisk_id': 'nokernel',
+                                 'architecture': 'x86_64'}}
 
         image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
-                 'name': 'fakeimage123456',
-                 'created_at': timestamp,
-                 'updated_at': timestamp,
-                 'deleted_at': None,
-                 'deleted': False,
-                 'status': 'active',
-                 'is_public': True,
-                 'container_format': 'ami',
-                 'disk_format': 'ami',
-                 'properties': {'kernel_id': 'nokernel',
-                                'ramdisk_id': 'nokernel'}}
+                  'name': 'fakeimage123456',
+                  'created_at': timestamp,
+                  'updated_at': timestamp,
+                  'deleted_at': None,
+                  'deleted': False,
+                  'status': 'active',
+                  'is_public': True,
+                  'container_format': 'ami',
+                  'disk_format': 'ami',
+                  'properties': {'kernel_id': 'nokernel',
+                                 'ramdisk_id': 'nokernel'}}
 
         image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
-                 'name': 'fakeimage123456',
-                 'created_at': timestamp,
-                 'updated_at': timestamp,
-                 'deleted_at': None,
-                 'deleted': False,
-                 'status': 'active',
-                 'is_public': True,
-                 'container_format': None,
-                 'disk_format': None,
-                 'properties': {'kernel_id': 'nokernel',
-                                'ramdisk_id': 'nokernel'}}
+                  'name': 'fakeimage123456',
+                  'created_at': timestamp,
+                  'updated_at': timestamp,
+                  'deleted_at': None,
+                  'deleted': False,
+                  'status': 'active',
+                  'is_public': True,
+                  'container_format': None,
+                  'disk_format': None,
+                  'properties': {'kernel_id': 'nokernel',
+                                 'ramdisk_id': 'nokernel'}}
 
         image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
-                 'name': 'fakeimage123456',
-                 'created_at': timestamp,
-                 'updated_at': timestamp,
-                 'deleted_at': None,
-                 'deleted': False,
-                 'status': 'active',
-                 'is_public': True,
-                 'container_format': 'ami',
-                 'disk_format': 'ami',
-                 'properties': {'kernel_id': 'nokernel',
-                                'ramdisk_id': 'nokernel'}}
+                  'name': 'fakeimage123456',
+                  'created_at': timestamp,
+                  'updated_at': timestamp,
+                  'deleted_at': None,
+                  'deleted': False,
+                  'status': 'active',
+                  'is_public': True,
+                  'container_format': 'ami',
+                  'disk_format': 'ami',
+                  'properties': {'kernel_id': 'nokernel',
+                                 'ramdisk_id': 'nokernel'}}
 
         image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
-                 'name': 'fakeimage123456',
-                 'created_at': timestamp,
-                 'updated_at': timestamp,
-                 'deleted_at': None,
-                 'deleted': False,
-                 'status': 'active',
-                 'is_public': True,
-                 'container_format': 'ami',
-                 'disk_format': 'ami',
-                 'properties': {'kernel_id':
-                                    '155d900f-4e14-4e4c-a73d-069cbf4541e6',
-                                'ramdisk_id': None}}
+                  'name': 'fakeimage123456',
+                  'created_at': timestamp,
+                  'updated_at': timestamp,
+                  'deleted_at': None,
+                  'deleted': False,
+                  'status': 'active',
+                  'is_public': True,
+                  'container_format': 'ami',
+                  'disk_format': 'ami',
+                  'properties': {
+                      'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+                      'ramdisk_id': None}}
 
         image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
-                 'name': 'fakeimage6',
-                 'created_at': timestamp,
-                 'updated_at': timestamp,
-                 'deleted_at': None,
-                 'deleted': False,
-                 'status': 'active',
-                 'is_public': False,
-                 'container_format': 'ova',
-                 'disk_format': 'vhd',
-                 'properties': {'kernel_id': 'nokernel',
-                                'ramdisk_id': 'nokernel',
-                                'architecture': 'x86_64',
-                                'auto_disk_config': 'False'}}
+                  'name': 'fakeimage6',
+                  'created_at': timestamp,
+                  'updated_at': timestamp,
+                  'deleted_at': None,
+                  'deleted': False,
+                  'status': 'active',
+                  'is_public': False,
+                  'container_format': 'ova',
+                  'disk_format': 'vhd',
+                  'properties': {'kernel_id': 'nokernel',
+                                 'ramdisk_id': 'nokernel',
+                                 'architecture': 'x86_64',
+                                 'auto_disk_config': 'False'}}
 
         image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
-                 'name': 'fakeimage7',
-                 'created_at': timestamp,
-                 'updated_at': timestamp,
-                 'deleted_at': None,
-                 'deleted': False,
-                 'status': 'active',
-                 'is_public': False,
-                 'container_format': 'ova',
-                 'disk_format': 'vhd',
-                 'properties': {'kernel_id': 'nokernel',
-                                'ramdisk_id': 'nokernel',
-                                'architecture': 'x86_64',
-                                'auto_disk_config': 'True'}}
+                  'name': 'fakeimage7',
+                  'created_at': timestamp,
+                  'updated_at': timestamp,
+                  'deleted_at': None,
+                  'deleted': False,
+                  'status': 'active',
+                  'is_public': False,
+                  'container_format': 'ova',
+                  'disk_format': 'vhd',
+                  'properties': {'kernel_id': 'nokernel',
+                                 'ramdisk_id': 'nokernel',
+                                 'architecture': 'x86_64',
+                                 'auto_disk_config': 'True'}}
 
         self.create(None, image1)
         self.create(None, image2)
index b61a1a8180ad34cd1f6350ef3031e0cc323e28a5..d0402c8bed163f2e622c35a3088426c4406335db 100644 (file)
@@ -31,7 +31,7 @@ from cinder.tests.glance import stubs as glance_stubs
 
 
 class NullWriter(object):
-    """Used to test ImageService.get which takes a writer object"""
+    """Used to test ImageService.get which takes a writer object."""
 
     def write(self, *arg, **kwargs):
         pass
@@ -109,11 +109,11 @@ class TestGlanceImageService(test.TestCase):
         def _fake_create_glance_client(context, host, port, version):
             return client
 
-        self.stubs.Set(glance, '_create_glance_client',
-                _fake_create_glance_client)
+        self.stubs.Set(glance,
+                       '_create_glance_client',
+                       _fake_create_glance_client)
 
-        client_wrapper = glance.GlanceClientWrapper(
-                'fake', 'fake_host', 9292)
+        client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292)
         return glance.GlanceImageService(client=client_wrapper)
 
     @staticmethod
@@ -131,7 +131,7 @@ class TestGlanceImageService(test.TestCase):
                                   deleted_at=self.NOW_GLANCE_FORMAT)
 
     def test_create_with_instance_id(self):
-        """Ensure instance_id is persisted as an image-property"""
+        """Ensure instance_id is persisted as an image-property."""
         fixture = {'name': 'test image',
                    'is_public': False,
                    'properties': {'instance_id': '42', 'user_id': 'fake'}}
@@ -458,7 +458,10 @@ class TestGlanceImageService(test.TestCase):
         # When retries are disabled, we should get an exception
         self.flags(glance_num_retries=0)
         self.assertRaises(exception.GlanceConnectionFailed,
-                service.download, self.context, image_id, writer)
+                          service.download,
+                          self.context,
+                          image_id,
+                          writer)
 
         # Now lets enable retries. No exception should happen now.
         tries = [0]
@@ -520,19 +523,19 @@ class TestGlanceImageService(test.TestCase):
     def test_glance_client_image_id(self):
         fixture = self._make_fixture(name='test image')
         image_id = self.service.create(self.context, fixture)['id']
-        (service, same_id) = glance.get_remote_image_service(
-                self.context, image_id)
+        (service, same_id) = glance.get_remote_image_service(self.context,
+                                                             image_id)
         self.assertEquals(same_id, image_id)
 
     def test_glance_client_image_ref(self):
         fixture = self._make_fixture(name='test image')
         image_id = self.service.create(self.context, fixture)['id']
         image_url = 'http://something-less-likely/%s' % image_id
-        (service, same_id) = glance.get_remote_image_service(
-                self.context, image_url)
+        (service, same_id) = glance.get_remote_image_service(self.context,
+                                                             image_url)
         self.assertEquals(same_id, image_id)
         self.assertEquals(service._client.host,
-                'something-less-likely')
+                          'something-less-likely')
 
 
 def _create_failing_glance_client(info):
index 793101fd914da5be08508297f04524a130f635b5..2df9ba912638dadd7d7eb0e0ded5218b1b0027c4 100644 (file)
@@ -53,7 +53,7 @@ class OpenStackApiAuthorizationException(OpenStackApiException):
         if not message:
             message = _("Authorization error")
         super(OpenStackApiAuthorizationException, self).__init__(message,
-                                                                  response)
+                                                                 response)
 
 
 class OpenStackApiNotFoundException(OpenStackApiException):
@@ -157,8 +157,8 @@ class TestOpenStackClient(object):
                     raise OpenStackApiAuthorizationException(response=response)
                 else:
                     raise OpenStackApiException(
-                                        message=_("Unexpected status code"),
-                                        response=response)
+                        message=_("Unexpected status code"),
+                        response=response)
 
         return response
 
index 25cf9ccfe68c0a6146b10b7942e89f95fbef54a8..95f58e751179dbdc3392626ef736821187b13cda 100644 (file)
@@ -21,7 +21,7 @@ CALLED_FUNCTION = []
 
 
 def example_decorator(name, function):
-    """ decorator for notify which is used from utils.monkey_patch()
+    """decorator for notify which is used from utils.monkey_patch().
 
         :param name: name of the function
         :param function: - object of the function
index bf9e8abdfd134b72c7100f8e55fb28462371767b..52fbb18aa503b092054f428c5e85211e618d0331 100644 (file)
@@ -64,13 +64,18 @@ class SchedulerRpcAPITestCase(test.TestCase):
 
     def test_update_service_capabilities(self):
         self._test_scheduler_api('update_service_capabilities',
-                rpc_method='fanout_cast', service_name='fake_name',
-                host='fake_host', capabilities='fake_capabilities')
+                                 rpc_method='fanout_cast',
+                                 service_name='fake_name',
+                                 host='fake_host',
+                                 capabilities='fake_capabilities')
 
     def test_create_volume(self):
         self._test_scheduler_api('create_volume',
-                rpc_method='cast', topic='topic', volume_id='volume_id',
-                snapshot_id='snapshot_id', image_id='image_id',
-                request_spec='fake_request_spec',
-                filter_properties='filter_properties',
-                version='1.2')
+                                 rpc_method='cast',
+                                 topic='topic',
+                                 volume_id='volume_id',
+                                 snapshot_id='snapshot_id',
+                                 image_id='image_id',
+                                 request_spec='fake_request_spec',
+                                 filter_properties='filter_properties',
+                                 version='1.2')
index 97fe9b8fdf75fb75605aa6a5410e944c608110af..5a954ad016fbb6ed831b266637ad114313a95b02 100644 (file)
@@ -35,7 +35,7 @@ FLAGS = flags.FLAGS
 
 
 class SchedulerManagerTestCase(test.TestCase):
-    """Test case for scheduler manager"""
+    """Test case for scheduler manager."""
 
     manager_cls = manager.SchedulerManager
     driver_cls = driver.Scheduler
@@ -63,29 +63,34 @@ class SchedulerManagerTestCase(test.TestCase):
         host = 'fake_host'
 
         self.mox.StubOutWithMock(self.manager.driver,
-                'update_service_capabilities')
+                                 'update_service_capabilities')
 
         # Test no capabilities passes empty dictionary
         self.manager.driver.update_service_capabilities(service_name,
-                host, {})
+                                                        host, {})
         self.mox.ReplayAll()
-        result = self.manager.update_service_capabilities(self.context,
-                service_name=service_name, host=host)
+        result = self.manager.update_service_capabilities(
+            self.context,
+            service_name=service_name,
+            host=host)
         self.mox.VerifyAll()
 
         self.mox.ResetAll()
         # Test capabilities passes correctly
         capabilities = {'fake_capability': 'fake_value'}
-        self.manager.driver.update_service_capabilities(
-                service_name, host, capabilities)
+        self.manager.driver.update_service_capabilities(service_name,
+                                                        host,
+                                                        capabilities)
         self.mox.ReplayAll()
-        result = self.manager.update_service_capabilities(self.context,
-                service_name=service_name, host=host,
-                capabilities=capabilities)
+        result = self.manager.update_service_capabilities(
+            self.context,
+            service_name=service_name, host=host,
+            capabilities=capabilities)
 
     def test_create_volume_exception_puts_volume_in_error_state(self):
-        """ Test that a NoValideHost exception for create_volume puts
-        the volume in 'error' state and eats the exception.
+        """Test that a NoValidHost exception for create_volume.
+
+        Puts the volume in 'error' state and eats the exception.
         """
         fake_volume_id = 1
         self._mox_schedule_method_helper('schedule_create_volume')
@@ -95,7 +100,8 @@ class SchedulerManagerTestCase(test.TestCase):
         volume_id = fake_volume_id
         request_spec = {'volume_id': fake_volume_id}
 
-        self.manager.driver.schedule_create_volume(self.context,
+        self.manager.driver.schedule_create_volume(
+            self.context,
             request_spec, {}).AndRaise(exception.NoValidHost(reason=""))
         db.volume_update(self.context, fake_volume_id, {'status': 'error'})
 
@@ -112,11 +118,11 @@ class SchedulerManagerTestCase(test.TestCase):
         setattr(self.manager.driver, method_name, stub_method)
 
         self.mox.StubOutWithMock(self.manager.driver,
-                method_name)
+                                 method_name)
 
 
 class SchedulerTestCase(test.TestCase):
-    """Test case for base scheduler driver class"""
+    """Test case for base scheduler driver class."""
 
     # So we can subclass this test and re-use tests if we need.
     driver_cls = driver.Scheduler
@@ -132,14 +138,16 @@ class SchedulerTestCase(test.TestCase):
         host = 'fake_host'
 
         self.mox.StubOutWithMock(self.driver.host_manager,
-                'update_service_capabilities')
+                                 'update_service_capabilities')
 
         capabilities = {'fake_capability': 'fake_value'}
-        self.driver.host_manager.update_service_capabilities(
-                service_name, host, capabilities)
+        self.driver.host_manager.update_service_capabilities(service_name,
+                                                             host,
+                                                             capabilities)
         self.mox.ReplayAll()
         result = self.driver.update_service_capabilities(service_name,
-                host, capabilities)
+                                                         host,
+                                                         capabilities)
 
     def test_hosts_up(self):
         service1 = {'host': 'host1'}
@@ -150,7 +158,7 @@ class SchedulerTestCase(test.TestCase):
         self.mox.StubOutWithMock(utils, 'service_is_up')
 
         db.service_get_all_by_topic(self.context,
-                self.topic).AndReturn(services)
+                                    self.topic).AndReturn(services)
         utils.service_is_up(service1).AndReturn(False)
         utils.service_is_up(service2).AndReturn(True)
 
@@ -168,12 +176,12 @@ class SchedulerDriverBaseTestCase(SchedulerTestCase):
         fake_kwargs = {'cat': 'meow'}
 
         self.assertRaises(NotImplementedError, self.driver.schedule,
-                         self.context, self.topic, 'schedule_something',
-                         *fake_args, **fake_kwargs)
+                          self.context, self.topic, 'schedule_something',
+                          *fake_args, **fake_kwargs)
 
 
 class SchedulerDriverModuleTestCase(test.TestCase):
-    """Test case for scheduler driver module methods"""
+    """Test case for scheduler driver module methods."""
 
     def setUp(self):
         super(SchedulerDriverModuleTestCase, self).setUp()
@@ -185,7 +193,8 @@ class SchedulerDriverModuleTestCase(test.TestCase):
 
         timeutils.utcnow().AndReturn('fake-now')
         db.volume_update(self.context, 31337,
-                {'host': 'fake_host', 'scheduled_at': 'fake-now'})
+                         {'host': 'fake_host',
+                          'scheduled_at': 'fake-now'})
 
         self.mox.ReplayAll()
         driver.volume_update_db(self.context, 31337, 'fake_host')
index 304222ed8ac79ca27465889e83574e4f82e3a814..79b2254db5d954b036387d0f1fd02582f3fe5ceb 100644 (file)
@@ -32,11 +32,12 @@ class HpSanISCSITestCase(test.TestCase):
         self.connector = {'ip': '10.0.0.2',
                           'initiator': 'iqn.1993-08.org.debian:01:222',
                           'host': 'fakehost'}
-        self.properties = {'target_discoverd': True,
-                           'target_portal': '10.0.1.6:3260',
-                           'target_iqn':
-                        'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
-                           'volume_id': 1}
+        self.properties = {
+            'target_discoverd': True,
+            'target_portal': '10.0.1.6:3260',
+            'target_iqn':
+            'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
+            'volume_id': 1}
 
     def tearDown(self):
         super(HpSanISCSITestCase, self).tearDown()
index 8a158c706395fc6f5c85be905850393eda4b4c83..4397b5b4e5136c7b592bd2f16577dfa167b9bc6a 100644 (file)
@@ -16,7 +16,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-"""Unit tests for the API endpoint"""
+"""Unit tests for the API endpoint."""
 
 import httplib
 import StringIO
@@ -25,18 +25,18 @@ import webob
 
 
 class FakeHttplibSocket(object):
-    """a fake socket implementation for httplib.HTTPResponse, trivial"""
+    """A fake socket implementation for httplib.HTTPResponse, trivial."""
     def __init__(self, response_string):
         self.response_string = response_string
         self._buffer = StringIO.StringIO(response_string)
 
     def makefile(self, _mode, _other):
-        """Returns the socket's internal buffer"""
+        """Returns the socket's internal buffer."""
         return self._buffer
 
 
 class FakeHttplibConnection(object):
-    """A fake httplib.HTTPConnection for boto to use
+    """A fake httplib.HTTPConnection for boto.
 
     requests made via this connection actually get translated and routed into
     our WSGI app, we then wait for the response and turn it back into
@@ -71,5 +71,5 @@ class FakeHttplibConnection(object):
         return self.sock.response_string
 
     def close(self):
-        """Required for compatibility with boto/tornado"""
+        """Required for compatibility with boto/tornado."""
         pass
index 0819a556ff4937c99982eafcaf9e3d3bb8b1ca9f..cf145510587db15b66ecf1e58e84ef1ea1ef1d03 100644 (file)
@@ -31,15 +31,14 @@ class RootwrapTestCase(test.TestCase):
             filters.CommandFilter("/usr/bin/foo_bar_not_exist", "root"),
             filters.RegExpFilter("/bin/cat", "root", 'cat', '/[a-z]+'),
             filters.CommandFilter("/nonexistent/cat", "root"),
-            filters.CommandFilter("/bin/cat", "root")  # Keep this one last
-            ]
+            filters.CommandFilter("/bin/cat", "root")]  # Keep this one last
 
     def test_RegExpFilter_match(self):
         usercmd = ["ls", "/root"]
         filtermatch = wrapper.match_filter(self.filters, usercmd)
         self.assertFalse(filtermatch is None)
         self.assertEqual(filtermatch.get_command(usercmd),
-            ["/bin/ls", "/root"])
+                         ["/bin/ls", "/root"])
 
     def test_RegExpFilter_reject(self):
         usercmd = ["ls", "root"]
@@ -92,7 +91,7 @@ class RootwrapTestCase(test.TestCase):
         self.assertTrue(f.match(usercmd) or f2.match(usercmd))
 
     def test_KillFilter_no_raise(self):
-        """Makes sure ValueError from bug 926412 is gone"""
+        """Makes sure ValueError from bug 926412 is gone."""
         f = filters.KillFilter("root", "")
         # Providing anything other than kill should be False
         usercmd = ['notkill', 999999]
@@ -102,7 +101,7 @@ class RootwrapTestCase(test.TestCase):
         self.assertFalse(f.match(usercmd))
 
     def test_KillFilter_deleted_exe(self):
-        """Makes sure deleted exe's are killed correctly"""
+        """Makes sure deleted exe's are killed correctly."""
         # See bug #967931.
         def fake_readlink(blah):
             return '/bin/commandddddd (deleted)'
index afa78cda6a43e14ed615b8941be2cb614bc0836b..886ca28825077917e29fc1a23f0e78e3dcb735d7 100644 (file)
@@ -63,8 +63,10 @@ class ContextTestCase(test.TestCase):
 
         self.stubs.Set(context.LOG, 'warn', fake_warn)
 
-        c = context.RequestContext('user', 'project',
-                extra_arg1='meow', extra_arg2='wuff')
+        c = context.RequestContext('user',
+                                   'project',
+                                   extra_arg1='meow',
+                                   extra_arg2='wuff')
         self.assertTrue(c)
         self.assertIn("'extra_arg1': 'meow'", info['log_msg'])
         self.assertIn("'extra_arg2': 'wuff'", info['log_msg'])
index afb1eab17c67fb86f1df3d005c8b7c42ee8ae78b..8cdabcc63311c174594cbda2f00d78f137a4b75d 100644 (file)
@@ -62,8 +62,8 @@ class FlagsTestCase(test.TestCase):
     def test_long_vs_short_flags(self):
         FLAGS.clear()
         FLAGS.register_cli_opt(cfg.StrOpt('duplicate_answer_long',
-                                               default='val',
-                                               help='desc'))
+                                          default='val',
+                                          help='desc'))
         argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
         args = flags.parse_args(argv, default_config_files=[])
 
@@ -72,8 +72,8 @@ class FlagsTestCase(test.TestCase):
 
         FLAGS.clear()
         FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer',
-                                               default=60,
-                                               help='desc'))
+                                          default=60,
+                                          help='desc'))
         args = flags.parse_args(argv, default_config_files=[])
         self.assertEqual(FLAGS.duplicate_answer, 60)
         self.assertEqual(FLAGS.duplicate_answer_long, 'val')
index 38c760835b095f67fd272b083973803e04672c72..eafc5301ac8162918cf978151c093f383c65fdbd 100644 (file)
@@ -76,7 +76,7 @@ class TargetAdminTestCase(object):
         tgtadm = iscsi.get_target_admin()
         tgtadm.set_execute(self.fake_execute)
         tgtadm.create_iscsi_target(self.target_name, self.tid,
-                self.lun, self.path)
+                                   self.lun, self.path)
         tgtadm.show_target(self.tid, iqn=self.target_name)
         tgtadm.remove_iscsi_target(self.tid, self.lun, self.vol_id)
 
@@ -95,8 +95,8 @@ class TgtAdmTestCase(test.TestCase, TargetAdminTestCase):
         self.flags(iscsi_helper='tgtadm')
         self.flags(volumes_dir=self.persist_tempdir)
         self.script_template = "\n".join([
-        'tgt-admin --update iqn.2011-09.org.foo.bar:blaa',
-        'tgt-admin --delete iqn.2010-10.org.openstack:volume-blaa'])
+            'tgt-admin --update iqn.2011-09.org.foo.bar:blaa',
+            'tgt-admin --delete iqn.2010-10.org.openstack:volume-blaa'])
 
     def tearDown(self):
         try:
@@ -113,9 +113,9 @@ class IetAdmTestCase(test.TestCase, TargetAdminTestCase):
         TargetAdminTestCase.setUp(self)
         self.flags(iscsi_helper='ietadm')
         self.script_template = "\n".join([
-        'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s',
-        'ietadm --op new --tid=%(tid)s --lun=%(lun)s '
-                '--params Path=%(path)s,Type=fileio',
-        'ietadm --op show --tid=%(tid)s',
-        'ietadm --op delete --tid=%(tid)s --lun=%(lun)s',
-        'ietadm --op delete --tid=%(tid)s'])
+            'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s',
+            'ietadm --op new --tid=%(tid)s --lun=%(lun)s '
+            '--params Path=%(path)s,Type=fileio',
+            'ietadm --op show --tid=%(tid)s',
+            'ietadm --op delete --tid=%(tid)s --lun=%(lun)s',
+            'ietadm --op delete --tid=%(tid)s'])
index 5c160f799e1a39a54e84e3ba72305ae52458dddf..9f23a142665f251b33876dcd322aa667d3266584 100644 (file)
@@ -76,7 +76,7 @@ def _have_mysql():
 
 
 class TestMigrations(test.TestCase):
-    """Test sqlalchemy-migrate migrations"""
+    """Test sqlalchemy-migrate migrations."""
 
     TEST_DATABASES = {}
     DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
@@ -87,7 +87,7 @@ class TestMigrations(test.TestCase):
                                       DEFAULT_CONFIG_FILE)
     MIGRATE_FILE = cinder.db.sqlalchemy.migrate_repo.__file__
     REPOSITORY = repository.Repository(
-                                os.path.abspath(os.path.dirname(MIGRATE_FILE)))
+        os.path.abspath(os.path.dirname(MIGRATE_FILE)))
 
     def setUp(self):
         super(TestMigrations, self).setUp()
@@ -256,11 +256,12 @@ class TestMigrations(test.TestCase):
         # upgrades successfully.
 
         # Place the database under version control
-        migration_api.version_control(engine, TestMigrations.REPOSITORY,
-                                     migration.INIT_VERSION)
+        migration_api.version_control(engine,
+                                      TestMigrations.REPOSITORY,
+                                      migration.INIT_VERSION)
         self.assertEqual(migration.INIT_VERSION,
-                migration_api.db_version(engine,
-                                         TestMigrations.REPOSITORY))
+                         migration_api.db_version(engine,
+                                                  TestMigrations.REPOSITORY))
 
         migration_api.upgrade(engine, TestMigrations.REPOSITORY,
                               migration.INIT_VERSION + 1)
@@ -268,7 +269,7 @@ class TestMigrations(test.TestCase):
         LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)
 
         for version in xrange(migration.INIT_VERSION + 2,
-                               TestMigrations.REPOSITORY.latest + 1):
+                              TestMigrations.REPOSITORY.latest + 1):
             # upgrade -> downgrade -> upgrade
             self._migrate_up(engine, version)
             if snake_walk:
@@ -300,5 +301,5 @@ class TestMigrations(test.TestCase):
                               TestMigrations.REPOSITORY,
                               version)
         self.assertEqual(version,
-                migration_api.db_version(engine,
-                                         TestMigrations.REPOSITORY))
+                         migration_api.db_version(engine,
+                                                  TestMigrations.REPOSITORY))
index 753c72fcc629eed0552a9abf1d6293227453b064..93aba26053a2e0fac464b93227fd847c86e65971 100644 (file)
@@ -578,21 +578,21 @@ RESPONSE_PREFIX = """<?xml version="1.0" encoding="UTF-8"?>
 RESPONSE_SUFFIX = """</env:Body></env:Envelope>"""
 
 APIS = ['ApiProxy', 'DatasetListInfoIterStart', 'DatasetListInfoIterNext',
-    'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit',
-    'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout',
-    'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext',
-    'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart',
-    'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd',
-    'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd',
-    'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd',
-    'StorageServiceDatasetProvision']
+        'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit',
+        'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout',
+        'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext',
+        'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart',
+        'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd',
+        'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd',
+        'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd',
+        'StorageServiceDatasetProvision']
 
 iter_count = 0
 iter_table = {}
 
 
 class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
-    """HTTP handler that fakes enough stuff to allow the driver to run"""
+    """HTTP handler that fakes enough stuff to allow the driver to run."""
 
     def do_GET(s):
         """Respond to a GET request."""
@@ -622,7 +622,7 @@ class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
         out.write('</portType>')
         out.write('<binding name="DfmBinding" type="na:DfmInterface">')
         out.write('<soap:binding style="document" ' +
-            'transport="http://schemas.xmlsoap.org/soap/http"/>')
+                  'transport="http://schemas.xmlsoap.org/soap/http"/>')
         for api in APIS:
             out.write('<operation name="%s">' % api)
             out.write('<soap:operation soapAction="urn:%s"/>' % api)
@@ -641,7 +641,7 @@ class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
         request_xml = s.rfile.read(int(s.headers['Content-Length']))
         ntap_ns = 'http://www.netapp.com/management/v1'
         nsmap = {'env': 'http://schemas.xmlsoap.org/soap/envelope/',
-            'na': ntap_ns}
+                 'na': ntap_ns}
         root = etree.fromstring(request_xml)
 
         body = root.xpath('/env:Envelope/env:Body', namespaces=nsmap)[0]
@@ -977,7 +977,7 @@ class NetAppDriverTestCase(test.TestCase):
         self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID,
                                self.VOLUME_TYPE, self.VOLUME_SIZE)
         volume = {'name': self.VOLUME_NAME, 'project_id': self.PROJECT_ID,
-            'id': 0, 'provider_auth': None}
+                  'id': 0, 'provider_auth': None}
         updates = self.driver._get_export(volume)
         self.assertTrue(updates['provider_location'])
         volume['provider_location'] = updates['provider_location']
@@ -1193,7 +1193,7 @@ class FakeCMODEServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
         out.write('<binding name="CloudStorageBinding" '
                   'type="na:CloudStorage">')
         out.write('<soap:binding style="document" ' +
-            'transport="http://schemas.xmlsoap.org/soap/http"/>')
+                  'transport="http://schemas.xmlsoap.org/soap/http"/>')
         for api in CMODE_APIS:
             out.write('<operation name="%s">' % api)
             out.write('<soap:operation soapAction=""/>')
@@ -1212,7 +1212,7 @@ class FakeCMODEServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
         request_xml = s.rfile.read(int(s.headers['Content-Length']))
         ntap_ns = 'http://cloud.netapp.com/'
         nsmap = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
-            'na': ntap_ns}
+                 'na': ntap_ns}
         root = etree.fromstring(request_xml)
 
         body = root.xpath('/soapenv:Envelope/soapenv:Body',
@@ -1322,24 +1322,18 @@ class FakeCmodeHTTPConnection(object):
 
 class NetAppCmodeISCSIDriverTestCase(test.TestCase):
     """Test case for NetAppISCSIDriver"""
-    volume = {
-            'name': 'lun1', 'size': 1, 'volume_name': 'lun1',
-            'os_type': 'linux', 'provider_location': 'lun1',
-            'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
-            'display_name': None, 'display_description': 'lun1',
-            'volume_type_id': None
-            }
-    snapshot = {
-            'name': 'lun2', 'size': 1, 'volume_name': 'lun1',
-            'volume_size': 1, 'project_id': 'project'
-            }
-    volume_sec = {
-            'name': 'vol_snapshot', 'size': 1, 'volume_name': 'lun1',
-            'os_type': 'linux', 'provider_location': 'lun1',
-            'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
-            'display_name': None, 'display_description': 'lun1',
-            'volume_type_id': None
-            }
+    volume = {'name': 'lun1', 'size': 1, 'volume_name': 'lun1',
+              'os_type': 'linux', 'provider_location': 'lun1',
+              'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
+              'display_name': None, 'display_description': 'lun1',
+              'volume_type_id': None}
+    snapshot = {'name': 'lun2', 'size': 1, 'volume_name': 'lun1',
+                'volume_size': 1, 'project_id': 'project'}
+    volume_sec = {'name': 'vol_snapshot', 'size': 1, 'volume_name': 'lun1',
+                  'os_type': 'linux', 'provider_location': 'lun1',
+                  'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
+                  'display_name': None, 'display_description': 'lun1',
+                  'volume_type_id': None}
 
     def setUp(self):
         super(NetAppCmodeISCSIDriverTestCase, self).setUp()
@@ -1371,7 +1365,7 @@ class NetAppCmodeISCSIDriverTestCase(test.TestCase):
         self.volume['provider_location'] = updates['provider_location']
         connector = {'initiator': 'init1'}
         connection_info = self.driver.initialize_connection(self.volume,
-                                                             connector)
+                                                            connector)
         self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
         properties = connection_info['data']
         self.driver.terminate_connection(self.volume, connector)
index c794be792e96c234963994357a3d31cb1f423294..47c8c80c9a95add27f6276ddc66aaade243bce70 100644 (file)
@@ -14,7 +14,7 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)"""
+"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)."""
 
 from cinder import context
 from cinder import exception
@@ -67,7 +67,7 @@ class FakeResponce(object):
 
 
 class NetappNfsDriverTestCase(test.TestCase):
-    """Test case for NetApp specific NFS clone driver"""
+    """Test case for NetApp specific NFS clone driver."""
 
     def setUp(self):
         self._driver = netapp_nfs.NetAppNFSDriver()
@@ -79,13 +79,11 @@ class NetappNfsDriverTestCase(test.TestCase):
     def test_check_for_setup_error(self):
         mox = self._mox
         drv = self._driver
-        required_flags = [
-                'netapp_wsdl_url',
-                'netapp_login',
-                'netapp_password',
-                'netapp_server_hostname',
-                'netapp_server_port'
-            ]
+        required_flags = ['netapp_wsdl_url',
+                          'netapp_login',
+                          'netapp_password',
+                          'netapp_server_hostname',
+                          'netapp_server_port']
 
         # check exception raises when flags are not set
         self.assertRaises(exception.CinderException,
@@ -124,7 +122,7 @@ class NetappNfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_create_snapshot(self):
-        """Test snapshot can be created and deleted"""
+        """Test snapshot can be created and deleted."""
         mox = self._mox
         drv = self._driver
 
@@ -137,7 +135,7 @@ class NetappNfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_create_volume_from_snapshot(self):
-        """Tests volume creation from snapshot"""
+        """Tests volume creation from snapshot."""
         drv = self._driver
         mox = self._mox
         volume = FakeVolume(1)
@@ -177,8 +175,8 @@ class NetappNfsDriverTestCase(test.TestCase):
             mox.StubOutWithMock(drv, '_get_volume_path')
 
         drv._get_provider_location(IgnoreArg())
-        drv._volume_not_present(IgnoreArg(), IgnoreArg())\
-                                        .AndReturn(not snapshot_exists)
+        drv._volume_not_present(IgnoreArg(),
+                                IgnoreArg()).AndReturn(not snapshot_exists)
 
         if snapshot_exists:
             drv._get_volume_path(IgnoreArg(), IgnoreArg())
index 986ff3a2a0ba5d916d4a0275ad2f1a4f831a8dab..9762f9738ccf1ea58c2f463238729b4e18b7e95d 100644 (file)
@@ -113,26 +113,21 @@ class TestNexentaDriver(cinder.test.TestCase):
         ('iscsitarget', 'create_target', ({'target_name': 'iqn:volume1'},),
             u'Unable to create iscsi target\n'
             u' iSCSI target iqn.1986-03.com.sun:02:cinder-volume1 already'
-                                                               u' configured\n'
-            u' itadm create-target failed with error 17\n',
-        ),
+            u' configured\n'
+            u' itadm create-target failed with error 17\n', ),
         ('stmf', 'create_targetgroup', ('cinder/volume1',),
             u'Unable to create targetgroup: stmfadm: cinder/volume1:'
-                                                          u' already exists\n',
-        ),
+            u' already exists\n', ),
         ('stmf', 'add_targetgroup_member', ('cinder/volume1', 'iqn:volume1'),
             u'Unable to add member to targetgroup: stmfadm:'
-                u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n',
-        ),
+            u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n', ),
         ('scsidisk', 'create_lu', ('cinder/volume1', {}),
             u"Unable to create lu with zvol 'cinder/volume1':\n"
-            u" sbdadm: filename /dev/zvol/rdsk/cinder/volume1: in use\n",
-        ),
+            u" sbdadm: filename /dev/zvol/rdsk/cinder/volume1: in use\n", ),
         ('scsidisk', 'add_lun_mapping_entry', ('cinder/volume1', {
-                'target_group': 'cinder/volume1', 'lun': '0'}),
+            'target_group': 'cinder/volume1', 'lun': '0'}),
             u"Unable to add view to zvol 'cinder/volume1' (LUNs in use: ):\n"
-            u" stmfadm: view entry exists\n",
-        ),
+            u" stmfadm: view entry exists\n", ),
     ]
 
     def _stub_export_method(self, module, method, args, error, fail=False):
@@ -150,7 +145,8 @@ class TestNexentaDriver(cinder.test.TestCase):
         self._stub_all_export_methods()
         self.mox.ReplayAll()
         retval = self.drv.create_export({}, self.TEST_VOLUME_REF)
-        self.assertEquals(retval,
+        self.assertEquals(
+            retval,
             {'provider_location':
                 '%s:%s,1 %s%s' % (FLAGS.nexenta_host,
                                   FLAGS.nexenta_iscsi_target_portal_port,
@@ -165,7 +161,9 @@ class TestNexentaDriver(cinder.test.TestCase):
                                      fail=True)
             self.mox.ReplayAll()
             self.assertRaises(nexenta.NexentaException,
-                        self.drv.create_export, {}, self.TEST_VOLUME_REF)
+                              self.drv.create_export,
+                              {},
+                              self.TEST_VOLUME_REF)
         return _test_create_export_fail
 
     for i in range(len(_CREATE_EXPORT_METHODS)):
@@ -185,8 +183,8 @@ class TestNexentaDriver(cinder.test.TestCase):
 
     def test_remove_export_fail_0(self):
         self.nms_mock.scsidisk.delete_lu('cinder/volume1')
-        self.nms_mock.stmf.destroy_targetgroup('cinder/volume1').AndRaise(
-                                                    nexenta.NexentaException())
+        self.nms_mock.stmf.destroy_targetgroup(
+            'cinder/volume1').AndRaise(nexenta.NexentaException())
         self.nms_mock.iscsitarget.delete_target('iqn:volume1')
         self.mox.ReplayAll()
         self.drv.remove_export({}, self.TEST_VOLUME_REF)
@@ -194,8 +192,8 @@ class TestNexentaDriver(cinder.test.TestCase):
     def test_remove_export_fail_1(self):
         self.nms_mock.scsidisk.delete_lu('cinder/volume1')
         self.nms_mock.stmf.destroy_targetgroup('cinder/volume1')
-        self.nms_mock.iscsitarget.delete_target('iqn:volume1').AndRaise(
-                                                    nexenta.NexentaException())
+        self.nms_mock.iscsitarget.delete_target(
+            'iqn:volume1').AndRaise(nexenta.NexentaException())
         self.mox.ReplayAll()
         self.drv.remove_export({}, self.TEST_VOLUME_REF)
 
@@ -205,9 +203,9 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
     URL_S = 'https://example.com/'
     USER = 'user'
     PASSWORD = 'password'
-    HEADERS = {'Authorization': 'Basic %s' % (base64.b64encode(
-                                                ':'.join((USER, PASSWORD))),),
-               'Content-Type': 'application/json'}
+    HEADERS = {'Authorization': 'Basic %s' % (
+        base64.b64encode(':'.join((USER, PASSWORD))),),
+        'Content-Type': 'application/json'}
     REQUEST = 'the request'
 
     def setUp(self):
@@ -222,21 +220,23 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
         urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock)
 
     def test_call(self):
-        urllib2.Request(self.URL,
-                '{"object": null, "params": ["arg1", "arg2"], "method": null}',
-                self.HEADERS).AndReturn(self.REQUEST)
+        urllib2.Request(
+            self.URL,
+            '{"object": null, "params": ["arg1", "arg2"], "method": null}',
+            self.HEADERS).AndReturn(self.REQUEST)
         self.resp_info_mock.status = ''
         self.resp_mock.read().AndReturn(
-                '{"error": null, "result": "the result"}')
+            '{"error": null, "result": "the result"}')
         self.mox.ReplayAll()
         result = self.proxy('arg1', 'arg2')
         self.assertEquals("the result", result)
 
     def test_call_deep(self):
-        urllib2.Request(self.URL,
-              '{"object": "obj1.subobj", "params": ["arg1", "arg2"],'
-                                                          ' "method": "meth"}',
-              self.HEADERS).AndReturn(self.REQUEST)
+        urllib2.Request(
+            self.URL,
+            '{"object": "obj1.subobj", "params": ["arg1", "arg2"],'
+            ' "method": "meth"}',
+            self.HEADERS).AndReturn(self.REQUEST)
         self.resp_info_mock.status = ''
         self.resp_mock.read().AndReturn(
             '{"error": null, "result": "the result"}')
@@ -245,12 +245,14 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
         self.assertEquals("the result", result)
 
     def test_call_auto(self):
-        urllib2.Request(self.URL,
-                '{"object": null, "params": ["arg1", "arg2"], "method": null}',
-                self.HEADERS).AndReturn(self.REQUEST)
-        urllib2.Request(self.URL_S,
-                '{"object": null, "params": ["arg1", "arg2"], "method": null}',
-                self.HEADERS).AndReturn(self.REQUEST)
+        urllib2.Request(
+            self.URL,
+            '{"object": null, "params": ["arg1", "arg2"], "method": null}',
+            self.HEADERS).AndReturn(self.REQUEST)
+        urllib2.Request(
+            self.URL_S,
+            '{"object": null, "params": ["arg1", "arg2"], "method": null}',
+            self.HEADERS).AndReturn(self.REQUEST)
         self.resp_info_mock.status = 'EOF in headers'
         self.resp_mock.read().AndReturn(
             '{"error": null, "result": "the result"}')
@@ -260,9 +262,10 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
         self.assertEquals("the result", result)
 
     def test_call_error(self):
-        urllib2.Request(self.URL,
-                '{"object": null, "params": ["arg1", "arg2"], "method": null}',
-                self.HEADERS).AndReturn(self.REQUEST)
+        urllib2.Request(
+            self.URL,
+            '{"object": null, "params": ["arg1", "arg2"], "method": null}',
+            self.HEADERS).AndReturn(self.REQUEST)
         self.resp_info_mock.status = ''
         self.resp_mock.read().AndReturn(
             '{"error": {"message": "the error"}, "result": "the result"}')
@@ -271,9 +274,10 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
                           self.proxy, 'arg1', 'arg2')
 
     def test_call_fail(self):
-        urllib2.Request(self.URL,
-                '{"object": null, "params": ["arg1", "arg2"], "method": null}',
-                self.HEADERS).AndReturn(self.REQUEST)
+        urllib2.Request(
+            self.URL,
+            '{"object": null, "params": ["arg1", "arg2"], "method": null}',
+            self.HEADERS).AndReturn(self.REQUEST)
         self.resp_info_mock.status = 'EOF in headers'
         self.proxy.auto = False
         self.mox.ReplayAll()
index 6a731633c9e9c4a42d70a2c943e7ad775d1b15fc..c3e98c3a275467bf38f2aa42077a3c19d64eb4b5 100644 (file)
@@ -14,7 +14,7 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-"""Unit tests for the NFS driver module"""
+"""Unit tests for the NFS driver module."""
 
 import __builtin__
 import errno
@@ -44,7 +44,7 @@ class DumbVolume(object):
 
 
 class NfsDriverTestCase(test.TestCase):
-    """Test case for NFS driver"""
+    """Test case for NFS driver."""
 
     TEST_NFS_EXPORT1 = 'nfs-host1:/export'
     TEST_NFS_EXPORT2 = 'nfs-host2:/export'
@@ -71,7 +71,7 @@ class NfsDriverTestCase(test.TestCase):
         self.stubs.Set(obj, attr_name, stub)
 
     def test_path_exists_should_return_true(self):
-        """_path_exists should return True if stat returns 0"""
+        """_path_exists should return True if stat returns 0."""
         mox = self._mox
         drv = self._driver
 
@@ -85,14 +85,17 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_path_exists_should_return_false(self):
-        """_path_exists should return True if stat doesn't return 0"""
+        """_path_exists should return True if stat doesn't return 0."""
         mox = self._mox
         drv = self._driver
 
         mox.StubOutWithMock(drv, '_execute')
-        drv._execute('stat', self.TEST_FILE_NAME, run_as_root=True).\
+        drv._execute(
+            'stat',
+            self.TEST_FILE_NAME, run_as_root=True).\
             AndRaise(ProcessExecutionError(
-            stderr="stat: cannot stat `test.txt': No such file or directory"))
+                stderr="stat: cannot stat `test.txt': No such file "
+                       "or directory"))
 
         mox.ReplayAll()
 
@@ -101,7 +104,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_local_path(self):
-        """local_path common use case"""
+        """local_path common use case."""
         nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
         drv = self._driver
 
@@ -114,7 +117,7 @@ class NfsDriverTestCase(test.TestCase):
             drv.local_path(volume))
 
     def test_mount_nfs_should_mount_correctly(self):
-        """_mount_nfs common case usage"""
+        """_mount_nfs common case usage."""
         mox = self._mox
         drv = self._driver
 
@@ -144,7 +147,7 @@ class NfsDriverTestCase(test.TestCase):
         drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
                      self.TEST_MNT_POINT, run_as_root=True).\
             AndRaise(ProcessExecutionError(
-                        stderr='is busy or already mounted'))
+                     stderr='is busy or already mounted'))
 
         mox.ReplayAll()
 
@@ -162,9 +165,13 @@ class NfsDriverTestCase(test.TestCase):
         drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
 
         mox.StubOutWithMock(drv, '_execute')
-        drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
-                     self.TEST_MNT_POINT, run_as_root=True).\
-        AndRaise(ProcessExecutionError(stderr='is busy or already mounted'))
+        drv._execute(
+            'mount',
+            '-t',
+            'nfs',
+            self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, run_as_root=True).\
+            AndRaise(ProcessExecutionError(stderr='is busy or '
+                                                  'already mounted'))
 
         mox.ReplayAll()
 
@@ -175,7 +182,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_mount_nfs_should_create_mountpoint_if_not_yet(self):
-        """_mount_nfs should create mountpoint if it doesn't exist"""
+        """_mount_nfs should create mountpoint if it doesn't exist."""
         mox = self._mox
         drv = self._driver
 
@@ -193,7 +200,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_mount_nfs_should_not_create_mountpoint_if_already(self):
-        """_mount_nfs should not create mountpoint if it already exists"""
+        """_mount_nfs should not create mountpoint if it already exists."""
         mox = self._mox
         drv = self._driver
 
@@ -210,14 +217,14 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_get_hash_str(self):
-        """_get_hash_str should calculation correct value"""
+        """_get_hash_str should calculation correct value."""
         drv = self._driver
 
         self.assertEqual('2f4f60214cf43c595666dd815f0360a4',
                          drv._get_hash_str(self.TEST_NFS_EXPORT1))
 
     def test_get_mount_point_for_share(self):
-        """_get_mount_point_for_share should calculate correct value"""
+        """_get_mount_point_for_share should calculate correct value."""
         drv = self._driver
 
         nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
@@ -226,7 +233,7 @@ class NfsDriverTestCase(test.TestCase):
                          drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
 
     def test_get_available_capacity_with_df(self):
-        """_get_available_capacity should calculate correct value"""
+        """_get_available_capacity should calculate correct value."""
         mox = self._mox
         drv = self._driver
 
@@ -255,7 +262,7 @@ class NfsDriverTestCase(test.TestCase):
         delattr(nfs.FLAGS, 'nfs_disk_util')
 
     def test_get_available_capacity_with_du(self):
-        """_get_available_capacity should calculate correct value"""
+        """_get_available_capacity should calculate correct value."""
         mox = self._mox
         drv = self._driver
 
@@ -316,7 +323,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_ensure_share_mounted(self):
-        """_ensure_share_mounted simple use case"""
+        """_ensure_share_mounted simple use case."""
         mox = self._mox
         drv = self._driver
 
@@ -334,7 +341,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_ensure_shares_mounted_should_save_mounting_successfully(self):
-        """_ensure_shares_mounted should save share if mounted with success"""
+        """_ensure_shares_mounted should save share if mounted with success."""
         mox = self._mox
         drv = self._driver
 
@@ -353,7 +360,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_ensure_shares_mounted_should_not_save_mounting_with_error(self):
-        """_ensure_shares_mounted should not save share if failed to mount"""
+        """_ensure_shares_mounted should not save share if failed to mount."""
         mox = self._mox
         drv = self._driver
 
@@ -371,7 +378,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_setup_should_throw_error_if_shares_config_not_configured(self):
-        """do_setup should throw error if shares config is not configured """
+        """do_setup should throw error if shares config is not configured."""
         drv = self._driver
 
         nfs.FLAGS.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
@@ -380,7 +387,7 @@ class NfsDriverTestCase(test.TestCase):
                           drv.do_setup, IsA(context.RequestContext))
 
     def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
-        """do_setup should throw error if nfs client is not installed """
+        """do_setup should throw error if nfs client is not installed."""
         mox = self._mox
         drv = self._driver
 
@@ -400,7 +407,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self):
-        """_find_share should throw error if there is no mounted shares"""
+        """_find_share should throw error if there is no mounted shares."""
         drv = self._driver
 
         drv._mounted_shares = []
@@ -409,7 +416,7 @@ class NfsDriverTestCase(test.TestCase):
                           self.TEST_SIZE_IN_GB)
 
     def test_find_share(self):
-        """_find_share simple use case"""
+        """_find_share simple use case."""
         mox = self._mox
         drv = self._driver
 
@@ -429,7 +436,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_find_share_should_throw_error_if_there_is_no_enough_place(self):
-        """_find_share should throw error if there is no share to host vol"""
+        """_find_share should throw error if there is no share to host vol."""
         mox = self._mox
         drv = self._driver
 
@@ -499,7 +506,7 @@ class NfsDriverTestCase(test.TestCase):
         delattr(nfs.FLAGS, 'nfs_sparsed_volumes')
 
     def test_create_volume_should_ensure_nfs_mounted(self):
-        """create_volume should ensure shares provided in config are mounted"""
+        """create_volume ensures shares provided in config are mounted."""
         mox = self._mox
         drv = self._driver
 
@@ -519,7 +526,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_create_volume_should_return_provider_location(self):
-        """create_volume should return provider_location with found share """
+        """create_volume should return provider_location with found share."""
         mox = self._mox
         drv = self._driver
 
@@ -540,7 +547,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_delete_volume(self):
-        """delete_volume simple test case"""
+        """delete_volume simple test case."""
         mox = self._mox
         drv = self._driver
 
@@ -566,7 +573,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_delete_should_ensure_share_mounted(self):
-        """delete_volume should ensure that corresponding share is mounted"""
+        """delete_volume should ensure that corresponding share is mounted."""
         mox = self._mox
         drv = self._driver
 
@@ -586,7 +593,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_delete_should_not_delete_if_provider_location_not_provided(self):
-        """delete_volume shouldn't try to delete if provider_location missed"""
+        """delete_volume shouldn't delete if provider_location missed."""
         mox = self._mox
         drv = self._driver
 
@@ -605,7 +612,7 @@ class NfsDriverTestCase(test.TestCase):
         mox.VerifyAll()
 
     def test_delete_should_not_delete_if_there_is_no_file(self):
-        """delete_volume should not try to delete if file missed"""
+        """delete_volume should not try to delete if file missed."""
         mox = self._mox
         drv = self._driver
 
index f99c227bbd7d7fe81081471605a349e2af5967b0..ed221a4ae4875a651594c9b1f0ffc7f17b3058ad 100644 (file)
@@ -15,7 +15,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-"""Test of Policy Engine For Cinder"""
+"""Test of Policy Engine For Cinder."""
 
 import os.path
 import StringIO
@@ -147,8 +147,8 @@ class PolicyTestCase(test.TestCase):
         # NOTE(dprince) we mix case in the Admin role here to ensure
         # case is ignored
         admin_context = context.RequestContext('admin',
-                                                'fake',
-                                                roles=['AdMiN'])
+                                               'fake',
+                                               roles=['AdMiN'])
         policy.enforce(admin_context, lowercase_action, self.target)
         policy.enforce(admin_context, uppercase_action, self.target)
 
@@ -180,7 +180,7 @@ class DefaultPolicyTestCase(test.TestCase):
 
     def test_policy_called(self):
         self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
-                self.context, "example:exist", {})
+                          self.context, "example:exist", {})
 
     def test_not_found_policy_calls_default(self):
         policy.enforce(self.context, "example:noexist", {})
@@ -188,7 +188,7 @@ class DefaultPolicyTestCase(test.TestCase):
     def test_default_not_found(self):
         self._set_brain("default_noexist")
         self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
-                self.context, "example:noexist", {})
+                          self.context, "example:noexist", {})
 
 
 class ContextIsAdminPolicyTestCase(test.TestCase):
index ad165587db597d25b0b388c1fb6fc6aaf32bf54b..6610f7a35522394d3e89f98c78507a0bd642f719 100644 (file)
@@ -62,7 +62,7 @@ class QuotaIntegrationTestCase(test.TestCase):
         cinder.tests.image.fake.FakeImageService_reset()
 
     def _create_volume(self, size=10):
-        """Create a test volume"""
+        """Create a test volume."""
         vol = {}
         vol['user_id'] = self.user_id
         vol['project_id'] = self.project_id
@@ -198,9 +198,9 @@ class BaseResourceTestCase(test.TestCase):
     def test_quota_with_project_no_class(self):
         self.flags(quota_volumes=10)
         resource = quota.BaseResource('test_resource', 'quota_volumes')
-        driver = FakeDriver(by_project=dict(
-                test_project=dict(test_resource=15),
-                ))
+        driver = FakeDriver(
+            by_project=dict(
+                test_project=dict(test_resource=15), ))
         context = FakeContext('test_project', None)
         quota_value = resource.quota(driver, context)
 
@@ -209,9 +209,9 @@ class BaseResourceTestCase(test.TestCase):
     def test_quota_no_project_with_class(self):
         self.flags(quota_volumes=10)
         resource = quota.BaseResource('test_resource', 'quota_volumes')
-        driver = FakeDriver(by_class=dict(
-                test_class=dict(test_resource=20),
-                ))
+        driver = FakeDriver(
+            by_class=dict(
+                test_class=dict(test_resource=20), ))
         context = FakeContext(None, 'test_class')
         quota_value = resource.quota(driver, context)
 
@@ -221,11 +221,8 @@ class BaseResourceTestCase(test.TestCase):
         self.flags(quota_volumes=10)
         resource = quota.BaseResource('test_resource', 'quota_volumes')
         driver = FakeDriver(by_project=dict(
-                test_project=dict(test_resource=15),
-                ),
-                            by_class=dict(
-                test_class=dict(test_resource=20),
-                ))
+            test_project=dict(test_resource=15), ),
+            by_class=dict(test_class=dict(test_resource=20), ))
         context = FakeContext('test_project', 'test_class')
         quota_value = resource.quota(driver, context)
 
@@ -235,9 +232,8 @@ class BaseResourceTestCase(test.TestCase):
         self.flags(quota_volumes=10)
         resource = quota.BaseResource('test_resource', 'quota_volumes')
         driver = FakeDriver(by_project=dict(
-                test_project=dict(test_resource=15),
-                override_project=dict(test_resource=20),
-                ))
+            test_project=dict(test_resource=15),
+            override_project=dict(test_resource=20), ))
         context = FakeContext('test_project', 'test_class')
         quota_value = resource.quota(driver, context,
                                      project_id='override_project')
@@ -248,9 +244,8 @@ class BaseResourceTestCase(test.TestCase):
         self.flags(quota_volumes=10)
         resource = quota.BaseResource('test_resource', 'quota_volumes')
         driver = FakeDriver(by_class=dict(
-                test_class=dict(test_resource=15),
-                override_class=dict(test_resource=20),
-                ))
+            test_class=dict(test_resource=15),
+            override_class=dict(test_resource=20), ))
         context = FakeContext('test_project', 'test_class')
         quota_value = resource.quota(driver, context,
                                      quota_class='override_class')
@@ -290,15 +285,13 @@ class QuotaEngineTestCase(test.TestCase):
         resources = [
             quota.AbsoluteResource('test_resource1'),
             quota.AbsoluteResource('test_resource2'),
-            quota.AbsoluteResource('test_resource3'),
-            ]
+            quota.AbsoluteResource('test_resource3'), ]
         quota_obj.register_resources(resources)
 
-        self.assertEqual(quota_obj._resources, dict(
-                test_resource1=resources[0],
-                test_resource2=resources[1],
-                test_resource3=resources[2],
-                ))
+        self.assertEqual(quota_obj._resources,
+                         dict(test_resource1=resources[0],
+                              test_resource2=resources[1],
+                              test_resource3=resources[2], ))
 
     def test_sync_predeclared(self):
         quota_obj = quota.QuotaEngine()
@@ -321,8 +314,7 @@ class QuotaEngineTestCase(test.TestCase):
             quota.ReservableResource('test_resource1', spam),
             quota.ReservableResource('test_resource2', spam),
             quota.ReservableResource('test_resource3', spam),
-            quota.ReservableResource('test_resource4', spam),
-            ]
+            quota.ReservableResource('test_resource4', spam), ]
         quota_obj.register_resources(resources[:2])
 
         self.assertEqual(resources[0].sync, spam)
@@ -332,27 +324,32 @@ class QuotaEngineTestCase(test.TestCase):
 
     def test_get_by_project(self):
         context = FakeContext('test_project', 'test_class')
-        driver = FakeDriver(by_project=dict(
+        driver = FakeDriver(
+            by_project=dict(
                 test_project=dict(test_resource=42)))
         quota_obj = quota.QuotaEngine(quota_driver_class=driver)
         result = quota_obj.get_by_project(context, 'test_project',
                                           'test_resource')
 
-        self.assertEqual(driver.called, [
-                ('get_by_project', context, 'test_project', 'test_resource'),
-                ])
+        self.assertEqual(driver.called,
+                         [('get_by_project',
+                           context,
+                           'test_project',
+                           'test_resource'), ])
         self.assertEqual(result, 42)
 
     def test_get_by_class(self):
         context = FakeContext('test_project', 'test_class')
-        driver = FakeDriver(by_class=dict(
+        driver = FakeDriver(
+            by_class=dict(
                 test_class=dict(test_resource=42)))
         quota_obj = quota.QuotaEngine(quota_driver_class=driver)
         result = quota_obj.get_by_class(context, 'test_class', 'test_resource')
 
-        self.assertEqual(driver.called, [
-                ('get_by_class', context, 'test_class', 'test_resource'),
-                ])
+        self.assertEqual(driver.called, [('get_by_class',
+                                          context,
+                                          'test_class',
+                                          'test_resource'), ])
         self.assertEqual(result, 42)
 
     def _make_quota_obj(self, driver):
@@ -361,8 +358,7 @@ class QuotaEngineTestCase(test.TestCase):
             quota.AbsoluteResource('test_resource4'),
             quota.AbsoluteResource('test_resource3'),
             quota.AbsoluteResource('test_resource2'),
-            quota.AbsoluteResource('test_resource1'),
-            ]
+            quota.AbsoluteResource('test_resource1'), ]
         quota_obj.register_resources(resources)
 
         return quota_obj
@@ -373,9 +369,9 @@ class QuotaEngineTestCase(test.TestCase):
         quota_obj = self._make_quota_obj(driver)
         result = quota_obj.get_defaults(context)
 
-        self.assertEqual(driver.called, [
-                ('get_defaults', context, quota_obj._resources),
-                ])
+        self.assertEqual(driver.called, [('get_defaults',
+                                          context,
+                                          quota_obj._resources), ])
         self.assertEqual(result, quota_obj._resources)
 
     def test_get_class_quotas(self):
@@ -386,11 +382,13 @@ class QuotaEngineTestCase(test.TestCase):
         result2 = quota_obj.get_class_quotas(context, 'test_class', False)
 
         self.assertEqual(driver.called, [
-                ('get_class_quotas', context, quota_obj._resources,
-                 'test_class', True),
-                ('get_class_quotas', context, quota_obj._resources,
-                 'test_class', False),
-                ])
+            ('get_class_quotas',
+             context,
+             quota_obj._resources,
+             'test_class', True),
+            ('get_class_quotas',
+             context, quota_obj._resources,
+             'test_class', False), ])
         self.assertEqual(result1, quota_obj._resources)
         self.assertEqual(result2, quota_obj._resources)
 
@@ -405,11 +403,20 @@ class QuotaEngineTestCase(test.TestCase):
                                                usages=False)
 
         self.assertEqual(driver.called, [
-                ('get_project_quotas', context, quota_obj._resources,
-                 'test_project', None, True, True),
-                ('get_project_quotas', context, quota_obj._resources,
-                 'test_project', 'test_class', False, False),
-                ])
+            ('get_project_quotas',
+             context,
+             quota_obj._resources,
+             'test_project',
+             None,
+             True,
+             True),
+            ('get_project_quotas',
+             context,
+             quota_obj._resources,
+             'test_project',
+             'test_class',
+             False,
+             False), ])
         self.assertEqual(result1, quota_obj._resources)
         self.assertEqual(result2, quota_obj._resources)
 
@@ -452,19 +459,21 @@ class QuotaEngineTestCase(test.TestCase):
                               test_resource3=2, test_resource4=1)
 
         self.assertEqual(driver.called, [
-                ('limit_check', context, quota_obj._resources, dict(
-                        test_resource1=4,
-                        test_resource2=3,
-                        test_resource3=2,
-                        test_resource4=1,
-                        )),
-                ])
+            ('limit_check',
+             context,
+             quota_obj._resources,
+             dict(
+                 test_resource1=4,
+                 test_resource2=3,
+                 test_resource3=2,
+                 test_resource4=1,)), ])
 
     def test_reserve(self):
         context = FakeContext(None, None)
-        driver = FakeDriver(reservations=[
-                'resv-01', 'resv-02', 'resv-03', 'resv-04',
-                ])
+        driver = FakeDriver(reservations=['resv-01',
+                                          'resv-02',
+                                          'resv-03',
+                                          'resv-04', ])
         quota_obj = self._make_quota_obj(driver)
         result1 = quota_obj.reserve(context, test_resource1=4,
                                     test_resource2=3, test_resource3=2,
@@ -474,25 +483,32 @@ class QuotaEngineTestCase(test.TestCase):
                                     test_resource3=3, test_resource4=4)
 
         self.assertEqual(driver.called, [
-                ('reserve', context, quota_obj._resources, dict(
-                        test_resource1=4,
-                        test_resource2=3,
-                        test_resource3=2,
-                        test_resource4=1,
-                        ), None),
-                ('reserve', context, quota_obj._resources, dict(
-                        test_resource1=1,
-                        test_resource2=2,
-                        test_resource3=3,
-                        test_resource4=4,
-                        ), 3600),
-                ])
-        self.assertEqual(result1, [
-                'resv-01', 'resv-02', 'resv-03', 'resv-04',
-                ])
-        self.assertEqual(result2, [
-                'resv-01', 'resv-02', 'resv-03', 'resv-04',
-                ])
+            ('reserve',
+             context,
+             quota_obj._resources,
+             dict(
+                 test_resource1=4,
+                 test_resource2=3,
+                 test_resource3=2,
+                 test_resource4=1, ),
+             None),
+            ('reserve',
+             context,
+             quota_obj._resources,
+             dict(
+                 test_resource1=1,
+                 test_resource2=2,
+                 test_resource3=3,
+                 test_resource4=4, ),
+             3600), ])
+        self.assertEqual(result1, ['resv-01',
+                                   'resv-02',
+                                   'resv-03',
+                                   'resv-04', ])
+        self.assertEqual(result2, ['resv-01',
+                                   'resv-02',
+                                   'resv-03',
+                                   'resv-04', ])
 
     def test_commit(self):
         context = FakeContext(None, None)
@@ -500,9 +516,12 @@ class QuotaEngineTestCase(test.TestCase):
         quota_obj = self._make_quota_obj(driver)
         quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
 
-        self.assertEqual(driver.called, [
-                ('commit', context, ['resv-01', 'resv-02', 'resv-03']),
-                ])
+        self.assertEqual(driver.called,
+                         [('commit',
+                           context,
+                           ['resv-01',
+                            'resv-02',
+                            'resv-03']), ])
 
     def test_rollback(self):
         context = FakeContext(None, None)
@@ -510,9 +529,12 @@ class QuotaEngineTestCase(test.TestCase):
         quota_obj = self._make_quota_obj(driver)
         quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
 
-        self.assertEqual(driver.called, [
-                ('rollback', context, ['resv-01', 'resv-02', 'resv-03']),
-                ])
+        self.assertEqual(driver.called,
+                         [('rollback',
+                           context,
+                           ['resv-01',
+                            'resv-02',
+                            'resv-03']), ])
 
     def test_destroy_all_by_project(self):
         context = FakeContext(None, None)
@@ -520,9 +542,10 @@ class QuotaEngineTestCase(test.TestCase):
         quota_obj = self._make_quota_obj(driver)
         quota_obj.destroy_all_by_project(context, 'test_project')
 
-        self.assertEqual(driver.called, [
-                ('destroy_all_by_project', context, 'test_project'),
-                ])
+        self.assertEqual(driver.called,
+                         [('destroy_all_by_project',
+                           context,
+                           'test_project'), ])
 
     def test_expire(self):
         context = FakeContext(None, None)
@@ -530,9 +553,7 @@ class QuotaEngineTestCase(test.TestCase):
         quota_obj = self._make_quota_obj(driver)
         quota_obj.expire(context)
 
-        self.assertEqual(driver.called, [
-                ('expire', context),
-                ])
+        self.assertEqual(driver.called, [('expire', context), ])
 
     def test_resources(self):
         quota_obj = self._make_quota_obj(None)
@@ -567,20 +588,18 @@ class DbQuotaDriverTestCase(test.TestCase):
         # Use our pre-defined resources
         result = self.driver.get_defaults(None, quota.QUOTAS._resources)
 
-        self.assertEqual(result, dict(
+        self.assertEqual(
+            result,
+            dict(
                 volumes=10,
-                gigabytes=1000,
-                ))
+                gigabytes=1000, ))
 
     def _stub_quota_class_get_all_by_name(self):
         # Stub out quota_class_get_all_by_name
         def fake_qcgabn(context, quota_class):
             self.calls.append('quota_class_get_all_by_name')
             self.assertEqual(quota_class, 'test_class')
-            return dict(
-                gigabytes=500,
-                volumes=10,
-                )
+            return dict(gigabytes=500, volumes=10, )
         self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
 
     def test_get_class_quotas(self):
@@ -589,10 +608,7 @@ class DbQuotaDriverTestCase(test.TestCase):
                                               'test_class')
 
         self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
-        self.assertEqual(result, dict(
-                volumes=10,
-                gigabytes=500,
-                ))
+        self.assertEqual(result, dict(volumes=10, gigabytes=500, ))
 
     def test_get_class_quotas_no_defaults(self):
         self._stub_quota_class_get_all_by_name()
@@ -600,28 +616,19 @@ class DbQuotaDriverTestCase(test.TestCase):
                                               'test_class', False)
 
         self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
-        self.assertEqual(result, dict(
-                volumes=10,
-                gigabytes=500,
-                ))
+        self.assertEqual(result, dict(volumes=10, gigabytes=500, ))
 
     def _stub_get_by_project(self):
         def fake_qgabp(context, project_id):
             self.calls.append('quota_get_all_by_project')
             self.assertEqual(project_id, 'test_project')
-            return dict(
-                volumes=10,
-                gigabytes=50,
-                reserved=0
-                )
+            return dict(volumes=10, gigabytes=50, reserved=0)
 
         def fake_qugabp(context, project_id):
             self.calls.append('quota_usage_get_all_by_project')
             self.assertEqual(project_id, 'test_project')
-            return dict(
-                volumes=dict(in_use=2, reserved=0),
-                gigabytes=dict(in_use=10, reserved=0),
-                )
+            return dict(volumes=dict(in_use=2, reserved=0),
+                        gigabytes=dict(in_use=10, reserved=0), )
 
         self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
         self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
@@ -634,23 +641,15 @@ class DbQuotaDriverTestCase(test.TestCase):
             FakeContext('test_project', 'test_class'),
             quota.QUOTAS._resources, 'test_project')
 
-        self.assertEqual(self.calls, [
-                'quota_get_all_by_project',
-                'quota_usage_get_all_by_project',
-                'quota_class_get_all_by_name',
-                ])
-        self.assertEqual(result, dict(
-                volumes=dict(
-                    limit=10,
-                    in_use=2,
-                    reserved=0,
-                    ),
-                gigabytes=dict(
-                    limit=50,
-                    in_use=10,
-                    reserved=0,
-                    ),
-                ))
+        self.assertEqual(self.calls, ['quota_get_all_by_project',
+                                      'quota_usage_get_all_by_project',
+                                      'quota_class_get_all_by_name', ])
+        self.assertEqual(result, dict(volumes=dict(limit=10,
+                                                   in_use=2,
+                                                   reserved=0, ),
+                                      gigabytes=dict(limit=50,
+                                                     in_use=10,
+                                                     reserved=0, ), ))
 
     def test_get_project_quotas_alt_context_no_class(self):
         self._stub_get_by_project()
@@ -658,22 +657,14 @@ class DbQuotaDriverTestCase(test.TestCase):
             FakeContext('other_project', 'other_class'),
             quota.QUOTAS._resources, 'test_project')
 
-        self.assertEqual(self.calls, [
-                'quota_get_all_by_project',
-                'quota_usage_get_all_by_project',
-                ])
-        self.assertEqual(result, dict(
-                volumes=dict(
-                    limit=10,
-                    in_use=2,
-                    reserved=0,
-                    ),
-                gigabytes=dict(
-                    limit=50,
-                    in_use=10,
-                    reserved=0,
-                    ),
-                ))
+        self.assertEqual(self.calls, ['quota_get_all_by_project',
+                                      'quota_usage_get_all_by_project', ])
+        self.assertEqual(result, dict(volumes=dict(limit=10,
+                                                   in_use=2,
+                                                   reserved=0, ),
+                                      gigabytes=dict(limit=50,
+                                                     in_use=10,
+                                                     reserved=0, ), ))
 
     def test_get_project_quotas_alt_context_with_class(self):
         self._stub_get_by_project()
@@ -681,23 +672,15 @@ class DbQuotaDriverTestCase(test.TestCase):
             FakeContext('other_project', 'other_class'),
             quota.QUOTAS._resources, 'test_project', quota_class='test_class')
 
-        self.assertEqual(self.calls, [
-                'quota_get_all_by_project',
-                'quota_usage_get_all_by_project',
-                'quota_class_get_all_by_name',
-                ])
-        self.assertEqual(result, dict(
-                volumes=dict(
-                    limit=10,
-                    in_use=2,
-                    reserved=0,
-                    ),
-                gigabytes=dict(
-                    limit=50,
-                    in_use=10,
-                    reserved=0,
-                    ),
-                ))
+        self.assertEqual(self.calls, ['quota_get_all_by_project',
+                                      'quota_usage_get_all_by_project',
+                                      'quota_class_get_all_by_name', ])
+        self.assertEqual(result, dict(volumes=dict(limit=10,
+                                                   in_use=2,
+                                                   reserved=0, ),
+                                      gigabytes=dict(limit=50,
+                                                     in_use=10,
+                                                     reserved=0, ), ))
 
     def test_get_project_quotas_no_defaults(self):
         self._stub_get_by_project()
@@ -705,23 +688,16 @@ class DbQuotaDriverTestCase(test.TestCase):
             FakeContext('test_project', 'test_class'),
             quota.QUOTAS._resources, 'test_project', defaults=False)
 
-        self.assertEqual(self.calls, [
-                'quota_get_all_by_project',
-                'quota_usage_get_all_by_project',
-                'quota_class_get_all_by_name',
-                ])
-        self.assertEqual(result, dict(
-                gigabytes=dict(
-                    limit=50,
-                    in_use=10,
-                    reserved=0,
-                    ),
-                volumes=dict(
-                    limit=10,
-                    in_use=2,
-                    reserved=0,
-                    ),
-                ))
+        self.assertEqual(self.calls, ['quota_get_all_by_project',
+                                      'quota_usage_get_all_by_project',
+                                      'quota_class_get_all_by_name', ])
+        self.assertEqual(result,
+                         dict(gigabytes=dict(limit=50,
+                                             in_use=10,
+                                             reserved=0, ),
+                              volumes=dict(limit=10,
+                                           in_use=2,
+                                           reserved=0, ), ))
 
     def test_get_project_quotas_no_usages(self):
         self._stub_get_by_project()
@@ -729,18 +705,10 @@ class DbQuotaDriverTestCase(test.TestCase):
             FakeContext('test_project', 'test_class'),
             quota.QUOTAS._resources, 'test_project', usages=False)
 
-        self.assertEqual(self.calls, [
-                'quota_get_all_by_project',
-                'quota_class_get_all_by_name',
-                ])
-        self.assertEqual(result, dict(
-                volumes=dict(
-                    limit=10,
-                    ),
-                gigabytes=dict(
-                    limit=50,
-                    ),
-                ))
+        self.assertEqual(self.calls, ['quota_get_all_by_project',
+                                      'quota_class_get_all_by_name', ])
+        self.assertEqual(result, dict(volumes=dict(limit=10, ),
+                                      gigabytes=dict(limit=50, ), ))
 
     def _stub_get_project_quotas(self):
         def fake_get_project_quotas(context, resources, project_id,
@@ -794,10 +762,7 @@ class DbQuotaDriverTestCase(test.TestCase):
                                          True)
 
         self.assertEqual(self.calls, ['get_project_quotas'])
-        self.assertEqual(result, dict(
-                volumes=10,
-                gigabytes=1000,
-                ))
+        self.assertEqual(result, dict(volumes=10, gigabytes=1000, ))
 
     def _stub_quota_reserve(self):
         def fake_quota_reserve(context, resources, quotas, deltas, expire,
@@ -825,10 +790,8 @@ class DbQuotaDriverTestCase(test.TestCase):
                                      dict(volumes=2))
 
         expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
-        self.assertEqual(self.calls, [
-                'get_project_quotas',
-                ('quota_reserve', expire, 0, 0),
-                ])
+        self.assertEqual(self.calls, ['get_project_quotas',
+                                      ('quota_reserve', expire, 0, 0), ])
         self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
 
     def test_reserve_int_expire(self):
@@ -839,10 +802,8 @@ class DbQuotaDriverTestCase(test.TestCase):
                                      dict(volumes=2), expire=3600)
 
         expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
-        self.assertEqual(self.calls, [
-                'get_project_quotas',
-                ('quota_reserve', expire, 0, 0),
-                ])
+        self.assertEqual(self.calls, ['get_project_quotas',
+                                      ('quota_reserve', expire, 0, 0), ])
         self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
 
     def test_reserve_timedelta_expire(self):
@@ -854,10 +815,8 @@ class DbQuotaDriverTestCase(test.TestCase):
                                      dict(volumes=2), expire=expire_delta)
 
         expire = timeutils.utcnow() + expire_delta
-        self.assertEqual(self.calls, [
-                'get_project_quotas',
-                ('quota_reserve', expire, 0, 0),
-                ])
+        self.assertEqual(self.calls, ['get_project_quotas',
+                                      ('quota_reserve', expire, 0, 0), ])
         self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
 
     def test_reserve_datetime_expire(self):
@@ -868,10 +827,8 @@ class DbQuotaDriverTestCase(test.TestCase):
                                      quota.QUOTAS._resources,
                                      dict(volumes=2), expire=expire)
 
-        self.assertEqual(self.calls, [
-                'get_project_quotas',
-                ('quota_reserve', expire, 0, 0),
-                ])
+        self.assertEqual(self.calls, ['get_project_quotas',
+                                      ('quota_reserve', expire, 0, 0), ])
         self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
 
     def test_reserve_until_refresh(self):
@@ -883,10 +840,8 @@ class DbQuotaDriverTestCase(test.TestCase):
                                      quota.QUOTAS._resources,
                                      dict(volumes=2), expire=expire)
 
-        self.assertEqual(self.calls, [
-                'get_project_quotas',
-                ('quota_reserve', expire, 500, 0),
-                ])
+        self.assertEqual(self.calls, ['get_project_quotas',
+                                      ('quota_reserve', expire, 500, 0), ])
         self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
 
     def test_reserve_max_age(self):
@@ -898,10 +853,8 @@ class DbQuotaDriverTestCase(test.TestCase):
                                      quota.QUOTAS._resources,
                                      dict(volumes=2), expire=expire)
 
-        self.assertEqual(self.calls, [
-                'get_project_quotas',
-                ('quota_reserve', expire, 0, 86400),
-                ])
+        self.assertEqual(self.calls, ['get_project_quotas',
+                                      ('quota_reserve', expire, 0, 86400), ])
         self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
 
 
@@ -1061,117 +1014,96 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
 
     def test_quota_reserve_create_usages(self):
         context = FakeContext('test_project', 'test_class')
-        quotas = dict(
-            volumes=5,
-            gigabytes=10 * 1024,
-            )
-        deltas = dict(
-            volumes=2,
-            gigabytes=2 * 1024,
-            )
+        quotas = dict(volumes=5,
+                      gigabytes=10 * 1024, )
+        deltas = dict(volumes=2,
+                      gigabytes=2 * 1024, )
         result = sqa_api.quota_reserve(context, self.resources, quotas,
                                        deltas, self.expire, 0, 0)
 
         self.assertEqual(self.sync_called, set(['volumes', 'gigabytes']))
-        self.compare_usage(self.usages_created, [
-                dict(resource='volumes',
-                     project_id='test_project',
-                     in_use=0,
-                     reserved=2,
-                     until_refresh=None),
-                dict(resource='gigabytes',
-                     project_id='test_project',
-                     in_use=0,
-                     reserved=2 * 1024,
-                     until_refresh=None),
-                ])
-        self.compare_reservation(result, [
-                dict(resource='volumes',
-                     usage_id=self.usages_created['volumes'],
-                     project_id='test_project',
-                     delta=2),
-                dict(resource='gigabytes',
-                     usage_id=self.usages_created['gigabytes'],
-                     delta=2 * 1024),
-                ])
+        self.compare_usage(self.usages_created,
+                           [dict(resource='volumes',
+                                 project_id='test_project',
+                                 in_use=0,
+                                 reserved=2,
+                                 until_refresh=None),
+                            dict(resource='gigabytes',
+                                 project_id='test_project',
+                                 in_use=0,
+                                 reserved=2 * 1024,
+                                 until_refresh=None), ])
+        self.compare_reservation(
+            result,
+            [dict(resource='volumes',
+                  usage_id=self.usages_created['volumes'],
+                  project_id='test_project',
+                  delta=2),
+             dict(resource='gigabytes',
+                  usage_id=self.usages_created['gigabytes'],
+                  delta=2 * 1024), ])
 
     def test_quota_reserve_negative_in_use(self):
         self.init_usage('test_project', 'volumes', -1, 0, until_refresh=1)
         self.init_usage('test_project', 'gigabytes', -1, 0, until_refresh=1)
         context = FakeContext('test_project', 'test_class')
-        quotas = dict(
-            volumes=5,
-            gigabytes=10 * 1024,
-            )
-        deltas = dict(
-            volumes=2,
-            gigabytes=2 * 1024,
-            )
+        quotas = dict(volumes=5,
+                      gigabytes=10 * 1024, )
+        deltas = dict(volumes=2,
+                      gigabytes=2 * 1024, )
         result = sqa_api.quota_reserve(context, self.resources, quotas,
                                        deltas, self.expire, 5, 0)
 
         self.assertEqual(self.sync_called, set(['volumes', 'gigabytes']))
-        self.compare_usage(self.usages, [
-                dict(resource='volumes',
-                     project_id='test_project',
-                     in_use=2,
-                     reserved=2,
-                     until_refresh=5),
-                dict(resource='gigabytes',
-                     project_id='test_project',
-                     in_use=2,
-                     reserved=2 * 1024,
-                     until_refresh=5),
-                ])
+        self.compare_usage(self.usages, [dict(resource='volumes',
+                                              project_id='test_project',
+                                              in_use=2,
+                                              reserved=2,
+                                              until_refresh=5),
+                                         dict(resource='gigabytes',
+                                              project_id='test_project',
+                                              in_use=2,
+                                              reserved=2 * 1024,
+                                              until_refresh=5), ])
         self.assertEqual(self.usages_created, {})
-        self.compare_reservation(result, [
-                dict(resource='volumes',
-                     usage_id=self.usages['volumes'],
-                     project_id='test_project',
-                     delta=2),
-                dict(resource='gigabytes',
-                     usage_id=self.usages['gigabytes'],
-                     delta=2 * 1024),
-                ])
+        self.compare_reservation(result,
+                                 [dict(resource='volumes',
+                                       usage_id=self.usages['volumes'],
+                                       project_id='test_project',
+                                       delta=2),
+                                  dict(resource='gigabytes',
+                                       usage_id=self.usages['gigabytes'],
+                                       delta=2 * 1024), ])
 
     def test_quota_reserve_until_refresh(self):
         self.init_usage('test_project', 'volumes', 3, 0, until_refresh=1)
         self.init_usage('test_project', 'gigabytes', 3, 0, until_refresh=1)
         context = FakeContext('test_project', 'test_class')
-        quotas = dict(
-            volumes=5,
-            gigabytes=10 * 1024,
-            )
-        deltas = dict(
-            volumes=2,
-            gigabytes=2 * 1024,
-            )
+        quotas = dict(volumes=5, gigabytes=10 * 1024, )
+        deltas = dict(volumes=2, gigabytes=2 * 1024, )
         result = sqa_api.quota_reserve(context, self.resources, quotas,
                                        deltas, self.expire, 5, 0)
 
         self.assertEqual(self.sync_called, set(['volumes', 'gigabytes']))
-        self.compare_usage(self.usages, [
-                dict(resource='volumes',
-                     project_id='test_project',
-                     in_use=2,
-                     reserved=2,
-                     until_refresh=5),
-                dict(resource='gigabytes',
-                     project_id='test_project',
-                     in_use=2,
-                     reserved=2 * 1024,
-                     until_refresh=5),
-                ])
+        self.compare_usage(self.usages, [dict(resource='volumes',
+                                              project_id='test_project',
+                                              in_use=2,
+                                              reserved=2,
+                                              until_refresh=5),
+                                         dict(resource='gigabytes',
+                                              project_id='test_project',
+                                              in_use=2,
+                                              reserved=2 * 1024,
+                                              until_refresh=5), ])
         self.assertEqual(self.usages_created, {})
-        self.compare_reservation(result, [
-                dict(resource='volumes',
-                     usage_id=self.usages['volumes'],
-                     project_id='test_project',
-                     delta=2),
-                dict(resource='gigabytes',
-                     usage_id=self.usages['gigabytes'],
-                     delta=2 * 1024),
-                ])
+        self.compare_reservation(result,
+                                 [dict(resource='volumes',
+                                       usage_id=self.usages['volumes'],
+                                       project_id='test_project',
+                                       delta=2),
+                                  dict(resource='gigabytes',
+                                       usage_id=self.usages['gigabytes'],
+                                       delta=2 * 1024), ])
 
     def test_quota_reserve_max_age(self):
         max_age = 3600
@@ -1182,149 +1114,114 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
         self.init_usage('test_project', 'gigabytes', 3, 0,
                         created_at=record_created, updated_at=record_created)
         context = FakeContext('test_project', 'test_class')
-        quotas = dict(
-            volumes=5,
-            gigabytes=10 * 1024,
-            )
-        deltas = dict(
-            volumes=2,
-            gigabytes=2 * 1024,
-            )
+        quotas = dict(volumes=5, gigabytes=10 * 1024, )
+        deltas = dict(volumes=2, gigabytes=2 * 1024, )
         result = sqa_api.quota_reserve(context, self.resources, quotas,
                                        deltas, self.expire, 0, max_age)
 
         self.assertEqual(self.sync_called, set(['volumes', 'gigabytes']))
-        self.compare_usage(self.usages, [
-                dict(resource='volumes',
-                     project_id='test_project',
-                     in_use=2,
-                     reserved=2,
-                     until_refresh=None),
-                dict(resource='gigabytes',
-                     project_id='test_project',
-                     in_use=2,
-                     reserved=2 * 1024,
-                     until_refresh=None),
-                ])
+        self.compare_usage(self.usages, [dict(resource='volumes',
+                                              project_id='test_project',
+                                              in_use=2,
+                                              reserved=2,
+                                              until_refresh=None),
+                                         dict(resource='gigabytes',
+                                              project_id='test_project',
+                                              in_use=2,
+                                              reserved=2 * 1024,
+                                              until_refresh=None), ])
         self.assertEqual(self.usages_created, {})
-        self.compare_reservation(result, [
-                dict(resource='volumes',
-                     usage_id=self.usages['volumes'],
-                     project_id='test_project',
-                     delta=2),
-                dict(resource='gigabytes',
-                     usage_id=self.usages['gigabytes'],
-                     delta=2 * 1024),
-                ])
+        self.compare_reservation(result,
+                                 [dict(resource='volumes',
+                                       usage_id=self.usages['volumes'],
+                                       project_id='test_project',
+                                       delta=2),
+                                  dict(resource='gigabytes',
+                                       usage_id=self.usages['gigabytes'],
+                                       delta=2 * 1024), ])
 
     def test_quota_reserve_no_refresh(self):
         self.init_usage('test_project', 'volumes', 3, 0)
         self.init_usage('test_project', 'gigabytes', 3, 0)
         context = FakeContext('test_project', 'test_class')
-        quotas = dict(
-            volumes=5,
-            gigabytes=10 * 1024,
-            )
-        deltas = dict(
-            volumes=2,
-            gigabytes=2 * 1024,
-            )
+        quotas = dict(volumes=5, gigabytes=10 * 1024, )
+        deltas = dict(volumes=2, gigabytes=2 * 1024, )
         result = sqa_api.quota_reserve(context, self.resources, quotas,
                                        deltas, self.expire, 0, 0)
 
         self.assertEqual(self.sync_called, set([]))
-        self.compare_usage(self.usages, [
-                dict(resource='volumes',
-                     project_id='test_project',
-                     in_use=3,
-                     reserved=2,
-                     until_refresh=None),
-                dict(resource='gigabytes',
-                     project_id='test_project',
-                     in_use=3,
-                     reserved=2 * 1024,
-                     until_refresh=None),
-                ])
+        self.compare_usage(self.usages, [dict(resource='volumes',
+                                              project_id='test_project',
+                                              in_use=3,
+                                              reserved=2,
+                                              until_refresh=None),
+                                         dict(resource='gigabytes',
+                                              project_id='test_project',
+                                              in_use=3,
+                                              reserved=2 * 1024,
+                                              until_refresh=None), ])
         self.assertEqual(self.usages_created, {})
-        self.compare_reservation(result, [
-                dict(resource='volumes',
-                     usage_id=self.usages['volumes'],
-                     project_id='test_project',
-                     delta=2),
-                dict(resource='gigabytes',
-                     usage_id=self.usages['gigabytes'],
-                     delta=2 * 1024),
-                ])
+        self.compare_reservation(result,
+                                 [dict(resource='volumes',
+                                       usage_id=self.usages['volumes'],
+                                       project_id='test_project',
+                                       delta=2),
+                                  dict(resource='gigabytes',
+                                       usage_id=self.usages['gigabytes'],
+                                       delta=2 * 1024), ])
 
     def test_quota_reserve_unders(self):
         self.init_usage('test_project', 'volumes', 1, 0)
         self.init_usage('test_project', 'gigabytes', 1 * 1024, 0)
         context = FakeContext('test_project', 'test_class')
-        quotas = dict(
-            volumes=5,
-            gigabytes=10 * 1024,
-            )
-        deltas = dict(
-            volumes=-2,
-            gigabytes=-2 * 1024,
-            )
+        quotas = dict(volumes=5, gigabytes=10 * 1024, )
+        deltas = dict(volumes=-2, gigabytes=-2 * 1024, )
         result = sqa_api.quota_reserve(context, self.resources, quotas,
                                        deltas, self.expire, 0, 0)
 
         self.assertEqual(self.sync_called, set([]))
-        self.compare_usage(self.usages, [
-                dict(resource='volumes',
-                     project_id='test_project',
-                     in_use=1,
-                     reserved=0,
-                     until_refresh=None),
-                dict(resource='gigabytes',
-                     project_id='test_project',
-                     in_use=1 * 1024,
-                     reserved=0,
-                     until_refresh=None),
-                ])
+        self.compare_usage(self.usages, [dict(resource='volumes',
+                                              project_id='test_project',
+                                              in_use=1,
+                                              reserved=0,
+                                              until_refresh=None),
+                                         dict(resource='gigabytes',
+                                              project_id='test_project',
+                                              in_use=1 * 1024,
+                                              reserved=0,
+                                              until_refresh=None), ])
         self.assertEqual(self.usages_created, {})
-        self.compare_reservation(result, [
-                dict(resource='volumes',
-                     usage_id=self.usages['volumes'],
-                     project_id='test_project',
-                     delta=-2),
-                dict(resource='gigabytes',
-                     usage_id=self.usages['gigabytes'],
-                     delta=-2 * 1024),
-                ])
+        self.compare_reservation(result,
+                                 [dict(resource='volumes',
+                                       usage_id=self.usages['volumes'],
+                                       project_id='test_project',
+                                       delta=-2),
+                                  dict(resource='gigabytes',
+                                       usage_id=self.usages['gigabytes'],
+                                       delta=-2 * 1024), ])
 
     def test_quota_reserve_overs(self):
         self.init_usage('test_project', 'volumes', 4, 0)
         self.init_usage('test_project', 'gigabytes', 10 * 1024, 0)
         context = FakeContext('test_project', 'test_class')
-        quotas = dict(
-            volumes=5,
-            gigabytes=10 * 1024,
-            )
-        deltas = dict(
-            volumes=2,
-            gigabytes=2 * 1024,
-            )
+        quotas = dict(volumes=5, gigabytes=10 * 1024, )
+        deltas = dict(volumes=2, gigabytes=2 * 1024, )
         self.assertRaises(exception.OverQuota,
                           sqa_api.quota_reserve,
                           context, self.resources, quotas,
                           deltas, self.expire, 0, 0)
 
         self.assertEqual(self.sync_called, set([]))
-        self.compare_usage(self.usages, [
-                dict(resource='volumes',
-                     project_id='test_project',
-                     in_use=4,
-                     reserved=0,
-                     until_refresh=None),
-                dict(resource='gigabytes',
-                     project_id='test_project',
-                     in_use=10 * 1024,
-                     reserved=0,
-                     until_refresh=None),
-                ])
+        self.compare_usage(self.usages, [dict(resource='volumes',
+                                              project_id='test_project',
+                                              in_use=4,
+                                              reserved=0,
+                                              until_refresh=None),
+                                         dict(resource='gigabytes',
+                                              project_id='test_project',
+                                              in_use=10 * 1024,
+                                              reserved=0,
+                                              until_refresh=None), ])
         self.assertEqual(self.usages_created, {})
         self.assertEqual(self.reservations_created, {})
 
@@ -1332,38 +1229,29 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
         self.init_usage('test_project', 'volumes', 10, 0)
         self.init_usage('test_project', 'gigabytes', 20 * 1024, 0)
         context = FakeContext('test_project', 'test_class')
-        quotas = dict(
-            volumes=5,
-            gigabytes=10 * 1024,
-            )
-        deltas = dict(
-            volumes=-2,
-            gigabytes=-2 * 1024,
-            )
+        quotas = dict(volumes=5, gigabytes=10 * 1024, )
+        deltas = dict(volumes=-2, gigabytes=-2 * 1024, )
         result = sqa_api.quota_reserve(context, self.resources, quotas,
                                        deltas, self.expire, 0, 0)
 
         self.assertEqual(self.sync_called, set([]))
-        self.compare_usage(self.usages, [
-                dict(resource='volumes',
-                     project_id='test_project',
-                     in_use=10,
-                     reserved=0,
-                     until_refresh=None),
-                dict(resource='gigabytes',
-                     project_id='test_project',
-                     in_use=20 * 1024,
-                     reserved=0,
-                     until_refresh=None),
-                ])
+        self.compare_usage(self.usages, [dict(resource='volumes',
+                                              project_id='test_project',
+                                              in_use=10,
+                                              reserved=0,
+                                              until_refresh=None),
+                                         dict(resource='gigabytes',
+                                              project_id='test_project',
+                                              in_use=20 * 1024,
+                                              reserved=0,
+                                              until_refresh=None), ])
         self.assertEqual(self.usages_created, {})
-        self.compare_reservation(result, [
-                dict(resource='volumes',
-                     usage_id=self.usages['volumes'],
-                     project_id='test_project',
-                     delta=-2),
-                dict(resource='gigabytes',
-                     usage_id=self.usages['gigabytes'],
-                     project_id='test_project',
-                     delta=-2 * 1024),
-                ])
+        self.compare_reservation(result,
+                                 [dict(resource='volumes',
+                                       usage_id=self.usages['volumes'],
+                                       project_id='test_project',
+                                       delta=-2),
+                                  dict(resource='gigabytes',
+                                       usage_id=self.usages['gigabytes'],
+                                       project_id='test_project',
+                                       delta=-2 * 1024), ])
index 5aa011a326db174f4e96bb5fb95defa60efc5fea..695838b4c9fa86ebce16946c7105c1331d5c00bf 100644 (file)
@@ -46,22 +46,18 @@ class RBDTestCase(test.TestCase):
         self.driver = RBDDriver(execute=fake_execute)
 
     def test_good_locations(self):
-        locations = [
-            'rbd://fsid/pool/image/snap',
-            'rbd://%2F/%2F/%2F/%2F',
-            ]
+        locations = ['rbd://fsid/pool/image/snap',
+                     'rbd://%2F/%2F/%2F/%2F', ]
         map(self.driver._parse_location, locations)
 
     def test_bad_locations(self):
-        locations = [
-            'rbd://image',
-            'http://path/to/somewhere/else',
-            'rbd://image/extra',
-            'rbd://image/',
-            'rbd://fsid/pool/image/',
-            'rbd://fsid/pool/image/snap/',
-            'rbd://///',
-            ]
+        locations = ['rbd://image',
+                     'http://path/to/somewhere/else',
+                     'rbd://image/extra',
+                     'rbd://image/',
+                     'rbd://fsid/pool/image/',
+                     'rbd://fsid/pool/image/snap/',
+                     'rbd://///', ]
         for loc in locations:
             self.assertRaises(exception.ImageUnacceptable,
                               self.driver._parse_location,
@@ -142,13 +138,14 @@ class ManagedRBDTestCase(DriverTestCase):
         image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
         volume_id = 1
         # creating volume testdata
-        db.volume_create(self.context, {'id': volume_id,
-                            'updated_at': timeutils.utcnow(),
-                            'display_description': 'Test Desc',
-                            'size': 20,
-                            'status': 'creating',
-                            'instance_uuid': None,
-                            'host': 'dummy'})
+        db.volume_create(self.context,
+                         {'id': volume_id,
+                          'updated_at': timeutils.utcnow(),
+                          'display_description': 'Test Desc',
+                          'size': 20,
+                          'status': 'creating',
+                          'instance_uuid': None,
+                          'host': 'dummy'})
         try:
             if clone_works:
                 self.volume.create_volume(self.context,
index 4bda2beee1b701288987fd6424332f79191bfab4..310863b069632943a99966967444d4abc8647dcf 100644 (file)
@@ -42,8 +42,7 @@ test_service_opts = [
                help="Host to bind test service to"),
     cfg.IntOpt("test_service_listen_port",
                default=0,
-               help="Port number to bind test service to"),
-    ]
+               help="Port number to bind test service to"), ]
 
 flags.FLAGS.register_opts(test_service_opts)
 
@@ -131,15 +130,15 @@ class ServiceTestCase(test.TestCase):
                           'report_count': 0,
                           'availability_zone': 'nova'}
         service_ref = {'host': host,
-                          'binary': binary,
-                          'topic': topic,
-                          'report_count': 0,
-                          'availability_zone': 'nova',
-                          'id': 1}
+                       'binary': binary,
+                       'topic': topic,
+                       'report_count': 0,
+                       'availability_zone': 'nova',
+                       'id': 1}
 
         service.db.service_get_by_args(mox.IgnoreArg(),
-                                      host,
-                                      binary).AndRaise(exception.NotFound())
+                                       host,
+                                       binary).AndRaise(exception.NotFound())
         service.db.service_create(mox.IgnoreArg(),
                                   service_create).AndReturn(service_ref)
         service.db.service_get(mox.IgnoreArg(),
@@ -164,15 +163,15 @@ class ServiceTestCase(test.TestCase):
                           'report_count': 0,
                           'availability_zone': 'nova'}
         service_ref = {'host': host,
-                          'binary': binary,
-                          'topic': topic,
-                          'report_count': 0,
-                          'availability_zone': 'nova',
-                          'id': 1}
+                       'binary': binary,
+                       'topic': topic,
+                       'report_count': 0,
+                       'availability_zone': 'nova',
+                       'id': 1}
 
         service.db.service_get_by_args(mox.IgnoreArg(),
-                                      host,
-                                      binary).AndRaise(exception.NotFound())
+                                       host,
+                                       binary).AndRaise(exception.NotFound())
         service.db.service_create(mox.IgnoreArg(),
                                   service_create).AndReturn(service_ref)
         service.db.service_get(mox.IgnoreArg(),
index 9824fa6e972e6224bd78df0f4ace436f6ccd99db..86184426093d2f7d29df8b9b080dc1af5a5fe4bc 100644 (file)
@@ -472,8 +472,9 @@ class StorwizeSVCManagementSimulator:
             rows.append(["IO_group_name", "io_grp0"])
             rows.append(["status", "online"])
             rows.append(["mdisk_grp_id", "0"])
-            rows.append(["mdisk_grp_name",
-                    self._flags["storwize_svc_volpool_name"]])
+            rows.append([
+                "mdisk_grp_name",
+                self._flags["storwize_svc_volpool_name"]])
             rows.append(["capacity", cap])
             rows.append(["type", "striped"])
             rows.append(["formatted", "no"])
@@ -900,14 +901,14 @@ class StorwizeSVCFakeDriver(storwize_svc.StorwizeSVCDriver):
             LOG.debug(_('Run CLI command: %s') % cmd)
             ret = self.fake_storage.execute_command(cmd, check_exit_code)
             (stdout, stderr) = ret
-            LOG.debug(_('CLI output:\n stdout: %(out)s\n stderr: %(err)s') %
-                        {'out': stdout, 'err': stderr})
+            LOG.debug(_('CLI output:\n stdout: %(out)s\n stderr: %(err)s') % {
+                'out': stdout, 'err': stderr})
 
         except exception.ProcessExecutionError as e:
             with excutils.save_and_reraise_exception():
                 LOG.debug(_('CLI Exception output:\n stdout: %(out)s\n '
                             'stderr: %(err)s') % {'out': e.stdout,
-                            'err': e.stderr})
+                                                  'err': e.stderr})
 
         return ret
 
@@ -964,25 +965,25 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         # Check for missing san_ip
         self.flags(san_ip=None)
         self.assertRaises(exception.InvalidInput,
-                self.driver._check_flags)
+                          self.driver._check_flags)
 
         if self.USESIM != 1:
             # Check for invalid ip
             self.flags(san_ip="-1.-1.-1.-1")
             self.assertRaises(socket.gaierror,
-                        self.driver.check_for_setup_error)
+                              self.driver.check_for_setup_error)
 
             # Check for unreachable IP
             self.flags(san_ip="1.1.1.1")
             self.assertRaises(socket.error,
-                        self.driver.check_for_setup_error)
+                              self.driver.check_for_setup_error)
 
     def test_storwize_svc_connectivity(self):
         # Make sure we detect if the pool doesn't exist
         no_exist_pool = "i-dont-exist-%s" % random.randint(10000, 99999)
         self.flags(storwize_svc_volpool_name=no_exist_pool)
         self.assertRaises(exception.InvalidInput,
-                self.driver.check_for_setup_error)
+                          self.driver.check_for_setup_error)
         FLAGS.reset()
 
         # Check the case where the user didn't configure IP addresses
@@ -990,56 +991,56 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         if self.USESIM == 1:
             self.sim.error_injection("lsnodecanister", "header_mismatch")
             self.assertRaises(exception.VolumeBackendAPIException,
-                    self.driver.check_for_setup_error)
+                              self.driver.check_for_setup_error)
             self.sim.error_injection("lsnodecanister", "remove_field")
             self.assertRaises(exception.VolumeBackendAPIException,
-                    self.driver.check_for_setup_error)
+                              self.driver.check_for_setup_error)
             self.sim.error_injection("lsportip", "ip_no_config")
             self.assertRaises(exception.VolumeBackendAPIException,
-                    self.driver.check_for_setup_error)
+                              self.driver.check_for_setup_error)
             self.sim.error_injection("lsportip", "header_mismatch")
             self.assertRaises(exception.VolumeBackendAPIException,
-                    self.driver.check_for_setup_error)
+                              self.driver.check_for_setup_error)
             self.sim.error_injection("lsportip", "remove_field")
             self.assertRaises(exception.VolumeBackendAPIException,
-                    self.driver.check_for_setup_error)
+                              self.driver.check_for_setup_error)
 
         # Check with bad parameters
         self.flags(san_password=None)
         self.flags(san_private_key=None)
         self.assertRaises(exception.InvalidInput,
-                self.driver._check_flags)
+                          self.driver._check_flags)
         FLAGS.reset()
 
         self.flags(storwize_svc_vol_rsize="invalid")
         self.assertRaises(exception.InvalidInput,
-                self.driver._check_flags)
+                          self.driver._check_flags)
         FLAGS.reset()
 
         self.flags(storwize_svc_vol_warning="invalid")
         self.assertRaises(exception.InvalidInput,
-                self.driver._check_flags)
+                          self.driver._check_flags)
         FLAGS.reset()
 
         self.flags(storwize_svc_vol_autoexpand="invalid")
         self.assertRaises(exception.InvalidInput,
-                self.driver._check_flags)
+                          self.driver._check_flags)
         FLAGS.reset()
 
         self.flags(storwize_svc_vol_grainsize=str(42))
         self.assertRaises(exception.InvalidInput,
-                self.driver._check_flags)
+                          self.driver._check_flags)
         FLAGS.reset()
 
         self.flags(storwize_svc_flashcopy_timeout=str(601))
         self.assertRaises(exception.InvalidInput,
-                self.driver._check_flags)
+                          self.driver._check_flags)
         FLAGS.reset()
 
         self.flags(storwize_svc_vol_compression=True)
         self.flags(storwize_svc_vol_rsize="-1")
         self.assertRaises(exception.InvalidInput,
-                self.driver._check_flags)
+                          self.driver._check_flags)
         FLAGS.reset()
 
         # Finally, check with good parameters
@@ -1059,7 +1060,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         # Test timeout and volume cleanup
         self.flags(storwize_svc_flashcopy_timeout=str(1))
         self.assertRaises(exception.InvalidSnapshot,
-                self.driver.create_snapshot, snapshot)
+                          self.driver.create_snapshot, snapshot)
         is_volume_defined = self.driver._is_volume_defined(snapshot["name"])
         self.assertEqual(is_volume_defined, False)
         FLAGS.reset()
@@ -1068,21 +1069,21 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         if self.USESIM == 1:
             self.sim.error_injection("lsfcmap", "bogus_prepare")
             self.assertRaises(exception.VolumeBackendAPIException,
-                self.driver.create_snapshot, snapshot)
+                              self.driver.create_snapshot, snapshot)
 
         # Test prestartfcmap, startfcmap, and rmfcmap failing
         if self.USESIM == 1:
             self.sim.error_injection("prestartfcmap", "bad_id")
             self.assertRaises(exception.ProcessExecutionError,
-                self.driver.create_snapshot, snapshot)
+                              self.driver.create_snapshot, snapshot)
             self.sim.error_injection("lsfcmap", "speed_up")
             self.sim.error_injection("startfcmap", "bad_id")
             self.assertRaises(exception.ProcessExecutionError,
-                self.driver.create_snapshot, snapshot)
+                              self.driver.create_snapshot, snapshot)
             self.sim.error_injection("prestartfcmap", "bad_id")
             self.sim.error_injection("rmfcmap", "bad_id")
             self.assertRaises(exception.ProcessExecutionError,
-                self.driver.create_snapshot, snapshot)
+                              self.driver.create_snapshot, snapshot)
 
         # Test successful snapshot
         self.driver.create_snapshot(snapshot)
@@ -1119,7 +1120,9 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         if self.USESIM == 1:
             self.sim.error_injection("prestartfcmap", "bad_id")
             self.assertRaises(exception.ProcessExecutionError,
-                self.driver.create_volume_from_snapshot, volume2, snapshot)
+                              self.driver.create_volume_from_snapshot,
+                              volume2,
+                              snapshot)
 
         # Succeed
         if self.USESIM == 1:
@@ -1141,7 +1144,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         self.driver.create_volume(volume3)
         snapshot["name"] = volume3["name"]
         self.assertRaises(exception.InvalidSnapshot,
-                self.driver.create_snapshot, snapshot)
+                          self.driver.create_snapshot,
+                          snapshot)
         self.driver._delete_volume(volume1, True)
         self.driver._delete_volume(volume3, True)
 
@@ -1150,7 +1154,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         snapshot["name"] = "snap_volume%s" % random.randint(10000, 99999)
         snapshot["volume_name"] = "no_exist"
         self.assertRaises(exception.VolumeNotFound,
-                self.driver.create_snapshot, snapshot)
+                          self.driver.create_snapshot,
+                          snapshot)
 
     def test_storwize_svc_volumes(self):
         # Create a first volume
@@ -1176,7 +1181,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
 
         # Try to create the volume again (should fail)
         self.assertRaises(exception.ProcessExecutionError,
-                self.driver.create_volume, volume)
+                          self.driver.create_volume,
+                          volume)
 
         # Try to delete a volume that doesn't exist (should not fail)
         vol_no_exist = {"name": "i_dont_exist"}
@@ -1270,7 +1276,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         if self.USESIM == 1:
             self.sim.error_injection("mkvdisk", "no_compression")
             self.assertRaises(exception.ProcessExecutionError,
-                    self._create_test_vol)
+                              self._create_test_vol)
         FLAGS.reset()
 
     def test_storwize_svc_unicode_host_and_volume_names(self):
@@ -1328,7 +1334,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
 
         # Try to delete the 1st volume (should fail because it is mapped)
         self.assertRaises(exception.ProcessExecutionError,
-                self.driver.delete_volume, volume1)
+                          self.driver.delete_volume,
+                          volume1)
 
         # Test no preferred node
         self.driver.terminate_connection(volume1, conn)
@@ -1346,7 +1353,9 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         # Try to remove connection from host that doesn't exist (should fail)
         conn_no_exist = {"initiator": "i_dont_exist"}
         self.assertRaises(exception.VolumeBackendAPIException,
-                self.driver.terminate_connection, volume1, conn_no_exist)
+                          self.driver.terminate_connection,
+                          volume1,
+                          conn_no_exist)
 
         # Try to remove connection from volume that isn't mapped (should print
         # message but NOT fail)
index 06e38a42b7c09d103e3479486a4a9ee7b47f2327..c8c79b2519ece583ad54fbc6f9c02d58a120118c 100644 (file)
@@ -20,7 +20,7 @@ from cinder.tests import utils as test_utils
 
 class TestUtilsTestCase(test.TestCase):
     def test_get_test_admin_context(self):
-        """get_test_admin_context's return value behaves like admin context"""
+        """get_test_admin_context's return value behaves like admin context."""
         ctxt = test_utils.get_test_admin_context()
 
         # TODO(soren): This should verify the full interface context
index 52c5ec7a34a85199c55896f7e71fd5a4a2323c29..e559d8e44f50232b61c34e261518b21f161b79fa 100644 (file)
@@ -350,8 +350,9 @@ class GenericUtilsTestCase(test.TestCase):
             self.assertEqual(reloaded_data, fake_contents)
             self.reload_called = True
 
-        data = utils.read_cached_file("/this/is/a/fake", cache_data,
-                                                reload_func=test_reload)
+        data = utils.read_cached_file("/this/is/a/fake",
+                                      cache_data,
+                                      reload_func=test_reload)
         self.assertEqual(data, fake_contents)
         self.assertTrue(self.reload_called)
 
@@ -445,7 +446,8 @@ class MonkeyPatchTestCase(test.TestCase):
         self.flags(
             monkey_patch=True,
             monkey_patch_modules=[self.example_package + 'example_a' + ':'
-            + self.example_package + 'example_decorator'])
+                                  + self.example_package
+                                  + 'example_decorator'])
 
     def test_monkey_patch(self):
         utils.monkey_patch()
@@ -467,19 +469,19 @@ class MonkeyPatchTestCase(test.TestCase):
         self.assertEqual(ret_b, 8)
         package_a = self.example_package + 'example_a.'
         self.assertTrue(package_a + 'example_function_a'
-            in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
+                        in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
 
         self.assertTrue(package_a + 'ExampleClassA.example_method'
-            in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
+                        in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
         self.assertTrue(package_a + 'ExampleClassA.example_method_add'
-            in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
+                        in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
         package_b = self.example_package + 'example_b.'
         self.assertFalse(package_b + 'example_function_b'
-            in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
+                         in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
         self.assertFalse(package_b + 'ExampleClassB.example_method'
-            in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
+                         in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
         self.assertFalse(package_b + 'ExampleClassB.example_method_add'
-            in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
+                         in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
 
 
 class AuditPeriodTest(test.TestCase):
@@ -501,149 +503,126 @@ class AuditPeriodTest(test.TestCase):
 
     def test_hour(self):
         begin, end = utils.last_completed_audit_period(unit='hour')
-        self.assertEquals(begin, datetime.datetime(
-                                           hour=7,
-                                           day=5,
-                                           month=3,
-                                           year=2012))
-        self.assertEquals(end, datetime.datetime(
-                                           hour=8,
-                                           day=5,
-                                           month=3,
-                                           year=2012))
+        self.assertEquals(begin,
+                          datetime.datetime(hour=7,
+                                            day=5,
+                                            month=3,
+                                            year=2012))
+        self.assertEquals(end, datetime.datetime(hour=8,
+                                                 day=5,
+                                                 month=3,
+                                                 year=2012))
 
     def test_hour_with_offset_before_current(self):
         begin, end = utils.last_completed_audit_period(unit='hour@10')
-        self.assertEquals(begin, datetime.datetime(
-                                           minute=10,
-                                           hour=7,
-                                           day=5,
-                                           month=3,
-                                           year=2012))
-        self.assertEquals(end, datetime.datetime(
-                                           minute=10,
-                                           hour=8,
-                                           day=5,
-                                           month=3,
-                                           year=2012))
+        self.assertEquals(begin, datetime.datetime(minute=10,
+                                                   hour=7,
+                                                   day=5,
+                                                   month=3,
+                                                   year=2012))
+        self.assertEquals(end, datetime.datetime(minute=10,
+                                                 hour=8,
+                                                 day=5,
+                                                 month=3,
+                                                 year=2012))
 
     def test_hour_with_offset_after_current(self):
         begin, end = utils.last_completed_audit_period(unit='hour@30')
-        self.assertEquals(begin, datetime.datetime(
-                                           minute=30,
-                                           hour=6,
-                                           day=5,
-                                           month=3,
-                                           year=2012))
-        self.assertEquals(end, datetime.datetime(
-                                           minute=30,
-                                           hour=7,
-                                           day=5,
-                                           month=3,
-                                           year=2012))
+        self.assertEquals(begin, datetime.datetime(minute=30,
+                                                   hour=6,
+                                                   day=5,
+                                                   month=3,
+                                                   year=2012))
+        self.assertEquals(end, datetime.datetime(minute=30,
+                                                 hour=7,
+                                                 day=5,
+                                                 month=3,
+                                                 year=2012))
 
     def test_day(self):
         begin, end = utils.last_completed_audit_period(unit='day')
-        self.assertEquals(begin, datetime.datetime(
-                                           day=4,
-                                           month=3,
-                                           year=2012))
-        self.assertEquals(end, datetime.datetime(
-                                           day=5,
-                                           month=3,
-                                           year=2012))
+        self.assertEquals(begin, datetime.datetime(day=4,
+                                                   month=3,
+                                                   year=2012))
+        self.assertEquals(end, datetime.datetime(day=5,
+                                                 month=3,
+                                                 year=2012))
 
     def test_day_with_offset_before_current(self):
         begin, end = utils.last_completed_audit_period(unit='day@6')
-        self.assertEquals(begin, datetime.datetime(
-                                           hour=6,
-                                           day=4,
-                                           month=3,
-                                           year=2012))
-        self.assertEquals(end, datetime.datetime(
-                                           hour=6,
-                                           day=5,
-                                           month=3,
-                                           year=2012))
+        self.assertEquals(begin, datetime.datetime(hour=6,
+                                                   day=4,
+                                                   month=3,
+                                                   year=2012))
+        self.assertEquals(end, datetime.datetime(hour=6,
+                                                 day=5,
+                                                 month=3,
+                                                 year=2012))
 
     def test_day_with_offset_after_current(self):
         begin, end = utils.last_completed_audit_period(unit='day@10')
-        self.assertEquals(begin, datetime.datetime(
-                                           hour=10,
-                                           day=3,
-                                           month=3,
-                                           year=2012))
-        self.assertEquals(end, datetime.datetime(
-                                           hour=10,
-                                           day=4,
-                                           month=3,
-                                           year=2012))
+        self.assertEquals(begin, datetime.datetime(hour=10,
+                                                   day=3,
+                                                   month=3,
+                                                   year=2012))
+        self.assertEquals(end, datetime.datetime(hour=10,
+                                                 day=4,
+                                                 month=3,
+                                                 year=2012))
 
     def test_month(self):
         begin, end = utils.last_completed_audit_period(unit='month')
-        self.assertEquals(begin, datetime.datetime(
-                                           day=1,
-                                           month=2,
-                                           year=2012))
-        self.assertEquals(end, datetime.datetime(
-                                           day=1,
-                                           month=3,
-                                           year=2012))
+        self.assertEquals(begin, datetime.datetime(day=1,
+                                                   month=2,
+                                                   year=2012))
+        self.assertEquals(end, datetime.datetime(day=1,
+                                                 month=3,
+                                                 year=2012))
 
     def test_month_with_offset_before_current(self):
         begin, end = utils.last_completed_audit_period(unit='month@2')
-        self.assertEquals(begin, datetime.datetime(
-                                           day=2,
-                                           month=2,
-                                           year=2012))
-        self.assertEquals(end, datetime.datetime(
-                                           day=2,
-                                           month=3,
-                                           year=2012))
+        self.assertEquals(begin, datetime.datetime(day=2,
+                                                   month=2,
+                                                   year=2012))
+        self.assertEquals(end, datetime.datetime(day=2,
+                                                 month=3,
+                                                 year=2012))
 
     def test_month_with_offset_after_current(self):
         begin, end = utils.last_completed_audit_period(unit='month@15')
-        self.assertEquals(begin, datetime.datetime(
-                                           day=15,
-                                           month=1,
-                                           year=2012))
-        self.assertEquals(end, datetime.datetime(
-                                           day=15,
-                                           month=2,
-                                           year=2012))
+        self.assertEquals(begin, datetime.datetime(day=15,
+                                                   month=1,
+                                                   year=2012))
+        self.assertEquals(end, datetime.datetime(day=15,
+                                                 month=2,
+                                                 year=2012))
 
     def test_year(self):
         begin, end = utils.last_completed_audit_period(unit='year')
-        self.assertEquals(begin, datetime.datetime(
-                                           day=1,
-                                           month=1,
-                                           year=2011))
-        self.assertEquals(end, datetime.datetime(
-                                           day=1,
-                                           month=1,
-                                           year=2012))
+        self.assertEquals(begin, datetime.datetime(day=1,
+                                                   month=1,
+                                                   year=2011))
+        self.assertEquals(end, datetime.datetime(day=1,
+                                                 month=1,
+                                                 year=2012))
 
     def test_year_with_offset_before_current(self):
         begin, end = utils.last_completed_audit_period(unit='year@2')
-        self.assertEquals(begin, datetime.datetime(
-                                           day=1,
-                                           month=2,
-                                           year=2011))
-        self.assertEquals(end, datetime.datetime(
-                                           day=1,
-                                           month=2,
-                                           year=2012))
+        self.assertEquals(begin, datetime.datetime(day=1,
+                                                   month=2,
+                                                   year=2011))
+        self.assertEquals(end, datetime.datetime(day=1,
+                                                 month=2,
+                                                 year=2012))
 
     def test_year_with_offset_after_current(self):
         begin, end = utils.last_completed_audit_period(unit='year@6')
-        self.assertEquals(begin, datetime.datetime(
-                                           day=1,
-                                           month=6,
-                                           year=2010))
-        self.assertEquals(end, datetime.datetime(
-                                           day=1,
-                                           month=6,
-                                           year=2011))
+        self.assertEquals(begin, datetime.datetime(day=1,
+                                                   month=6,
+                                                   year=2010))
+        self.assertEquals(end, datetime.datetime(day=1,
+                                                 month=6,
+                                                 year=2011))
 
 
 class FakeSSHClient(object):
index c7a88c5a3e6c58d4aadcd0af8a3d4a38d334139e..16aca269f441d2605496cf25f760bf81dae2f71e 100644 (file)
@@ -20,40 +20,40 @@ from cinder import version
 
 
 class VersionTestCase(test.TestCase):
-    """Test cases for Versions code"""
+    """Test cases for Versions code."""
     def setUp(self):
-        """setup test with unchanging values"""
+        """Setup test with unchanging values."""
         super(VersionTestCase, self).setUp()
         self.version = version
         self.version.FINAL = False
         self.version.CINDER_VERSION = ['2012', '10']
         self.version.YEAR, self.version.COUNT = self.version.CINDER_VERSION
         self.version.version_info = {'branch_nick': u'LOCALBRANCH',
-                                    'revision_id': 'LOCALREVISION',
-                                    'revno': 0}
+                                     'revision_id': 'LOCALREVISION',
+                                     'revno': 0}
 
     def test_version_string_is_good(self):
-        """Ensure version string works"""
+        """Ensure version string works."""
         self.assertEqual("2012.10-dev", self.version.version_string())
 
     def test_canonical_version_string_is_good(self):
-        """Ensure canonical version works"""
+        """Ensure canonical version works."""
         self.assertEqual("2012.10", self.version.canonical_version_string())
 
     def test_final_version_strings_are_identical(self):
-        """Ensure final version strings match only at release"""
+        """Ensure final version strings match only at release."""
         self.assertNotEqual(self.version.canonical_version_string(),
-                        self.version.version_string())
+                            self.version.version_string())
         self.version.FINAL = True
         self.assertEqual(self.version.canonical_version_string(),
-                        self.version.version_string())
+                         self.version.version_string())
 
     def test_vcs_version_string_is_good(self):
-        """Ensure uninstalled code generates local """
+        """Ensure uninstalled code generates local."""
         self.assertEqual("LOCALBRANCH:LOCALREVISION",
-                        self.version.vcs_version_string())
+                         self.version.vcs_version_string())
 
     def test_version_string_with_vcs_is_good(self):
-        """Ensure uninstalled code get version string"""
+        """Ensure uninstalled code get version string."""
         self.assertEqual("2012.10-LOCALBRANCH:LOCALREVISION",
-                        self.version.version_string_with_vcs())
+                         self.version.version_string_with_vcs())
index c5188d8e28bf620c7805efc05a868fae30ac268e..513118aae6272465fb93bdcb6b0aac461419d785 100644 (file)
@@ -201,8 +201,8 @@ class VolumeTestCase(test.TestCase):
         self.volume.create_volume(self.context, volume_id)
 
         self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
-        self.volume.driver.delete_volume(mox.IgnoreArg()) \
-                                              .AndRaise(exception.VolumeIsBusy)
+        self.volume.driver.delete_volume(
+            mox.IgnoreArg()).AndRaise(exception.VolumeIsBusy)
         self.mox.ReplayAll()
         res = self.volume.delete_volume(self.context, volume_id)
         self.assertEqual(True, res)
@@ -226,9 +226,9 @@ class VolumeTestCase(test.TestCase):
                          db.volume_get(
                              context.get_admin_context(),
                              volume_dst['id']).id)
-        self.assertEqual(snapshot_id, db.volume_get(
-                context.get_admin_context(),
-                volume_dst['id']).snapshot_id)
+        self.assertEqual(snapshot_id,
+                         db.volume_get(context.get_admin_context(),
+                                       volume_dst['id']).snapshot_id)
 
         self.volume.delete_volume(self.context, volume_dst['id'])
         self.volume.delete_snapshot(self.context, snapshot_id)
@@ -454,8 +454,8 @@ class VolumeTestCase(test.TestCase):
         self.volume.create_snapshot(self.context, volume_id, snapshot_id)
 
         self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
-        self.volume.driver.delete_snapshot(mox.IgnoreArg()) \
-                                            .AndRaise(exception.SnapshotIsBusy)
+        self.volume.driver.delete_snapshot(
+            mox.IgnoreArg()).AndRaise(exception.SnapshotIsBusy)
         self.mox.ReplayAll()
         self.volume.delete_snapshot(self.context, snapshot_id)
         snapshot_ref = db.snapshot_get(self.context, snapshot_id)
@@ -486,13 +486,14 @@ class VolumeTestCase(test.TestCase):
         image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
         volume_id = 1
         # creating volume testdata
-        db.volume_create(self.context, {'id': volume_id,
-                            'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
-                            'display_description': 'Test Desc',
-                            'size': 20,
-                            'status': 'creating',
-                            'instance_uuid': None,
-                            'host': 'dummy'})
+        db.volume_create(self.context,
+                         {'id': volume_id,
+                          'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+                          'display_description': 'Test Desc',
+                          'size': 20,
+                          'status': 'creating',
+                          'instance_uuid': None,
+                          'host': 'dummy'})
         try:
             self.volume.create_volume(self.context,
                                       volume_id,
@@ -526,12 +527,13 @@ class VolumeTestCase(test.TestCase):
         image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
         # creating volume testdata
         volume_id = 1
-        db.volume_create(self.context, {'id': volume_id,
-                             'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
-                             'display_description': 'Test Desc',
-                             'size': 20,
-                             'status': 'creating',
-                             'host': 'dummy'})
+        db.volume_create(self.context,
+                         {'id': volume_id,
+                          'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+                          'display_description': 'Test Desc',
+                          'size': 20,
+                          'status': 'creating',
+                          'host': 'dummy'})
 
         self.assertRaises(exception.ImageNotFound,
                           self.volume.create_volume,
@@ -557,19 +559,20 @@ class VolumeTestCase(test.TestCase):
         image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
         # creating volume testdata
         volume_id = 1
-        db.volume_create(self.context, {'id': volume_id,
-                             'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
-                             'display_description': 'Test Desc',
-                             'size': 20,
-                             'status': 'uploading',
-                             'instance_uuid': None,
-                             'host': 'dummy'})
+        db.volume_create(self.context,
+                         {'id': volume_id,
+                          'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+                          'display_description': 'Test Desc',
+                          'size': 20,
+                          'status': 'uploading',
+                          'instance_uuid': None,
+                          'host': 'dummy'})
 
         try:
             # start test
             self.volume.copy_volume_to_image(self.context,
-                                                volume_id,
-                                                image_id)
+                                             volume_id,
+                                             image_id)
 
             volume = db.volume_get(self.context, volume_id)
             self.assertEqual(volume['status'], 'available')
@@ -591,21 +594,21 @@ class VolumeTestCase(test.TestCase):
         image_id = 'a440c04b-79fa-479c-bed1-0b816eaec379'
         # creating volume testdata
         volume_id = 1
-        db.volume_create(self.context,
-                         {'id': volume_id,
-                         'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
-                         'display_description': 'Test Desc',
-                         'size': 20,
-                         'status': 'uploading',
-                         'instance_uuid':
-                            'b21f957d-a72f-4b93-b5a5-45b1161abb02',
-                         'host': 'dummy'})
+        db.volume_create(
+            self.context,
+            {'id': volume_id,
+             'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+             'display_description': 'Test Desc',
+             'size': 20,
+             'status': 'uploading',
+             'instance_uuid': 'b21f957d-a72f-4b93-b5a5-45b1161abb02',
+             'host': 'dummy'})
 
         try:
             # start test
             self.volume.copy_volume_to_image(self.context,
-                                                volume_id,
-                                                image_id)
+                                             volume_id,
+                                             image_id)
 
             volume = db.volume_get(self.context, volume_id)
             self.assertEqual(volume['status'], 'in-use')
@@ -626,12 +629,13 @@ class VolumeTestCase(test.TestCase):
         image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
         # creating volume testdata
         volume_id = 1
-        db.volume_create(self.context, {'id': volume_id,
-                             'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
-                             'display_description': 'Test Desc',
-                             'size': 20,
-                             'status': 'in-use',
-                             'host': 'dummy'})
+        db.volume_create(self.context,
+                         {'id': volume_id,
+                          'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+                          'display_description': 'Test Desc',
+                          'size': 20,
+                          'status': 'in-use',
+                          'host': 'dummy'})
 
         try:
             # start test
@@ -663,7 +667,7 @@ class VolumeTestCase(test.TestCase):
         try:
             volume_id = None
             volume_api = cinder.volume.api.API(
-                                            image_service=_FakeImageService())
+                image_service=_FakeImageService())
             volume = volume_api.create(self.context, 2, 'name', 'description',
                                        image_id=1)
             volume_id = volume['id']
index 773a90f18535859c59fb1072178ed1b34b797d2a..f1a36d2ac0017191df3ac1b2731dad01d31c8891 100644 (file)
@@ -50,12 +50,13 @@ class VolumeGlanceMetadataTestCase(test.TestCase):
                                                         'value1')
         vol_metadata = db.volume_glance_metadata_create(ctxt, 2, 'key1',
                                                         'value1')
-        vol_metadata = db.volume_glance_metadata_create(ctxt, 2, 'key2',
+        vol_metadata = db.volume_glance_metadata_create(ctxt, 2,
+                                                        'key2',
                                                         'value2')
 
         expected_metadata_1 = {'volume_id': '1',
-                              'key': 'key1',
-                              'value': 'value1'}
+                               'key': 'key1',
+                               'value': 'value1'}
 
         metadata = db.volume_glance_metadata_get(ctxt, 1)
         self.assertEqual(len(metadata), 1)
@@ -106,8 +107,8 @@ class VolumeGlanceMetadataTestCase(test.TestCase):
         db.volume_glance_metadata_copy_to_snapshot(ctxt, 100, 1)
 
         expected_meta = {'snapshot_id': '100',
-                          'key': 'key1',
-                          'value': 'value1'}
+                         'key': 'key1',
+                         'value': 'value1'}
 
         for meta in db.volume_snapshot_glance_metadata_get(ctxt, 100):
             for (key, value) in expected_meta.items():
index 4cd571f6e1ef84045aeb5d572d57fa16bdb91c6d..9126757e6f55acfbc87d023e6fcfa669d5f6ec0b 100644 (file)
@@ -109,56 +109,56 @@ class VolumeRpcAPITestCase(test.TestCase):
 
     def test_create_volume(self):
         self._test_volume_api('create_volume',
-                            rpc_method='cast',
-                            volume=self.fake_volume,
-                            host='fake_host1',
-                            snapshot_id='fake_snapshot_id',
-                            image_id='fake_image_id')
+                              rpc_method='cast',
+                              volume=self.fake_volume,
+                              host='fake_host1',
+                              snapshot_id='fake_snapshot_id',
+                              image_id='fake_image_id')
 
     def test_delete_volume(self):
         self._test_volume_api('delete_volume',
-                            rpc_method='cast',
-                            volume=self.fake_volume)
+                              rpc_method='cast',
+                              volume=self.fake_volume)
 
     def test_create_snapshot(self):
         self._test_volume_api('create_snapshot',
-                            rpc_method='cast',
-                            volume=self.fake_volume,
-                            snapshot=self.fake_snapshot)
+                              rpc_method='cast',
+                              volume=self.fake_volume,
+                              snapshot=self.fake_snapshot)
 
     def test_delete_snapshot(self):
         self._test_volume_api('delete_snapshot',
-                            rpc_method='cast',
-                            snapshot=self.fake_snapshot,
-                            host='fake_host')
+                              rpc_method='cast',
+                              snapshot=self.fake_snapshot,
+                              host='fake_host')
 
     def test_attach_volume(self):
         self._test_volume_api('attach_volume',
-                            rpc_method='call',
-                            volume=self.fake_volume,
-                            instance_uuid='fake_uuid',
-                            mountpoint='fake_mountpoint')
+                              rpc_method='call',
+                              volume=self.fake_volume,
+                              instance_uuid='fake_uuid',
+                              mountpoint='fake_mountpoint')
 
     def test_detach_volume(self):
         self._test_volume_api('detach_volume',
-                            rpc_method='call',
-                            volume=self.fake_volume)
+                              rpc_method='call',
+                              volume=self.fake_volume)
 
     def test_copy_volume_to_image(self):
         self._test_volume_api('copy_volume_to_image',
-                            rpc_method='cast',
-                            volume=self.fake_volume,
-                            image_id='fake_image_id')
+                              rpc_method='cast',
+                              volume=self.fake_volume,
+                              image_id='fake_image_id')
 
     def test_initialize_connection(self):
         self._test_volume_api('initialize_connection',
-                            rpc_method='call',
-                            volume=self.fake_volume,
-                            connector='fake_connector')
+                              rpc_method='call',
+                              volume=self.fake_volume,
+                              connector='fake_connector')
 
     def test_terminate_connection(self):
         self._test_volume_api('terminate_connection',
-                            rpc_method='call',
-                            volume=self.fake_volume,
-                            connector='fake_connector',
-                            force=False)
+                              rpc_method='call',
+                              volume=self.fake_volume,
+                              connector='fake_connector',
+                              force=False)
index db61bf925ac0cc1f4a5805c8fb7e60cf5de94eb7..3395cc447dd619c5274cfe1f37e5d6ea7ece3335 100644 (file)
@@ -33,21 +33,20 @@ LOG = logging.getLogger(__name__)
 
 
 class VolumeTypeTestCase(test.TestCase):
-    """Test cases for volume type code"""
+    """Test cases for volume type code."""
     def setUp(self):
         super(VolumeTypeTestCase, self).setUp()
 
         self.ctxt = context.get_admin_context()
         self.vol_type1_name = str(int(time.time()))
-        self.vol_type1_specs = dict(
-                    type="physical drive",
-                    drive_type="SAS",
-                    size="300",
-                    rpm="7200",
-                    visible="True")
+        self.vol_type1_specs = dict(type="physical drive",
+                                    drive_type="SAS",
+                                    size="300",
+                                    rpm="7200",
+                                    visible="True")
 
     def test_volume_type_create_then_destroy(self):
-        """Ensure volume types can be created and deleted"""
+        """Ensure volume types can be created and deleted."""
         prev_all_vtypes = volume_types.get_all_types(self.ctxt)
 
         volume_types.create(self.ctxt,
@@ -75,14 +74,14 @@ class VolumeTypeTestCase(test.TestCase):
                          'drive type was not deleted')
 
     def test_get_all_volume_types(self):
-        """Ensures that all volume types can be retrieved"""
+        """Ensures that all volume types can be retrieved."""
         session = sql_session.get_session()
         total_volume_types = session.query(models.VolumeTypes).count()
         vol_types = volume_types.get_all_types(self.ctxt)
         self.assertEqual(total_volume_types, len(vol_types))
 
     def test_get_default_volume_type(self):
-        """ Ensures default volume type can be retrieved """
+        """Ensures default volume type can be retrieved."""
         volume_types.create(self.ctxt,
                             fake_flags.def_vol_type,
                             {})
@@ -91,26 +90,26 @@ class VolumeTypeTestCase(test.TestCase):
                          fake_flags.def_vol_type)
 
     def test_default_volume_type_missing_in_db(self):
-        """ Ensures proper exception raised if default volume type
-        is not in database. """
+        """Ensures proper exception raised if default volume type
+        is not in database."""
         session = sql_session.get_session()
         default_vol_type = volume_types.get_default_volume_type()
         self.assertEqual(default_vol_type, {})
 
     def test_non_existent_vol_type_shouldnt_delete(self):
-        """Ensures that volume type creation fails with invalid args"""
+        """Ensures that volume type creation fails with invalid args."""
         self.assertRaises(exception.VolumeTypeNotFoundByName,
                           volume_types.destroy, self.ctxt, "sfsfsdfdfs")
 
     def test_repeated_vol_types_shouldnt_raise(self):
-        """Ensures that volume duplicates don't raise"""
+        """Ensures that volume duplicates don't raise."""
         new_name = self.vol_type1_name + "dup"
         volume_types.create(self.ctxt, new_name)
         volume_types.destroy(self.ctxt, new_name)
         volume_types.create(self.ctxt, new_name)
 
     def test_invalid_volume_types_params(self):
-        """Ensures that volume type creation fails with invalid args"""
+        """Ensures that volume type creation fails with invalid args."""
         self.assertRaises(exception.InvalidVolumeType,
                           volume_types.destroy, self.ctxt, None)
         self.assertRaises(exception.InvalidVolumeType,
@@ -120,7 +119,7 @@ class VolumeTypeTestCase(test.TestCase):
                           self.ctxt, None)
 
     def test_volume_type_get_by_id_and_name(self):
-        """Ensure volume types get returns same entry"""
+        """Ensure volume types get returns same entry."""
         volume_types.create(self.ctxt,
                             self.vol_type1_name,
                             self.vol_type1_specs)
@@ -131,7 +130,7 @@ class VolumeTypeTestCase(test.TestCase):
         self.assertEqual(new, new2)
 
     def test_volume_type_search_by_extra_spec(self):
-        """Ensure volume types get by extra spec returns correct type"""
+        """Ensure volume types get by extra spec returns correct type."""
         volume_types.create(self.ctxt, "type1", {"key1": "val1",
                                                  "key2": "val2"})
         volume_types.create(self.ctxt, "type2", {"key2": "val2",
@@ -139,29 +138,32 @@ class VolumeTypeTestCase(test.TestCase):
         volume_types.create(self.ctxt, "type3", {"key3": "another_value",
                                                  "key4": "val4"})
 
-        vol_types = volume_types.get_all_types(self.ctxt,
-                        search_opts={'extra_specs': {"key1": "val1"}})
+        vol_types = volume_types.get_all_types(
+            self.ctxt,
+            search_opts={'extra_specs': {"key1": "val1"}})
         LOG.info("vol_types: %s" % vol_types)
         self.assertEqual(len(vol_types), 1)
         self.assertTrue("type1" in vol_types.keys())
         self.assertEqual(vol_types['type1']['extra_specs'],
                          {"key1": "val1", "key2": "val2"})
 
-        vol_types = volume_types.get_all_types(self.ctxt,
-                        search_opts={'extra_specs': {"key2": "val2"}})
+        vol_types = volume_types.get_all_types(
+            self.ctxt,
+            search_opts={'extra_specs': {"key2": "val2"}})
         LOG.info("vol_types: %s" % vol_types)
         self.assertEqual(len(vol_types), 2)
         self.assertTrue("type1" in vol_types.keys())
         self.assertTrue("type2" in vol_types.keys())
 
-        vol_types = volume_types.get_all_types(self.ctxt,
-                        search_opts={'extra_specs': {"key3": "val3"}})
+        vol_types = volume_types.get_all_types(
+            self.ctxt,
+            search_opts={'extra_specs': {"key3": "val3"}})
         LOG.info("vol_types: %s" % vol_types)
         self.assertEqual(len(vol_types), 1)
         self.assertTrue("type2" in vol_types.keys())
 
     def test_volume_type_search_by_extra_spec_multiple(self):
-        """Ensure volume types get by extra spec returns correct type"""
+        """Ensure volume types get by extra spec returns correct type."""
         volume_types.create(self.ctxt, "type1", {"key1": "val1",
                                                  "key2": "val2",
                                                  "key3": "val3"})
@@ -171,9 +173,10 @@ class VolumeTypeTestCase(test.TestCase):
                                                  "key3": "val3",
                                                  "key4": "val4"})
 
-        vol_types = volume_types.get_all_types(self.ctxt,
-                        search_opts={'extra_specs': {"key1": "val1",
-                                                     "key3": "val3"}})
+        vol_types = volume_types.get_all_types(
+            self.ctxt,
+            search_opts={'extra_specs': {"key1": "val1",
+                                         "key3": "val3"}})
         LOG.info("vol_types: %s" % vol_types)
         self.assertEqual(len(vol_types), 2)
         self.assertTrue("type1" in vol_types.keys())
index e7241086fbb47458ea0e94d5ae6e7eb5f9f3976e..af0a332f3ffe50eb8ebfb69cf3772922b1c7c7e4 100644 (file)
@@ -30,8 +30,8 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase):
         self.context = context.get_admin_context()
         self.vol_type1 = dict(name="TEST: Regular volume test")
         self.vol_type1_specs = dict(vol_extra1="value1",
-                                  vol_extra2="value2",
-                                  vol_extra3=3)
+                                    vol_extra2="value2",
+                                    vol_extra3=3)
         self.vol_type1['extra_specs'] = self.vol_type1_specs
         ref = db.volume_type_create(self.context, self.vol_type1)
         self.volume_type1_id = ref.id
@@ -53,31 +53,31 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase):
     def test_volume_type_specs_get(self):
         expected_specs = self.vol_type1_specs.copy()
         actual_specs = db.volume_type_extra_specs_get(
-                              context.get_admin_context(),
-                              self.volume_type1_id)
+            context.get_admin_context(),
+            self.volume_type1_id)
         self.assertEquals(expected_specs, actual_specs)
 
     def test_volume_type_extra_specs_delete(self):
         expected_specs = self.vol_type1_specs.copy()
         del expected_specs['vol_extra2']
         db.volume_type_extra_specs_delete(context.get_admin_context(),
-                                      self.volume_type1_id,
-                                      'vol_extra2')
+                                          self.volume_type1_id,
+                                          'vol_extra2')
         actual_specs = db.volume_type_extra_specs_get(
-                              context.get_admin_context(),
-                              self.volume_type1_id)
+            context.get_admin_context(),
+            self.volume_type1_id)
         self.assertEquals(expected_specs, actual_specs)
 
     def test_volume_type_extra_specs_update(self):
         expected_specs = self.vol_type1_specs.copy()
         expected_specs['vol_extra3'] = "4"
         db.volume_type_extra_specs_update_or_create(
-                              context.get_admin_context(),
-                              self.volume_type1_id,
-                              dict(vol_extra3=4))
+            context.get_admin_context(),
+            self.volume_type1_id,
+            dict(vol_extra3=4))
         actual_specs = db.volume_type_extra_specs_get(
-                              context.get_admin_context(),
-                              self.volume_type1_id)
+            context.get_admin_context(),
+            self.volume_type1_id)
         self.assertEquals(expected_specs, actual_specs)
 
     def test_volume_type_extra_specs_create(self):
@@ -85,37 +85,37 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase):
         expected_specs['vol_extra4'] = 'value4'
         expected_specs['vol_extra5'] = 'value5'
         db.volume_type_extra_specs_update_or_create(
-                              context.get_admin_context(),
-                              self.volume_type1_id,
-                              dict(vol_extra4="value4",
-                                   vol_extra5="value5"))
+            context.get_admin_context(),
+            self.volume_type1_id,
+            dict(vol_extra4="value4",
+                 vol_extra5="value5"))
         actual_specs = db.volume_type_extra_specs_get(
-                              context.get_admin_context(),
-                              self.volume_type1_id)
+            context.get_admin_context(),
+            self.volume_type1_id)
         self.assertEquals(expected_specs, actual_specs)
 
     def test_volume_type_get_with_extra_specs(self):
         volume_type = db.volume_type_get(
-                            context.get_admin_context(),
-                            self.volume_type1_id)
+            context.get_admin_context(),
+            self.volume_type1_id)
         self.assertEquals(volume_type['extra_specs'],
                           self.vol_type1_specs)
 
         volume_type = db.volume_type_get(
-                            context.get_admin_context(),
-                            self.vol_type2_id)
+            context.get_admin_context(),
+            self.vol_type2_id)
         self.assertEquals(volume_type['extra_specs'], {})
 
     def test_volume_type_get_by_name_with_extra_specs(self):
         volume_type = db.volume_type_get_by_name(
-                            context.get_admin_context(),
-                            self.vol_type1['name'])
+            context.get_admin_context(),
+            self.vol_type1['name'])
         self.assertEquals(volume_type['extra_specs'],
                           self.vol_type1_specs)
 
         volume_type = db.volume_type_get_by_name(
-                            context.get_admin_context(),
-                            self.vol_type2_noextra['name'])
+            context.get_admin_context(),
+            self.vol_type2_noextra['name'])
         self.assertEquals(volume_type['extra_specs'], {})
 
     def test_volume_type_get_all(self):
index 9691a5dcc0edee45c28127ba19866b98f94a1733..30242f2a43ae60f8ad4c4cde7551f336af38cf72 100644 (file)
@@ -52,7 +52,7 @@ class UsageInfoTestCase(test.TestCase):
         super(UsageInfoTestCase, self).tearDown()
 
     def _create_volume(self, params={}):
-        """Create a test volume"""
+        """Create a test volume."""
         vol = {}
         vol['snapshot_id'] = self.snapshot_id
         vol['user_id'] = self.user_id
index 97062d3456e5d6aa096ff993bf25f8ddab36ef20..430eb40f4b6512495bc9e98081d330e5e92033b2 100644 (file)
@@ -74,32 +74,34 @@ class TestWindowsDriver(basetestcase.BaseTestCase):
 
     def tearDown(self):
         try:
-            if self._volume_data_2 and \
-            self._wutils.volume_exists(
-                    self._volume_data_2['name']):
+            if (self._volume_data_2 and
+                    self._wutils.volume_exists(self._volume_data_2['name'])):
                 self._wutils.delete_volume(self._volume_data_2['name'])
-            if self._volume_data and \
-            self._wutils.volume_exists(
-                    self._volume_data['name']):
+
+            if (self._volume_data and
+                    self._wutils.volume_exists(
+                        self._volume_data['name'])):
                 self._wutils.delete_volume(self._volume_data['name'])
-            if self._snapshot_data and \
-            self._wutils.snapshot_exists(
-                    self._snapshot_data['name']):
+            if (self._snapshot_data and
+                    self._wutils.snapshot_exists(
+                        self._snapshot_data['name'])):
                 self._wutils.delete_snapshot(self._snapshot_data['name'])
-            if self._connector_data and \
-            self._wutils.initiator_id_exists(
-                    "%s%s" % (FLAGS.iscsi_target_prefix,
-                              self._volume_data['name']),
-                              self._connector_data['initiator']):
+            if (self._connector_data and
+                    self._wutils.initiator_id_exists(
+                        "%s%s" % (FLAGS.iscsi_target_prefix,
+                                  self._volume_data['name']),
+                        self._connector_data['initiator'])):
                 target_name = "%s%s" % (FLAGS.iscsi_target_prefix,
                                         self._volume_data['name'])
                 initiator_name = self._connector_data['initiator']
                 self._wutils.delete_initiator_id(target_name, initiator_name)
-            if self._volume_data and \
-            self._wutils.export_exists("%s%s" % (FLAGS.iscsi_target_prefix,
-                                                 self._volume_data['name'])):
-                self._wutils.delete_export("%s%s" % (FLAGS.iscsi_target_prefix,
-                                                   self._volume_data['name']))
+            if (self._volume_data and
+                    self._wutils.export_exists("%s%s" %
+                                               (FLAGS.iscsi_target_prefix,
+                                                self._volume_data['name']))):
+                self._wutils.delete_export(
+                    "%s%s" % (FLAGS.iscsi_target_prefix,
+                              self._volume_data['name']))
 
         finally:
             super(TestWindowsDriver, self).tearDown()
@@ -178,9 +180,10 @@ class TestWindowsDriver(basetestcase.BaseTestCase):
         retval = self._drv.create_export({}, self._volume_data)
 
         volume_name = self._volume_data['name']
-        self.assertEquals(retval,
-            {'provider_location':
-                "%s%s" % (FLAGS.iscsi_target_prefix, volume_name)})
+        self.assertEquals(
+            retval,
+            {'provider_location': "%s%s" % (FLAGS.iscsi_target_prefix,
+                                            volume_name)})
 
     def test_initialize_connection(self):
         #Create a volume
index d2a4ab48020cec54b31e61da221b95db1bf07e0e..b4cb11e122c03030ca45162e40237e5076620d28 100644 (file)
@@ -83,9 +83,7 @@ class DriverTestCase(unittest.TestCase):
             size=1, display_name='name', display_description='desc'))
         mock.VerifyAll()
 
-        self.assertEquals(dict(
-                provider_location='sr_uuid/vdi_uuid'
-            ), result)
+        self.assertEquals(dict(provider_location='sr_uuid/vdi_uuid'), result)
 
     def test_delete_volume(self):
         mock = mox.Mox()
index 120dcd227ad476840feed9f859b58f75c9a5aaf3..6484253034ed10a3c96065a462ad1e21610ace27 100644 (file)
@@ -29,15 +29,11 @@ from cinder.volume.drivers import xiv
 FLAGS = flags.FLAGS
 
 FAKE = "fake"
-VOLUME = {
-        'size': 16,
-        'name': FAKE,
-        'id': 1
-        }
+VOLUME = {'size': 16,
+          'name': FAKE,
+          'id': 1}
 
-CONNECTOR = {
-        'initiator': "iqn.2012-07.org.fake:01:948f189c4695",
-        }
+CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", }
 
 
 class XIVFakeProxyDriver(object):
@@ -82,21 +78,18 @@ class XIVFakeProxyDriver(object):
 
         self.volumes[volume['name']]['attached'] = connector
 
-        return {
-                'driver_volume_type': 'iscsi',
-                'data': {
-                    'target_discovered': True,
-                    'target_portal': self.xiv_portal,
-                    'target_iqn': self.xiv_iqn,
-                    'target_lun': lun_id,
-                    'volume_id': volume['id'],
-                    'multipath': True,
-                    # part of a patch to nova-compute to enable iscsi multipath
-                    'provider_location': "%s,1 %s %s" % (
-                        self.xiv_portal,
-                        self.xiv_iqn,
-                        lun_id),
-                    },
+        return {'driver_volume_type': 'iscsi',
+                'data': {'target_discovered': True,
+                         'target_portal': self.xiv_portal,
+                         'target_iqn': self.xiv_iqn,
+                         'target_lun': lun_id,
+                         'volume_id': volume['id'],
+                         'multipath': True,
+                         'provider_location': "%s,1 %s %s" % (
+                             self.xiv_portal,
+                             self.xiv_iqn,
+                             lun_id), },
                 }
 
     def terminate_connection(self, volume, connector):
@@ -110,8 +103,8 @@ class XIVFakeProxyDriver(object):
         if not self.volume_exists(volume):
             raise self.exception.VolumeNotFound()
 
-        return self.volumes[volume['name']].get('attached', None) \
-                == connector
+        return (self.volumes[volume['name']].get('attached', None)
+                == connector)
 
 
 class XIVVolumeDriverTest(test.TestCase):
@@ -126,18 +119,14 @@ class XIVVolumeDriverTest(test.TestCase):
     def test_initialized_should_set_xiv_info(self):
         """Test that the san flags are passed to the XIV proxy."""
 
-        self.assertEquals(
-                self.driver.xiv_proxy.xiv_info['xiv_user'],
-                FLAGS.san_login)
-        self.assertEquals(
-                self.driver.xiv_proxy.xiv_info['xiv_pass'],
-                FLAGS.san_password)
-        self.assertEquals(
-                self.driver.xiv_proxy.xiv_info['xiv_address'],
-                FLAGS.san_ip)
-        self.assertEquals(
-                self.driver.xiv_proxy.xiv_info['xiv_vol_pool'],
-                FLAGS.san_clustername)
+        self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_user'],
+                          FLAGS.san_login)
+        self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_pass'],
+                          FLAGS.san_password)
+        self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_address'],
+                          FLAGS.san_ip)
+        self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_vol_pool'],
+                          FLAGS.san_clustername)
 
     def test_setup_should_fail_if_credentials_are_invalid(self):
         """Test that the xiv_proxy validates credentials."""
@@ -186,8 +175,10 @@ class XIVVolumeDriverTest(test.TestCase):
 
         self.driver.do_setup(None)
         self.assertRaises(exception.VolumeBackendAPIException,
-                self.driver.create_volume,
-                    {'name': FAKE, 'id': 1, 'size': 12000})
+                          self.driver.create_volume,
+                          {'name': FAKE,
+                           'id': 1,
+                           'size': 12000})
 
     def test_initialize_connection(self):
         """Test that inititialize connection attaches volume to host."""
@@ -197,7 +188,7 @@ class XIVVolumeDriverTest(test.TestCase):
         self.driver.initialize_connection(VOLUME, CONNECTOR)
 
         self.assertTrue(
-                self.driver.xiv_proxy.is_volume_attached(VOLUME, CONNECTOR))
+            self.driver.xiv_proxy.is_volume_attached(VOLUME, CONNECTOR))
 
         self.driver.terminate_connection(VOLUME, CONNECTOR)
         self.driver.delete_volume(VOLUME)
@@ -207,7 +198,9 @@ class XIVVolumeDriverTest(test.TestCase):
 
         self.driver.do_setup(None)
         self.assertRaises(exception.VolumeNotFound,
-                self.driver.initialize_connection, VOLUME, CONNECTOR)
+                          self.driver.initialize_connection,
+                          VOLUME,
+                          CONNECTOR)
 
     def test_terminate_connection(self):
         """Test terminating a connection."""
@@ -217,10 +210,8 @@ class XIVVolumeDriverTest(test.TestCase):
         self.driver.initialize_connection(VOLUME, CONNECTOR)
         self.driver.terminate_connection(VOLUME, CONNECTOR)
 
-        self.assertFalse(
-                self.driver.xiv_proxy.is_volume_attached(
-                    VOLUME,
-                    CONNECTOR))
+        self.assertFalse(self.driver.xiv_proxy.is_volume_attached(VOLUME,
+                                                                  CONNECTOR))
 
         self.driver.delete_volume(VOLUME)
 
@@ -229,7 +220,9 @@ class XIVVolumeDriverTest(test.TestCase):
 
         self.driver.do_setup(None)
         self.assertRaises(exception.VolumeNotFound,
-                self.driver.terminate_connection, VOLUME, CONNECTOR)
+                          self.driver.terminate_connection,
+                          VOLUME,
+                          CONNECTOR)
 
     def test_terminate_connection_should_fail_on_non_attached_volume(self):
         """Test that terminate won't work for volumes that are not attached."""
@@ -238,6 +231,8 @@ class XIVVolumeDriverTest(test.TestCase):
         self.driver.create_volume(VOLUME)
 
         self.assertRaises(exception.VolumeNotFoundForInstance,
-                self.driver.terminate_connection, VOLUME, CONNECTOR)
+                          self.driver.terminate_connection,
+                          VOLUME,
+                          CONNECTOR)
 
         self.driver.delete_volume(VOLUME)
index 642e0cffaba432f546681139b8383a44fe973fa9..88c3bb73101b8d9509d369bec283f27813c39453 100644 (file)
@@ -105,7 +105,7 @@ class FakeRequest(object):
                        ('/api/vcontrollers.xml', self._list_controllers),
                        ('/api/servers.xml', self._list_servers),
                        ('/api/volumes/*/servers.xml',
-                                    self._list_vol_attachments)]
+                        self._list_vol_attachments)]
                }
 
         ops_list = ops[self.method]
@@ -139,8 +139,8 @@ class FakeRequest(object):
 
     def _login(self):
         params = self._get_parameters(self.body)
-        if params['user'] == RUNTIME_VARS['user'] and\
-           params['password'] == RUNTIME_VARS['password']:
+        if (params['user'] == RUNTIME_VARS['user'] and
+                params['password'] == RUNTIME_VARS['password']):
             return RUNTIME_VARS['login'] % RUNTIME_VARS['access_key']
         else:
             return RUNTIME_VARS['bad_login']
@@ -246,8 +246,10 @@ class FakeRequest(object):
                     <created-at type='datetime'>2012-01-28...</created-at>
                     <modified-at type='datetime'>2012-01-28...</modified-at>
                 </volume>"""
-        return self._generate_list_resp(header, footer, body,
-                        RUNTIME_VARS['volumes'])
+        return self._generate_list_resp(header,
+                                        footer,
+                                        body,
+                                        RUNTIME_VARS['volumes'])
 
     def _list_controllers(self):
         header = """<show-vcontrollers-response>
@@ -267,8 +269,10 @@ class FakeRequest(object):
                     <chap-username>test_chap_user</chap-username>
                     <chap-target-secret>test_chap_secret</chap-target-secret>
                 </vcontroller>"""
-        return self._generate_list_resp(header, footer, body,
-                        RUNTIME_VARS['controllers'])
+        return self._generate_list_resp(header,
+                                        footer,
+                                        body,
+                                        RUNTIME_VARS['controllers'])
 
     def _list_servers(self):
         header = """<show-servers-response>
@@ -317,7 +321,8 @@ class FakeRequest(object):
                 for server in attachments:
                     srv_params = self._get_server_obj(server)
                     resp += body % (server,
-                        srv_params['display_name'], srv_params['iqn'])
+                                    srv_params['display_name'],
+                                    srv_params['iqn'])
                 resp += footer
                 return resp
 
@@ -353,7 +358,7 @@ class FakeHTTPSConnection(FakeHTTPConnection):
 
 
 class ZadaraVPSADriverTestCase(test.TestCase):
-    """Test case for Zadara VPSA volume driver"""
+    """Test case for Zadara VPSA volume driver."""
 
     def setUp(self):
         LOG.debug('Enter: setUp')
@@ -428,7 +433,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
         self.driver.check_for_setup_error()
 
     def test_volume_attach_detach(self):
-        """Test volume attachment and detach"""
+        """Test volume attachment and detach."""
         volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
         connector = dict(initiator='test_iqn.1')
 
@@ -450,7 +455,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
         self.driver.delete_volume(volume)
 
     def test_volume_attach_multiple_detach(self):
-        """Test multiple volume attachment and detach"""
+        """Test multiple volume attachment and detach."""
         volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
         connector1 = dict(initiator='test_iqn.1')
         connector2 = dict(initiator='test_iqn.2')
@@ -467,7 +472,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
         self.driver.delete_volume(volume)
 
     def test_wrong_attach_params(self):
-        """Test different wrong attach scenarios"""
+        """Test different wrong attach scenarios."""
         volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
         volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102}
         volume3 = {'name': 'test_volume_03', 'size': 1, 'id': 103}
@@ -480,7 +485,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
                           volume1, connector1)
 
     def test_wrong_detach_params(self):
-        """Test different wrong detachment scenarios"""
+        """Test different wrong detachment scenarios."""
 
         volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
         volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102}
@@ -505,7 +510,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
                           volume1, connector2)
 
     def test_wrong_login_reply(self):
-        """Test wrong login reply"""
+        """Test wrong login reply."""
 
         RUNTIME_VARS['login'] = """<hash>
                     <access-key>%s</access-key>
@@ -530,13 +535,13 @@ class ZadaraVPSADriverTestCase(test.TestCase):
                           self.driver.do_setup, None)
 
     def test_ssl_use(self):
-        """Coverage test for SSL connection"""
+        """Coverage test for SSL connection."""
         self.flags(zadara_vpsa_use_ssl=True)
         self.driver.do_setup(None)
         self.flags(zadara_vpsa_use_ssl=False)
 
     def test_bad_http_response(self):
-        """Coverage test for non-good HTTP response"""
+        """Coverage test for non-good HTTP response."""
         RUNTIME_VARS['status'] = 400
 
         volume = {'name': 'test_volume_01', 'size': 1}
@@ -544,7 +549,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
                           self.driver.create_volume, volume)
 
     def test_delete_without_detach(self):
-        """Test volume deletion without detach"""
+        """Test volume deletion without detach."""
 
         volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
         connector1 = dict(initiator='test_iqn.1')
index 0ff394ddd7ac10ea30200b42c1fa990f1238dc22..44a9adb6a0c5967cfdfe1ad9dfb5da1cb3027ee5 100644 (file)
@@ -43,9 +43,9 @@ class BaseTestCase(cinder.test.TestCase):
         super(BaseTestCase, self).tearDown()
 
         has_errors = len([test for (test, msgs) in self._currentResult.errors
-            if test.id() == self.id()]) > 0
+                          if test.id() == self.id()]) > 0
         failed = len([test for (test, msgs) in self._currentResult.failures
-            if test.id() == self.id()]) > 0
+                      if test.id() == self.id()]) > 0
 
         if not has_errors and not failed:
             self._save_mock_proxies()
@@ -61,7 +61,7 @@ class BaseTestCase(cinder.test.TestCase):
             test_name = test_name[len(prefix):]
         file_name = '{0}_{1}.p.gz'.format(test_name, mock_name)
         return os.path.join(os.path.dirname(mockproxy.__file__),
-                "stubs", file_name)
+                            "stubs", file_name)
 
     def _load_mock(self, name):
         path = self._get_stub_file_path(self.id(), name)
@@ -72,9 +72,9 @@ class BaseTestCase(cinder.test.TestCase):
 
     def _load_mock_or_create_proxy(self, module_name):
         m = None
-        if not gen_test_mocks_key in os.environ or \
-                os.environ[gen_test_mocks_key].lower() \
-                    not in ['true', 'yes', '1']:
+        if (not gen_test_mocks_key in os.environ or
+                os.environ[gen_test_mocks_key].lower()
+                not in ['true', 'yes', '1']):
             m = self._load_mock(module_name)
         else:
             module = __import__(module_name)
index 938b240a8f24d3335af6eac06eb94ea93f9edffb..a93dbc6f9bb296f7412b216751ab35a9b8faee97 100644 (file)
@@ -20,23 +20,17 @@ Stubouts, mocks and fixtures for windows volume test suite
 
 
 def get_fake_volume_info(name):
-    return {
-        'name': name,
-        'size': 1,
-        'provider_location': 'iqn.2010-10.org.openstack:' + name,
-        'id': 1,
-        'provider_auth': None
-        }
+    return {'name': name,
+            'size': 1,
+            'provider_location': 'iqn.2010-10.org.openstack:' + name,
+            'id': 1,
+            'provider_auth': None}
 
 
 def get_fake_snapshot_info(volume_name, snapshot_name):
-    return {
-        'name': snapshot_name,
-        'volume_name': volume_name,
-    }
+    return {'name': snapshot_name,
+            'volume_name': volume_name, }
 
 
 def get_fake_connector_info(initiator):
-    return {
-        'initiator': initiator,
-    }
+    return {'initiator': initiator, }
index ff04ea7095f01971a0a3af33a3a582eddf45b926..a612d470b66ceb53bcb77d70fef1e38be0382b57 100644 (file)
@@ -44,7 +44,7 @@ def serialize_obj(obj):
 
 
 def serialize_args(*args, **kwargs):
-    """Workaround for float string conversion issues in Python 2.6"""
+    """Workaround for float string conversion issues in Python 2.6."""
     return serialize_obj((args, kwargs))
 
 
@@ -113,8 +113,10 @@ class MockProxy(object):
         self._recorded_values = {}
 
     def _get_proxy_object(self, obj):
-        if hasattr(obj, '__dict__') or isinstance(obj, tuple) or \
-            isinstance(obj, list) or isinstance(obj, dict):
+        if (hasattr(obj, '__dict__') or
+                isinstance(obj, tuple) or
+                isinstance(obj, list) or
+                isinstance(obj, dict)):
             p = MockProxy(obj)
         else:
             p = obj
@@ -125,8 +127,9 @@ class MockProxy(object):
             return object.__getattribute__(self, name)
         else:
             attr = getattr(self._wrapped, name)
-            if inspect.isfunction(attr) or inspect.ismethod(attr) or \
-                inspect.isbuiltin(attr):
+            if (inspect.isfunction(attr) or
+                    inspect.ismethod(attr) or
+                    inspect.isbuiltin(attr)):
                 def newfunc(*args, **kwargs):
                     result = attr(*args, **kwargs)
                     p = self._get_proxy_object(result)
@@ -134,8 +137,9 @@ class MockProxy(object):
                     self._add_recorded_ret_value(name, params, p)
                     return p
                 return newfunc
-            elif hasattr(attr, '__dict__') or (hasattr(attr, '__getitem__')
-                and not (isinstance(attr, str) or isinstance(attr, unicode))):
+            elif (hasattr(attr, '__dict__') or
+                  (hasattr(attr, '__getitem__') and not
+                  (isinstance(attr, str) or isinstance(attr, unicode)))):
                 p = MockProxy(attr)
             else:
                 p = attr
index 3da6294cb52e63afc4ea9e2996cc62777dac41c9..4ef3212ebb465f660343432e99db7418735b3111 100644 (file)
@@ -48,13 +48,13 @@ class WindowsUtils(object):
         return self.__conn_wmi
 
     def find_vhd_by_name(self, name):
-        ''' Finds a volume by its name.'''
+        '''Finds a volume by its name.'''
 
         wt_disks = self._conn_wmi.WT_Disk(Description=name)
         return wt_disks
 
     def volume_exists(self, name):
-        ''' Checks if a volume exists.'''
+        '''Checks if a volume exists.'''
 
         wt_disks = self.find_vhd_by_name(name)
         if len(wt_disks) > 0:
@@ -62,7 +62,7 @@ class WindowsUtils(object):
         return False
 
     def snapshot_exists(self, name):
-        ''' Checks if a snapshot exists.'''
+        '''Checks if a snapshot exists.'''
 
         wt_snapshots = self.find_snapshot_by_name(name)
         if len(wt_snapshots) > 0:
@@ -70,47 +70,47 @@ class WindowsUtils(object):
         return False
 
     def find_snapshot_by_name(self, name):
-        ''' Finds a snapshot by its name.'''
+        '''Finds a snapshot by its name.'''
 
         wt_snapshots = self._conn_wmi.WT_Snapshot(Description=name)
         return wt_snapshots
 
     def delete_volume(self, name):
-        ''' Deletes a volume.'''
+        '''Deletes a volume.'''
 
         wt_disk = self._conn_wmi.WT_Disk(Description=name)[0]
         wt_disk.Delete_()
         vhdfiles = self._conn_cimv2.query(
-        "Select * from CIM_DataFile where Name = '" +
-        self._get_vhd_path(name) + "'")
+            "Select * from CIM_DataFile where Name = '" +
+            self._get_vhd_path(name) + "'")
         if len(vhdfiles) > 0:
             vhdfiles[0].Delete()
 
     def _get_vhd_path(self, volume_name):
-        ''' Gets the path disk of the volume'''
+        '''Gets the disk path of the volume.'''
 
         base_vhd_folder = FLAGS.windows_iscsi_lun_path
         return os.path.join(base_vhd_folder, volume_name + ".vhd")
 
     def delete_snapshot(self, name):
-        ''' Deletes a snapshot.'''
+        '''Deletes a snapshot.'''
 
         wt_snapshot = self._conn_wmi.WT_Snapshot(Description=name)[0]
         wt_snapshot.Delete_()
         vhdfile = self._conn_cimv2.query(
-        "Select * from CIM_DataFile where Name = '" +
-        self._get_vhd_path(name) + "'")[0]
+            "Select * from CIM_DataFile where Name = '" +
+            self._get_vhd_path(name) + "'")[0]
         vhdfile.Delete()
 
     def find_initiator_ids(self, target_name, initiator_name):
-        ''' Finds a initiator id by its name.'''
+        '''Finds an initiator id by its name.'''
         wt_idmethod = self._conn_wmi.WT_IDMethod(HostName=target_name,
                                                  Method=4,
                                                  Value=initiator_name)
         return wt_idmethod
 
     def initiator_id_exists(self, target_name, initiator_name):
-        ''' Checks if  a initiatorId exists.'''
+        '''Checks if an initiatorId exists.'''
 
         wt_idmethod = self.find_initiator_ids(target_name, initiator_name)
         if len(wt_idmethod) > 0:
@@ -118,13 +118,13 @@ class WindowsUtils(object):
         return False
 
     def find_exports(self, target_name):
-        ''' Finds a export id by its name.'''
+        '''Finds an export id by its name.'''
 
         wt_host = self._conn_wmi.WT_Host(HostName=target_name)
         return wt_host
 
     def export_exists(self, target_name):
-        ''' Checks if  a export exists.'''
+        '''Checks if an export exists.'''
 
         wt_host = self.find_exports(target_name)
         if len(wt_host) > 0:
@@ -132,13 +132,13 @@ class WindowsUtils(object):
         return False
 
     def delete_initiator_id(self, target_name, initiator_name):
-        ''' Deletes a initiatorId.'''
+        '''Deletes a initiatorId.'''
 
         wt_init_id = self.find_initiator_ids(target_name, initiator_name)[0]
         wt_init_id.Delete_()
 
     def delete_export(self, target_name):
-        ''' Deletes an export.'''
+        '''Deletes an export.'''
 
         wt_host = self.find_exports(target_name)[0]
         wt_host.RemoveAllWTDisks()
index f9fec02607f7692f529f02adf6c5c8d25fde5af8..9a789bb8d6310236cfb5d5d70d926e8b0bfd1aa1 100644 (file)
@@ -181,10 +181,10 @@ def execute(*cmd, **kwargs):
                 if not ignore_exit_code and _returncode not in check_exit_code:
                     (stdout, stderr) = result
                     raise exception.ProcessExecutionError(
-                            exit_code=_returncode,
-                            stdout=stdout,
-                            stderr=stderr,
-                            cmd=' '.join(cmd))
+                        exit_code=_returncode,
+                        stdout=stdout,
+                        stderr=stderr,
+                        cmd=' '.join(cmd))
             return result
         except exception.ProcessExecutionError:
             if not attempts:
@@ -439,9 +439,9 @@ def last_completed_audit_period(unit=None):
 
     elif unit == 'day':
         end = datetime.datetime(hour=offset,
-                               day=rightnow.day,
-                               month=rightnow.month,
-                               year=rightnow.year)
+                                day=rightnow.day,
+                                month=rightnow.month,
+                                year=rightnow.year)
         if end >= rightnow:
             end = end - datetime.timedelta(days=1)
         begin = end - datetime.timedelta(days=1)
@@ -766,10 +766,10 @@ def bool_from_str(val):
 def is_valid_boolstr(val):
     """Check if the provided string is a valid bool string or not. """
     val = str(val).lower()
-    return val == 'true' or val == 'false' or \
-           val == 'yes' or val == 'no' or \
-           val == 'y' or val == 'n' or \
-           val == '1' or val == '0'
+    return (val == 'true' or val == 'false' or
+            val == 'yes' or val == 'no' or
+            val == 'y' or val == 'n' or
+            val == '1' or val == '0')
 
 
 def is_valid_ipv4(address):
@@ -820,13 +820,14 @@ def monkey_patch():
             if isinstance(module_data[key], pyclbr.Class):
                 clz = importutils.import_class("%s.%s" % (module, key))
                 for method, func in inspect.getmembers(clz, inspect.ismethod):
-                    setattr(clz, method,
+                    setattr(
+                        clz, method,
                         decorator("%s.%s.%s" % (module, key, method), func))
             # set the decorator for the function
             if isinstance(module_data[key], pyclbr.Function):
                 func = importutils.import_class("%s.%s" % (module, key))
                 setattr(sys.modules[module], key,
-                    decorator("%s.%s" % (module, key), func))
+                        decorator("%s.%s" % (module, key), func))
 
 
 def convert_to_list_dict(lst, label):
index 18c341b775d6529b44eb8c67c2180c01cfd76495..9c6d5b929274bb9b7f1ad40130ec51ae6fc9f773 100644 (file)
@@ -38,8 +38,9 @@ from cinder.volume import rpcapi as volume_rpcapi
 from cinder.volume import volume_types
 
 volume_host_opt = cfg.BoolOpt('snapshot_same_host',
-        default=True,
-        help='Create volume from snapshot at the host where snapshot resides')
+                              default=True,
+                              help='Create volume from snapshot at the host '
+                                   'where snapshot resides')
 
 FLAGS = flags.FLAGS
 FLAGS.register_opt(volume_host_opt)
@@ -85,8 +86,8 @@ class API(base.Base):
         super(API, self).__init__(db_driver)
 
     def create(self, context, size, name, description, snapshot=None,
-                image_id=None, volume_type=None, metadata=None,
-                availability_zone=None):
+               image_id=None, volume_type=None, metadata=None,
+               availability_zone=None):
         check_policy(context, 'create')
         if snapshot is not None:
             if snapshot['status'] != "available":
@@ -143,8 +144,8 @@ class API(base.Base):
             elif 'volumes' in overs:
                 consumed = _consumed('volumes')
                 LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
-                           "volume (%(consumed)d volumes already consumed)")
-                           % locals())
+                           "volume (%(consumed)d volumes "
+                           "already consumed)") % locals())
                 raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
 
         if availability_zone is None:
@@ -155,19 +156,17 @@ class API(base.Base):
 
         volume_type_id = volume_type.get('id')
 
-        options = {
-            'size': size,
-            'user_id': context.user_id,
-            'project_id': context.project_id,
-            'snapshot_id': snapshot_id,
-            'availability_zone': availability_zone,
-            'status': "creating",
-            'attach_status': "detached",
-            'display_name': name,
-            'display_description': description,
-            'volume_type_id': volume_type_id,
-            'metadata': metadata,
-            }
+        options = {'size': size,
+                   'user_id': context.user_id,
+                   'project_id': context.project_id,
+                   'snapshot_id': snapshot_id,
+                   'availability_zone': availability_zone,
+                   'status': "creating",
+                   'attach_status': "detached",
+                   'display_name': name,
+                   'display_description': description,
+                   'volume_type_id': volume_type_id,
+                   'metadata': metadata, }
 
         try:
             volume = self.db.volume_create(context, options)
@@ -179,13 +178,11 @@ class API(base.Base):
                 finally:
                     QUOTAS.rollback(context, reservations)
 
-        request_spec = {
-            'volume_properties': options,
-            'volume_type': volume_type,
-            'volume_id': volume['id'],
-            'snapshot_id': volume['snapshot_id'],
-            'image_id': image_id
-        }
+        request_spec = {'volume_properties': options,
+                        'volume_type': volume_type,
+                        'volume_id': volume['id'],
+                        'snapshot_id': volume['snapshot_id'],
+                        'image_id': image_id}
 
         filter_properties = {}
 
@@ -213,18 +210,19 @@ class API(base.Base):
 
             # bypass scheduler and send request directly to volume
             self.volume_rpcapi.create_volume(context,
-                                            volume_ref,
-                                            volume_ref['host'],
-                                            snapshot_id,
-                                            image_id)
+                                             volume_ref,
+                                             volume_ref['host'],
+                                             snapshot_id,
+                                             image_id)
         else:
-            self.scheduler_rpcapi.create_volume(context,
-                                    FLAGS.volume_topic,
-                                    volume_id,
-                                    snapshot_id,
-                                    image_id,
-                                    request_spec=request_spec,
-                                    filter_properties=filter_properties)
+            self.scheduler_rpcapi.create_volume(
+                context,
+                FLAGS.volume_topic,
+                volume_id,
+                snapshot_id,
+                image_id,
+                request_spec=request_spec,
+                filter_properties=filter_properties)
 
     @wrap_check_policy
     def delete(self, context, volume, force=False):
@@ -292,7 +290,7 @@ class API(base.Base):
 
                 for k, v in searchdict.iteritems():
                     if (k not in volume_metadata.keys() or
-                        volume_metadata[k] != v):
+                            volume_metadata[k] != v):
                         return False
                 return True
 
@@ -386,9 +384,9 @@ class API(base.Base):
     @wrap_check_policy
     def attach(self, context, volume, instance_uuid, mountpoint):
         return self.volume_rpcapi.attach_volume(context,
-                                        volume,
-                                        instance_uuid,
-                                        mountpoint)
+                                                volume,
+                                                instance_uuid,
+                                                mountpoint)
 
     @wrap_check_policy
     def detach(self, context, volume):
@@ -397,16 +395,16 @@ class API(base.Base):
     @wrap_check_policy
     def initialize_connection(self, context, volume, connector):
         return self.volume_rpcapi.initialize_connection(context,
-                                                volume,
-                                                connector)
+                                                        volume,
+                                                        connector)
 
     @wrap_check_policy
     def terminate_connection(self, context, volume, connector, force=False):
         self.unreserve_volume(context, volume)
         return self.volume_rpcapi.terminate_connection(context,
-                                                volume,
-                                                connector,
-                                                force)
+                                                       volume,
+                                                       connector,
+                                                       force)
 
     def _create_snapshot(self, context, volume, name, description,
                          force=False):
@@ -416,15 +414,14 @@ class API(base.Base):
             msg = _("must be available")
             raise exception.InvalidVolume(reason=msg)
 
-        options = {
-            'volume_id': volume['id'],
-            'user_id': context.user_id,
-            'project_id': context.project_id,
-            'status': "creating",
-            'progress': '0%',
-            'volume_size': volume['size'],
-            'display_name': name,
-            'display_description': description}
+        options = {'volume_id': volume['id'],
+                   'user_id': context.user_id,
+                   'project_id': context.project_id,
+                   'status': "creating",
+                   'progress': '0%',
+                   'volume_size': volume['size'],
+                   'display_name': name,
+                   'display_description': description}
 
         snapshot = self.db.snapshot_create(context, options)
         self.volume_rpcapi.create_snapshot(context, volume, snapshot)
@@ -506,20 +503,20 @@ class API(base.Base):
 
         recv_metadata = self.image_service.create(context, metadata)
         self.update(context, volume, {'status': 'uploading'})
-        self.volume_rpcapi.copy_volume_to_image(context, volume,
-                                            recv_metadata['id'])
+        self.volume_rpcapi.copy_volume_to_image(context,
+                                                volume,
+                                                recv_metadata['id'])
 
         response = {"id": volume['id'],
-               "updated_at": volume['updated_at'],
-               "status": 'uploading',
-               "display_description": volume['display_description'],
-               "size": volume['size'],
-               "volume_type": volume['volume_type'],
-               "image_id": recv_metadata['id'],
-               "container_format": recv_metadata['container_format'],
-               "disk_format": recv_metadata['disk_format'],
-               "image_name": recv_metadata.get('name', None)
-        }
+                    "updated_at": volume['updated_at'],
+                    "status": 'uploading',
+                    "display_description": volume['display_description'],
+                    "size": volume['size'],
+                    "volume_type": volume['volume_type'],
+                    "image_id": recv_metadata['id'],
+                    "container_format": recv_metadata['container_format'],
+                    "disk_format": recv_metadata['disk_format'],
+                    "image_name": recv_metadata.get('name', None)}
         return response
 
 
index 0dacacfcd016250517d3a38697073674c4a9c3fa..547a53fad953e69f16c43f31f8609ce4f8cb5121 100644 (file)
@@ -56,8 +56,7 @@ volume_opts = [
                help='use this ip for iscsi'),
     cfg.IntOpt('iscsi_port',
                default=3260,
-               help='The port that the iSCSI daemon is listening on'),
-    ]
+               help='The port that the iSCSI daemon is listening on'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(volume_opts)
@@ -93,11 +92,11 @@ class VolumeDriver(object):
     def check_for_setup_error(self):
         """Returns an error if prerequisites aren't met"""
         out, err = self._execute('vgs', '--noheadings', '-o', 'name',
-                                run_as_root=True)
+                                 run_as_root=True)
         volume_groups = out.split()
         if not FLAGS.volume_group in volume_groups:
             exception_message = (_("volume group %s doesn't exist")
-                                  % FLAGS.volume_group)
+                                 % FLAGS.volume_group)
             raise exception.VolumeBackendAPIException(data=exception_message)
 
     def _create_volume(self, volume_name, sizestr):
@@ -307,8 +306,9 @@ class ISCSIDriver(VolumeDriver):
         # cooresponding target admin class
         if not isinstance(self.tgtadm, iscsi.TgtAdm):
             try:
-                iscsi_target = self.db.volume_get_iscsi_target_num(context,
-                                                               volume['id'])
+                iscsi_target = self.db.volume_get_iscsi_target_num(
+                    context,
+                    volume['id'])
             except exception.NotFound:
                 LOG.info(_("Skipping ensure_export. No iscsi_target "
                            "provisioned for volume: %s"), volume['id'])
@@ -320,7 +320,7 @@ class ISCSIDriver(VolumeDriver):
         old_name = None
         volume_name = volume['name']
         if (volume['provider_location'] is not None and
-            volume['name'] not in volume['provider_location']):
+                volume['name'] not in volume['provider_location']):
 
             msg = _('Detected inconsistency in provider_location id')
             LOG.debug(msg)
@@ -443,8 +443,9 @@ class ISCSIDriver(VolumeDriver):
         # cooresponding target admin class
         if not isinstance(self.tgtadm, iscsi.TgtAdm):
             try:
-                iscsi_target = self.db.volume_get_iscsi_target_num(context,
-                                                               volume['id'])
+                iscsi_target = self.db.volume_get_iscsi_target_num(
+                    context,
+                    volume['id'])
             except exception.NotFound:
                 LOG.info(_("Skipping remove_export. No iscsi_target "
                            "provisioned for volume: %s"), volume['id'])
index 1a2e489e420c6faf8c9a704af5aaa3f5f3bceb24..a6220ed88738e5f417fbc7664cc49d1fa08d641c 100644 (file)
@@ -57,15 +57,14 @@ netapp_opts = [
     cfg.StrOpt('netapp_storage_service',
                default=None,
                help=('Storage service to use for provisioning '
-                    '(when volume_type=None)')),
+                     '(when volume_type=None)')),
     cfg.StrOpt('netapp_storage_service_prefix',
                default=None,
                help=('Prefix of storage service name to use for '
-                    'provisioning (volume_type name will be appended)')),
+                     'provisioning (volume_type name will be appended)')),
     cfg.StrOpt('netapp_vfiler',
                default=None,
-               help='Vfiler to use for provisioning'),
-    ]
+               help='Vfiler to use for provisioning'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(netapp_opts)
@@ -148,15 +147,17 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
     def _check_flags(self):
         """Ensure that the flags we care about are set."""
         required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
-                'netapp_server_hostname', 'netapp_server_port']
+                          'netapp_server_hostname', 'netapp_server_port']
         for flag in required_flags:
             if not getattr(FLAGS, flag, None):
                 raise exception.InvalidInput(reason=_('%s is not set') % flag)
         if not (FLAGS.netapp_storage_service or
                 FLAGS.netapp_storage_service_prefix):
-            raise exception.InvalidInput(reason=_('Either '
-                'netapp_storage_service or netapp_storage_service_prefix must '
-                'be set'))
+            raise exception.InvalidInput(
+                reason=_('Either '
+                         'netapp_storage_service or '
+                         'netapp_storage_service_prefix must '
+                         'be set'))
 
     def do_setup(self, context):
         """Setup the NetApp Volume driver.
@@ -166,7 +167,8 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
         client.
         """
         self._check_flags()
-        self._create_client(wsdl_url=FLAGS.netapp_wsdl_url,
+        self._create_client(
+            wsdl_url=FLAGS.netapp_wsdl_url,
             login=FLAGS.netapp_login, password=FLAGS.netapp_password,
             hostname=FLAGS.netapp_server_hostname,
             port=FLAGS.netapp_server_port, cache=True)
@@ -204,10 +206,10 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
         """Discover all of the LUNs in a dataset."""
         server = self.client.service
         res = server.DatasetMemberListInfoIterStart(
-                DatasetNameOrId=dataset.id,
-                IncludeExportsInfo=True,
-                IncludeIndirect=True,
-                MemberType='lun_path')
+            DatasetNameOrId=dataset.id,
+            IncludeExportsInfo=True,
+            IncludeIndirect=True,
+            MemberType='lun_path')
         tag = res.Tag
         suffix = None
         if volume:
@@ -217,7 +219,7 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
                 res = server.DatasetMemberListInfoIterNext(Tag=tag,
                                                            Maximum=100)
                 if (not hasattr(res, 'DatasetMembers') or
-                            not res.DatasetMembers):
+                        not res.DatasetMembers):
                     break
                 for member in res.DatasetMembers.DatasetMemberInfo:
                     if suffix and not member.MemberName.endswith(suffix):
@@ -324,11 +326,11 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
         """
         if ss_type and not self.storage_service_prefix:
             msg = _('Attempt to use volume_type without specifying '
-                'netapp_storage_service_prefix flag.')
+                    'netapp_storage_service_prefix flag.')
             raise exception.VolumeBackendAPIException(data=msg)
         if not (ss_type or self.storage_service):
             msg = _('You must set the netapp_storage_service flag in order to '
-                'create volumes with no volume_type.')
+                    'create volumes with no volume_type.')
             raise exception.VolumeBackendAPIException(data=msg)
         storage_service = self.storage_service
         if ss_type:
@@ -358,11 +360,11 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
         metadata.DfmMetadataField = [field1, field2]
 
         res = self.client.service.StorageServiceDatasetProvision(
-                StorageServiceNameOrId=storage_service,
-                DatasetName=dataset_name,
-                AssumeConfirmation=True,
-                StorageSetDetails=details,
-                DatasetMetadata=metadata)
+            StorageServiceNameOrId=storage_service,
+            DatasetName=dataset_name,
+            AssumeConfirmation=True,
+            StorageSetDetails=details,
+            DatasetMetadata=metadata)
 
         ds = DfmDataset(res.DatasetId, dataset_name, project, ss_type)
         self.discovered_datasets.append(ds)
@@ -627,7 +629,7 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
         igroup_infos = igroups[0]['initiator-group-info']
         for igroup_info in igroup_infos:
             if ('iscsi' != igroup_info['initiator-group-type'][0] or
-                'linux' != igroup_info['initiator-group-os-type'][0]):
+                    'linux' != igroup_info['initiator-group-os-type'][0]):
                 continue
             igroup_name = igroup_info['initiator-group-name'][0]
             if not igroup_name.startswith(self.IGROUP_PREFIX):
@@ -678,7 +680,7 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
         request.Name = 'lun-map-list-info'
         request.Args = text.Raw('<path>%s</path>' % (lunpath))
         response = self.client.service.ApiProxy(Target=host_id,
-                                                 Request=request)
+                                                Request=request)
         self._check_fail(request, response)
         igroups = response.Results['initiator-groups']
         if self._api_elem_is_empty(igroups):
@@ -830,7 +832,7 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
             '<volume-uuid>%s</volume-uuid>'
             '</clone-id-info></clone-id>')
         request.Args = text.Raw(clone_list_status_xml % (clone_op_id,
-                                                          volume_uuid))
+                                                         volume_uuid))
         response = self.client.service.ApiProxy(Target=host_id,
                                                 Request=request)
         self._check_fail(request, response)
@@ -856,7 +858,7 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
         else:
             no_snap = 'true'
         request.Args = text.Raw(clone_start_xml % (src_path, no_snap,
-                                                    dest_path))
+                                                   dest_path))
         response = self.client.service.ApiProxy(Target=host_id,
                                                 Request=request)
         self._check_fail(request, response)
@@ -966,7 +968,7 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
         snap_size = snapshot['volume_size']
         if vol_size != snap_size:
             msg = _('Cannot create volume of size %(vol_size)s from '
-                'snapshot of size %(snap_size)s')
+                    'snapshot of size %(snap_size)s')
             raise exception.VolumeBackendAPIException(data=msg % locals())
         vol_name = snapshot['volume_name']
         snapshot_name = snapshot['name']
@@ -978,7 +980,7 @@ class NetAppISCSIDriver(driver.ISCSIDriver):
         new_type = self._get_ss_type(volume)
         if new_type != old_type:
             msg = _('Cannot create volume of type %(new_type)s from '
-                'snapshot of type %(old_type)s')
+                    'snapshot of type %(old_type)s')
             raise exception.VolumeBackendAPIException(data=msg % locals())
         lun = self._get_lun_details(lun_id)
         extra_gb = vol_size
@@ -1039,7 +1041,7 @@ class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
     def _check_flags(self):
         """Ensure that the flags we care about are set."""
         required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
-                'netapp_server_hostname', 'netapp_server_port']
+                          'netapp_server_hostname', 'netapp_server_port']
         for flag in required_flags:
             if not getattr(FLAGS, flag, None):
                 msg = _('%s is not set') % flag
@@ -1053,7 +1055,8 @@ class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
         client.
         """
         self._check_flags()
-        self._create_client(wsdl_url=FLAGS.netapp_wsdl_url,
+        self._create_client(
+            wsdl_url=FLAGS.netapp_wsdl_url,
             login=FLAGS.netapp_login, password=FLAGS.netapp_password,
             hostname=FLAGS.netapp_server_hostname,
             port=FLAGS.netapp_server_port, cache=True)
@@ -1069,8 +1072,10 @@ class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
             meta_dict = {}
             if hasattr(lun, 'Metadata'):
                 meta_dict = self._create_dict_from_meta(lun.Metadata)
-            discovered_lun = NetAppLun(lun.Handle, lun.Name, lun.Size,
-                meta_dict)
+            discovered_lun = NetAppLun(lun.Handle,
+                                       lun.Name,
+                                       lun.Size,
+                                       meta_dict)
             self._add_lun_to_table(discovered_lun)
         LOG.debug(_("Success getting LUN list from server"))
 
@@ -1095,8 +1100,11 @@ class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
         lun = server.ProvisionLun(Name=name, Size=size,
                                   Metadata=metadata)
         LOG.debug(_("Created LUN with name %s") % name)
-        self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name,
-             lun.Size, self._create_dict_from_meta(lun.Metadata)))
+        self._add_lun_to_table(
+            NetAppLun(lun.Handle,
+                      lun.Name,
+                      lun.Size,
+                      self._create_dict_from_meta(lun.Metadata)))
 
     def delete_volume(self, volume):
         """Driver entry point for destroying existing volumes."""
@@ -1143,8 +1151,10 @@ class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
         msg = _("Mapped LUN %(handle)s to the initiator %(initiator_name)s")
         LOG.debug(msg % locals())
 
-        target_details_list = server.GetLunTargetDetails(Handle=handle,
-                InitiatorType="iscsi", InitiatorName=initiator_name)
+        target_details_list = server.GetLunTargetDetails(
+            Handle=handle,
+            InitiatorType="iscsi",
+            InitiatorName=initiator_name)
         msg = _("Succesfully fetched target details for LUN %(handle)s and "
                 "initiator %(initiator_name)s")
         LOG.debug(msg % locals())
@@ -1255,8 +1265,11 @@ class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
         lun = server.CloneLun(Handle=handle, NewName=new_name,
                               Metadata=metadata)
         LOG.debug(_("Cloned LUN with new name %s") % new_name)
-        self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name,
-             lun.Size, self._create_dict_from_meta(lun.Metadata)))
+        self._add_lun_to_table(
+            NetAppLun(lun.Handle,
+                      lun.Name,
+                      lun.Size,
+                      self._create_dict_from_meta(lun.Metadata)))
 
     def _create_metadata_list(self, extra_args):
         """Creates metadata from kwargs."""
index 4a1e41b89b0f5cbab0929e04db464ecad4b7660a..37880d25abaf30fdd68a2a472c202a22800348ac 100644 (file)
@@ -35,8 +35,7 @@ LOG = logging.getLogger(__name__)
 netapp_nfs_opts = [
     cfg.IntOpt('synchronous_snapshot_create',
                default=0,
-               help='Does snapshot creation call returns immediately')
-    ]
+               help='Does snapshot creation call returns immediately')]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(netapp_opts)
@@ -71,7 +70,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
 
         if vol_size != snap_size:
             msg = _('Cannot create volume of size %(vol_size)s from '
-                'snapshot of size %(snap_size)s')
+                    'snapshot of size %(snap_size)s')
             raise exception.CinderException(msg % locals())
 
         self._clone_volume(snapshot.name, volume.name, snapshot.volume_id)
@@ -114,9 +113,8 @@ class NetAppNFSDriver(nfs.NfsDriver):
         client = suds.client.Client(FLAGS.netapp_wsdl_url,
                                     username=FLAGS.netapp_login,
                                     password=FLAGS.netapp_password)
-        soap_url = 'http://%s:%s/apis/soap/v1' % (
-                                          FLAGS.netapp_server_hostname,
-                                          FLAGS.netapp_server_port)
+        soap_url = 'http://%s:%s/apis/soap/v1' % (FLAGS.netapp_server_hostname,
+                                                  FLAGS.netapp_server_port)
         client.set_options(location=soap_url)
 
         return client
@@ -144,7 +142,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
                                                     clone_name))
 
         resp = self._client.service.ApiProxy(Target=host_id,
-                                            Request=request)
+                                             Request=request)
 
         if resp.Status == 'passed' and FLAGS.synchronous_snapshot_create:
             clone_id = resp.Results['clone-id'][0]
@@ -161,10 +159,10 @@ class NetAppNFSDriver(nfs.NfsDriver):
         :param clone_operation_id: Identifier of ONTAP clone operation
         """
         clone_list_options = ('<clone-id>'
-                                '<clone-id-info>'
-                                  '<clone-op-id>%d</clone-op-id>'
-                                  '<volume-uuid></volume-uuid>'
-                                '</clone-id>'
+                              '<clone-id-info>'
+                              '<clone-op-id>%d</clone-op-id>'
+                              '<volume-uuid></volume-uuid>'
+                              '</clone-id>'
                               '</clone-id-info>')
 
         request = self._client.factory.create('Request')
@@ -176,7 +174,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
         while resp.Status != 'passed':
             time.sleep(1)
             resp = self._client.service.ApiProxy(Target=host_id,
-                                                Request=request)
+                                                 Request=request)
 
     def _get_provider_location(self, volume_id):
         """
@@ -219,7 +217,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
         request.Args = text.Raw(command_args % export_path)
 
         resp = self._client.service.ApiProxy(Target=host_id,
-                                            Request=request)
+                                             Request=request)
 
         if resp.Status == 'passed':
             return resp.Results['actual-pathname'][0]
index 2def096a28097afdf846f6bd8a0f2a047d2f69d1..ddeb5bdd877ffa0fe55f23650f0066fbcfe8d974 100644 (file)
@@ -56,8 +56,8 @@ class NexentaJSONProxy(object):
 
     def __call__(self, *args):
         data = jsonutils.dumps({'object': self.obj,
-                           'method': self.method,
-                           'params': args})
+                                'method': self.method,
+                                'params': args})
         auth = ('%s:%s' % (self.user, self.password)).encode('base64')[:-1]
         headers = {'Content-Type': 'application/json',
                    'Authorization': 'Basic %s' % (auth,)}
@@ -67,7 +67,7 @@ class NexentaJSONProxy(object):
         if response_obj.info().status == 'EOF in headers':
             if self.auto and self.url.startswith('http://'):
                 LOG.info(_('Auto switching to HTTPS connection to %s'),
-                                                                      self.url)
+                         self.url)
                 self.url = 'https' + self.url[4:]
                 request = urllib2.Request(self.url, data, headers)
                 response_obj = urllib2.urlopen(request)
index cf810f9e0f1fae486704d8bc6b0a771d293bcf11..21d9cec41cd4a56ced8512c697310e995e0a7e77 100644 (file)
@@ -35,8 +35,8 @@ FLAGS = flags.FLAGS
 
 nexenta_opts = [
     cfg.StrOpt('nexenta_host',
-              default='',
-              help='IP address of Nexenta SA'),
+               default='',
+               help='IP address of Nexenta SA'),
     cfg.IntOpt('nexenta_rest_port',
                default=2000,
                help='HTTP port to connect to Nexenta REST API server'),
@@ -94,7 +94,7 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         """
         if not self.nms.volume.object_exists(FLAGS.nexenta_volume):
             raise LookupError(_("Volume %s does not exist in Nexenta SA"),
-                                    FLAGS.nexenta_volume)
+                              FLAGS.nexenta_volume)
 
     @staticmethod
     def _get_zvol_name(volume_name):
@@ -198,7 +198,7 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
                 raise
             else:
                 LOG.info(_('Ignored target creation error "%s"'
-                                             ' while ensuring export'), exc)
+                           ' while ensuring export'), exc)
         try:
             self.nms.stmf.create_targetgroup(target_group_name)
         except nexenta.NexentaException as exc:
@@ -206,7 +206,7 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
                 raise
             else:
                 LOG.info(_('Ignored target group creation error "%s"'
-                                             ' while ensuring export'), exc)
+                           ' while ensuring export'), exc)
         try:
             self.nms.stmf.add_targetgroup_member(target_group_name,
                                                  target_name)
@@ -215,7 +215,7 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
                 raise
             else:
                 LOG.info(_('Ignored target group member addition error "%s"'
-                                             ' while ensuring export'), exc)
+                           ' while ensuring export'), exc)
         try:
             self.nms.scsidisk.create_lu(zvol_name, {})
         except nexenta.NexentaException as exc:
@@ -223,7 +223,7 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
                 raise
             else:
                 LOG.info(_('Ignored LU creation error "%s"'
-                                             ' while ensuring export'), exc)
+                           ' while ensuring export'), exc)
         try:
             self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
                 'target_group': target_group_name,
@@ -233,7 +233,7 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
                 raise
             else:
                 LOG.info(_('Ignored LUN mapping entry addition error "%s"'
-                                             ' while ensuring export'), exc)
+                           ' while ensuring export'), exc)
         return '%s:%s,1 %s' % (FLAGS.nexenta_host,
                                FLAGS.nexenta_iscsi_target_portal_port,
                                target_name)
@@ -269,12 +269,13 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         except nexenta.NexentaException as exc:
             # We assume that target group is already gone
             LOG.warn(_('Got error trying to destroy target group'
-                ' %(target_group)s, assuming it is already gone: %(exc)s'),
-                {'target_group': target_group_name, 'exc': exc})
+                       ' %(target_group)s, assuming it is '
+                       'already gone: %(exc)s'),
+                     {'target_group': target_group_name, 'exc': exc})
         try:
             self.nms.iscsitarget.delete_target(target_name)
         except nexenta.NexentaException as exc:
             # We assume that target is gone as well
             LOG.warn(_('Got error trying to delete target %(target)s,'
-                ' assuming it is already gone: %(exc)s'),
-                {'target': target_name, 'exc': exc})
+                       ' assuming it is already gone: %(exc)s'),
+                     {'target': target_name, 'exc': exc})
index 8b2fc3be12796f06a205a61ea087044ab47c9c4f..e26d6424d24f2c70c42c48ec77951c2d53bbb323 100644 (file)
@@ -29,8 +29,8 @@ LOG = logging.getLogger(__name__)
 
 volume_opts = [
     cfg.StrOpt('nfs_shares_config',
-                default=None,
-                help='File with the list of available nfs shares'),
+               default=None,
+               help='File with the list of available nfs shares'),
     cfg.StrOpt('nfs_mount_point_base',
                default='$state_path/mnt',
                help='Base dir where nfs expected to be mounted'),
@@ -41,8 +41,7 @@ volume_opts = [
                 default=True,
                 help=('Create volumes as sparsed files which take no space.'
                       'If set to False volume is created as regular file.'
-                      'In such case volume creation takes a lot of time.'))
-]
+                      'In such case volume creation takes a lot of time.'))]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(volume_opts)
@@ -226,7 +225,7 @@ class NfsDriver(driver.VolumeDriver):
 
         if volume_size_for * 1024 * 1024 * 1024 > greatest_size:
             raise exception.NfsNoSuitableShareFound(
-                    volume_size=volume_size_for)
+                volume_size=volume_size_for)
         return greatest_share
 
     def _get_mount_point_for_share(self, nfs_share):
index 234e183907879fc0397ef6a180c29549d7614558..e98a1de36b666aa1f724d422d668445c1b1e8a05 100644 (file)
@@ -42,8 +42,7 @@ rbd_opts = [
     cfg.StrOpt('volume_tmp_dir',
                default=None,
                help='where to store temporary image files if the volume '
-                    'driver does not write them directly to the volume'),
-    ]
+                    'driver does not write them directly to the volume'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(rbd_opts)
@@ -165,8 +164,7 @@ class RBDDriver(driver.VolumeDriver):
                 'auth_enabled': FLAGS.rbd_secret_uuid is not None,
                 'auth_username': FLAGS.rbd_user,
                 'secret_type': 'ceph',
-                'secret_uuid': FLAGS.rbd_secret_uuid,
-                }
+                'secret_uuid': FLAGS.rbd_secret_uuid, }
         }
 
     def terminate_connection(self, volume, connector, **kwargs):
index dc2076ef770097e1ced63f539cd238f5672c717e..a2a98c7585d4f6efaad000b08538aceed047a517 100644 (file)
@@ -117,8 +117,10 @@ class SanISCSIDriver(ISCSIDriver):
                 while attempts > 0:
                     attempts -= 1
                     try:
-                        return utils.ssh_execute(ssh, command,
-                                               check_exit_code=check_exit_code)
+                        return utils.ssh_execute(
+                            ssh,
+                            command,
+                            check_exit_code=check_exit_code)
                     except Exception as e:
                         LOG.error(e)
                         greenthread.sleep(random.randint(20, 500) / 100.0)
index 9da84fae5077ab01fbb5730848a121588da0b001..315ef3c12d987014961e22237d92220641a8eba1 100644 (file)
@@ -24,8 +24,7 @@ LOG = logging.getLogger(__name__)
 solaris_opts = [
     cfg.StrOpt('san_zfs_volume_base',
                default='rpool/',
-               help='The ZFS path under which to create zvols for volumes.'),
-    ]
+               help='The ZFS path under which to create zvols for volumes.'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(solaris_opts)
index 03b908da7be49fb7ff02129fa81914a3bd1c9a81..54325424d7b86814c610c778c0f6b2659d244c70 100644 (file)
@@ -56,8 +56,8 @@ sf_opts = [
                help='Password for SF Cluster Admin'),
 
     cfg.BoolOpt('sf_allow_tenant_qos',
-               default=True,
-               help='Allow tenants to specify QOS on create'), ]
+                default=True,
+                help='Allow tenants to specify QOS on create'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(sf_opts)
index 3a7f1373bac28eb029f809045bb6068224eb482c..b838fa66af09c639439bfd1bad5219548d794688 100644 (file)
@@ -64,24 +64,23 @@ storwize_svc_opts = [
                default='0',
                help='Storage system threshold for volume capacity warnings'),
     cfg.BoolOpt('storwize_svc_vol_autoexpand',
-               default=True,
-               help='Storage system autoexpand parameter for volumes '
-                    '(True/False)'),
+                default=True,
+                help='Storage system autoexpand parameter for volumes '
+                     '(True/False)'),
     cfg.StrOpt('storwize_svc_vol_grainsize',
                default='256',
                help='Storage system grain size parameter for volumes '
                     '(32/64/128/256)'),
     cfg.BoolOpt('storwize_svc_vol_compression',
-               default=False,
-               help='Storage system compression option for volumes'),
+                default=False,
+                help='Storage system compression option for volumes'),
     cfg.BoolOpt('storwize_svc_vol_easytier',
-               default=True,
-               help='Enable Easy Tier for volumes'),
+                default=True,
+                help='Enable Easy Tier for volumes'),
     cfg.StrOpt('storwize_svc_flashcopy_timeout',
                default='120',
                help='Maximum number of seconds to wait for FlashCopy to be '
-                    'prepared. Maximum value is 600 seconds (10 minutes).'),
-]
+                    'prepared. Maximum value is 600 seconds (10 minutes).'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(storwize_svc_opts)
@@ -103,11 +102,11 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
             if ((not ch.isalnum()) and (ch != ' ') and (ch != '.')
                     and (ch != '-') and (ch != '_')):
                 invalid_ch_in_host = invalid_ch_in_host + ch
-        self._string_host_name_filter = string.maketrans(invalid_ch_in_host,
-                                                '-' * len(invalid_ch_in_host))
+        self._string_host_name_filter = string.maketrans(
+            invalid_ch_in_host, '-' * len(invalid_ch_in_host))
 
         self._unicode_host_name_filter = dict((ord(unicode(char)), u'-')
-                                         for char in invalid_ch_in_host)
+                                              for char in invalid_ch_in_host)
 
     def _get_hdr_dic(self, header, row, delim):
         """Return CLI row data as a dictionary indexed by names from header.
@@ -119,11 +118,13 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
 
         attributes = header.split(delim)
         values = row.split(delim)
-        self._driver_assert(len(values) == len(attributes),
+        self._driver_assert(
+            len(values) ==
+            len(attributes),
             _('_get_hdr_dic: attribute headers and values do not match.\n '
               'Headers: %(header)s\n Values: %(row)s')
-                % {'header': str(header),
-                   'row': str(row)})
+            % {'header': str(header),
+               'row': str(row)})
         dic = {}
         for attribute, value in map(None, attributes, values):
             dic[attribute] = value
@@ -143,36 +144,39 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         # Validate that the pool exists
         ssh_cmd = 'lsmdiskgrp -delim ! -nohdr'
         out, err = self._run_ssh(ssh_cmd)
-        self._driver_assert(len(out) > 0,
+        self._driver_assert(
+            len(out) > 0,
             _('check_for_setup_error: failed with unexpected CLI output.\n '
               'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
-                % {'cmd': ssh_cmd,
-                   'out': str(out),
-                   'err': str(err)})
+            % {'cmd': ssh_cmd,
+               'out': str(out),
+               'err': str(err)})
         search_text = '!%s!' % FLAGS.storwize_svc_volpool_name
         if search_text not in out:
             raise exception.InvalidInput(
-                    reason=(_('pool %s doesn\'t exist')
+                reason=(_('pool %s doesn\'t exist')
                         % FLAGS.storwize_svc_volpool_name))
 
         storage_nodes = {}
         # Get the iSCSI names of the Storwize/SVC nodes
         ssh_cmd = 'svcinfo lsnode -delim !'
         out, err = self._run_ssh(ssh_cmd)
-        self._driver_assert(len(out) > 0,
+        self._driver_assert(
+            len(out) > 0,
             _('check_for_setup_error: failed with unexpected CLI output.\n '
               'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
-                % {'cmd': ssh_cmd,
-                   'out': str(out),
-                   'err': str(err)})
+            % {'cmd': ssh_cmd,
+               'out': str(out),
+               'err': str(err)})
 
         nodes = out.strip().split('\n')
-        self._driver_assert(len(nodes) > 0,
+        self._driver_assert(
+            len(nodes) > 0,
             _('check_for_setup_error: failed with unexpected CLI output.\n '
               'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
-                % {'cmd': ssh_cmd,
-                   'out': str(out),
-                   'err': str(err)})
+            % {'cmd': ssh_cmd,
+               'out': str(out),
+               'err': str(err)})
         header = nodes.pop(0)
         for node_line in nodes:
             try:
@@ -204,31 +208,33 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                       'Details: %(msg)s\n'
                       'Command: %(cmd)s\n '
                       'stdout: %(out)s\n stderr: %(err)s')
-                            % {'msg': str(e),
-                               'cmd': ssh_cmd,
-                               'out': str(out),
-                               'err': str(err)})
+                    % {'msg': str(e),
+                       'cmd': ssh_cmd,
+                       'out': str(out),
+                       'err': str(err)})
                 raise exception.VolumeBackendAPIException(
-                        data=exception_message)
+                    data=exception_message)
 
         # Get the iSCSI IP addresses of the Storwize/SVC nodes
         ssh_cmd = 'lsportip -delim !'
         out, err = self._run_ssh(ssh_cmd)
-        self._driver_assert(len(out) > 0,
+        self._driver_assert(
+            len(out) > 0,
             _('check_for_setup_error: failed with unexpected CLI output.\n '
               'Command: %(cmd)s\n '
               'stdout: %(out)s\n stderr: %(err)s')
-                            % {'cmd': ssh_cmd,
-                               'out': str(out),
-                               'err': str(err)})
+            % {'cmd': ssh_cmd,
+               'out': str(out),
+               'err': str(err)})
 
         portips = out.strip().split('\n')
-        self._driver_assert(len(portips) > 0,
+        self._driver_assert(
+            len(portips) > 0,
             _('check_for_setup_error: failed with unexpected CLI output.\n '
               'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
-                % {'cmd': ssh_cmd,
-                   'out': str(out),
-                   'err': str(err)})
+            % {'cmd': ssh_cmd,
+               'out': str(out),
+               'err': str(err)})
         header = portips.pop(0)
         for portip_line in portips:
             try:
@@ -254,12 +260,12 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                       'Details: %(msg)s\n'
                       'Command: %(cmd)s\n '
                       'stdout: %(out)s\n stderr: %(err)s')
-                            % {'msg': str(e),
-                               'cmd': ssh_cmd,
-                               'out': str(out),
-                               'err': str(err)})
+                    % {'msg': str(e),
+                       'cmd': ssh_cmd,
+                       'out': str(out),
+                       'err': str(err)})
                 raise exception.VolumeBackendAPIException(
-                        data=exception_message)
+                    data=exception_message)
 
             if port_node_id in storage_nodes:
                 node = storage_nodes[port_node_id]
@@ -269,13 +275,13 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                     node['ipv6'].append(port_ipv6)
             else:
                 raise exception.VolumeBackendAPIException(
-                        data=_('check_for_setup_error: '
-                               'fail to storage configuration: unknown '
-                               'storage node %(node_id)s from CLI output.\n '
-                                'stdout: %(out)s\n stderr: %(err)s\n')
-                              % {'node_id': port_node_id,
-                                 'out': str(out),
-                                 'err': str(err)})
+                    data=_('check_for_setup_error: '
+                           'fail to storage configuration: unknown '
+                           'storage node %(node_id)s from CLI output.\n '
+                           'stdout: %(out)s\n stderr: %(err)s\n') % {
+                               'node_id': port_node_id,
+                               'out': str(out),
+                               'err': str(err)})
 
         iscsi_ipv4_conf = []
         iscsi_ipv6_conf = []
@@ -291,18 +297,19 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                                         'node_id': node['id']})
             if (len(node['ipv4']) == 0) and (len(node['ipv6']) == 0):
                 raise exception.VolumeBackendAPIException(
-                        data=_('check_for_setup_error: '
-                                'fail to storage configuration: storage '
-                                'node %s has no IP addresses configured')
-                                % node['id'])
+                    data=_('check_for_setup_error: '
+                           'fail to storage configuration: storage '
+                           'node %s has no IP addresses configured') %
+                    node['id'])
 
         # Make sure we have at least one IPv4 address with a iSCSI name
         # TODO(ronenkat) need to expand this to support IPv6
-        self._driver_assert(len(iscsi_ipv4_conf) > 0,
+        self._driver_assert(
+            len(iscsi_ipv4_conf) > 0,
             _('could not obtain IP address and iSCSI name from the storage. '
               'Please verify that the storage is configured for iSCSI.\n '
               'Storage nodes: %(nodes)s\n portips: %(portips)s')
-                % {'nodes': nodes, 'portips': portips})
+            % {'nodes': nodes, 'portips': portips})
 
         self.iscsi_ipv4_conf = iscsi_ipv4_conf
         self.iscsi_ipv6_conf = iscsi_ipv6_conf
@@ -322,8 +329,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                           'storwize_svc_volpool_name']
         for flag in required_flags:
             if not getattr(FLAGS, flag, None):
-                raise exception.InvalidInput(
-                        reason=_('%s is not set') % flag)
+                raise exception.InvalidInput(reason=_('%s is not set') % flag)
 
         # Ensure that either password or keyfile were set
         if not (FLAGS.san_password or FLAGS.san_private_key):
@@ -358,16 +364,16 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         # Check that flashcopy_timeout is numeric and 32/64/128/256
         flashcopy_timeout = FLAGS.storwize_svc_flashcopy_timeout
         if not (flashcopy_timeout.isdigit() and int(flashcopy_timeout) > 0 and
-            int(flashcopy_timeout) <= 600):
+                int(flashcopy_timeout) <= 600):
             raise exception.InvalidInput(
                 reason=_('Illegal value %s specified for '
                          'storwize_svc_flashcopy_timeout: '
                          'valid values are between 0 and 600')
-                                         % flashcopy_timeout)
+                % flashcopy_timeout)
 
         # Check that rsize is set
         volume_compression = FLAGS.storwize_svc_vol_compression
-        if ((volume_compression == True) and
+        if ((volume_compression is True) and
                 (FLAGS.storwize_svc_vol_rsize == '-1')):
             raise exception.InvalidInput(
                 reason=_('If compression is set to True, rsize must '
@@ -393,12 +399,12 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
 
         size = int(volume['size'])
 
-        if FLAGS.storwize_svc_vol_autoexpand == True:
+        if FLAGS.storwize_svc_vol_autoexpand is True:
             autoex = '-autoexpand'
         else:
             autoex = ''
 
-        if FLAGS.storwize_svc_vol_easytier == True:
+        if FLAGS.storwize_svc_vol_easytier is True:
             easytier = '-easytier on'
         else:
             easytier = '-easytier off'
@@ -407,39 +413,43 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         if FLAGS.storwize_svc_vol_rsize.strip() == '-1':
             ssh_cmd_se_opt = ''
         else:
-            ssh_cmd_se_opt = ('-rsize %(rsize)s %(autoex)s -warning %(warn)s' %
-                        {'rsize': FLAGS.storwize_svc_vol_rsize,
-                         'autoex': autoex,
-                         'warn': FLAGS.storwize_svc_vol_warning})
+            ssh_cmd_se_opt = (
+                '-rsize %(rsize)s %(autoex)s -warning %(warn)s' %
+                {'rsize': FLAGS.storwize_svc_vol_rsize,
+                 'autoex': autoex,
+                 'warn': FLAGS.storwize_svc_vol_warning})
             if FLAGS.storwize_svc_vol_compression:
                 ssh_cmd_se_opt = ssh_cmd_se_opt + ' -compressed'
             else:
-                ssh_cmd_se_opt = ssh_cmd_se_opt + (' -grainsize %(grain)s' %
-                       {'grain': FLAGS.storwize_svc_vol_grainsize})
+                ssh_cmd_se_opt = ssh_cmd_se_opt + (
+                    ' -grainsize %(grain)s' %
+                    {'grain': FLAGS.storwize_svc_vol_grainsize})
 
         ssh_cmd = ('mkvdisk -name %(name)s -mdiskgrp %(mdiskgrp)s '
-                    '-iogrp 0 -size %(size)s -unit '
-                    '%(unit)s %(easytier)s %(ssh_cmd_se_opt)s'
-                    % {'name': name,
-                    'mdiskgrp': FLAGS.storwize_svc_volpool_name,
-                    'size': size, 'unit': units, 'easytier': easytier,
-                    'ssh_cmd_se_opt': ssh_cmd_se_opt})
+                   '-iogrp 0 -size %(size)s -unit '
+                   '%(unit)s %(easytier)s %(ssh_cmd_se_opt)s'
+                   % {'name': name,
+                   'mdiskgrp': FLAGS.storwize_svc_volpool_name,
+                   'size': size, 'unit': units, 'easytier': easytier,
+                   'ssh_cmd_se_opt': ssh_cmd_se_opt})
         out, err = self._run_ssh(ssh_cmd)
-        self._driver_assert(len(out.strip()) > 0,
+        self._driver_assert(
+            len(out.strip()) > 0,
             _('create volume %(name)s - did not find '
               'success message in CLI output.\n '
               'stdout: %(out)s\n stderr: %(err)s')
-                % {'name': name, 'out': str(out), 'err': str(err)})
+            % {'name': name, 'out': str(out), 'err': str(err)})
 
         # Ensure that the output is as expected
         match_obj = re.search('Virtual Disk, id \[([0-9]+)\], '
-                                'successfully created', out)
+                              'successfully created', out)
         # Make sure we got a "successfully created" message with vdisk id
-        self._driver_assert(match_obj is not None,
+        self._driver_assert(
+            match_obj is not None,
             _('create volume %(name)s - did not find '
               'success message in CLI output.\n '
               'stdout: %(out)s\n stderr: %(err)s')
-                % {'name': name, 'out': str(out), 'err': str(err)})
+            % {'name': name, 'out': str(out), 'err': str(err)})
 
         LOG.debug(_('leave: create_volume: volume %(name)s ') % {'name': name})
 
@@ -460,16 +470,18 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         volume_defined = self._is_volume_defined(name)
         # Try to delete volume only if found on the storage
         if volume_defined:
-            out, err = self._run_ssh('rmvdisk %(force)s %(name)s'
-                                    % {'force': force_flag,
-                                       'name': name})
+            out, err = self._run_ssh(
+                'rmvdisk %(force)s %(name)s'
+                % {'force': force_flag,
+                   'name': name})
             # No output should be returned from rmvdisk
-            self._driver_assert(len(out.strip()) == 0,
+            self._driver_assert(
+                len(out.strip()) == 0,
                 _('delete volume %(name)s - non empty output from CLI.\n '
                   'stdout: %(out)s\n stderr: %(err)s')
-                                % {'name': name,
-                                   'out': str(out),
-                                   'err': str(err)})
+                % {'name': name,
+                   'out': str(out),
+                   'err': str(err)})
         else:
             # Log that volume does not exist
             LOG.info(_('warning: tried to delete volume %(name)s but '
@@ -486,7 +498,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         volume_defined = self._is_volume_defined(volume['name'])
         if not volume_defined:
             LOG.error(_('ensure_export: volume %s not found on storage')
-                       % volume['name'])
+                      % volume['name'])
 
     def create_export(self, context, volume):
         model_update = None
@@ -508,7 +520,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         """
         LOG.debug(_('enter: initialize_connection: volume %(vol)s with '
                     'connector %(conn)s') % {'vol': str(volume),
-                    'conn': str(connector)})
+                                             'conn': str(connector)})
 
         initiator_name = connector['initiator']
         volume_name = volume['name']
@@ -518,9 +530,10 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         if host_name is None:
             # Host does not exist - add a new host to Storwize/SVC
             host_name = self._create_new_host('host%s' % initiator_name,
-                                               initiator_name)
+                                              initiator_name)
             # Verify that create_new_host succeeded
-            self._driver_assert(host_name is not None,
+            self._driver_assert(
+                host_name is not None,
                 _('_create_new_host failed to return the host name.'))
 
         lun_id = self._map_vol_to_host(volume_name, host_name)
@@ -530,7 +543,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         # TODO(ronenkat): Add support for IPv6
         volume_attributes = self._get_volume_attributes(volume_name)
         if (volume_attributes is not None and
-            'preferred_node_id' in volume_attributes):
+                'preferred_node_id' in volume_attributes):
             preferred_node = volume_attributes['preferred_node_id']
             preferred_node_entry = None
             for node in self.iscsi_ipv4_conf:
@@ -542,7 +555,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                 LOG.error(_('initialize_connection: did not find preferred '
                             'node %(node)s for volume %(vol)s in iSCSI '
                             'configuration') % {'node': preferred_node,
-                            'vol': volume_name})
+                                                'vol': volume_name})
         else:
             # Get 1st node
             preferred_node_entry = self.iscsi_ipv4_conf[0]
@@ -555,8 +568,8 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         properties['target_discovered'] = False
         # We take the first IP address for now. Ideally, OpenStack will
         # support multipath for improved performance.
-        properties['target_portal'] = ('%s:%s' %
-                (preferred_node_entry['ip'][0], '3260'))
+        properties['target_portal'] = (
+            '%s:%s' % (preferred_node_entry['ip'][0], '3260'))
         properties['target_iqn'] = preferred_node_entry['iscsi_name']
         properties['target_lun'] = lun_id
         properties['volume_id'] = volume['id']
@@ -564,8 +577,8 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         LOG.debug(_('leave: initialize_connection:\n volume: %(vol)s\n '
                     'connector %(conn)s\n properties: %(prop)s')
                   % {'vol': str(volume),
-                    'conn': str(connector),
-                    'prop': str(properties)})
+                     'conn': str(connector),
+                     'prop': str(properties)})
 
         return {'driver_volume_type': 'iscsi', 'data': properties, }
 
@@ -581,14 +594,15 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         """
         LOG.debug(_('enter: terminate_connection: volume %(vol)s with '
                     'connector %(conn)s') % {'vol': str(volume),
-                    'conn': str(connector)})
+                                             'conn': str(connector)})
 
         vol_name = volume['name']
         initiator_name = connector['initiator']
         host_name = self._get_host_from_iscsiname(initiator_name)
         # Verify that _get_host_from_iscsiname returned the host.
         # This should always succeed as we terminate an existing connection.
-        self._driver_assert(host_name is not None,
+        self._driver_assert(
+            host_name is not None,
             _('_get_host_from_iscsiname failed to return the host name '
               'for iscsi name %s') % initiator_name)
 
@@ -599,19 +613,20 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                                      % (host_name, vol_name))
             # Verify CLI behaviour - no output is returned from
             # rmvdiskhostmap
-            self._driver_assert(len(out.strip()) == 0,
+            self._driver_assert(
+                len(out.strip()) == 0,
                 _('delete mapping of volume %(vol)s to host %(host)s '
                   '- non empty output from CLI.\n '
                   'stdout: %(out)s\n stderr: %(err)s')
-                                 % {'vol': vol_name,
-                                    'host': host_name,
-                                    'out': str(out),
-                                    'err': str(err)})
+                % {'vol': vol_name,
+                   'host': host_name,
+                   'out': str(out),
+                   'err': str(err)})
             del mapping_data[vol_name]
         else:
             LOG.error(_('terminate_connection: no mapping of volume '
                         '%(vol)s to host %(host)s found') %
-                        {'vol': vol_name, 'host': host_name})
+                      {'vol': vol_name, 'host': host_name})
 
         # If this host has no more mappings, delete it
         if not mapping_data:
@@ -619,7 +634,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
 
         LOG.debug(_('leave: terminate_connection: volume %(vol)s with '
                     'connector %(conn)s') % {'vol': str(volume),
-                    'conn': str(connector)})
+                                             'conn': str(connector)})
 
     def _flashcopy_cleanup(self, fc_map_id, source, target):
         """Clean up a failed FlashCopy operation."""
@@ -632,11 +647,11 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                         'mapping %(fc_map_id)% '
                         'from %(source)s to %(target)s.\n'
                         'stdout: %(out)s\n stderr: %(err)s')
-                        % {'fc_map_id': fc_map_id,
-                           'source': source,
-                           'target': target,
-                           'out': e.stdout,
-                           'err': e.stderr})
+                      % {'fc_map_id': fc_map_id,
+                         'source': source,
+                         'target': target,
+                         'out': e.stdout,
+                         'err': e.stderr})
 
     def _run_flashcopy(self, source, target):
         """Create a FlashCopy mapping from the source to the target."""
@@ -644,52 +659,56 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         LOG.debug(
             _('enter: _run_flashcopy: execute FlashCopy from source '
               '%(source)s to target %(target)s') % {'source': source,
-              'target': target})
+                                                    'target': target})
 
         fc_map_cli_cmd = ('mkfcmap -source %s -target %s -autodelete '
-                            '-cleanrate 0' % (source, target))
+                          '-cleanrate 0' % (source, target))
         out, err = self._run_ssh(fc_map_cli_cmd)
-        self._driver_assert(len(out.strip()) > 0,
+        self._driver_assert(
+            len(out.strip()) > 0,
             _('create FC mapping from %(source)s to %(target)s - '
               'did not find success message in CLI output.\n'
               ' stdout: %(out)s\n stderr: %(err)s\n')
-                            % {'source': source,
-                                'target': target,
-                                'out': str(out),
-                                'err': str(err)})
+            % {'source': source,
+               'target': target,
+               'out': str(out),
+               'err': str(err)})
 
         # Ensure that the output is as expected
         match_obj = re.search('FlashCopy Mapping, id \[([0-9]+)\], '
-                                'successfully created', out)
+                              'successfully created', out)
         # Make sure we got a "successfully created" message with vdisk id
-        self._driver_assert(match_obj is not None,
+        self._driver_assert(
+            match_obj is not None,
             _('create FC mapping from %(source)s to %(target)s - '
               'did not find success message in CLI output.\n'
               ' stdout: %(out)s\n stderr: %(err)s\n')
-                            % {'source': source,
-                               'target': target,
-                               'out': str(out),
-                               'err': str(err)})
+            % {'source': source,
+               'target': target,
+               'out': str(out),
+               'err': str(err)})
 
         try:
             fc_map_id = match_obj.group(1)
-            self._driver_assert(fc_map_id is not None,
+            self._driver_assert(
+                fc_map_id is not None,
                 _('create FC mapping from %(source)s to %(target)s - '
                   'did not find mapping id in CLI output.\n'
                   ' stdout: %(out)s\n stderr: %(err)s\n')
-                            % {'source': source,
-                               'target': target,
-                               'out': str(out),
-                               'err': str(err)})
+                % {'source': source,
+                   'target': target,
+                   'out': str(out),
+                   'err': str(err)})
         except IndexError:
-            self._driver_assert(False,
+            self._driver_assert(
+                False,
                 _('create FC mapping from %(source)s to %(target)s - '
                   'did not find mapping id in CLI output.\n'
                   ' stdout: %(out)s\n stderr: %(err)s\n')
-                            % {'source': source,
-                               'target': target,
-                               'out': str(out),
-                               'err': str(err)})
+                % {'source': source,
+                   'target': target,
+                   'out': str(out),
+                   'err': str(err)})
         try:
             out, err = self._run_ssh('prestartfcmap %s' % fc_map_id)
         except exception.ProcessExecutionError as e:
@@ -697,20 +716,20 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                 LOG.error(_('_run_flashcopy: fail to prepare FlashCopy '
                             'from %(source)s to %(target)s.\n'
                             'stdout: %(out)s\n stderr: %(err)s')
-                            % {'source': source,
-                               'target': target,
-                               'out': e.stdout,
-                               'err': e.stderr})
+                          % {'source': source,
+                             'target': target,
+                             'out': e.stdout,
+                             'err': e.stderr})
                 self._flashcopy_cleanup(fc_map_id, source, target)
 
         mapping_ready = False
         wait_time = 5
         # Allow waiting of up to timeout (set as parameter)
         max_retries = (int(FLAGS.storwize_svc_flashcopy_timeout)
-                        / wait_time) + 1
+                       / wait_time) + 1
         for try_number in range(1, max_retries):
             mapping_attributes = self._get_flashcopy_mapping_attributes(
-                                                            fc_map_id)
+                fc_map_id)
             if (mapping_attributes is None or
                     'status' not in mapping_attributes):
                 break
@@ -725,22 +744,22 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                                  % {'status': mapping_attributes['status'],
                                     'id': fc_map_id,
                                     'attr': mapping_attributes})
-                raise exception.VolumeBackendAPIException(
-                        data=exception_msg)
+                raise exception.VolumeBackendAPIException(data=exception_msg)
             # Need to wait for mapping to be prepared, wait a few seconds
             time.sleep(wait_time)
 
         if not mapping_ready:
             exception_msg = (_('mapping %(id)s prepare failed to complete '
                                'within the alloted %(to)s seconds timeout. '
-                               'Terminating') % {'id': fc_map_id,
-                               'to': FLAGS.storwize_svc_flashcopy_timeout})
+                               'Terminating')
+                             % {'id': fc_map_id,
+                                'to': FLAGS.storwize_svc_flashcopy_timeout})
             LOG.error(_('_run_flashcopy: fail to start FlashCopy '
                         'from %(source)s to %(target)s with '
                         'exception %(ex)s')
-                        % {'source': source,
-                           'target': target,
-                           'ex': exception_msg})
+                      % {'source': source,
+                         'target': target,
+                         'ex': exception_msg})
             self._flashcopy_cleanup(fc_map_id, source, target)
             raise exception.InvalidSnapshot(
                 reason=_('_run_flashcopy: %s') % exception_msg)
@@ -752,15 +771,16 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                 LOG.error(_('_run_flashcopy: fail to start FlashCopy '
                             'from %(source)s to %(target)s.\n'
                             'stdout: %(out)s\n stderr: %(err)s')
-                            % {'source': source,
-                               'target': target,
-                               'out': e.stdout,
-                               'err': e.stderr})
+                          % {'source': source,
+                             'target': target,
+                             'out': e.stdout,
+                             'err': e.stderr})
                 self._flashcopy_cleanup(fc_map_id, source, target)
 
         LOG.debug(_('leave: _run_flashcopy: FlashCopy started from '
-                    '%(source)s to %(target)s') % {'source': source,
-                    'target': target})
+                    '%(source)s to %(target)s')
+                  % {'source': source,
+                     'target': target})
 
     def create_volume_from_snapshot(self, volume, snapshot):
         """Create a new snapshot from volume."""
@@ -769,8 +789,9 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         tgt_volume = volume['name']
 
         LOG.debug(_('enter: create_volume_from_snapshot: snapshot %(tgt)s '
-                    'from volume %(src)s') % {'tgt': tgt_volume,
-                    'src': source_volume})
+                    'from volume %(src)s')
+                  % {'tgt': tgt_volume,
+                     'src': source_volume})
 
         src_volume_attributes = self._get_volume_attributes(source_volume)
         if src_volume_attributes is None:
@@ -778,13 +799,15 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                                'does not exist') % source_volume)
             LOG.error(exception_msg)
             raise exception.SnapshotNotFound(exception_msg,
-                                           volume_id=source_volume)
-
-        self._driver_assert('capacity' in src_volume_attributes,
-                _('create_volume_from_snapshot: cannot get source '
-                  'volume %(src)s capacity from volume attributes '
-                  '%(attr)s') % {'src': source_volume,
-                                 'attr': src_volume_attributes})
+                                             volume_id=source_volume)
+
+        self._driver_assert(
+            'capacity' in src_volume_attributes,
+            _('create_volume_from_snapshot: cannot get source '
+              'volume %(src)s capacity from volume attributes '
+              '%(attr)s')
+            % {'src': source_volume,
+               'attr': src_volume_attributes})
         src_volume_size = src_volume_attributes['capacity']
 
         tgt_volume_attributes = self._get_volume_attributes(tgt_volume)
@@ -822,8 +845,9 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         tgt_volume_created = False
 
         LOG.debug(_('enter: create_snapshot: snapshot %(tgt)s from '
-                    'volume %(src)s') % {'tgt': tgt_volume,
-                    'src': src_volume})
+                    'volume %(src)s')
+                  % {'tgt': tgt_volume,
+                     'src': src_volume})
 
         src_volume_attributes = self._get_volume_attributes(src_volume)
         if src_volume_attributes is None:
@@ -834,11 +858,13 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
             raise exception.VolumeNotFound(exception_msg,
                                            volume_id=src_volume)
 
-        self._driver_assert('capacity' in src_volume_attributes,
-                _('create_volume_from_snapshot: cannot get source '
-                  'volume %(src)s capacity from volume attributes '
-                  '%(attr)s') % {'src': src_volume,
-                                 'attr': src_volume_attributes})
+        self._driver_assert(
+            'capacity' in src_volume_attributes,
+            _('create_volume_from_snapshot: cannot get source '
+              'volume %(src)s capacity from volume attributes '
+              '%(attr)s')
+            % {'src': src_volume,
+               'attr': src_volume_attributes})
 
         source_volume_size = src_volume_attributes['capacity']
 
@@ -853,21 +879,23 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
             tgt_volume_created = True
         else:
             # Yes, target exists, verify exact same size as source
-            self._driver_assert('capacity' in tgt_volume_attributes,
-                    _('create_volume_from_snapshot: cannot get source '
-                      'volume %(src)s capacity from volume attributes '
-                      '%(attr)s') % {'src': tgt_volume,
-                                     'attr': tgt_volume_attributes})
+            self._driver_assert(
+                'capacity' in tgt_volume_attributes,
+                _('create_volume_from_snapshot: cannot get source '
+                  'volume %(src)s capacity from volume attributes '
+                  '%(attr)s')
+                % {'src': tgt_volume,
+                   'attr': tgt_volume_attributes})
             target_volume_size = tgt_volume_attributes['capacity']
             if target_volume_size != source_volume_size:
                 exception_msg = (
                     _('create_snapshot: source %(src)s and target '
                       'volume %(tgt)s have different capacities '
-                      '(source:%(ssize)s target:%(tsize)s)') %
-                        {'src': src_volume,
-                         'tgt': tgt_volume,
-                         'ssize': source_volume_size,
-                         'tsize': target_volume_size})
+                      '(source:%(ssize)s target:%(tsize)s)')
+                    % {'src': src_volume,
+                       'tgt': tgt_volume,
+                       'ssize': source_volume_size,
+                       'tsize': target_volume_size})
                 LOG.error(exception_msg)
                 raise exception.InvalidSnapshot(reason=exception_msg)
 
@@ -906,7 +934,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         """
 
         LOG.debug(_('enter: _get_host_from_iscsiname: iSCSI initiator %s')
-                   % iscsi_name)
+                  % iscsi_name)
 
         # Get list of host in the storage
         ssh_cmd = 'lshost -delim !'
@@ -915,12 +943,13 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         if (len(out.strip()) == 0):
             return None
 
-        err_msg = _('_get_host_from_iscsiname: '
-              'failed with unexpected CLI output.\n'
-              ' command: %(cmd)s\n stdout: %(out)s\n '
-              'stderr: %(err)s') % {'cmd': ssh_cmd,
-                                    'out': str(out),
-                                    'err': str(err)}
+        err_msg = _(
+            '_get_host_from_iscsiname: '
+            'failed with unexpected CLI output.\n'
+            ' command: %(cmd)s\n stdout: %(out)s\n '
+            'stderr: %(err)s') % {'cmd': ssh_cmd,
+                                  'out': str(out),
+                                  'err': str(err)}
         host_lines = out.strip().split('\n')
         self._driver_assert(len(host_lines) > 0, err_msg)
         header = host_lines.pop(0).split('!')
@@ -934,13 +963,14 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         for host in hosts:
             ssh_cmd = 'lshost -delim ! %s' % host
             out, err = self._run_ssh(ssh_cmd)
-            self._driver_assert(len(out) > 0,
-                    _('_get_host_from_iscsiname: '
-                      'Unexpected response from CLI output. '
-                      'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
-                        % {'cmd': ssh_cmd,
-                           'out': str(out),
-                           'err': str(err)})
+            self._driver_assert(
+                len(out) > 0,
+                _('_get_host_from_iscsiname: '
+                  'Unexpected response from CLI output. '
+                  'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
+                % {'cmd': ssh_cmd,
+                   'out': str(out),
+                   'err': str(err)})
             for attrib_line in out.split('\n'):
                 # If '!' not found, return the string and two empty strings
                 attrib_name, foo, attrib_value = attrib_line.partition('!')
@@ -952,7 +982,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                 break
 
         LOG.debug(_('leave: _get_host_from_iscsiname: iSCSI initiator %s')
-                   % iscsi_name)
+                  % iscsi_name)
 
         return hostname
 
@@ -966,7 +996,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
 
         LOG.debug(_('enter: _create_new_host: host %(name)s with iSCSI '
                     'initiator %(init)s') % {'name': host_name,
-                    'init': initiator_name})
+                                             'init': initiator_name})
 
         if isinstance(host_name, unicode):
             host_name = host_name.translate(self._unicode_host_name_filter)
@@ -984,19 +1014,20 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         host_name = '%s_%s' % (host_name, random.randint(10000, 99999))
         out, err = self._run_ssh('mkhost -name "%s" -iscsiname "%s"'
                                  % (host_name, initiator_name))
-        self._driver_assert(len(out.strip()) > 0 and
-                            'successfully created' in out,
-                _('create host %(name)s with iSCSI initiator %(init)s - '
-                  'did not find success message in CLI output.\n '
-                  'stdout: %(out)s\n stderr: %(err)s\n')
-                  % {'name': host_name,
-                     'init': initiator_name,
-                     'out': str(out),
-                     'err': str(err)})
+        self._driver_assert(
+            len(out.strip()) > 0 and
+            'successfully created' in out,
+            _('create host %(name)s with iSCSI initiator %(init)s - '
+              'did not find success message in CLI output.\n '
+              'stdout: %(out)s\n stderr: %(err)s\n')
+            % {'name': host_name,
+               'init': initiator_name,
+               'out': str(out),
+               'err': str(err)})
 
         LOG.debug(_('leave: _create_new_host: host %(host)s with iSCSI '
                     'initiator %(init)s') % {'host': host_name,
-                    'init': initiator_name})
+                                             'init': initiator_name})
 
         return host_name
 
@@ -1021,8 +1052,8 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         LOG.debug(_('enter: _is_volume_defined: volume %s ') % volume_name)
         volume_attributes = self._get_volume_attributes(volume_name)
         LOG.debug(_('leave: _is_volume_defined: volume %(vol)s with %(str)s ')
-                   % {'vol': volume_name,
-                   'str': volume_attributes is not None})
+                  % {'vol': volume_name,
+                     'str': volume_attributes is not None})
         if volume_attributes is None:
             return False
         else:
@@ -1037,35 +1068,37 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         # We expect zero or one line if host does not exist,
         # two lines if it does exist, otherwise error
         out, err = self._run_ssh('lshost -filtervalue name=%s -delim !'
-                                % host_name)
+                                 % host_name)
         if len(out.strip()) == 0:
             return False
 
         lines = out.strip().split('\n')
-        self._driver_assert(len(lines) <= 2,
-                _('_is_host_defined: Unexpected response from CLI output.\n '
-                  'stdout: %(out)s\n stderr: %(err)s\n')
-                % {'out': str(out),
-                   'err': str(err)})
+        self._driver_assert(
+            len(lines) <= 2,
+            _('_is_host_defined: Unexpected response from CLI output.\n '
+              'stdout: %(out)s\n stderr: %(err)s\n')
+            % {'out': str(out),
+               'err': str(err)})
 
         if len(lines) == 2:
             host_info = self._get_hdr_dic(lines[0], lines[1], '!')
             host_name_from_storage = host_info['name']
             # Make sure we got the data for the right host
-            self._driver_assert(host_name_from_storage == host_name,
-                    _('Data received for host %(host1)s instead of host '
-                      '%(host2)s.\n '
-                      'stdout: %(out)s\n stderr: %(err)s\n')
-                      % {'host1': host_name_from_storage,
-                         'host2': host_name,
-                         'out': str(out),
-                         'err': str(err)})
+            self._driver_assert(
+                host_name_from_storage == host_name,
+                _('Data received for host %(host1)s instead of host '
+                  '%(host2)s.\n '
+                  'stdout: %(out)s\n stderr: %(err)s\n')
+                % {'host1': host_name_from_storage,
+                   'host2': host_name,
+                   'out': str(out),
+                   'err': str(err)})
         else:  # 0 or 1 lines
             host_name_from_storage = None
 
         LOG.debug(_('leave: _is_host_defined: host %(host)s with %(str)s ') % {
-                   'host': host_name,
-                   'str': host_name_from_storage is not None})
+            'host': host_name,
+            'str': host_name_from_storage is not None})
 
         if host_name_from_storage is None:
             return False
@@ -1092,8 +1125,9 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         """Create a mapping between a volume to a host."""
 
         LOG.debug(_('enter: _map_vol_to_host: volume %(vol)s to '
-                    'host %(host)s') % {'vol': volume_name,
-                    'host': host_name})
+                    'host %(host)s')
+                  % {'vol': volume_name,
+                     'host': host_name})
 
         # Check if this volume is already mapped to this host
         mapping_data = self._get_hostvdisk_mappings(host_name)
@@ -1118,22 +1152,25 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         # Volume is not mapped to host, create a new LUN
         if not mapped_flag:
             out, err = self._run_ssh('mkvdiskhostmap -host %s -scsi %s %s'
-                                    % (host_name, result_lun, volume_name))
-            self._driver_assert(len(out.strip()) > 0 and
-                                'successfully created' in out,
-                    _('_map_vol_to_host: mapping host %(host)s to '
-                      'volume %(vol)s with LUN '
-                      '%(lun)s - did not find success message in CLI output. '
-                      'stdout: %(out)s\n stderr: %(err)s\n')
-                    % {'host': host_name,
-                      'vol': volume_name,
-                      'lun': result_lun,
-                      'out': str(out),
-                      'err': str(err)})
+                                     % (host_name, result_lun, volume_name))
+            self._driver_assert(
+                len(out.strip()) > 0 and
+                'successfully created' in out,
+                _('_map_vol_to_host: mapping host %(host)s to '
+                  'volume %(vol)s with LUN '
+                  '%(lun)s - did not find success message in CLI output. '
+                  'stdout: %(out)s\n stderr: %(err)s\n')
+                % {'host': host_name,
+                   'vol': volume_name,
+                   'lun': result_lun,
+                   'out': str(out),
+                   'err': str(err)})
 
         LOG.debug(_('leave: _map_vol_to_host: LUN %(lun)s, volume %(vol)s, '
-                    'host %(host)s') % {'lun': result_lun, 'vol': volume_name,
-                    'host': host_name})
+                    'host %(host)s')
+                  % {'lun': result_lun,
+                     'vol': volume_name,
+                     'host': host_name})
 
         return result_lun
 
@@ -1148,30 +1185,32 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         """
 
         LOG.debug(_('enter: _get_flashcopy_mapping_attributes: mapping %s')
-                   % fc_map_id)
+                  % fc_map_id)
         # Get the lunid to be used
 
         fc_ls_map_cmd = ('lsfcmap -filtervalue id=%s -delim !' % fc_map_id)
         out, err = self._run_ssh(fc_ls_map_cmd)
-        self._driver_assert(len(out) > 0,
+        self._driver_assert(
+            len(out) > 0,
             _('_get_flashcopy_mapping_attributes: '
               'Unexpected response from CLI output. '
               'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
-                % {'cmd': fc_ls_map_cmd,
-                   'out': str(out),
-                   'err': str(err)})
+            % {'cmd': fc_ls_map_cmd,
+               'out': str(out),
+               'err': str(err)})
 
         # Get list of FlashCopy mappings
         # We expect zero or one line if mapping does not exist,
         # two lines if it does exist, otherwise error
         lines = out.strip().split('\n')
-        self._driver_assert(len(lines) <= 2,
-                 _('_get_flashcopy_mapping_attributes: '
-                   'Unexpected response from CLI output. '
-                   'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
-                            % {'cmd': fc_ls_map_cmd,
-                               'out': str(out),
-                               'err': str(err)})
+        self._driver_assert(
+            len(lines) <= 2,
+            _('_get_flashcopy_mapping_attributes: '
+              'Unexpected response from CLI output. '
+              'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
+            % {'cmd': fc_ls_map_cmd,
+               'out': str(out),
+               'err': str(err)})
 
         if len(lines) == 2:
             attributes = self._get_hdr_dic(lines[0], lines[1], '!')
@@ -1179,9 +1218,9 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
             attributes = None
 
         LOG.debug(_('leave: _get_flashcopy_mapping_attributes: mapping '
-                    '%(id)s, attributes %(attr)s') %
-                   {'id': fc_map_id,
-                    'attr': attributes})
+                    '%(id)s, attributes %(attr)s')
+                  % {'id': fc_map_id,
+                     'attr': attributes})
 
         return attributes
 
@@ -1193,7 +1232,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         """
 
         LOG.debug(_('enter: _get_volume_attributes: volume %s')
-                   % volume_name)
+                  % volume_name)
         # Get the lunid to be used
 
         try:
@@ -1208,13 +1247,14 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                        'err': e.stderr})
             return None
 
-        self._driver_assert(len(out) > 0,
-                    ('_get_volume_attributes: '
-                      'Unexpected response from CLI output. '
-                      'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
-                        % {'cmd': ssh_cmd,
-                           'out': str(out),
-                           'err': str(err)})
+        self._driver_assert(
+            len(out) > 0,
+            ('_get_volume_attributes: '
+             'Unexpected response from CLI output. '
+             'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
+            % {'cmd': ssh_cmd,
+               'out': str(out),
+               'err': str(err)})
         attributes = {}
         for attrib_line in out.split('\n'):
             # If '!' not found, return the string and two empty strings
index a87a3c64a5932635f4ff758f66732cc580a08792..037737440fccdfd2c555597ce3a234111dd751df 100644 (file)
@@ -40,8 +40,8 @@ FLAGS = flags.FLAGS
 
 windows_opts = [
     cfg.StrOpt('windows_iscsi_lun_path',
-              default='C:\iSCSIVirtualDisks',
-              help='Path to store VHD backed volumes'),
+               default='C:\iSCSIVirtualDisks',
+               help='Path to store VHD backed volumes'),
 ]
 
 FLAGS.register_opts(windows_opts)
@@ -147,8 +147,8 @@ class WindowsDriver(driver.ISCSIDriver):
         wt_disk = self._conn_wmi.WT_Disk(Description=vol_name)[0]
         wt_disk.Delete_()
         vhdfiles = self._conn_cimv2.query(
-        "Select * from CIM_DataFile where Name = '" +
-        self._get_vhd_path(volume) + "'")
+            "Select * from CIM_DataFile where Name = '" +
+            self._get_vhd_path(volume) + "'")
         if len(vhdfiles) > 0:
             vhdfiles[0].Delete()
 
@@ -203,7 +203,7 @@ class WindowsDriver(driver.ISCSIDriver):
                 raise
             else:
                 LOG.info(_('Ignored target creation error "%s"'
-                                             ' while ensuring export'), exc)
+                           ' while ensuring export'), exc)
         #Get the disk to add
         vol_name = volume['name']
         wt_disk = self._conn_wmi.WT_Disk(Description=vol_name)[0]
index eb5fd4f06a42ef4e302c0c0ac69814dfd04d1103..2258a4b092b5208b4b34873bb3e8776485688cae 100644 (file)
@@ -68,8 +68,8 @@ class SrOperations(OperationsBase):
         self.call_xenapi('SR.scan', sr_ref)
 
     def create(self, host_ref, device_config, name_label, name_description,
-                  sr_type, physical_size=None, content_type=None,
-                  shared=False, sm_config=None):
+               sr_type, physical_size=None, content_type=None,
+               shared=False, sm_config=None):
         return self.call_xenapi(
             'SR.create',
             host_ref,
@@ -84,7 +84,7 @@ class SrOperations(OperationsBase):
         )
 
     def introduce(self, sr_uuid, name_label, name_description, sr_type,
-                     content_type=None, shared=False, sm_config=None):
+                  content_type=None, shared=False, sm_config=None):
         return self.call_xenapi(
             'SR.introduce',
             sr_uuid,
@@ -123,17 +123,14 @@ class VdiOperations(OperationsBase):
         return self.get_record(vdi_ref)['uuid']
 
     def create(self, sr_ref, size, vdi_type,
-                   sharable=False, read_only=False, other_config=None):
+               sharable=False, read_only=False, other_config=None):
         return self.call_xenapi('VDI.create',
-            dict(
-                SR=sr_ref,
-                virtual_size=str(size),
-                type=vdi_type,
-                sharable=sharable,
-                read_only=read_only,
-                other_config=other_config or dict()
-            )
-        )
+                                dict(SR=sr_ref,
+                                     virtual_size=str(size),
+                                     type=vdi_type,
+                                     sharable=sharable,
+                                     read_only=read_only,
+                                     other_config=other_config or dict()))
 
     def destroy(self, vdi_ref):
         self.call_xenapi('VDI.destroy', vdi_ref)
@@ -184,11 +181,9 @@ class CompoundOperations(object):
         self.SR.forget(sr_ref)
 
     def create_new_vdi(self, sr_ref, size_in_gigabytes):
-        return self.VDI.create(
-                sr_ref,
-                to_bytes(size_in_gigabytes),
-                'User',
-        )
+        return self.VDI.create(sr_ref,
+                               to_bytes(size_in_gigabytes),
+                               'User', )
 
 
 def to_bytes(size_in_gigs):
index 1b16812bd35062eae159aa4b6ac2840d6b1756c8..c98fa28eb38c4c9ec2404c95c25c62a3ae2a2e22 100644 (file)
@@ -51,14 +51,12 @@ class XIVDriver(san.SanISCSIDriver):
 
         proxy = importutils.import_class(FLAGS.xiv_proxy)
 
-        self.xiv_proxy = proxy({
-                "xiv_user": FLAGS.san_login,
-                "xiv_pass": FLAGS.san_password,
-                "xiv_address": FLAGS.san_ip,
-                "xiv_vol_pool": FLAGS.san_clustername
-                },
-                LOG,
-                exception)
+        self.xiv_proxy = proxy({"xiv_user": FLAGS.san_login,
+                                "xiv_pass": FLAGS.san_password,
+                                "xiv_address": FLAGS.san_ip,
+                                "xiv_vol_pool": FLAGS.san_clustername},
+                               LOG,
+                               exception)
         san.SanISCSIDriver.__init__(self, *args, **kwargs)
 
     def do_setup(self, context):
@@ -94,23 +92,18 @@ class XIVDriver(san.SanISCSIDriver):
     def initialize_connection(self, volume, connector):
         """Map the created volume."""
 
-        return self.xiv_proxy.initialize_connection(
-                volume,
-                connector)
+        return self.xiv_proxy.initialize_connection(volume, connector)
 
     def terminate_connection(self, volume, connector, **kwargs):
         """Terminate a connection to a volume."""
 
-        return self.xiv_proxy.terminate_connection(
-                volume,
-                connector)
+        return self.xiv_proxy.terminate_connection(volume, connector)
 
     def create_volume_from_snapshot(self, volume, snapshot):
         """Create a volume from a snapshot."""
 
-        return self.xiv_proxy.create_volume_from_snapshot(
-                volume,
-                snapshot)
+        return self.xiv_proxy.create_volume_from_snapshot(volume,
+                                                          snapshot)
 
     def create_snapshot(self, snapshot):
         """Create a snapshot."""
index 94be4211e57327c006b0f2722e12f26776b84c94..f4ec814b9f8d77ef7940f66a0e285a171db60af1 100644 (file)
@@ -44,8 +44,8 @@ zadara_opts = [
                default=None,
                help='Zadara VPSA port number'),
     cfg.BoolOpt('zadara_vpsa_use_ssl',
-               default=False,
-               help='Use SSL connection'),
+                default=False,
+                help='Use SSL connection'),
     cfg.StrOpt('zadara_user',
                default=None,
                help='User name for the VPSA'),
@@ -73,12 +73,11 @@ zadara_opts = [
                default='OS_%s',
                help='Default template for VPSA volume names'),
     cfg.BoolOpt('zadara_vpsa_auto_detach_on_delete',
-               default=True,
-               help="Automatically detach from servers on volume delete"),
+                default=True,
+                help="Automatically detach from servers on volume delete"),
     cfg.BoolOpt('zadara_vpsa_allow_nonexistent_delete',
-               default=True,
-               help="Don't halt on deletion of non-existing volumes"),
-    ]
+                default=True,
+                help="Don't halt on deletion of non-existing volumes"), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(zadara_opts)
@@ -139,12 +138,12 @@ class ZadaraVPSAConnection(object):
             # Attach/Detach operations
             'attach_volume': ('POST',
                               '/api/servers/%s/volumes.xml'
-                                    % kwargs.get('vpsa_srv'),
+                              % kwargs.get('vpsa_srv'),
                               {'volume_name[]': kwargs.get('vpsa_vol'),
                                'force': 'NO'}),
             'detach_volume': ('POST',
                               '/api/volumes/%s/detach.xml'
-                                    % kwargs.get('vpsa_vol'),
+                              % kwargs.get('vpsa_vol'),
                               {'server_name[]': kwargs.get('vpsa_srv'),
                                'force': 'NO'}),
 
@@ -160,9 +159,8 @@ class ZadaraVPSAConnection(object):
                              {}),
             'list_vol_attachments': ('GET',
                                      '/api/volumes/%s/servers.xml'
-                                            % kwargs.get('vpsa_vol'),
-                                     {}),
-            }
+                                     % kwargs.get('vpsa_vol'),
+                                     {}), }
 
         if cmd not in vpsa_commands.keys():
             raise exception.UnknownCmd(cmd=cmd)
@@ -203,12 +201,12 @@ class ZadaraVPSAConnection(object):
         user = xml_tree.find('user')
         if user is None:
             raise exception.MalformedResponse(cmd=cmd,
-                                        reason='no "user" field')
+                                              reason='no "user" field')
 
         access_key = user.findtext('access-key')
         if access_key is None:
             raise exception.MalformedResponse(cmd=cmd,
-                                        reason='no "access-key" field')
+                                              reason='no "access-key" field')
 
         self.access_key = access_key
 
@@ -219,7 +217,7 @@ class ZadaraVPSAConnection(object):
 
         (method, url, body) = self._generate_vpsa_cmd(cmd, **kwargs)
         LOG.debug(_('Sending %(method)s to %(url)s. Body "%(body)s"')
-                        % locals())
+                  % locals())
 
         if self.use_ssl:
             connection = httplib.HTTPSConnection(self.host, self.port)
@@ -308,7 +306,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
         """Return details of VPSA's active controller."""
         xml_tree = self.vpsa.send_cmd('list_controllers')
         ctrl = self._xml_parse_helper(xml_tree, 'vcontrollers',
-                                        ('state', 'active'))
+                                      ('state', 'active'))
         if ctrl is not None:
             return dict(target=ctrl.findtext('target'),
                         ip=ctrl.findtext('iscsi-ip'),
@@ -335,9 +333,10 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
 
     def create_volume(self, volume):
         """Create volume."""
-        self.vpsa.send_cmd('create_volume',
-                    name=FLAGS.zadara_vol_name_template % volume['name'],
-                    size=volume['size'])
+        self.vpsa.send_cmd(
+            'create_volume',
+            name=FLAGS.zadara_vol_name_template % volume['name'],
+            size=volume['size'])
 
     def delete_volume(self, volume):
         """
@@ -350,7 +349,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
         vpsa_vol = self._get_vpsa_volume_name(name)
         if not vpsa_vol:
             msg = _('Volume %(name)s could not be found. '
-                'It might be already deleted') % locals()
+                    'It might be already deleted') % locals()
             LOG.warning(msg)
             if FLAGS.zadara_vpsa_allow_nonexistent_delete:
                 return
@@ -361,7 +360,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
         xml_tree = self.vpsa.send_cmd('list_vol_attachments',
                                       vpsa_vol=vpsa_vol)
         servers = self._xml_parse_helper(xml_tree, 'servers',
-                                ('iqn', None), first=False)
+                                         ('iqn', None), first=False)
         if servers:
             if not FLAGS.zadara_vpsa_auto_detach_on_delete:
                 raise exception.VolumeAttached(volume_id=name)
@@ -370,7 +369,8 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
                 vpsa_srv = server.findtext('name')
                 if vpsa_srv:
                     self.vpsa.send_cmd('detach_volume',
-                                vpsa_srv=vpsa_srv, vpsa_vol=vpsa_vol)
+                                       vpsa_srv=vpsa_srv,
+                                       vpsa_vol=vpsa_vol)
 
         # Delete volume
         self.vpsa.send_cmd('delete_volume', vpsa_vol=vpsa_vol)
@@ -417,7 +417,8 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
 
         # Attach volume to server
         self.vpsa.send_cmd('attach_volume',
-                            vpsa_srv=vpsa_srv, vpsa_vol=vpsa_vol)
+                           vpsa_srv=vpsa_srv,
+                           vpsa_vol=vpsa_vol)
 
         # Get connection info
         xml_tree = self.vpsa.send_cmd('list_vol_attachments',
@@ -429,8 +430,9 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
         target = server.findtext('target')
         lun = server.findtext('lun')
         if target is None or lun is None:
-            raise exception.ZadaraInvalidAttachmentInfo(name=name,
-                            reason='target=%s, lun=%s' % (target, lun))
+            raise exception.ZadaraInvalidAttachmentInfo(
+                name=name,
+                reason='target=%s, lun=%s' % (target, lun))
 
         properties = {}
         properties['target_discovered'] = False
@@ -465,7 +467,8 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
 
         # Detach volume from server
         self.vpsa.send_cmd('detach_volume',
-                            vpsa_srv=vpsa_srv, vpsa_vol=vpsa_vol)
+                           vpsa_srv=vpsa_srv,
+                           vpsa_vol=vpsa_vol)
 
     def create_volume_from_snapshot(self, volume, snapshot):
         raise NotImplementedError()
index 670b0e7edb3e6a7b3a1d1a917b9ffbf76d3a917e..a5ed8c7243f1fe906b5e3dbb8714bc92c6f4fe7c 100644 (file)
@@ -30,17 +30,16 @@ from cinder import utils
 
 LOG = logging.getLogger(__name__)
 
-iscsi_helper_opt = [
-        cfg.StrOpt('iscsi_helper',
-                    default='tgtadm',
-                    help='iscsi target user-land tool to use'),
-        cfg.StrOpt('volumes_dir',
-                   default='$state_path/volumes',
-                   help='Volume configuration file storage directory'),
-        cfg.StrOpt('iet_conf',
-                   default='/etc/iet/ietd.conf',
-                   help='IET configuration file'),
-]
+iscsi_helper_opt = [cfg.StrOpt('iscsi_helper',
+                               default='tgtadm',
+                               help='iscsi target user-land tool to use'),
+                    cfg.StrOpt('volumes_dir',
+                               default='$state_path/volumes',
+                               help='Volume configuration file storage '
+                                    'directory'),
+                    cfg.StrOpt('iet_conf',
+                               default='/etc/iet/ietd.conf',
+                               help='IET configuration file'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(iscsi_helper_opt)
@@ -232,7 +231,7 @@ class IetAdm(TargetAdmin):
             except exception.ProcessExecutionError, e:
                 vol_id = name.split(':')[1]
                 LOG.error(_("Failed to create iscsi target for volume "
-                        "id:%(vol_id)s.") % locals())
+                            "id:%(vol_id)s.") % locals())
                 raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
         return tid
 
index 5f7e4f817a9fe36cd73a831983aa3a57c831c606..666c147c15f0e08076e6f33eef80e66af4552211 100644 (file)
@@ -62,8 +62,7 @@ volume_manager_opts = [
                help='Driver to use for volume creation'),
     cfg.BoolOpt('volume_force_update_capabilities',
                 default=False,
-                help='if True will force update capabilities on each check'),
-    ]
+                help='if True will force update capabilities on each check'), ]
 
 FLAGS = flags.FLAGS
 FLAGS.register_opts(volume_manager_opts)
@@ -71,34 +70,33 @@ FLAGS.register_opts(volume_manager_opts)
 MAPPING = {
     'cinder.volume.driver.RBDDriver': 'cinder.volume.drivers.rbd.RBDDriver',
     'cinder.volume.driver.SheepdogDriver':
-                            'cinder.volume.drivers.sheepdog.SheepdogDriver',
+    'cinder.volume.drivers.sheepdog.SheepdogDriver',
     'cinder.volume.nexenta.volume.NexentaDriver':
-                        'cinder.volume.drivers.nexenta.volume.NexentaDriver',
+    'cinder.volume.drivers.nexenta.volume.NexentaDriver',
     'cinder.volume.san.SanISCSIDriver':
-                        'cinder.volume.drivers.san.san.SanISCSIDriver',
+    'cinder.volume.drivers.san.san.SanISCSIDriver',
     'cinder.volume.san.SolarisISCSIDriver':
-                        'cinder.volume.drivers.san.solaris.SolarisISCSIDriver',
+    'cinder.volume.drivers.san.solaris.SolarisISCSIDriver',
     'cinder.volume.san.HpSanISCSIDriver':
-                    'cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver',
+    'cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver',
     'cinder.volume.netapp.NetAppISCSIDriver':
-                        'cinder.volume.drivers.netapp.NetAppISCSIDriver',
+    'cinder.volume.drivers.netapp.NetAppISCSIDriver',
     'cinder.volume.netapp.NetAppCmodeISCSIDriver':
-                    'cinder.volume.drivers.netapp.NetAppCmodeISCSIDriver',
+    'cinder.volume.drivers.netapp.NetAppCmodeISCSIDriver',
     'cinder.volume.netapp_nfs.NetAppNFSDriver':
-                    'cinder.volume.drivers.netapp_nfs.NetAppNFSDriver',
+    'cinder.volume.drivers.netapp_nfs.NetAppNFSDriver',
     'cinder.volume.nfs.NfsDriver':
-                        'cinder.volume.drivers.nfs.NfsDriver',
+    'cinder.volume.drivers.nfs.NfsDriver',
     'cinder.volume.solidfire.SolidFire':
-                        'cinder.volume.drivers.solidfire.SolidFire',
+    'cinder.volume.drivers.solidfire.SolidFire',
     'cinder.volume.storwize_svc.StorwizeSVCDriver':
-                        'cinder.volume.drivers.storwize_svc.StorwizeSVCDriver',
+    'cinder.volume.drivers.storwize_svc.StorwizeSVCDriver',
     'cinder.volume.windows.WindowsDriver':
-                        'cinder.volume.drivers.windows.WindowsDriver',
+    'cinder.volume.drivers.windows.WindowsDriver',
     'cinder.volume.xiv.XIVDriver':
-                            'cinder.volume.drivers.xiv.XIVDriver',
+    'cinder.volume.drivers.xiv.XIVDriver',
     'cinder.volume.zadara.ZadaraVPSAISCSIDriver':
-                        'cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver'
-    }
+    'cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver'}
 
 
 class VolumeManager(manager.SchedulerDependentManager):
@@ -117,7 +115,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         else:
             self.driver = importutils.import_object(volume_driver)
         super(VolumeManager, self).__init__(service_name='volume',
-                                                    *args, **kwargs)
+                                            *args, **kwargs)
         # NOTE(vish): Implementation specific db handling is done
         #             by the driver.
         self.driver.db = self.db
@@ -165,7 +163,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             vol_name = volume_ref['name']
             vol_size = volume_ref['size']
             LOG.debug(_("volume %(vol_name)s: creating lv of"
-                    " size %(vol_size)sG") % locals())
+                        " size %(vol_size)sG") % locals())
             if snapshot_id is None and image_id is None:
                 model_update = self.driver.create_volume(volume_ref)
             elif snapshot_id is not None:
@@ -176,8 +174,8 @@ class VolumeManager(manager.SchedulerDependentManager):
             else:
                 # create the volume from an image
                 image_service, image_id = \
-                               glance.get_remote_image_service(context,
-                                                               image_id)
+                    glance.get_remote_image_service(context,
+                                                    image_id)
                 image_location = image_service.get_location(context, image_id)
                 image_meta = image_service.show(context, image_id)
                 cloned = self.driver.clone_image(volume_ref, image_location)
@@ -201,7 +199,8 @@ class VolumeManager(manager.SchedulerDependentManager):
         if snapshot_id:
             # Copy any Glance metadata from the original volume
             self.db.volume_glance_metadata_copy_to_volume(context,
-                                               volume_ref['id'], snapshot_id)
+                                                          volume_ref['id'],
+                                                          snapshot_id)
 
         now = timeutils.utcnow()
         self.db.volume_update(context,
@@ -242,7 +241,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             raise exception.VolumeAttached(volume_id=volume_id)
         if volume_ref['host'] != self.host:
             raise exception.InvalidVolume(
-                    reason=_("Volume is not local to this node"))
+                reason=_("Volume is not local to this node"))
 
         self._notify_about_volume_usage(context, volume_ref, "delete.start")
         self._reset_stats()
@@ -306,7 +305,8 @@ class VolumeManager(manager.SchedulerDependentManager):
                                 snapshot_ref['id'], {'status': 'available',
                                                      'progress': '100%'})
         self.db.volume_glance_metadata_copy_to_snapshot(context,
-                                                snapshot_ref['id'], volume_id)
+                                                        snapshot_ref['id'],
+                                                        volume_id)
         LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
         return snapshot_id
 
@@ -502,8 +502,11 @@ class VolumeManager(manager.SchedulerDependentManager):
         LOG.info(_("Notification {%s} received"), event)
         self._reset_stats()
 
-    def _notify_about_volume_usage(self, context, volume, event_suffix,
-                                     extra_usage_info=None):
+    def _notify_about_volume_usage(self,
+                                   context,
+                                   volume,
+                                   event_suffix,
+                                   extra_usage_info=None):
         volume_utils.notify_about_volume_usage(
-                context, volume, event_suffix,
-                extra_usage_info=extra_usage_info, host=self.host)
+            context, volume, event_suffix,
+            extra_usage_info=extra_usage_info, host=self.host)
index 640875c04049de373a9d42146a09ff736665dbee..47a64392aca503841f5563f4b2016e355e297280 100644 (file)
@@ -38,60 +38,75 @@ class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
     BASE_RPC_API_VERSION = '1.0'
 
     def __init__(self):
-        super(VolumeAPI, self).__init__(topic=FLAGS.volume_topic,
+        super(VolumeAPI, self).__init__(
+            topic=FLAGS.volume_topic,
             default_version=self.BASE_RPC_API_VERSION)
 
     def create_volume(self, ctxt, volume, host,
                       snapshot_id=None, image_id=None):
-        self.cast(ctxt, self.make_msg('create_volume',
-                                      volume_id=volume['id'],
-                                      snapshot_id=snapshot_id,
-                                      image_id=image_id),
-                topic=rpc.queue_get_for(ctxt, self.topic, host))
+        self.cast(ctxt,
+                  self.make_msg('create_volume',
+                                volume_id=volume['id'],
+                                snapshot_id=snapshot_id,
+                                image_id=image_id),
+                  topic=rpc.queue_get_for(ctxt,
+                                          self.topic,
+                                          host))
 
     def delete_volume(self, ctxt, volume):
-        self.cast(ctxt, self.make_msg('delete_volume',
-                                      volume_id=volume['id']),
-                topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+        self.cast(ctxt,
+                  self.make_msg('delete_volume',
+                                volume_id=volume['id']),
+                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
 
     def create_snapshot(self, ctxt, volume, snapshot):
         self.cast(ctxt, self.make_msg('create_snapshot',
                                       volume_id=volume['id'],
                                       snapshot_id=snapshot['id']),
-                topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
 
     def delete_snapshot(self, ctxt, snapshot, host):
         self.cast(ctxt, self.make_msg('delete_snapshot',
                                       snapshot_id=snapshot['id']),
-                topic=rpc.queue_get_for(ctxt, self.topic, host))
+                  topic=rpc.queue_get_for(ctxt, self.topic, host))
 
     def attach_volume(self, ctxt, volume, instance_uuid, mountpoint):
         return self.call(ctxt, self.make_msg('attach_volume',
-                                      volume_id=volume['id'],
-                                      instance_uuid=instance_uuid,
-                                      mountpoint=mountpoint),
-                    topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+                                             volume_id=volume['id'],
+                                             instance_uuid=instance_uuid,
+                                             mountpoint=mountpoint),
+                         topic=rpc.queue_get_for(ctxt,
+                                                 self.topic,
+                                                 volume['host']))
 
     def detach_volume(self, ctxt, volume):
         return self.call(ctxt, self.make_msg('detach_volume',
-                                      volume_id=volume['id']),
-                    topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+                                             volume_id=volume['id']),
+                         topic=rpc.queue_get_for(ctxt,
+                                                 self.topic,
+                                                 volume['host']))
 
     def copy_volume_to_image(self, ctxt, volume, image_id):
         self.cast(ctxt, self.make_msg('copy_volume_to_image',
                                       volume_id=volume['id'],
                                       image_id=image_id),
-                topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+                  topic=rpc.queue_get_for(ctxt,
+                                          self.topic,
+                                          volume['host']))
 
     def initialize_connection(self, ctxt, volume, connector):
         return self.call(ctxt, self.make_msg('initialize_connection',
-                                      volume_id=volume['id'],
-                                      connector=connector),
-                    topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+                                             volume_id=volume['id'],
+                                             connector=connector),
+                         topic=rpc.queue_get_for(ctxt,
+                                                 self.topic,
+                                                 volume['host']))
 
     def terminate_connection(self, ctxt, volume, connector, force=False):
         return self.call(ctxt, self.make_msg('terminate_connection',
-                                      volume_id=volume['id'],
-                                      connector=connector,
-                                      force=force),
-                    topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+                                             volume_id=volume['id'],
+                                             connector=connector,
+                                             force=force),
+                         topic=rpc.queue_get_for(ctxt,
+                                                 self.topic,
+                                                 volume['host']))
index ee73a0f6b882c56149f3a87f60a0367c57423f29..d4652e4afa29f73b47f4695bc6ebe71e55c1c190 100644 (file)
@@ -44,40 +44,38 @@ def notify_usage_exists(context, volume_ref, current_period=False):
     extra_usage_info = dict(audit_period_beginning=str(audit_start),
                             audit_period_ending=str(audit_end))
 
-    notify_about_volume_usage(
-            context, volume_ref, 'exists', extra_usage_info=extra_usage_info)
+    notify_about_volume_usage(context, volume_ref,
+                              'exists', extra_usage_info=extra_usage_info)
 
 
 def _usage_from_volume(context, volume_ref, **kw):
     def null_safe_str(s):
         return str(s) if s else ''
 
-    usage_info = dict(
-          tenant_id=volume_ref['project_id'],
-          user_id=volume_ref['user_id'],
-          volume_id=volume_ref['id'],
-          volume_type=volume_ref['volume_type_id'],
-          display_name=volume_ref['display_name'],
-          launched_at=null_safe_str(volume_ref['launched_at']),
-          created_at=null_safe_str(volume_ref['created_at']),
-          status=volume_ref['status'],
-          snapshot_id=volume_ref['snapshot_id'],
-          size=volume_ref['size'])
+    usage_info = dict(tenant_id=volume_ref['project_id'],
+                      user_id=volume_ref['user_id'],
+                      volume_id=volume_ref['id'],
+                      volume_type=volume_ref['volume_type_id'],
+                      display_name=volume_ref['display_name'],
+                      launched_at=null_safe_str(volume_ref['launched_at']),
+                      created_at=null_safe_str(volume_ref['created_at']),
+                      status=volume_ref['status'],
+                      snapshot_id=volume_ref['snapshot_id'],
+                      size=volume_ref['size'])
 
     usage_info.update(kw)
     return usage_info
 
 
 def notify_about_volume_usage(context, volume, event_suffix,
-                                extra_usage_info=None, host=None):
+                              extra_usage_info=None, host=None):
     if not host:
         host = FLAGS.host
 
     if not extra_usage_info:
         extra_usage_info = {}
 
-    usage_info = _usage_from_volume(
-            context, volume, **extra_usage_info)
+    usage_info = _usage_from_volume(context, volume, **extra_usage_info)
 
     notifier_api.notify(context, 'volume.%s' % host,
                         'volume.%s' % event_suffix,
index 222efcad94fab0b05732283dae5db8b1a25c089a..4a3aa1cbaef3f3d809a52cd14616bd43a41f5820 100644 (file)
@@ -45,7 +45,7 @@ class Server(object):
     default_pool_size = 1000
 
     def __init__(self, name, app, host=None, port=None, pool_size=None,
-                       protocol=eventlet.wsgi.HttpProtocol):
+                 protocol=eventlet.wsgi.HttpProtocol):
         """Initialize, but do not start, a WSGI server.
 
         :param name: Pretty name for logging.
@@ -89,7 +89,7 @@ class Server(object):
         """
         if backlog < 1:
             raise exception.InvalidInput(
-                    reason='The backlog must be more than 1')
+                reason='The backlog must be more than 1')
         self._socket = eventlet.listen((self.host, self.port), backlog=backlog)
         self._server = eventlet.spawn(self._start)
         (self.host, self.port) = self._socket.getsockname()
index 63d6189cb0cb14954bba0c7b3d00f8bae802bfef..14df6a69284a6066ec57d3b81e815d3454beb683 100755 (executable)
@@ -116,7 +116,7 @@ function run_pep8 {
   #
 
   # Until all these issues get fixed, ignore.
-  ignore='--ignore=N4,E12,E711,E712,E721,E502'
+  ignore='--ignore=N4,E125,E126,E711,E712'
   ${wrapper} python tools/hacking.py ${ignore} ${srcfiles}
 }
 
index 39db91feb5e5ba0a441e3e892938f7a508602ce1..55f69c5fa284e38c23592fe38b1fc92b3f98216e 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -23,35 +23,35 @@ from cinder import version
 
 requires = common_setup.parse_requirements()
 
-setuptools.setup(name='cinder',
-      version=version.canonical_version_string(),
-      description='block storage service',
-      author='OpenStack',
-      author_email='cinder@lists.launchpad.net',
-      url='http://www.openstack.org/',
-      classifiers=[
-          'Environment :: OpenStack',
-          'Intended Audience :: Information Technology',
-          'Intended Audience :: System Administrators',
-          'License :: OSI Approved :: Apache Software License',
-          'Operating System :: POSIX :: Linux',
-          'Programming Language :: Python',
-          'Programming Language :: Python :: 2',
-          'Programming Language :: Python :: 2.7',
-          ],
-      cmdclass=common_setup.get_cmdclass(),
-      packages=setuptools.find_packages(exclude=['bin', 'smoketests']),
-      install_requires=requires,
-      include_package_data=True,
-      test_suite='nose.collector',
-      setup_requires=['setuptools_git>=0.4'],
-      scripts=['bin/cinder-all',
-               'bin/cinder-api',
-               'bin/cinder-clear-rabbit-queues',
-               'bin/cinder-manage',
-               'bin/cinder-rootwrap',
-               'bin/cinder-scheduler',
-               'bin/cinder-volume',
-               'bin/cinder-volume-usage-audit',
-              ],
-        py_modules=[])
+setuptools.setup(
+    name='cinder',
+    version=version.canonical_version_string(),
+    description='block storage service',
+    author='OpenStack',
+    author_email='cinder@lists.launchpad.net',
+    url='http://www.openstack.org/',
+    classifiers=[
+        'Environment :: OpenStack',
+        'Intended Audience :: Information Technology',
+        'Intended Audience :: System Administrators',
+        'License :: OSI Approved :: Apache Software License',
+        'Operating System :: POSIX :: Linux',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.7',
+    ],
+    cmdclass=common_setup.get_cmdclass(),
+    packages=setuptools.find_packages(exclude=['bin', 'smoketests']),
+    install_requires=requires,
+    include_package_data=True,
+    test_suite='nose.collector',
+    setup_requires=['setuptools_git>=0.4'],
+    scripts=['bin/cinder-all',
+             'bin/cinder-api',
+             'bin/cinder-clear-rabbit-queues',
+             'bin/cinder-manage',
+             'bin/cinder-rootwrap',
+             'bin/cinder-scheduler',
+             'bin/cinder-volume',
+             'bin/cinder-volume-usage-audit'],
+    py_modules=[])
index 8d20189ed5b5a25fa54e41f2665e5278cc313413..680443abd6242f112ee62995f867047a4c0a2b8c 100755 (executable)
@@ -57,10 +57,10 @@ def import_normalize(line):
     # handle "from x import y as z" to "import x.y as z"
     split_line = line.split()
     if (line.startswith("from ") and "," not in line and
-           split_line[2] == "import" and split_line[3] != "*" and
-           split_line[1] != "__future__" and
-           (len(split_line) == 4 or
-           (len(split_line) == 6 and split_line[4] == "as"))):
+            split_line[2] == "import" and split_line[3] != "*" and
+            split_line[1] != "__future__" and
+            (len(split_line) == 4 or
+                (len(split_line) == 6 and split_line[4] == "as"))):
         return "import %s.%s" % (split_line[1], split_line[3])
     else:
         return line
@@ -114,9 +114,9 @@ def cinder_one_import_per_line(logical_line):
     """
     pos = logical_line.find(',')
     parts = logical_line.split()
-    if pos > -1 and (parts[0] == "import" or
-       parts[0] == "from" and parts[2] == "import") and \
-       not is_import_exception(parts[1]):
+    if (pos > -1 and (parts[0] == "import" or
+                      parts[0] == "from" and parts[2] == "import") and
+            not is_import_exception(parts[1])):
         yield pos, "CINDER N301: one import per line"
 
 _missingImport = set([])
@@ -144,8 +144,11 @@ def cinder_import_module_only(logical_line):
                 if parent:
                     if is_import_exception(parent):
                         return
-                    parent_mod = __import__(parent, globals(), locals(),
-                        [mod], -1)
+                    parent_mod = __import__(parent,
+                                            globals(),
+                                            locals(),
+                                            [mod],
+                                            -1)
                     valid = inspect.ismodule(getattr(parent_mod, mod))
                 else:
                     __import__(mod, globals(), locals(), [], -1)
@@ -154,12 +157,14 @@ def cinder_import_module_only(logical_line):
                     if added:
                         sys.path.pop()
                         added = False
-                        return logical_line.find(mod), ("CINDER N304: No "
-                            "relative  imports. '%s' is a relative import"
-                            % logical_line)
-                    return logical_line.find(mod), ("CINDER N302: import only "
-                        "modules. '%s' does not import a module"
-                        % logical_line)
+                        return (logical_line.find(mod),
+                                ("CINDER N304: No "
+                                 "relative  imports. '%s' is a relative import"
+                                % logical_line))
+                    return (logical_line.find(mod),
+                            ("CINDER N302: import only "
+                             "modules. '%s' does not import a module"
+                            % logical_line))
 
         except (ImportError, NameError) as exc:
             if not added:
@@ -171,7 +176,7 @@ def cinder_import_module_only(logical_line):
                 if name not in _missingImport:
                     if VERBOSE_MISSING_IMPORT:
                         print >> sys.stderr, ("ERROR: import '%s' failed: %s" %
-                            (name, exc))
+                                              (name, exc))
                     _missingImport.add(name)
                 added = False
                 sys.path.pop()
@@ -180,19 +185,20 @@ def cinder_import_module_only(logical_line):
         except AttributeError:
             # Invalid import
             return logical_line.find(mod), ("CINDER N303: Invalid import, "
-                "AttributeError raised")
+                                            "AttributeError raised")
 
     # convert "from x import y" to " import x.y"
     # convert "from x import y as z" to " import x.y"
     import_normalize(logical_line)
     split_line = logical_line.split()
 
-    if (logical_line.startswith("import ") and "," not in logical_line and
+    if (logical_line.startswith("import ") and
+            "," not in logical_line and
             (len(split_line) == 2 or
-            (len(split_line) == 4 and split_line[2] == "as"))):
+                (len(split_line) == 4 and split_line[2] == "as"))):
         mod = split_line[1]
         rval = importModuleCheck(mod)
-        if rval != None:
+        if rval is not None:
             yield rval
 
     # TODO(jogo) handle "from x import *"
@@ -210,12 +216,12 @@ def cinder_import_alphabetical(physical_line, line_number, lines):
     # handle import x
     # use .lower since capitalization shouldn't dictate order
     split_line = import_normalize(physical_line.strip()).lower().split()
-    split_previous = import_normalize(lines[line_number - 2]
-            ).strip().lower().split()
+    split_previous = import_normalize(
+        lines[line_number - 2]).strip().lower().split()
     # with or without "as y"
     length = [2, 4]
     if (len(split_line) in length and len(split_previous) in length and
-        split_line[0] == "import" and split_previous[0] == "import"):
+            split_line[0] == "import" and split_previous[0] == "import"):
         if split_line[1] < split_previous[1]:
             return (0,
                     "CINDER N306: imports not in alphabetical order (%s, %s)"
@@ -247,7 +253,7 @@ def cinder_docstring_one_line(physical_line):
     pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE])  # start
     end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE])  # end
     if (pos != -1 and end and len(physical_line) > pos + 4):
-        if (physical_line[-5] != '.'):
+        if (physical_line[-5] != '.' and physical_line):
             return pos, "CINDER N402: one line docstring needs a period"
 
 
@@ -266,13 +272,13 @@ def cinder_docstring_multiline_end(physical_line):
 
 
 FORMAT_RE = re.compile("%(?:"
-                            "%|"           # Ignore plain percents
-                            "(\(\w+\))?"   # mapping key
-                            "([#0 +-]?"    # flag
-                             "(?:\d+|\*)?"  # width
-                             "(?:\.\d+)?"   # precision
-                             "[hlL]?"       # length mod
-                             "\w))")        # type
+                       "%|"           # Ignore plain percents
+                       "(\(\w+\))?"   # mapping key
+                       "([#0 +-]?"    # flag
+                       "(?:\d+|\*)?"  # width
+                       "(?:\.\d+)?"   # precision
+                       "[hlL]?"       # length mod
+                       "\w))")        # type
 
 
 class LocalizationError(Exception):
@@ -309,30 +315,36 @@ def check_l18n():
                     break
 
             if not format_string:
-                raise LocalizationError(start,
+                raise LocalizationError(
+                    start,
                     "CINDER N701: Empty localization string")
             if token_type != tokenize.OP:
-                raise LocalizationError(start,
+                raise LocalizationError(
+                    start,
                     "CINDER N701: Invalid localization call")
             if text != ")":
                 if text == "%":
-                    raise LocalizationError(start,
+                    raise LocalizationError(
+                        start,
                         "CINDER N702: Formatting operation should be outside"
                         " of localization method call")
                 elif text == "+":
-                    raise LocalizationError(start,
+                    raise LocalizationError(
+                        start,
                         "CINDER N702: Use bare string concatenation instead"
                         " of +")
                 else:
-                    raise LocalizationError(start,
+                    raise LocalizationError(
+                        start,
                         "CINDER N702: Argument to _ must be just a string")
 
             format_specs = FORMAT_RE.findall(format_string)
             positional_specs = [(key, spec) for key, spec in format_specs
-                                            if not key and spec]
+                                if not key and spec]
             # not spec means %%, key means %(smth)s
             if len(positional_specs) > 1:
-                raise LocalizationError(start,
+                raise LocalizationError(
+                    start,
                     "CINDER N703: Multiple positional placeholders")
 
 
@@ -390,4 +402,4 @@ if __name__ == "__main__":
     finally:
         if len(_missingImport) > 0:
             print >> sys.stderr, ("%i imports missing in this test environment"
-                    % len(_missingImport))
+                                  % len(_missingImport))
index bd324efd535c90014c1d1f89f4c59eb6a8db043c..b6962f01e3264656f243be92817a2c8fbe0211cb 100644 (file)
@@ -143,8 +143,8 @@ class Fedora(Distro):
 
 
 def get_distro():
-    if os.path.exists('/etc/fedora-release') or \
-       os.path.exists('/etc/redhat-release'):
+    if (os.path.exists('/etc/fedora-release') or
+            os.path.exists('/etc/redhat-release')):
         return Fedora()
     else:
         return Distro()
@@ -197,8 +197,9 @@ def install_dependencies(venv=VENV):
     pip_install('-r', TEST_REQUIRES)
 
     # Tell the virtual env how to "import cinder"
-    pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
-                        "cinder.pth")
+    pthfile = os.path.join(venv, "lib",
+                           PY_VERSION, "site-packages",
+                           "cinder.pth")
     f = open(pthfile, 'w')
     f.write("%s\n" % ROOT)
 
@@ -233,8 +234,9 @@ def parse_args():
     """Parses command-line arguments."""
     parser = optparse.OptionParser()
     parser.add_option("-n", "--no-site-packages", dest="no_site_packages",
-        default=False, action="store_true",
-        help="Do not inherit packages from global Python install")
+                      default=False, action="store_true",
+                      help="Do not inherit packages from "
+                           "global Python install")
     return parser.parse_args()
 
 
diff --git a/tox.ini b/tox.ini
index 47a65cce73cb9dc94812860e78b9043ae67511b0..e766f16357fd5937392acab4078bdeafb8491968 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -15,9 +15,9 @@ commands = /bin/bash run_tests.sh -N -P {posargs}
 [testenv:pep8]
 deps = pep8==1.3.3
 commands =
-  python tools/hacking.py --ignore=N4,E12,E711,E712,E721,E502 --repeat --show-source \
+  python tools/hacking.py --ignore=N4,E125,E126,E711,E712 --repeat --show-source \
       --exclude=.venv,.tox,dist,doc,openstack,*egg  .
-  python tools/hacking.py --ignore=N4,E12,E711,E712,E721,E502 --repeat --show-source \
+  python tools/hacking.py --ignore=N4,E125,E126,E711,E712 --repeat --show-source \
       --filename=cinder* bin
 
 [testenv:venv]