review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Remove E12 errors from tox.ini Flake ignores.
author John Griffith <john.griffith@solidfire.com>
Sat, 1 Jun 2013 17:27:04 +0000 (11:27 -0600)
committer John Griffith <john.griffith@solidfire.com>
Thu, 6 Jun 2013 07:56:29 +0000 (01:56 -0600)
This removes E12 from the tox.ini ignore list and fixes up the
existing pep8 errors that we were ignoring.

Change-Id: I5d60f1eed768fcae01a708fcf9ea324844c6376d
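
For context, the E12x pep8/flake8 checks (E121 through E128 and related codes)
flag continuation-line indentation problems. A minimal sketch of the style this
change enforces, using hypothetical names rather than code taken from the diff:

    # Stand-in for a call such as service.Service.create() in the hunks below.
    def create_server(host, service_name):
        return {'host': host, 'service_name': service_name}

    # Syntactically valid, but flagged as E128
    # (continuation line under-indented for visual indent):
    server = create_server(host='cinder@backend-a',
        service_name='backend-a')

    # Clean under the E12x checks: the continuation line aligns with the
    # opening parenthesis, the pattern applied throughout this diff.
    server = create_server(host='cinder@backend-a',
                           service_name='backend-a')

With E12 dropped from the flake8 ignore setting in tox.ini, these checks run
again and the hunks below adjust the offending continuation lines accordingly.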

46 files changed:
bin/cinder-all
bin/cinder-api
bin/cinder-manage
bin/cinder-volume
bin/cinder-volume-usage-audit
cinder/api/contrib/volume_transfer.py
cinder/api/v2/volumes.py
cinder/api/views/limits.py
cinder/api/views/transfers.py
cinder/backup/__init__.py
cinder/db/sqlalchemy/api.py
cinder/db/sqlalchemy/migration.py
cinder/db/sqlalchemy/models.py
cinder/scheduler/scheduler_options.py
cinder/tests/api/contrib/test_backups.py
cinder/tests/api/contrib/test_scheduler_hints.py
cinder/tests/api/contrib/test_types_extra_specs.py
cinder/tests/api/contrib/test_types_manage.py
cinder/tests/api/contrib/test_volume_transfer.py
cinder/tests/api/test_common.py
cinder/tests/api/v1/test_types.py
cinder/tests/db/test_transfers.py
cinder/tests/integrated/test_volumes.py
cinder/tests/scheduler/test_host_filters.py
cinder/tests/scheduler/test_host_manager.py
cinder/tests/test_db_api.py
cinder/tests/test_emc.py
cinder/tests/test_glusterfs.py
cinder/tests/test_hp3par.py
cinder/tests/test_iscsi.py
cinder/tests/test_nfs.py
cinder/tests/test_policy.py
cinder/tests/test_rbd.py
cinder/tests/test_storwize_svc.py
cinder/tests/test_volume_transfer.py
cinder/transfer/__init__.py
cinder/volume/__init__.py
cinder/volume/driver.py
cinder/volume/drivers/emc/emc_smis_common.py
cinder/volume/drivers/emc/emc_smis_iscsi.py
cinder/volume/drivers/nfs.py
cinder/volume/drivers/san/hp/hp_3par_common.py
cinder/volume/drivers/san/san.py
cinder/volume/drivers/storwize_svc.py
cinder/volume/volume_types.py
tox.ini

bin/cinder-all
index 9591d1574fa7a597bb035351b34ee524d50285e7..aab9a9da31289ee270b351932796b00737c2defa 100755 (executable)
@@ -34,8 +34,9 @@ import os
 import sys
 
 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
-        sys.argv[0]), os.pardir, os.pardir))
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
 if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")):
     sys.path.insert(0, possible_topdir)
 
bin/cinder-api
index 0f05b519c8e69c549586102593f57a229460cd9c..22a9f741e697c46c53260c7f65715adee54dc63a 100755 (executable)
@@ -30,8 +30,9 @@ import os
 import sys
 
 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
-        sys.argv[0]), os.pardir, os.pardir))
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
 if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")):
     sys.path.insert(0, possible_topdir)
 
bin/cinder-manage
index 8e0de60b6ebcb1343e6b26c9737b592ba1866e14..d9df32317e48b1f80e0bb9d940121ffde7499060 100755 (executable)
@@ -691,13 +691,12 @@ class ServiceCommands(object):
         ctxt = context.get_admin_context()
         services = db.service_get_all(ctxt)
         print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
-        print print_format % (
-                    _('Binary'),
-                    _('Host'),
-                    _('Zone'),
-                    _('Status'),
-                    _('State'),
-                    _('Updated At'))
+        print print_format % (_('Binary'),
+                              _('Host'),
+                              _('Zone'),
+                              _('Status'),
+                              _('State'),
+                              _('Updated At'))
         for svc in services:
             alive = utils.service_is_up(svc)
             art = ":-)" if alive else "XXX"
bin/cinder-volume
index 9c36238d667d918d809abfce1928e544edc831b5..46d5da24b2f877e1babcf74d33d4b83f88f15958 100755 (executable)
@@ -51,9 +51,8 @@ if __name__ == '__main__':
     if FLAGS.enabled_backends:
         for backend in FLAGS.enabled_backends:
             host = "%s@%s" % (FLAGS.host, backend)
-            server = service.Service.create(
-                                        host=host,
-                                        service_name=backend)
+            server = service.Service.create(host=host,
+                                            service_name=backend)
             launcher.launch_server(server)
     else:
         server = service.Service.create(binary='cinder-volume')
bin/cinder-volume-usage-audit
index 13ea447a9f2e83141511f4802af115c0adc99b14..3ff437efe472876aa692ca4486c0a9f1a9025a72 100755 (executable)
@@ -80,8 +80,8 @@ if __name__ == '__main__':
     print _("Found %d volumes") % len(volumes)
     for volume_ref in volumes:
         try:
-            cinder.volume.utils.notify_usage_exists(
-                    admin_context, volume_ref)
+            cinder.volume.utils.notify_usage_exists(admin_context,
+                                                    volume_ref)
         except Exception, e:
             print traceback.format_exc(e)
 
cinder/api/contrib/volume_transfer.py
index fd98af34812e9d2dacfb40d00c6bee0f5b7ef8da..970976b8140239ac3fc6726c8478e30169daf267 100644 (file)
@@ -161,8 +161,9 @@ class VolumeTransferController(wsgi.Controller):
 
         name = transfer.get('name', None)
 
-        LOG.audit(_("Creating transfer of volume %(volume_id)s"), locals(),
-                    context=context)
+        LOG.audit(_("Creating transfer of volume %s"),
+                  volume_id,
+                  context=context)
 
         try:
             new_transfer = self.transfer_api.create(context, volume_id, name)
@@ -194,8 +195,8 @@ class VolumeTransferController(wsgi.Controller):
             msg = _("Incorrect request body format")
             raise exc.HTTPBadRequest(explanation=msg)
 
-        LOG.audit(_("Accepting transfer %(transfer_id)s"), locals(),
-                    context=context)
+        LOG.audit(_("Accepting transfer %s"), transfer_id,
+                  context=context)
 
         try:
             accepted_transfer = self.transfer_api.accept(context, transfer_id,
@@ -206,8 +207,9 @@ class VolumeTransferController(wsgi.Controller):
         except exception.InvalidVolume as error:
             raise exc.HTTPBadRequest(explanation=unicode(error))
 
-        transfer = self._view_builder.summary(req,
-              dict(accepted_transfer.iteritems()))
+        transfer = \
+            self._view_builder.summary(req,
+                                       dict(accepted_transfer.iteritems()))
         return transfer
 
     def delete(self, req, id):
cinder/api/v2/volumes.py
index d56fe6b7c2aa8bb22ee8dc7574773e1fd3e5f717..66cb280df64c9430b5044b215a662b4ee6403730 100644 (file)
@@ -33,7 +33,7 @@ from cinder.volume import volume_types
 
 LOG = logging.getLogger(__name__)
 SCHEDULER_HINTS_NAMESPACE =\
-        "http://docs.openstack.org/block-service/ext/scheduler-hints/api/v2"
+    "http://docs.openstack.org/block-service/ext/scheduler-hints/api/v2"
 FLAGS = flags.FLAGS
 
 
@@ -94,8 +94,10 @@ class CommonDeserializer(wsgi.MetadataXMLDeserializer):
 
     def _extract_scheduler_hints(self, volume_node):
         """Marshal the scheduler hints attribute of a parsed request."""
-        node = self.find_first_child_named_in_namespace(volume_node,
-                SCHEDULER_HINTS_NAMESPACE, "scheduler_hints")
+        node =\
+            self.find_first_child_named_in_namespace(volume_node,
+                                                     SCHEDULER_HINTS_NAMESPACE,
+                                                     "scheduler_hints")
         if node:
             scheduler_hints = {}
             for child in self.extract_elements(node):
cinder/api/views/limits.py
index 81b1e794ecab049a1a1ee756f760ac39ad5c4a09..89c0fffbf950187adbd757f8041121d3f509ef81 100644 (file)
@@ -71,7 +71,7 @@ class ViewBuilder(object):
             # check for existing key
             for limit in limits:
                 if (limit["uri"] == rate_limit["URI"] and
-                    limit["regex"] == rate_limit["regex"]):
+                        limit["regex"] == rate_limit["regex"]):
                     _rate_limit_key = limit
                     break
 
cinder/api/views/transfers.py
index 29a5dc1e3e22afec31762648a51ddcf6788d7077..2b8b54e0cbb7c00e8a40a6ed0cc9bbe17ddf1efa 100644 (file)
@@ -79,8 +79,8 @@ class ViewBuilder(common.ViewBuilder):
         transfers_list = [func(request, transfer)['transfer'] for transfer in
                           transfers]
         transfers_links = self._get_collection_links(request,
-                                                   transfers,
-                                                   self._collection_name)
+                                                     transfers,
+                                                     self._collection_name)
         transfers_dict = dict(transfers=transfers_list)
 
         if transfers_links:
cinder/backup/__init__.py
index 368e2ffff06cbd9c3afe6f01021c65694a421f86..193b7a5f02affdfa2809ea12a64891b9cc0ce74b 100644 (file)
@@ -20,4 +20,4 @@ import cinder.flags
 import cinder.openstack.common.importutils
 
 API = cinder.openstack.common.importutils.import_class(
-        cinder.flags.FLAGS.backup_api_class)
+    cinder.flags.FLAGS.backup_api_class)
cinder/db/sqlalchemy/api.py
index 82afcd954b5205ac4ba05cab2d403df389820f54..49265239f83fb514644333f8ea82e63f9bb07621 100644 (file)
@@ -2028,7 +2028,7 @@ def backup_destroy(context, backup_id):
 @require_context
 def transfer_get(context, transfer_id, session=None):
     query = model_query(context, models.Transfer,
-                         session=session).\
+                        session=session).\
         filter_by(id=transfer_id)
 
     if not is_admin_context(context):
@@ -2069,8 +2069,8 @@ def transfer_get_all_by_project(context, project_id):
 
     volume = models.Volume
     query = model_query(context, models.Transfer).\
-            options(joinedload('volume')).\
-            filter(volume.project_id == project_id)
+        options(joinedload('volume')).\
+        filter(volume.project_id == project_id)
     results = query.all()
     return _translate_transfers(results)
 
cinder/db/sqlalchemy/migration.py
index f36c5f54cb941d7e43b1daf78fe3663738ff7d6b..e2463bc48b1e0089e9f12ce3cd15c614a3f80e1c 100644 (file)
@@ -51,7 +51,7 @@ def patched_with_engine(f, *a, **kw):
 #                on that version or higher, this can be removed
 MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
 if (not hasattr(migrate, '__version__') or
-    dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
+        dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
     migrate_util.with_engine = patched_with_engine
 
 
cinder/db/sqlalchemy/models.py
index 2f9bc85c126d309465a1155cf8d4801f4598b744..7adac5b885e1a9bf429836ab803a173c429d8781 100644 (file)
@@ -251,7 +251,7 @@ class Reservation(BASE, CinderBase):
         "QuotaUsage",
         foreign_keys=usage_id,
         primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
-                         'QuotaUsage.deleted == 0)')
+                    'QuotaUsage.deleted == 0)')
 
 
 class Snapshot(BASE, CinderBase):
cinder/scheduler/scheduler_options.py
index 0b4f1ee9c9d8bcccaaa859a63983b220bb35b06e..6c4f0b3648c1e7823ef22ddc47d8bc887e025642 100644 (file)
@@ -33,9 +33,9 @@ from cinder.openstack.common import timeutils
 
 
 scheduler_json_config_location_opt = cfg.StrOpt(
-        'scheduler_json_config_location',
-        default='',
-        help='Absolute path to scheduler configuration JSON file.')
+    'scheduler_json_config_location',
+    default='',
+    help='Absolute path to scheduler configuration JSON file.')
 
 
 CONF = cfg.CONF
@@ -97,7 +97,7 @@ class SchedulerOptions(object):
 
         last_modified = self._get_file_timestamp(filename)
         if (not last_modified or not self.last_modified or
-            last_modified > self.last_modified):
+                last_modified > self.last_modified):
             self.data = self._load_file(self._get_file_handle(filename))
             self.last_modified = last_modified
         if not self.data:
cinder/tests/api/contrib/test_backups.py
index 899814a70d925a48285799fedd2405239014a3e4..c757e78bc64543a06041466ae5dfb784889a094a 100644 (file)
@@ -277,7 +277,7 @@ class BackupsAPITestCase(test.TestCase):
 
         self.assertEqual(backup_detail.item(0).attributes.length, 11)
         self.assertEqual(
-                backup_detail.item(0).getAttribute('availability_zone'), 'az1')
+            backup_detail.item(0).getAttribute('availability_zone'), 'az1')
         self.assertEqual(
             backup_detail.item(0).getAttribute('container'), 'volumebackups')
         self.assertEqual(
@@ -288,17 +288,17 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(
             backup_detail.item(0).getAttribute('id'), backup_id1)
         self.assertEqual(
-                int(backup_detail.item(0).getAttribute('object_count')), 0)
+            int(backup_detail.item(0).getAttribute('object_count')), 0)
         self.assertEqual(
-                int(backup_detail.item(0).getAttribute('size')), 0)
+            int(backup_detail.item(0).getAttribute('size')), 0)
         self.assertEqual(
             backup_detail.item(0).getAttribute('status'), 'creating')
         self.assertEqual(
-                int(backup_detail.item(0).getAttribute('volume_id')), 1)
+            int(backup_detail.item(0).getAttribute('volume_id')), 1)
 
         self.assertEqual(backup_detail.item(1).attributes.length, 11)
         self.assertEqual(
-                backup_detail.item(1).getAttribute('availability_zone'), 'az1')
+            backup_detail.item(1).getAttribute('availability_zone'), 'az1')
         self.assertEqual(
             backup_detail.item(1).getAttribute('container'), 'volumebackups')
         self.assertEqual(
@@ -330,13 +330,13 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(
             backup_detail.item(2).getAttribute('id'), backup_id3)
         self.assertEqual(
-                int(backup_detail.item(2).getAttribute('object_count')), 0)
+            int(backup_detail.item(2).getAttribute('object_count')), 0)
         self.assertEqual(
-                int(backup_detail.item(2).getAttribute('size')), 0)
+            int(backup_detail.item(2).getAttribute('size')), 0)
         self.assertEqual(
             backup_detail.item(2).getAttribute('status'), 'creating')
         self.assertEqual(
-                int(backup_detail.item(2).getAttribute('volume_id')), 1)
+            int(backup_detail.item(2).getAttribute('volume_id')), 1)
 
         db.backup_destroy(context.get_admin_context(), backup_id3)
         db.backup_destroy(context.get_admin_context(), backup_id2)
cinder/tests/api/contrib/test_scheduler_hints.py
index 4edc9d00c8fef09b3206be8effe6fd346ba73cfb..c48d98dee72e5d70fc379c7f18864fd4098121f5 100644 (file)
@@ -33,7 +33,7 @@ class SchedulerHintsTestCase(test.TestCase):
         super(SchedulerHintsTestCase, self).setUp()
         self.fake_instance = stubs.stub_volume(1, uuid=UUID)
         self.fake_instance['created_at'] =\
-                datetime.datetime(2013, 1, 1, 1, 1, 1)
+            datetime.datetime(2013, 1, 1, 1, 1, 1)
         self.flags(
             osapi_volume_extension=[
                 'cinder.api.contrib.select_extensions'],
@@ -55,8 +55,7 @@ class SchedulerHintsTestCase(test.TestCase):
         req.content_type = 'application/json'
         body = {'id': id,
                 'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
-                'volume_id': '1',
-               }
+                'volume_id': '1', }
         req.body = jsonutils.dumps(body)
         res = req.get_response(self.app)
         self.assertEqual(202, res.status_int)
@@ -78,8 +77,7 @@ class SchedulerHintsTestCase(test.TestCase):
         body = {'id': id,
                 'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                 'volume_id': '1',
-                'scheduler_hints': {'a': 'b'},
-               }
+                'scheduler_hints': {'a': 'b'}, }
 
         req.body = jsonutils.dumps(body)
         res = req.get_response(self.app)
@@ -90,11 +88,10 @@ class SchedulerHintsTestCase(test.TestCase):
         req.method = 'POST'
         req.content_type = 'application/json'
         body = {'volume': {
-                  'id': id,
-                  'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
-                  'volume_id': '1',
-                  'scheduler_hints': 'a', }
-               }
+            'id': id,
+            'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+            'volume_id': '1',
+            'scheduler_hints': 'a', }}
 
         req.body = jsonutils.dumps(body)
         res = req.get_response(self.app)
cinder/tests/api/contrib/test_types_extra_specs.py
index 36ff9f6d17f7d41fa6aa83bb3ea1bd35635453ad..e28a0a9535e2ca46d9786b4de9c1548209c10d07 100644 (file)
@@ -51,12 +51,11 @@ def delete_volume_type_extra_specs_not_found(context, volume_type_id, key):
 
 
 def stub_volume_type_extra_specs():
-    specs = {
-            "key1": "value1",
-            "key2": "value2",
-            "key3": "value3",
-            "key4": "value4",
-            "key5": "value5"}
+    specs = {"key1": "value1",
+             "key2": "value2",
+             "key3": "value3",
+             "key4": "value4",
+             "key5": "value5"}
     return specs
 
 
cinder/tests/api/contrib/test_types_manage.py
index c007236ec47da7e1124bdb47e5535affc513842d..fee0a92e7ae00a7ba3011da736c2872151345906 100644 (file)
@@ -25,12 +25,11 @@ from cinder.volume import volume_types
 
 
 def stub_volume_type(id):
-    specs = {
-            "key1": "value1",
-            "key2": "value2",
-            "key3": "value3",
-            "key4": "value4",
-            "key5": "value5"}
+    specs = {"key1": "value1",
+             "key2": "value2",
+             "key3": "value3",
+             "key4": "value4",
+             "key5": "value5"}
     return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
 
 
cinder/tests/api/contrib/test_volume_transfer.py
index fc8f37b054117733e74240cef83187a5ccf3499d..4cf24780752830a0fe0f3204fe6c11dfc2c538bc 100644 (file)
@@ -543,9 +543,9 @@ class VolumeTransferAPITestCase(test.TestCase):
     def test_accept_transfer_with_VolumeLimitExceeded(self):
 
         def fake_transfer_api_accept_throwing_VolumeLimitExceeded(cls,
-                                                                 context,
-                                                                 transfer,
-                                                                 volume_id):
+                                                                  context,
+                                                                  transfer,
+                                                                  volume_id):
             raise exception.VolumeLimitExceeded(allowed=1)
 
         self.stubs.Set(cinder.transfer.API, 'accept',
cinder/tests/api/test_common.py
index 1a8eba04000a0b792df2c137696605fef4c6ccc2..6c5c7b413b7f81614ebaf1dddc401a4bafda3c27 100644 (file)
@@ -181,7 +181,7 @@ class PaginationParamsTest(test.TestCase):
     def test_valid_marker(self):
         """ Test valid marker param. """
         req = webob.Request.blank(
-                '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
+            '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
         self.assertEqual(common.get_pagination_params(req),
                          {'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
 
cinder/tests/api/v1/test_types.py
index 97a10e7917e9fbcfca1c5469960eb425c65d3d48..9ca05fa6bc5c7cd2a05849d99fca4d25e363ab55 100644 (file)
@@ -26,12 +26,11 @@ from cinder.volume import volume_types
 
 
 def stub_volume_type(id):
-    specs = {
-            "key1": "value1",
-            "key2": "value2",
-            "key3": "value3",
-            "key4": "value4",
-            "key5": "value5"}
+    specs = {"key1": "value1",
+             "key2": "value2",
+             "key3": "value3",
+             "key4": "value4",
+             "key5": "value5"}
     return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
 
 
cinder/tests/db/test_transfers.py
index dbf6ce13edeee3e5827459a500ab156e0530c90d..8f38cedfa063802a4c24986201708fbcfe54dab8 100644 (file)
@@ -77,7 +77,7 @@ class TransfersTableTestCase(test.TestCase):
         self.assertEquals(xfer.volume_id, volume_id1, "Unexpected volume_id")
 
         nctxt = context.RequestContext(user_id='new_user_id',
-                                          project_id='new_project_id')
+                                       project_id='new_project_id')
         self.assertRaises(exception.TransferNotFound,
                           db.transfer_get, nctxt, xfer_id1)
 
@@ -102,7 +102,7 @@ class TransfersTableTestCase(test.TestCase):
                           "Unexpected number of transfer records")
 
         nctxt = context.RequestContext(user_id='new_user_id',
-                                          project_id='new_project_id')
+                                       project_id='new_project_id')
         self.assertRaises(exception.NotAuthorized,
                           db.transfer_get_all_by_project,
                           nctxt, self.ctxt.project_id)
cinder/tests/integrated/test_volumes.py
index 59ddd36bb752997ecc08d90edf67b5f7d018750a..6f82b54aada72d3a12d7eb5dd82974a6b2e884bb 100644 (file)
@@ -116,8 +116,8 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
         LOG.debug("Logs: %s" % fake_driver.LoggingVolumeDriver.all_logs())
 
         create_actions = fake_driver.LoggingVolumeDriver.logs_like(
-                            'create_volume',
-                            id=created_volume_id)
+            'create_volume',
+            id=created_volume_id)
         LOG.debug("Create_Actions: %s" % create_actions)
 
         self.assertEquals(1, len(create_actions))
@@ -127,16 +127,16 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
         self.assertEquals(create_action['size'], 1)
 
         export_actions = fake_driver.LoggingVolumeDriver.logs_like(
-                            'create_export',
-                            id=created_volume_id)
+            'create_export',
+            id=created_volume_id)
         self.assertEquals(1, len(export_actions))
         export_action = export_actions[0]
         self.assertEquals(export_action['id'], created_volume_id)
         self.assertEquals(export_action['availability_zone'], 'nova')
 
         delete_actions = fake_driver.LoggingVolumeDriver.logs_like(
-                            'delete_volume',
-                            id=created_volume_id)
+            'delete_volume',
+            id=created_volume_id)
         self.assertEquals(1, len(delete_actions))
         delete_action = export_actions[0]
         self.assertEquals(delete_action['id'], created_volume_id)
cinder/tests/scheduler/test_host_filters.py
index 065752beb5ca899692e0e7987388cde1bad58579..7034879a30d3aa2359d00887696501de5f56ce07 100644 (file)
@@ -62,8 +62,9 @@ class HostFiltersTestCase(test.TestCase):
         stub_out_https_backend(self.stubs)
         self.context = context.RequestContext('fake', 'fake')
         self.json_query = jsonutils.dumps(
-                ['and', ['>=', '$free_capacity_gb', 1024],
-                 ['>=', '$total_capacity_gb', 10 * 1024]])
+            ['and',
+                ['>=', '$free_capacity_gb', 1024],
+                ['>=', '$total_capacity_gb', 10 * 1024]])
         # This has a side effect of testing 'get_filter_classes'
         # when specifying a method (in this case, our standard filters)
         filter_handler = filters.HostFilterHandler('cinder.scheduler.filters')
cinder/tests/scheduler/test_host_manager.py
index 2a3558fc9d0a6591c468aa981f595ce6f39482a0..ca3a9f521c9c6156c9c7ed018244ce1a5226afac 100644 (file)
@@ -79,7 +79,7 @@ class HostManagerTestCase(test.TestCase):
 
         self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
         self.host_manager._choose_host_filters(specified_filters).AndReturn(
-                [FakeFilterClass1])
+            [FakeFilterClass1])
 
     def _verify_result(self, info, result):
         for x in info['got_fprops']:
cinder/tests/test_db_api.py
index c7580dffa6d392a00c4ae4f4067c310be6cd82fe..371f80c5e9aaa155bca08843de6b0cc19f4b2081 100644 (file)
@@ -45,12 +45,13 @@ def _quota_reserve(context, project_id):
     for i in range(3):
         resource = 'res%d' % i
         quotas[resource] = db.quota_create(context, project_id, resource, i)
-        resources[resource] = ReservableResource(resource,
-                            get_sync(resource, i), 'quota_res_%d' % i)
+        resources[resource] = ReservableResource(
+            resource,
+            get_sync(resource, i), 'quota_res_%d' % i)
         deltas[resource] = i
     return db.quota_reserve(context, resources, quotas, deltas,
-                    datetime.utcnow(), datetime.utcnow(),
-                    timedelta(days=1), project_id)
+                            datetime.utcnow(), datetime.utcnow(),
+                            timedelta(days=1), project_id)
 
 
 class ModelsObjectComparatorMixin(object):
@@ -58,7 +59,7 @@ class ModelsObjectComparatorMixin(object):
         if ignored_keys is None:
             ignored_keys = []
         return dict([(k, v) for k, v in obj.iteritems()
-                                if k not in ignored_keys])
+                    if k not in ignored_keys])
 
     def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
         obj1 = self._dict_from_object(obj1, ignored_keys)
@@ -255,7 +256,7 @@ class DBAPIVolumeTestCase(BaseTest):
         db.iscsi_target_create_safe(self.ctxt, {'host': host,
                                                 'target_num': 42})
         target_num = db.volume_allocate_iscsi_target(self.ctxt, volume['id'],
-                                                 host)
+                                                     host)
         self.assertEqual(target_num, 42)
 
     def test_volume_attached_invalid_uuid(self):
@@ -278,8 +279,9 @@ class DBAPIVolumeTestCase(BaseTest):
             for j in xrange(3):
                 db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': 100})
         for i in xrange(3):
-            self.assertEqual((3, 300), db.volume_data_get_for_host(
-                                                        self.ctxt, 'h%d' % i))
+            self.assertEqual((3, 300),
+                             db.volume_data_get_for_host(
+                                 self.ctxt, 'h%d' % i))
 
     def test_volume_data_get_for_project(self):
         for i in xrange(3):
@@ -289,13 +291,15 @@ class DBAPIVolumeTestCase(BaseTest):
                                              'host': 'h-%d-%d' % (i, j),
                                              })
         for i in xrange(3):
-            self.assertEqual((3, 300), db.volume_data_get_for_project(
-                                                        self.ctxt, 'p%d' % i))
+            self.assertEqual((3, 300),
+                             db.volume_data_get_for_project(
+                                 self.ctxt, 'p%d' % i))
 
     def test_volume_detached(self):
         volume = db.volume_create(self.ctxt, {})
-        db.volume_attached(self.ctxt, volume['id'],
-                        'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '/tmp')
+        db.volume_attached(self.ctxt,
+                           volume['id'],
+                           'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '/tmp')
         db.volume_detached(self.ctxt, volume['id'])
         volume = db.volume_get(self.ctxt, volume['id'])
         self.assertEqual('available', volume['status'])
@@ -306,7 +310,7 @@ class DBAPIVolumeTestCase(BaseTest):
     def test_volume_get(self):
         volume = db.volume_create(self.ctxt, {})
         self._assertEqualObjects(volume, db.volume_get(self.ctxt,
-                                                        volume['id']))
+                                                       volume['id']))
 
     def test_volume_destroy(self):
         volume = db.volume_create(self.ctxt, {})
@@ -315,8 +319,9 @@ class DBAPIVolumeTestCase(BaseTest):
                           self.ctxt, volume['id'])
 
     def test_volume_get_all(self):
-        volumes = [db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': i})
-                            for i in xrange(3)]
+        volumes = [db.volume_create(self.ctxt,
+                   {'host': 'h%d' % i, 'size': i})
+                   for i in xrange(3)]
         self._assertEqualListsOfObjects(volumes, db.volume_get_all(
                                         self.ctxt, None, None, 'host', None))
 
@@ -324,7 +329,7 @@ class DBAPIVolumeTestCase(BaseTest):
         volumes = []
         for i in xrange(3):
             volumes.append([db.volume_create(self.ctxt, {'host': 'h%d' % i})
-                                                    for j in xrange(3)])
+                            for j in xrange(3)])
         for i in xrange(3):
             self._assertEqualListsOfObjects(volumes[i],
                                             db.volume_get_all_by_host(
@@ -337,7 +342,8 @@ class DBAPIVolumeTestCase(BaseTest):
             instance_uuid = str(uuidutils.uuid.uuid1())
             instance_uuids.append(instance_uuid)
             volumes.append([db.volume_create(self.ctxt,
-                        {'instance_uuid': instance_uuid}) for j in xrange(3)])
+                            {'instance_uuid': instance_uuid})
+                            for j in xrange(3)])
         for i in xrange(3):
             self._assertEqualListsOfObjects(volumes[i],
                                             db.volume_get_all_by_instance_uuid(
@@ -347,7 +353,7 @@ class DBAPIVolumeTestCase(BaseTest):
         volumes = []
         for i in xrange(3):
             volumes.append([db.volume_create(self.ctxt, {
-                        'project_id': 'p%d' % i}) for j in xrange(3)])
+                'project_id': 'p%d' % i}) for j in xrange(3)])
         for i in xrange(3):
             self._assertEqualListsOfObjects(volumes[i],
                                             db.volume_get_all_by_project(
@@ -361,7 +367,7 @@ class DBAPIVolumeTestCase(BaseTest):
 
     def test_volume_get_iscsi_target_num_nonexistent(self):
         self.assertRaises(exception.ISCSITargetNotFoundForVolume,
-                        db.volume_get_iscsi_target_num, self.ctxt, 42)
+                          db.volume_get_iscsi_target_num, self.ctxt, 42)
 
     def test_volume_update(self):
         volume = db.volume_create(self.ctxt, {'host': 'h1'})
@@ -381,19 +387,19 @@ class DBAPIReservationTestCase(BaseTest):
     def setUp(self):
         super(DBAPIReservationTestCase, self).setUp()
         self.values = {'uuid': 'sample-uuid',
-                'project_id': 'project1',
-                'resource': 'resource',
-                'delta': 42,
-                'expire': datetime.utcnow() + timedelta(days=1),
-                'usage': {'id': 1}}
+                       'project_id': 'project1',
+                       'resource': 'resource',
+                       'delta': 42,
+                       'expire': datetime.utcnow() + timedelta(days=1),
+                       'usage': {'id': 1}}
 
     def test_reservation_create(self):
         reservation = db.reservation_create(self.ctxt, **self.values)
         self._assertEqualObjects(self.values, reservation, ignored_keys=(
-                        'deleted', 'updated_at',
-                        'deleted_at', 'id',
-                        'created_at', 'usage',
-                        'usage_id'))
+            'deleted', 'updated_at',
+            'deleted_at', 'id',
+            'created_at', 'usage',
+            'usage_id'))
         self.assertEqual(reservation['usage_id'], self.values['usage']['id'])
 
     def test_reservation_get(self):
@@ -402,46 +408,59 @@ class DBAPIReservationTestCase(BaseTest):
         self._assertEqualObjects(reservation, reservation_db)
 
     def test_reservation_get_nonexistent(self):
-        self.assertRaises(exception.ReservationNotFound, db.reservation_get,
-                                    self.ctxt, 'non-exitent-resevation-uuid')
+        self.assertRaises(exception.ReservationNotFound,
+                          db.reservation_get,
+                          self.ctxt,
+                          'non-exitent-resevation-uuid')
 
     def test_reservation_commit(self):
         reservations = _quota_reserve(self.ctxt, 'project1')
         expected = {'project_id': 'project1',
-                'res0': {'reserved': 0, 'in_use': 0},
-                'res1': {'reserved': 1, 'in_use': 1},
-                'res2': {'reserved': 2, 'in_use': 2}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-                                            self.ctxt, 'project1'))
+                    'res0': {'reserved': 0, 'in_use': 0},
+                    'res1': {'reserved': 1, 'in_use': 1},
+                    'res2': {'reserved': 2, 'in_use': 2}}
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt, 'project1'))
         db.reservation_get(self.ctxt, reservations[0])
         db.reservation_commit(self.ctxt, reservations, 'project1')
         self.assertRaises(exception.ReservationNotFound,
-            db.reservation_get, self.ctxt, reservations[0])
+                          db.reservation_get,
+                          self.ctxt,
+                          reservations[0])
         expected = {'project_id': 'project1',
-                'res0': {'reserved': 0, 'in_use': 0},
-                'res1': {'reserved': 0, 'in_use': 2},
-                'res2': {'reserved': 0, 'in_use': 4}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-                                            self.ctxt, 'project1'))
+                    'res0': {'reserved': 0, 'in_use': 0},
+                    'res1': {'reserved': 0, 'in_use': 2},
+                    'res2': {'reserved': 0, 'in_use': 4}}
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt,
+                             'project1'))
 
     def test_reservation_rollback(self):
         reservations = _quota_reserve(self.ctxt, 'project1')
         expected = {'project_id': 'project1',
-                'res0': {'reserved': 0, 'in_use': 0},
-                'res1': {'reserved': 1, 'in_use': 1},
-                'res2': {'reserved': 2, 'in_use': 2}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-                                            self.ctxt, 'project1'))
+                    'res0': {'reserved': 0, 'in_use': 0},
+                    'res1': {'reserved': 1, 'in_use': 1},
+                    'res2': {'reserved': 2, 'in_use': 2}}
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt,
+                             'project1'))
         db.reservation_get(self.ctxt, reservations[0])
         db.reservation_rollback(self.ctxt, reservations, 'project1')
         self.assertRaises(exception.ReservationNotFound,
-            db.reservation_get, self.ctxt, reservations[0])
+                          db.reservation_get,
+                          self.ctxt,
+                          reservations[0])
         expected = {'project_id': 'project1',
-                'res0': {'reserved': 0, 'in_use': 0},
-                'res1': {'reserved': 0, 'in_use': 1},
-                'res2': {'reserved': 0, 'in_use': 2}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-                                            self.ctxt, 'project1'))
+                    'res0': {'reserved': 0, 'in_use': 0},
+                    'res1': {'reserved': 0, 'in_use': 1},
+                    'res2': {'reserved': 0, 'in_use': 2}}
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt,
+                             'project1'))
 
     def test_reservation_expire(self):
         self.values['expire'] = datetime.utcnow() + timedelta(days=1)
@@ -449,11 +468,13 @@ class DBAPIReservationTestCase(BaseTest):
         db.reservation_expire(self.ctxt)
 
         expected = {'project_id': 'project1',
-                'res0': {'reserved': 0, 'in_use': 0},
-                'res1': {'reserved': 0, 'in_use': 1},
-                'res2': {'reserved': 0, 'in_use': 2}}
-        self.assertEqual(expected, db.quota_usage_get_all_by_project(
-                                            self.ctxt, 'project1'))
+                    'res0': {'reserved': 0, 'in_use': 0},
+                    'res1': {'reserved': 0, 'in_use': 1},
+                    'res2': {'reserved': 0, 'in_use': 2}}
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt,
+                             'project1'))
 
 
 class DBAPIQuotaTestCase(BaseTest):
@@ -478,9 +499,9 @@ class DBAPIQuotaTestCase(BaseTest):
         for i in range(3):
             quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
             self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
-                                                        'res0': 0,
-                                                        'res1': 1,
-                                                        'res2': 2})
+                                         'res0': 0,
+                                         'res1': 1,
+                                         'res2': 2})
 
     def test_quota_update(self):
         db.quota_create(self.ctxt, 'project1', 'resource1', 41)
@@ -492,11 +513,18 @@ class DBAPIQuotaTestCase(BaseTest):
 
     def test_quota_update_nonexistent(self):
         self.assertRaises(exception.ProjectQuotaNotFound,
-            db.quota_update, self.ctxt, 'project1', 'resource1', 42)
+                          db.quota_update,
+                          self.ctxt,
+                          'project1',
+                          'resource1',
+                          42)
 
     def test_quota_get_nonexistent(self):
         self.assertRaises(exception.ProjectQuotaNotFound,
-            db.quota_get, self.ctxt, 'project1', 'resource1')
+                          db.quota_get,
+                          self.ctxt,
+                          'project1',
+                          'resource1')
 
     def test_quota_reserve(self):
         reservations = _quota_reserve(self.ctxt, 'project1')
@@ -511,17 +539,22 @@ class DBAPIQuotaTestCase(BaseTest):
         reservations = _quota_reserve(self.ctxt, 'project1')
         db.quota_destroy_all_by_project(self.ctxt, 'project1')
         self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
-                            {'project_id': 'project1'})
-        self.assertEqual(db.quota_usage_get_all_by_project(
-                            self.ctxt, 'project1'),
-                            {'project_id': 'project1'})
+                         {'project_id': 'project1'})
+        self.assertEqual(db.quota_usage_get_all_by_project(self.ctxt,
+                                                           'project1'),
+                         {'project_id': 'project1'})
         for r in reservations:
             self.assertRaises(exception.ReservationNotFound,
-                            db.reservation_get, self.ctxt, r)
+                              db.reservation_get,
+                              self.ctxt,
+                              r)
 
     def test_quota_usage_get_nonexistent(self):
-        self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get,
-            self.ctxt, 'p1', 'nonexitent_resource')
+        self.assertRaises(exception.QuotaUsageNotFound,
+                          db.quota_usage_get,
+                          self.ctxt,
+                          'p1',
+                          'nonexitent_resource')
 
     def test_quota_usage_get(self):
         reservations = _quota_reserve(self.ctxt, 'p1')
cinder/tests/test_emc.py
index c12070629edddfa86636707d947f6a9fb6554fa1..e12a1a94355577d903e22da5859958994128981a 100644 (file)
@@ -348,7 +348,7 @@ class FakeEcomConnection():
         syncs = self._enum_syncsvsvs()
         for sync in syncs:
             if (sync['SyncedElement'] == objectpath['SyncedElement'] and
-                sync['SystemElement'] == objectpath['SystemElement']):
+                    sync['SystemElement'] == objectpath['SystemElement']):
                 foundsync = sync
                 break
         return foundsync
cinder/tests/test_glusterfs.py
index d908403a909b0ec05b82fc2ef2bc97a1b21e63a1..5b3790f5d58760667d647209ad05322adc21178c 100644 (file)
@@ -71,8 +71,8 @@ class GlusterFsDriverTestCase(test.TestCase):
         self._configuration.glusterfs_sparsed_volumes = True
 
         self.stubs = stubout.StubOutForTesting()
-        self._driver = glusterfs.GlusterfsDriver(
-                                    configuration=self._configuration)
+        self._driver =\
+            glusterfs.GlusterfsDriver(configuration=self._configuration)
         self._driver.shares = {}
 
     def tearDown(self):
cinder/tests/test_hp3par.py
index 2b90fd143010266a90bd71168851d85e263a3127..f9939d13b1694f17cb989cd53717c5daae64e597 100644 (file)
@@ -1074,7 +1074,7 @@ ISCSI_PORT_RET = (
 ISCSI_3PAR_RET = (
     'Id,Name,Persona,-WWN/iSCSI_Name-,Port,IP_addr\r\n'
     '75,fakehost.foo,Generic,iqn.1993-08.org.debian:01:222,---,'
-        '10.10.222.12\r\n'
+    '10.10.222.12\r\n'
     '\r\n'
     'Id,Name,-Initiator_CHAP_Name-,-Target_CHAP_Name-\r\n'
     '75,fakehost.foo,--,--\r\n'
cinder/tests/test_iscsi.py
index 321e805146ffada36d975d79ee9d4500b0e84249..66a467767a4b2d8caa4cc3597b96a0d869f5b15f 100644 (file)
@@ -186,5 +186,5 @@ class LioAdmTestCase(test.TestCase, TargetAdminTestCase):
         self.flags(iscsi_helper='lioadm')
         self.script_template = "\n".join([
             'rtstool create '
-                '/foo iqn.2011-09.org.foo.bar:blaa test_id test_pass',
+            '/foo iqn.2011-09.org.foo.bar:blaa test_id test_pass',
             'rtstool delete iqn.2010-10.org.openstack:volume-blaa'])
cinder/tests/test_nfs.py
index b75319d4aba00cafaa722e58d228f7dddc61192c..26b18c392296a8cee75d66080c2702541ce000de 100644 (file)
@@ -430,21 +430,24 @@ class NfsDriverTestCase(test.TestCase):
         drv = self._driver
         self.configuration.nfs_oversub_ratio = -1
         self.assertRaises(exception.NfsException,
-                         drv.do_setup, IsA(context.RequestContext))
+                          drv.do_setup,
+                          IsA(context.RequestContext))
 
     def test_setup_should_throw_error_if_used_ratio_less_than_zero(self):
         """do_setup should throw error if nfs_used_ratio is less than 0."""
         drv = self._driver
         self.configuration.nfs_used_ratio = -1
         self.assertRaises(exception.NfsException,
-                         drv.do_setup, IsA(context.RequestContext))
+                          drv.do_setup,
+                          IsA(context.RequestContext))
 
     def test_setup_should_throw_error_if_used_ratio_greater_than_one(self):
         """do_setup should throw error if nfs_used_ratio is greater than 1."""
         drv = self._driver
         self.configuration.nfs_used_ratio = 2
         self.assertRaises(exception.NfsException,
-                         drv.do_setup, IsA(context.RequestContext))
+                          drv.do_setup,
+                          IsA(context.RequestContext))
 
     def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
         """do_setup should throw error if nfs client is not installed."""
cinder/tests/test_policy.py
index 57795198ab480efbc9c9ba0b91e0587a10eea139..5d05273dcd4e603909e02f1222307f1794267306 100644 (file)
@@ -171,7 +171,7 @@ class DefaultPolicyTestCase(test.TestCase):
 
     def _set_brain(self, default_rule):
         brain = cinder.openstack.common.policy.Brain(self.rules,
-                                                         default_rule)
+                                                     default_rule)
         cinder.openstack.common.policy.set_brain(brain)
 
     def tearDown(self):
cinder/tests/test_rbd.py
index 995eb8120c1d76a292603a3e0e31821c66fcddde..16ce5bd5fdd57de5ef98e9edf50ea03bad390b79 100644 (file)
@@ -350,8 +350,7 @@ class RBDTestCase(test.TestCase):
                 'auth_enabled': False,
                 'auth_username': None,
                 'secret_type': 'ceph',
-                'secret_uuid': None,
-                }
+                'secret_uuid': None, }
         }
         actual = self.driver.initialize_connection(dict(name=name), None)
         self.assertDictMatch(expected, actual)
cinder/tests/test_storwize_svc.py
index ee185eaa1405e6b135878041d2dbf06bcd0bf196..04171739da3b07e8a3b916523727f7e47dbf71b9 100644 (file)
@@ -141,7 +141,7 @@ class StorwizeSVCManagementSimulator:
 
     def _state_transition(self, function, fcmap):
         if (function == 'wait' and
-            'wait' not in self._transitions[fcmap['status']]):
+                'wait' not in self._transitions[fcmap['status']]):
             return ('', '')
 
         if fcmap['status'] == 'copying' and function == 'wait':
@@ -1239,7 +1239,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         self.USESIM = True
         if self.USESIM:
             self.driver = StorwizeSVCFakeDriver(
-                                configuration=conf.Configuration(None))
+                configuration=conf.Configuration(None))
             self._def_flags = {'san_ip': 'hostname',
                                'san_login': 'user',
                                'san_password': 'pass',
@@ -1250,8 +1250,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
             self._host_name = 'storwize-svc-test'
             self._host_ip = '1.234.56.78'
             self._host_wwpns = [
-                    str(random.randint(0, 9999999999999999)).zfill(16),
-                    str(random.randint(0, 9999999999999999)).zfill(16)]
+                str(random.randint(0, 9999999999999999)).zfill(16),
+                str(random.randint(0, 9999999999999999)).zfill(16)]
             self._iscsi_name = ('test.initiator.%s' %
                                 str(random.randint(10000, 99999)))
             self.sim = StorwizeSVCManagementSimulator('volpool')
@@ -1259,7 +1259,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
             self.driver.set_fake_storage(self.sim)
         else:
             self.driver = storwize_svc.StorwizeSVCDriver(
-                                configuration=conf.Configuration(None))
+                configuration=conf.Configuration(None))
             self._def_flags = {'san_ip': '1.111.11.11',
                                'san_login': 'user',
                                'san_password': 'password',
@@ -1278,7 +1278,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
             for line in lines:
                 val = line.split('=')
                 if (len(val) == 2 and
-                    val[0].strip().replace(" ", "") == 'port_name'):
+                        val[0].strip().replace(" ", "") == 'port_name'):
                     self._host_wwpns.append(val[1].strip()[3:-1])
             self.assertNotEqual(len(self._host_wwpns), 0)
 
cinder/tests/test_volume_transfer.py
index 7c8d192c2c5c97af0783873ab6641980b3b1ae0b..1e091117f43cf83f35a5c5dbaa6f62a1a063e5dd 100644 (file)
@@ -108,9 +108,9 @@ class VolumeTransferTestCase(test.TestCase):
                           'Unexpected user id')
 
         self.assertEquals(volume['id'], response['volume_id'],
-                         'Unexpected volume id in response.')
+                          'Unexpected volume id in response.')
         self.assertEquals(transfer['id'], response['id'],
-                         'Unexpected transfer id in response.')
+                          'Unexpected transfer id in response.')
 
     def test_transfer_get(self):
         tx_api = transfer_api.API()
cinder/transfer/__init__.py
index 6979b82a96ac50d141b4cd7319cb4b1ae242d854..6adbf4307074a3c716ac21f0d112b6ca5ceaa5b2 100644 (file)
@@ -20,4 +20,4 @@ import cinder.flags
 import cinder.openstack.common.importutils
 
 API = cinder.openstack.common.importutils.import_class(
-        cinder.flags.FLAGS.transfer_api_class)
+    cinder.flags.FLAGS.transfer_api_class)
cinder/volume/__init__.py
index e810a93d48f2ddc8a0d5d8d9bfaf4c1d1e7f7d8a..b7a93efa8c13082edc8853ddba5c494f71c7ea5b 100644 (file)
@@ -18,8 +18,7 @@
 
 # Importing full names to not pollute the namespace and cause possible
 # collisions with use of 'from cinder.volume import <foo>' elsewhere.
-import cinder.flags
-import cinder.openstack.common.importutils
+import cinder.flags as flags
+import cinder.openstack.common.importutils as import_utils
 
-API = cinder.openstack.common.importutils.import_class(
-        cinder.flags.FLAGS.volume_api_class)
+API = import_utils.import_class(flags.FLAGS.volume_api_class)
cinder/volume/driver.py
index 7240ad2f6befa28be114d200176b27ef5c3392ed..ea3a92b1efe6f1e89c5f3a0a52d48c06dd7bb5f2 100644 (file)
@@ -227,7 +227,7 @@ class ISCSIDriver(VolumeDriver):
                                     run_as_root=True)
         for target in out.splitlines():
             if (self.configuration.iscsi_ip_address in target
-                and volume_name in target):
+                    and volume_name in target):
                 return target
         return None
 
cinder/volume/drivers/emc/emc_smis_common.py
index 11d96989edb867b492224f4856a2286777cf4658..78a2e69d574832dbfd95c244d7f7f45c38230b55 100644 (file)
@@ -100,7 +100,7 @@ class EMCSMISCommon():
                      'storage_system': storage_system})
 
         configservice = self._find_storage_configuration_service(
-                            storage_system)
+            storage_system)
         if configservice is None:
             exception_message = (_("Error Create Volume: %(volumename)s. "
                                  "Storage Configuration Service not found for "
@@ -120,10 +120,10 @@ class EMCSMISCommon():
                      'size': volumesize})
 
         rc, job = self.conn.InvokeMethod(
-                    'CreateOrModifyElementFromStoragePool',
-                    configservice, ElementName=volumename, InPool=pool,
-                    ElementType=self._getnum(5, '16'),
-                    Size=self._getnum(volumesize, '64'))
+            'CreateOrModifyElementFromStoragePool',
+            configservice, ElementName=volumename, InPool=pool,
+            ElementType=self._getnum(5, '16'),
+            Size=self._getnum(volumesize, '64'))
 
         LOG.debug(_('Create Volume: %(volumename)s  Return code: %(rc)lu')
                   % {'volumename': volumename,
@@ -205,10 +205,10 @@ class EMCSMISCommon():
 
         # Create a Clone from snapshot
         rc, job = self.conn.InvokeMethod(
-                    'CreateElementReplica', repservice,
-                    ElementName=volumename,
-                    SyncType=self._getnum(8, '16'),
-                    SourceElement=snapshot_instance.path)
+            'CreateElementReplica', repservice,
+            ElementName=volumename,
+            SyncType=self._getnum(8, '16'),
+            SourceElement=snapshot_instance.path)
 
         if rc != 0L:
             rc, errordesc = self._wait_for_job_complete(job)
@@ -247,13 +247,13 @@ class EMCSMISCommon():
                      'sync_name': str(sync_name)})
 
         rc, job = self.conn.InvokeMethod(
-                    'ModifyReplicaSynchronization',
-                    repservice,
-                    Operation=self._getnum(8, '16'),
-                    Synchronization=sync_name)
+            'ModifyReplicaSynchronization',
+            repservice,
+            Operation=self._getnum(8, '16'),
+            Synchronization=sync_name)
 
         LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s  '
-                  'Snapshot: %(snapshotname)s  Return code: %(rc)lu')
+                    'Snapshot: %(snapshotname)s  Return code: %(rc)lu')
                   % {'volumename': volumename,
                      'snapshotname': snapshotname,
                      'rc': rc})
@@ -329,10 +329,10 @@ class EMCSMISCommon():
 
         # Create a Clone from source volume
         rc, job = self.conn.InvokeMethod(
-                    'CreateElementReplica', repservice,
-                    ElementName=volumename,
-                    SyncType=self._getnum(8, '16'),
-                    SourceElement=src_instance.path)
+            'CreateElementReplica', repservice,
+            ElementName=volumename,
+            SyncType=self._getnum(8, '16'),
+            SourceElement=src_instance.path)
 
         if rc != 0L:
             rc, errordesc = self._wait_for_job_complete(job)
@@ -371,10 +371,10 @@ class EMCSMISCommon():
                      'sync_name': str(sync_name)})
 
         rc, job = self.conn.InvokeMethod(
-                    'ModifyReplicaSynchronization',
-                    repservice,
-                    Operation=self._getnum(8, '16'),
-                    Synchronization=sync_name)
+            'ModifyReplicaSynchronization',
+            repservice,
+            Operation=self._getnum(8, '16'),
+            Synchronization=sync_name)
 
         LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s  '
                   'Source Volume: %(srcname)s  Return code: %(rc)lu')
@@ -422,8 +422,8 @@ class EMCSMISCommon():
 
         storage_system = vol_instance['SystemName']
 
-        configservice = self._find_storage_configuration_service(
-                        storage_system)
+        configservice =\
+            self._find_storage_configuration_service(storage_system)
         if configservice is None:
             exception_message = (_("Error Delete Volume: %(volumename)s. "
                                  "Storage Configuration Service not found.")
@@ -443,9 +443,10 @@ class EMCSMISCommon():
                      'name': volumename,
                      'vol_instance': str(vol_instance.path)})
 
-        rc, job = self.conn.InvokeMethod(
-                    'EMCReturnToStoragePool',
-                    configservice, TheElements=[vol_instance.path])
+        rc, job =\
+            self.conn.InvokeMethod('EMCReturnToStoragePool',
+                                   configservice,
+                                   TheElements=[vol_instance.path])
 
         if rc != 0L:
             rc, errordesc = self._wait_for_job_complete(job)
@@ -506,11 +507,11 @@ class EMCSMISCommon():
                      'elementname': snapshotname,
                      'sourceelement': str(vol_instance.path)})
 
-        rc, job = self.conn.InvokeMethod(
-                    'CreateElementReplica', repservice,
-                    ElementName=snapshotname,
-                    SyncType=self._getnum(7, '16'),
-                    SourceElement=vol_instance.path)
+        rc, job =\
+            self.conn.InvokeMethod('CreateElementReplica', repservice,
+                                   ElementName=snapshotname,
+                                   SyncType=self._getnum(7, '16'),
+                                   SourceElement=vol_instance.path)
 
         LOG.debug(_('Create Snapshot: Volume: %(volumename)s  '
                   'Snapshot: %(snapshotname)s  Return code: %(rc)lu')
@@ -550,8 +551,8 @@ class EMCSMISCommon():
                   % {'snapshot': snapshotname,
                      'volume': volumename})
 
-        sync_name, storage_system = self._find_storage_sync_sv_sv(
-                                    snapshotname, volumename, False)
+        sync_name, storage_system =\
+            self._find_storage_sync_sv_sv(snapshotname, volumename, False)
         if sync_name is None:
             LOG.error(_('Snapshot: %(snapshot)s: volume: %(volume)s '
                       'not found on the array. No snapshot to delete.')
@@ -578,11 +579,11 @@ class EMCSMISCommon():
                      'service': str(repservice),
                      'sync_name': str(sync_name)})
 
-        rc, job = self.conn.InvokeMethod(
-                    'ModifyReplicaSynchronization',
-                    repservice,
-                    Operation=self._getnum(19, '16'),
-                    Synchronization=sync_name)
+        rc, job =\
+            self.conn.InvokeMethod('ModifyReplicaSynchronization',
+                                   repservice,
+                                   Operation=self._getnum(19, '16'),
+                                   Synchronization=sync_name)
 
         LOG.debug(_('Delete Snapshot: Volume: %(volumename)s  Snapshot: '
                   '%(snapshotname)s  Return code: %(rc)lu')
@@ -651,21 +652,21 @@ class EMCSMISCommon():
                      'initiator': initiators})
 
         if lunmask_ctrl is None:
-            rc, controller = self.conn.InvokeMethod(
-                                'ExposePaths',
-                                configservice, LUNames=[lun_name],
-                                InitiatorPortIDs=initiators,
-                                DeviceAccesses=[self._getnum(2, '16')])
+            rc, controller =\
+                self.conn.InvokeMethod('ExposePaths',
+                                       configservice, LUNames=[lun_name],
+                                       InitiatorPortIDs=initiators,
+                                       DeviceAccesses=[self._getnum(2, '16')])
         else:
             LOG.debug(_('ExposePaths parameter '
                       'LunMaskingSCSIProtocolController: '
                       '%(lunmasking)s')
                       % {'lunmasking': str(lunmask_ctrl)})
-            rc, controller = self.conn.InvokeMethod(
-                                'ExposePaths',
-                                configservice, LUNames=[lun_name],
-                                DeviceAccesses=[self._getnum(2, '16')],
-                                ProtocolControllers=[lunmask_ctrl])
+            rc, controller =\
+                self.conn.InvokeMethod('ExposePaths',
+                                       configservice, LUNames=[lun_name],
+                                       DeviceAccesses=[self._getnum(2, '16')],
+                                       ProtocolControllers=[lunmask_ctrl])
 
         if rc != 0L:
             msg = (_('Error mapping volume %s.') % volumename)
@@ -724,9 +725,11 @@ class EMCSMISCommon():
                      'masking_group': str(masking_group),
                      'vol': str(vol_instance.path)})
 
-        rc, job = self.conn.InvokeMethod(
-                    'AddMembers', configservice,
-                    MaskingGroup=masking_group, Members=[vol_instance.path])
+        rc, job =\
+            self.conn.InvokeMethod('AddMembers',
+                                   configservice,
+                                   MaskingGroup=masking_group,
+                                   Members=[vol_instance.path])
 
         if rc != 0L:
             rc, errordesc = self._wait_for_job_complete(job)
@@ -864,7 +867,7 @@ class EMCSMISCommon():
 
     def _get_storage_type(self, filename=None):
         """Get the storage type from the config file."""
-        if filename == None:
+        if filename is None:
             filename = self.configuration.cinder_emc_config_file
 
         file = open(filename, 'r')
@@ -884,7 +887,7 @@ class EMCSMISCommon():
             raise exception.VolumeBackendAPIException(data=exception_message)
 
     def _get_masking_view(self, filename=None):
-        if filename == None:
+        if filename is None:
             filename = self.configuration.cinder_emc_config_file
 
         file = open(filename, 'r')
@@ -902,7 +905,7 @@ class EMCSMISCommon():
             return None
 
     def _get_ecom_cred(self, filename=None):
-        if filename == None:
+        if filename is None:
             filename = self.configuration.cinder_emc_config_file
 
         file = open(filename, 'r')
@@ -924,7 +927,7 @@ class EMCSMISCommon():
             return None
 
     def _get_ecom_server(self, filename=None):
-        if filename == None:
+        if filename is None:
             filename = self.configuration.cinder_emc_config_file
 
         file = open(filename, 'r')
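
Alongside the indentation work, the hunks above swap "== None" for "is None" when checking the optional config-file argument. "is None" tests identity rather than equality, so it cannot be fooled by a class that overrides __eq__, and it is what pep8's E711 check asks for (E711 stays in the ignore list at the bottom of this patch, so these fixes are extra cleanup rather than something the new gate requires). A minimal sketch with a hypothetical class, not code from the patch:

    class AlwaysEqual(object):
        # Illustrative only: any equality comparison returns True.
        def __eq__(self, other):
            return True

    obj = AlwaysEqual()
    print(obj == None)    # True  -- __eq__ hijacks the equality test
    print(obj is None)    # False -- identity cannot be overridden
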
@@ -1219,9 +1222,9 @@ class EMCSMISCommon():
         for ctrl in controllers:
             if storage_system != ctrl['SystemName']:
                 continue
-            associators = self.conn.Associators(
-                            ctrl,
-                            resultClass='EMC_StorageHardwareID')
+            associators =\
+                self.conn.Associators(ctrl,
+                                      resultClass='EMC_StorageHardwareID')
             for assoc in associators:
                 # if EMC_StorageHardwareID matches the initiator,
                 # we found the existing EMC_LunMaskingSCSIProtocolController
@@ -1253,14 +1256,16 @@ class EMCSMISCommon():
                                                           connector):
         foundCtrl = None
         initiators = self._find_initiator_names(connector)
-        controllers = self.conn.AssociatorNames(
-                        vol_instance.path,
-                        resultClass='EMC_LunMaskingSCSIProtocolController')
+        controllers =\
+            self.conn.AssociatorNames(
+                vol_instance.path,
+                resultClass='EMC_LunMaskingSCSIProtocolController')
 
         for ctrl in controllers:
-            associators = self.conn.Associators(
-                            ctrl,
-                            resultClass='EMC_StorageHardwareID')
+            associators =\
+                self.conn.Associators(
+                    ctrl,
+                    resultClass='EMC_StorageHardwareID')
             for assoc in associators:
                 # if EMC_StorageHardwareID matches the initiator,
                 # we found the existing EMC_LunMaskingSCSIProtocolController
@@ -1369,8 +1374,8 @@ class EMCSMISCommon():
             pass
 
         unitnames = self.conn.ReferenceNames(
-                        vol_instance.path,
-                        ResultClass='CIM_ProtocolControllerForUnit')
+            vol_instance.path,
+            ResultClass='CIM_ProtocolControllerForUnit')
 
         for unitname in unitnames:
             controller = unitname['Antecedent']
@@ -1450,7 +1455,7 @@ class EMCSMISCommon():
                 sp = idarray[2]
 
             if (storage_system == storsystemname and
-                owningsp == sp):
+                    owningsp == sp):
                 foundSystem = system
                 LOG.debug(_("Found Storage Processor System: %s")
                           % (str(system)))
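
The emc_smis_common.py hunks are the bulk of the E12 cleanup: long InvokeMethod and Associators calls that used to hang at an arbitrary depth are re-wrapped so the arguments either line up under the opening parenthesis or use a plain four-space hanging indent. A minimal sketch of the two continuation styles the E12x checks accept, using a hypothetical invoke helper rather than the real WBEM connection:

    def invoke(method, service, elements=None):
        # Stand-in for a long SMI-S call; returns a fake (rc, job) pair.
        return 0, None

    service = object()

    # Visual indent: arguments aligned under the opening parenthesis.
    rc, job = invoke('ReturnToPool',
                     service,
                     elements=['vol1'])

    # Hanging indent: nothing after the open parenthesis, body indented four.
    rc, job = invoke(
        'ReturnToPool', service, elements=['vol1'])

The "rc, job =\" pattern used throughout the file (and in emc_smis_iscsi.py below) is the visual-indent variant with the assignment broken across a backslash first, so the aligned arguments still fit within 79 columns.
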
index 53682f9bfb928f20330339ec402a1fe1fc166feb..41117429906d6e2b7a53feef1e6e99b0b368d730 100644 (file)
@@ -35,9 +35,9 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
     def __init__(self, *args, **kwargs):
 
         super(EMCSMISISCSIDriver, self).__init__(*args, **kwargs)
-        self.common = emc_smis_common.EMCSMISCommon(
-            'iSCSI',
-            configuration=self.configuration)
+        self.common =\
+            emc_smis_common.EMCSMISCommon('iSCSI',
+                                          configuration=self.configuration)
 
     def check_for_setup_error(self):
         pass
index 4ba7ec44c269e129c2f0964e4dc78168e12db8a1..c486b8abd04abedfefc5dea84efad620b283381a 100644 (file)
@@ -195,7 +195,7 @@ class NfsDriver(RemoteFsDriver):
             raise exception.NfsException(msg)
 
         if ((not self.configuration.nfs_used_ratio > 0) and
-                 (self.configuration.nfs_used_ratio <= 1)):
+                (self.configuration.nfs_used_ratio <= 1)):
             msg = _("NFS config 'nfs_used_ratio' invalid.  Must be > 0 "
                     "and <= 1.0: %s") % self.configuration.nfs_used_ratio
             LOG.error(msg)
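
For multi-line conditions the patch settles on giving the continued part of the condition one extra indent level, so it no longer lines up with, or over-shoots, the block body underneath; the nfs.py hunk above and the hp_3par_common.py, storwize_svc.py and volume_types.py hunks below all make the same move, and in storwize_svc.py the "return host" body can then drop back to the normal four-space level. A minimal sketch with made-up values, not the driver's actual validation:

    used_ratio = 0.75

    # The continued condition gets an extra indent level so it stays
    # visually distinct from the suite that follows.
    if (used_ratio > 0 and
            used_ratio <= 1):
        print("ratio accepted")
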
index dc3c43ba83eae956c9de8ca6ad403efcbd762f9f..37d9d0658fd1d3e0acf8604e89ceaed55d199327 100644 (file)
@@ -165,7 +165,7 @@ class HP3PARCommon(object):
             raise exception.InvalidInput(reason=err)
 
         if ('domain' not in cpg
-            or cpg['domain'] != self.config.hp3par_domain):
+                or cpg['domain'] != self.config.hp3par_domain):
             err = ("CPG's domain '%s' and config option hp3par_domain '%s'"
                    " must be the same" %
                    (cpg['domain'], self.config.hp3par_domain))
index c0ac8ac772523cf477d745af3c4168c0076b0815..8d407c27ee2db093fce0b2fafbe91da781d0a5b5 100644 (file)
@@ -132,16 +132,16 @@ class SanISCSIDriver(ISCSIDriver):
                         greenthread.sleep(random.randint(20, 500) / 100.0)
                 try:
                     raise exception.ProcessExecutionError(
-                            exit_code=last_exception.exit_code,
-                            stdout=last_exception.stdout,
-                            stderr=last_exception.stderr,
-                            cmd=last_exception.cmd)
+                        exit_code=last_exception.exit_code,
+                        stdout=last_exception.stdout,
+                        stderr=last_exception.stderr,
+                        cmd=last_exception.cmd)
                 except AttributeError:
                     raise exception.ProcessExecutionError(
-                            exit_code=-1,
-                            stdout="",
-                            stderr="Error running SSH command",
-                            cmd=command)
+                        exit_code=-1,
+                        stdout="",
+                        stderr="Error running SSH command",
+                        cmd=command)
 
         except Exception as e:
             LOG.error(_("Error running SSH command: %s") % command)
index c29533b5bf27cd0cb2cf41ade7bda91902078529..3cd9a15d977f63b55d1af3cc0795abf64421c63c 100755 (executable)
@@ -132,7 +132,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         for num in range(0, 128):
             ch = str(chr(num))
             if (not ch.isalnum() and ch != ' ' and ch != '.'
-                and ch != '-' and ch != '_'):
+                    and ch != '-' and ch != '_'):
                 invalid_ch_in_host = invalid_ch_in_host + ch
         self._string_host_name_filter = string.maketrans(
             invalid_ch_in_host, '-' * len(invalid_ch_in_host))
@@ -465,9 +465,9 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
                 # If '!' not found, return the string and two empty strings
                 attr_name, foo, attr_val = attr_line.partition('!')
                 if (attr_name == 'iscsi_name' and
-                    'initiator' in connector and
-                    attr_val == connector['initiator']):
-                        return host
+                        'initiator' in connector and
+                        attr_val == connector['initiator']):
+                    return host
                 elif (attr_name == 'WWPN' and
                       'wwpns' in connector and
                       attr_val.lower() in
@@ -1315,7 +1315,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         if opts['protocol'] == 'iSCSI':
             # Implemented in base iSCSI class
             return super(StorwizeSVCDriver, self).copy_image_to_volume(
-                    context, volume, image_service, image_id)
+                context, volume, image_service, image_id)
         else:
             raise NotImplementedError()
 
@@ -1324,7 +1324,7 @@ class StorwizeSVCDriver(san.SanISCSIDriver):
         if opts['protocol'] == 'iSCSI':
             # Implemented in base iSCSI class
             return super(StorwizeSVCDriver, self).copy_volume_to_image(
-                    context, volume, image_service, image_meta)
+                context, volume, image_service, image_meta)
         else:
             raise NotImplementedError()
 
index 2a19142f215cd5e8c6513e6f332e14857afa53cb..4228820bac3646dd4d563f54c7ad71acd00aa2df 100644 (file)
@@ -68,7 +68,7 @@ def get_all_types(context, inactive=0, search_opts={}):
         def _check_extra_specs_match(vol_type, searchdict):
             for k, v in searchdict.iteritems():
                 if (k not in vol_type['extra_specs'].keys()
-                    or vol_type['extra_specs'][k] != v):
+                        or vol_type['extra_specs'][k] != v):
                     return False
             return True
 
@@ -140,7 +140,7 @@ def is_key_value_present(volume_type_id, key, value, volume_type=None):
         volume_type = get_volume_type(context.get_admin_context(),
                                       volume_type_id)
     if (volume_type.get('extra_specs') is None or
-        volume_type['extra_specs'].get(key) != value):
+            volume_type['extra_specs'].get(key) != value):
         return False
     else:
         return True
diff --git a/tox.ini b/tox.ini
index b6eeea83b2ea80d4d588a8cf49f90914592f642d..f5dfda43be39e0d9dd15c3233a17f873e29cb0e0 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -37,6 +37,6 @@ deps = -r{toxinidir}/requirements.txt
 commands = bash tools/lintstack.sh
 
 [flake8]
-ignore = E12,E711,E712,H302,H303,H304,H401,H402,H403,H404,F
+ignore = E711,E712,H302,H303,H304,H401,H402,H403,H404,F
 builtins = _
 exclude = .venv,.tox,dist,doc,openstack,*egg
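
The tox.ini change is what drives the rest of the patch: E12 is a prefix that matches the whole E121-E131 family of continuation-line indentation checks, so removing it from the flake8 ignore list turns those checks on for the gate, and every hunk above is a re-wrap to keep the tree passing rather than a behaviour change. An illustrative snippet, not from the repository, that the newly enabled checks would flag on the first call and accept on the second:

    def invoke(method, element=None):
        # Illustrative helper, standing in for any long call worth wrapping.
        return 0

    rc = invoke(
            'ReturnToPool', element='vol1')  # flagged by the E12x checks

    rc = invoke(
        'ReturnToPool', element='vol1')      # four-space hang: accepted
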