review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Fix comments style according to the Hacking Rules
author: Yuriy Nesenenko <ynesenenko@mirantis.com>
Wed, 11 Feb 2015 11:19:40 +0000 (13:19 +0200)
committer: Yuriy Nesenenko <ynesenenko@mirantis.com>
Wed, 18 Feb 2015 12:10:19 +0000 (14:10 +0200)
According to PEP 8 (pycodestyle check E265), a block comment should start
with '# ' — that is, there must be at least one space after the '#'.

Change-Id: Ic51f80210becc375b30f0f4e9eeb54995775c817
Partial-Bug: #1407162

53 files changed:
cinder/api/contrib/backups.py
cinder/api/contrib/qos_specs_manage.py
cinder/api/contrib/scheduler_stats.py
cinder/api/middleware/sizelimit.py
cinder/api/openstack/wsgi.py
cinder/api/v1/snapshots.py
cinder/api/v1/volumes.py
cinder/api/v2/snapshots.py
cinder/api/views/qos_specs.py
cinder/api/xmlutil.py
cinder/backup/api.py
cinder/brick/initiator/connector.py
cinder/db/sqlalchemy/api.py
cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py
cinder/exception.py
cinder/image/glance.py
cinder/scheduler/filter_scheduler.py
cinder/scheduler/weights/capacity.py
cinder/tests/api/contrib/test_backups.py
cinder/tests/api/extensions/foxinsocks.py
cinder/tests/api/v1/test_limits.py
cinder/tests/api/v1/test_snapshot_metadata.py
cinder/tests/api/v1/test_snapshots.py
cinder/tests/api/v1/test_volume_metadata.py
cinder/tests/api/v1/test_volumes.py
cinder/tests/api/v2/test_limits.py
cinder/tests/api/v2/test_snapshot_metadata.py
cinder/tests/api/v2/test_snapshots.py
cinder/tests/api/v2/test_volume_metadata.py
cinder/tests/brick/test_brick_lvm.py
cinder/tests/glance/stubs.py
cinder/tests/image/fake.py
cinder/tests/image/test_glance.py
cinder/tests/test_backup.py
cinder/tests/test_backup_ceph.py
cinder/tests/test_dellsc.py
cinder/tests/test_emc_vnxdirect.py
cinder/tests/test_hp3par.py
cinder/tests/test_openvstorage.py
cinder/tests/test_prophetstor_dpl.py
cinder/tests/test_storwize_svc.py
cinder/tests/test_volume_transfer.py
cinder/tests/test_volume_types.py
cinder/tests/test_xio.py
cinder/tests/test_zadara.py
cinder/volume/drivers/hds/nfs.py
cinder/volume/drivers/huawei/__init__.py
cinder/volume/drivers/remotefs.py
cinder/volume/drivers/san/hp/hp_3par_common.py
cinder/volume/drivers/sheepdog.py
cinder/volume/flows/api/create_volume.py
cinder/volume/volume_types.py
tox.ini

index 138b9ee2184e2f7f00c7d6a0a2788d0e62786593..0ac54da6356728912f2137072bc6ff9ec3790615 100644 (file)
@@ -341,7 +341,7 @@ class BackupsController(wsgi.Controller):
             raise exc.HTTPBadRequest(explanation=msg)
         context = req.environ['cinder.context']
         import_data = body['backup-record']
-        #Verify that body elements are provided
+        # Verify that body elements are provided
         try:
             backup_service = import_data['backup_service']
             backup_url = import_data['backup_url']
index 03481ed35e4eea1e86e0a3af1caca67180d25a45..b0224009bda13955e980dd65a350028fd1772666 100644 (file)
@@ -217,7 +217,7 @@ class QoSSpecsController(wsgi.Controller):
 
         force = req.params.get('force', None)
 
-        #convert string to bool type in strict manner
+        # Convert string to bool type in strict manner
         force = strutils.bool_from_string(force)
         LOG.debug("Delete qos_spec: %(id)s, force: %(force)s" %
                   {'id': id, 'force': force})
index 9270b02e0e9352fb99423f0c75f98e8b3e84a361..3c59e19e7ee4ab5707e11bdace38d11b40b03814 100644 (file)
@@ -43,7 +43,7 @@ class SchedulerStatsController(wsgi.Controller):
         context = req.environ['cinder.context']
         authorize(context, 'get_pools')
 
-        #TODO(zhiteng) Add filters support
+        # TODO(zhiteng) Add filters support
         detail = req.params.get('detail', False)
         pools = self.scheduler_api.get_pools(context, filters=None)
 
index 58d450c5e6795975c0268c1e0921e4e3b8d5b1a2..7a69826ba09001c27ce1245061a5eb840cb32d99 100644 (file)
@@ -26,7 +26,7 @@ from cinder.openstack.common import log as logging
 from cinder import wsgi
 
 
-#default request size is 112k
+# Default request size is 112k
 max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size',
                                        default=114688,
                                        help='Max size for body of a request')
index 703f7124339cb3f950b2c561ea32cb095b16a313..2704d929888fbf2c73ff83526e00c84a34dd8986 100644 (file)
@@ -442,7 +442,7 @@ class XMLDictSerializer(DictSerializer):
         self._add_xmlns(node, has_atom)
         return node.toxml('UTF-8')
 
-    #NOTE (ameade): the has_atom should be removed after all of the
+    # NOTE (ameade): the has_atom should be removed after all of the
     # xml serializers and view builders have been updated to the current
     # spec that required all responses include the xmlns:atom, the has_atom
     # flag is to prevent current tests from breaking
@@ -462,7 +462,7 @@ class XMLDictSerializer(DictSerializer):
         if xmlns:
             result.setAttribute('xmlns', xmlns)
 
-        #TODO(bcwaldon): accomplish this without a type-check
+        # TODO(bcwaldon): accomplish this without a type-check
         if isinstance(data, list):
             collections = metadata.get('list_collections', {})
             if nodename in collections:
@@ -481,7 +481,7 @@ class XMLDictSerializer(DictSerializer):
             for item in data:
                 node = self._to_xml_node(doc, metadata, singular, item)
                 result.appendChild(node)
-        #TODO(bcwaldon): accomplish this without a type-check
+        # TODO(bcwaldon): accomplish this without a type-check
         elif isinstance(data, dict):
             collections = metadata.get('dict_collections', {})
             if nodename in collections:
index 59eb85607ec42ba79134175321dc1d734009e5aa..400c1cbc9a5c9536b5c52d9a7c668c007bc4c1f1 100644 (file)
@@ -140,12 +140,12 @@ class SnapshotsController(wsgi.Controller):
         """Returns a list of snapshots, transformed through entity_maker."""
         context = req.environ['cinder.context']
 
-        #pop out limit and offset , they are not search_opts
+        # pop out limit and offset , they are not search_opts
         search_opts = req.GET.copy()
         search_opts.pop('limit', None)
         search_opts.pop('offset', None)
 
-        #filter out invalid option
+        # filter out invalid option
         allowed_search_options = ('status', 'volume_id', 'display_name')
         utils.remove_invalid_filter_options(context, search_opts,
                                             allowed_search_options)
index 723395cbcf5368691c09ab38899811a1aed07d71..4798f5f4de613d3beb65bd92d7c2f027dde106d1 100644 (file)
@@ -265,7 +265,7 @@ class VolumeController(wsgi.Controller):
     def _items(self, req, entity_maker):
         """Returns a list of volumes, transformed through entity_maker."""
 
-        #pop out limit and offset , they are not search_opts
+        # pop out limit and offset , they are not search_opts
         search_opts = req.GET.copy()
         search_opts.pop('limit', None)
         search_opts.pop('offset', None)
index 27060c3081c007a6c647a944e17ff239564d3da6..0549d0150ae5b6487aeb33aa16f234293fa38d14 100644 (file)
@@ -143,12 +143,12 @@ class SnapshotsController(wsgi.Controller):
         """Returns a list of snapshots, transformed through entity_maker."""
         context = req.environ['cinder.context']
 
-        #pop out limit and offset , they are not search_opts
+        # pop out limit and offset , they are not search_opts
         search_opts = req.GET.copy()
         search_opts.pop('limit', None)
         search_opts.pop('offset', None)
 
-        #filter out invalid option
+        # filter out invalid option
         allowed_search_options = ('status', 'volume_id', 'name')
         utils.remove_invalid_filter_options(context, search_opts,
                                             allowed_search_options)
index cd83862dd343573d2ffa1eb0e1809f435904bda4..0241a12effac5b670dfc1eae50b9de4f4f2f4869 100644 (file)
@@ -43,7 +43,7 @@ class ViewBuilder(common.ViewBuilder):
 
     def detail(self, request, qos_spec):
         """Detailed view of a single qos_spec."""
-        #TODO(zhiteng) Add associations to detailed view
+        # TODO(zhiteng) Add associations to detailed view
         return {
             'qos_specs': qos_spec,
             'links': self._get_links(request,
index b52f72e460819902075003ebcff69820edba8313..76d102f5ee1d24656bcca6eb7daaa240e53ed6d4 100644 (file)
@@ -351,7 +351,7 @@ class TemplateElement(object):
     def getAttrib(self, obj):
         """Get attribute."""
         tmpattrib = {}
-        #Now set up all the attributes...
+        # Now set up all the attributes...
         for key, value in self.attrib.items():
             try:
                 tmpattrib[key] = value(obj)
@@ -393,7 +393,7 @@ class TemplateElement(object):
         tagnameList = self._splitTagName(tagname)
         insertIndex = 0
 
-        #If parent is not none and has same tagname
+        # If parent is not none and has same tagname
         if parent is not None:
             for i in range(0, len(tagnameList)):
                 tmpInsertPos = parent.find(tagnameList[i])
@@ -407,19 +407,19 @@ class TemplateElement(object):
         if insertIndex >= len(tagnameList):
             insertIndex = insertIndex - 1
 
-        #Create root elem
+        # Create root elem
         elem = etree.Element(tagnameList[insertIndex], nsmap=nsmap)
         rootelem = elem
         subelem = elem
 
-        #Create subelem
+        # Create subelem
         for i in range((insertIndex + 1), len(tagnameList)):
             subelem = etree.SubElement(elem, tagnameList[i])
             elem = subelem
 
         # If we have a parent, append the node to the parent
         if parent is not None:
-            #If we can merge this element, then insert
+            # If we can merge this element, then insert
             if insertIndex > 0:
                 parent.insert(len(list(parent)), rootelem)
             else:
index 89a1f36528665652d6e7e99c73abe4c6aa0c9b89..33cfb7e09094f713c708d73130e698bfb657b945 100644 (file)
@@ -180,9 +180,9 @@ class API(base.Base):
                 finally:
                     QUOTAS.rollback(context, reservations)
 
-        #TODO(DuncanT): In future, when we have a generic local attach,
-        #               this can go via the scheduler, which enables
-        #               better load balancing and isolation of services
+        # TODO(DuncanT): In future, when we have a generic local attach,
+        #                this can go via the scheduler, which enables
+        #                better load balancing and isolation of services
         self.backup_rpcapi.create_backup(context,
                                          backup['host'],
                                          backup['id'],
index ffed9c66ceb287ebbd7cc98cd7a44da9df834152..5c0d53ef17c91272254d9c020c6df344e95907c8 100644 (file)
@@ -263,7 +263,7 @@ class ISCSIConnector(InitiatorConnector):
         device_info = {'type': 'block'}
 
         if self.use_multipath:
-            #multipath installed, discovering other targets if available
+            # multipath installed, discovering other targets if available
             for ip, iqn in self._discover_iscsi_portals(connection_properties):
                 props = copy.deepcopy(connection_properties)
                 props['target_portal'] = ip
@@ -310,7 +310,7 @@ class ISCSIConnector(InitiatorConnector):
         host_device = next(dev for dev in host_devices if os.path.exists(dev))
 
         if self.use_multipath:
-            #we use the multipath device instead of the single path device
+            # we use the multipath device instead of the single path device
             self._rescan_multipath()
             multipath_device = self._get_multipath_device_name(host_device)
             if multipath_device is not None:
@@ -514,8 +514,8 @@ class ISCSIConnector(InitiatorConnector):
                                   "node.session.auth.password",
                                   connection_properties['auth_password'])
 
-        #duplicate logins crash iscsiadm after load,
-        #so we scan active sessions to see if the node is logged in.
+        # duplicate logins crash iscsiadm after load,
+        # so we scan active sessions to see if the node is logged in.
         out = self._run_iscsiadm_bare(["-m", "session"],
                                       run_as_root=True,
                                       check_exit_code=[0, 1, 21])[0] or ""
@@ -536,8 +536,8 @@ class ISCSIConnector(InitiatorConnector):
                                    ("--login",),
                                    check_exit_code=[0, 255])
             except putils.ProcessExecutionError as err:
-                #as this might be one of many paths,
-                #only set successful logins to startup automatically
+                # as this might be one of many paths,
+                # only set successful logins to startup automatically
                 if err.exit_code in [15]:
                     self._iscsiadm_update(connection_properties,
                                           "node.startup",
@@ -855,7 +855,7 @@ class AoEConnector(InitiatorConnector):
 
         waiting_status = {'tries': 0}
 
-        #NOTE(jbr_): Device path is not always present immediately
+        # NOTE(jbr_): Device path is not always present immediately
         def _wait_for_discovery(aoe_path):
             if os.path.exists(aoe_path):
                 raise loopingcall.LoopingCallDone
index 2ad9f744ccb48a75e78f24270cf60ffd783f64a2..cbb6df13dc53a7333cf43dfa04833a28715a8eb8 100644 (file)
@@ -2424,7 +2424,7 @@ def qos_specs_get_all(context, inactive=False, filters=None):
         ]
     """
     filters = filters or {}
-    #TODO(zhiteng) Add filters for 'consumer'
+    # TODO(zhiteng) Add filters for 'consumer'
 
     read_deleted = "yes" if inactive else "no"
     rows = model_query(context, models.QualityOfServiceSpecs,
index b611fc26a62b163c83ff3facec94cffd8939143b..2ccbc197c8359676dbc5be84df20e1701a78dfad 100644 (file)
@@ -52,20 +52,20 @@ def upgrade(migrate_engine):
         return
 
     try:
-        #Set default volumes
+        # Set default volumes
         qci = quota_classes.insert()
         qci.execute({'created_at': CREATED_AT,
                      'class_name': CLASS_NAME,
                      'resource': 'volumes',
                      'hard_limit': CONF.quota_volumes,
                      'deleted': False, })
-        #Set default snapshots
+        # Set default snapshots
         qci.execute({'created_at': CREATED_AT,
                      'class_name': CLASS_NAME,
                      'resource': 'snapshots',
                      'hard_limit': CONF.quota_snapshots,
                      'deleted': False, })
-        #Set default gigabytes
+        # Set default gigabytes
         qci.execute({'created_at': CREATED_AT,
                      'class_name': CLASS_NAME,
                      'resource': 'gigabytes',
index 37633f45faa575ea5d2fc7d390cd5633769094f3..d886663fba820253abaa2fd8eefceddcf3a23f72 100644 (file)
@@ -389,7 +389,6 @@ class FileNotFound(NotFound):
     message = _("File %(file_path)s could not be found.")
 
 
-#TODO(bcwaldon): EOL this exception!
 class Duplicate(CinderException):
     pass
 
@@ -722,7 +721,7 @@ class BadHTTPResponseStatus(ZadaraException):
     message = _("Bad HTTP response status %(status)s")
 
 
-#SolidFire
+# SolidFire
 class SolidFireAPIException(VolumeBackendAPIException):
     message = _("Bad response from SolidFire API")
 
index ef76aeb0ac35f6146264c049bd04c0d184b1d327..7607daf87fc84b4aa0ae94a0ddd06ab7ed1cd326 100644 (file)
@@ -309,16 +309,16 @@ class GlanceImageService(object):
                image_meta, data=None, purge_props=True):
         """Modify the given image with the new data."""
         image_meta = self._translate_to_glance(image_meta)
-        #NOTE(dosaboy): see comment in bug 1210467
+        # NOTE(dosaboy): see comment in bug 1210467
         if CONF.glance_api_version == 1:
             image_meta['purge_props'] = purge_props
-        #NOTE(bcwaldon): id is not an editable field, but it is likely to be
+        # NOTE(bcwaldon): id is not an editable field, but it is likely to be
         # passed in by calling code. Let's be nice and ignore it.
         image_meta.pop('id', None)
         if data:
             image_meta['data'] = data
         try:
-            #NOTE(dosaboy): the v2 api separates update from upload
+            # NOTE(dosaboy): the v2 api separates update from upload
             if data and CONF.glance_api_version > 1:
                 image_meta = self._client.call(context, 'upload', image_id,
                                                image_meta['data'])
@@ -431,7 +431,7 @@ def _convert_to_string(metadata):
 
 
 def _extract_attributes(image):
-    #NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform
+    # NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform
     # a get(), resulting in a useless request back to glance. This list is
     # therefore sorted, with dependent attributes as the end
     # 'deleted_at' depends on 'deleted'
@@ -513,7 +513,7 @@ def get_remote_image_service(context, image_href):
     :returns: a tuple of the form (image_service, image_id)
 
     """
-    #NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a
+    # NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a
     # standalone image ID
     if '/' not in str(image_href):
         image_service = get_default_image_service()
index 583d3d11953033787295eb186ab8b5058b092221..c9dd604c942d79659e3747b34033122df5353bad 100644 (file)
@@ -169,7 +169,7 @@ class FilterScheduler(driver.Scheduler):
         return top_host.obj
 
     def get_pools(self, context, filters):
-        #TODO(zhiteng) Add filters support
+        # TODO(zhiteng) Add filters support
         return self.host_manager.get_pools(context)
 
     def _post_select_populate_filter_properties(self, filter_properties,
index d5068e97d4c2d529af02ee5b8bd9a0b853b79616..88d322145a9efb9a6b84b20531b970857edd4c11 100644 (file)
@@ -74,7 +74,7 @@ class CapacityWeigher(weights.BaseHostWeigher):
         total_space = host_state.total_capacity_gb
         if (free_space == 'infinite' or free_space == 'unknown' or
                 total_space == 'infinite' or total_space == 'unknown'):
-            #(zhiteng) 'infinite' and 'unknown' are treated the same
+            # (zhiteng) 'infinite' and 'unknown' are treated the same
             # here, for sorting purpose.
 
             # As a partial fix for bug #1350638, 'infinite' and 'unknown' are
index 55bf52077b31097d446f19960af450db34db4bbe..8184bb9dfaf7cc8575912a709cce7bc9e189fe7f 100644 (file)
@@ -548,26 +548,26 @@ class BackupsAPITestCase(test.TestCase):
         test_host = 'test_host'
         alt_host = 'strange_host'
         empty_service = []
-        #service host not match with volume's host
+        # service host not match with volume's host
         host_not_match = [{'availability_zone': "fake_az", 'host': alt_host,
                            'disabled': 0, 'updated_at': timeutils.utcnow()}]
-        #service az not match with volume's az
+        # service az not match with volume's az
         az_not_match = [{'availability_zone': "strange_az", 'host': test_host,
                          'disabled': 0, 'updated_at': timeutils.utcnow()}]
-        #service disabled
+        # service disabled
         disabled_service = []
 
-        #dead service that last reported at 20th century
+        # dead service that last reported at 20th century
         dead_service = [{'availability_zone': "fake_az", 'host': alt_host,
                          'disabled': 0, 'updated_at': '1989-04-16 02:55:44'}]
 
-        #first service's host not match but second one works.
+        # first service's host not match but second one works.
         multi_services = [{'availability_zone': "fake_az", 'host': alt_host,
                            'disabled': 0, 'updated_at': timeutils.utcnow()},
                           {'availability_zone': "fake_az", 'host': test_host,
                            'disabled': 0, 'updated_at': timeutils.utcnow()}]
 
-        #Setup mock to run through the following service cases
+        # Setup mock to run through the following service cases
         _mock_service_get_all_by_topic.side_effect = [empty_service,
                                                       host_not_match,
                                                       az_not_match,
@@ -579,32 +579,32 @@ class BackupsAPITestCase(test.TestCase):
                                         host=test_host)['id']
         volume = self.volume_api.get(context.get_admin_context(), volume_id)
 
-        #test empty service
+        # test empty service
         self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
                                                                     test_host),
                          False)
 
-        #test host not match service
+        # test host not match service
         self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
                                                                     test_host),
                          False)
 
-        #test az not match service
+        # test az not match service
         self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
                                                                     test_host),
                          False)
 
-        #test disabled service
+        # test disabled service
         self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
                                                                     test_host),
                          False)
 
-        #test dead service
+        # test dead service
         self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
                                                                     test_host),
                          False)
 
-        #test multi services and the last service matches
+        # test multi services and the last service matches
         self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
                                                                     test_host),
                          True)
@@ -1071,7 +1071,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(export.item(0).getAttribute('backup_url'),
                          backup_url)
 
-        #db.backup_destroy(context.get_admin_context(), backup_id)
+        # db.backup_destroy(context.get_admin_context(), backup_id)
 
     def test_export_record_with_bad_backup_id(self):
 
@@ -1266,7 +1266,7 @@ class BackupsAPITestCase(test.TestCase):
         backup_service = 'fake'
         backup_url = 'fake'
 
-        #test with no backup_service
+        # test with no backup_service
         req = webob.Request.blank('/v2/fake/backups/import_record')
         body = {'backup-record': {'backup_url': backup_url}}
         req.body = json.dumps(body)
@@ -1279,7 +1279,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(res_dict['badRequest']['message'],
                          'Incorrect request body format.')
 
-        #test with no backup_url
+        # test with no backup_url
         req = webob.Request.blank('/v2/fake/backups/import_record')
         body = {'backup-record': {'backup_service': backup_service}}
         req.body = json.dumps(body)
@@ -1293,7 +1293,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(res_dict['badRequest']['message'],
                          'Incorrect request body format.')
 
-        #test with no backup_url and backup_url
+        # test with no backup_url and backup_url
         req = webob.Request.blank('/v2/fake/backups/import_record')
         body = {'backup-record': {}}
         req.body = json.dumps(body)
index fcc9635d33e68e89fe9fc5d94f2284160c8dca73..f4711a38140251ede01c0f5ed5a47c666b162232 100644 (file)
@@ -45,7 +45,7 @@ class FoxInSocksServerControllerExtension(wsgi.Controller):
 class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller):
     @wsgi.extends
     def show(self, req, resp_obj, id):
-        #NOTE: This only handles JSON responses.
+        # NOTE: This only handles JSON responses.
         # You can use content type header to test for XML.
         resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
 
@@ -53,7 +53,7 @@ class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller):
 class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller):
     @wsgi.extends
     def show(self, req, resp_obj, id):
-        #NOTE: This only handles JSON responses.
+        # NOTE: This only handles JSON responses.
         # You can use content type header to test for XML.
         resp_obj.obj['big_bands'] = 'Pig Bands!'
 
index 92db5852197ed9ea0ab0d3dc5cbfae0f1e75069d..fe47ef39160c554ae567a760f9236124b4946997 100644 (file)
@@ -855,7 +855,7 @@ class LimitsXMLSerializationTest(test.TestCase):
         root = etree.XML(output)
         xmlutil.validate_schema(root, 'limits')
 
-        #verify absolute limits
+        # verify absolute limits
         absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
         self.assertEqual(len(absolutes), 4)
         for limit in absolutes:
@@ -863,7 +863,7 @@ class LimitsXMLSerializationTest(test.TestCase):
             value = limit.get('value')
             self.assertEqual(value, str(fixture['limits']['absolute'][name]))
 
-        #verify rate limits
+        # verify rate limits
         rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
         self.assertEqual(len(rates), 2)
         for i, rate in enumerate(rates):
@@ -890,10 +890,10 @@ class LimitsXMLSerializationTest(test.TestCase):
         root = etree.XML(output)
         xmlutil.validate_schema(root, 'limits')
 
-        #verify absolute limits
+        # verify absolute limits
         absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
         self.assertEqual(len(absolutes), 0)
 
-        #verify rate limits
+        # verify rate limits
         rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
         self.assertEqual(len(rates), 0)
index 89d1faf82601f767869f476133b169922402159d..aa1f190061787b9e0ea623f7f5cbd9253fe49fdb 100644 (file)
@@ -536,19 +536,19 @@ class SnapshotMetaDataTest(test.TestCase):
         req.method = 'POST'
         req.headers["content-type"] = "application/json"
 
-        #test for long key
+        # test for long key
         data = {"metadata": {"a" * 260: "value1"}}
         req.body = jsonutils.dumps(data)
         self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                           self.controller.create, req, self.req_id, data)
 
-        #test for long value
+        # test for long value
         data = {"metadata": {"key": "v" * 260}}
         req.body = jsonutils.dumps(data)
         self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                           self.controller.create, req, self.req_id, data)
 
-        #test for empty key.
+        # test for empty key.
         data = {"metadata": {"": "value1"}}
         req.body = jsonutils.dumps(data)
         self.assertRaises(webob.exc.HTTPBadRequest,
index c5b0b8c2871103f0c0afe98c4c83629515843865..5da7a98901c84930b5078cb79cf28dd2f20aa4be 100644 (file)
@@ -349,9 +349,9 @@ class SnapshotApiTest(test.TestCase):
             self.assertEqual(1, len(res['snapshots']))
             self.assertEqual(2, res['snapshots'][0]['id'])
 
-        #admin case
+        # admin case
         list_snapshots_with_limit_and_offset(is_admin=True)
-        #non_admin case
+        # non_admin case
         list_snapshots_with_limit_and_offset(is_admin=False)
 
     def test_admin_list_snapshots_all_tenants(self):
index 5cb270c40805fa16dd66388b1849d573c4026640..af70cfcf23e60eed96f50e28e023900680968261 100644 (file)
@@ -519,19 +519,19 @@ class volumeMetaDataTest(test.TestCase):
         req.method = 'POST'
         req.headers["content-type"] = "application/json"
 
-        #test for long key
+        # test for long key
         data = {"metadata": {"a" * 260: "value1"}}
         req.body = jsonutils.dumps(data)
         self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                           self.controller.create, req, self.req_id, data)
 
-        #test for long value
+        # test for long value
         data = {"metadata": {"key": "v" * 260}}
         req.body = jsonutils.dumps(data)
         self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                           self.controller.create, req, self.req_id, data)
 
-        #test for empty key.
+        # test for empty key.
         data = {"metadata": {"": "value1"}}
         req.body = jsonutils.dumps(data)
         self.assertRaises(webob.exc.HTTPBadRequest,
index 3c16fe6cbbfc376369baecbc930fc333aa4ef310..ef1226704a0548e5bd48f8cb9df36c0ef5401cbb 100644 (file)
@@ -645,9 +645,9 @@ class VolumeApiTest(test.TestCase):
             self.assertEqual(len(volumes), 1)
             self.assertEqual(volumes[0]['id'], 2)
 
-        #admin case
+        # admin case
         volume_detail_limit_offset(is_admin=True)
-        #non_admin case
+        # non_admin case
         volume_detail_limit_offset(is_admin=False)
 
     def test_volume_show_with_admin_metadata(self):
index 459bce894edb05063e7ede1651f63a92d12e2780..0c775a07b91c4ac263d218094b61c7b5398e4609 100644 (file)
@@ -856,7 +856,7 @@ class LimitsXMLSerializationTest(test.TestCase):
         root = etree.XML(output)
         xmlutil.validate_schema(root, 'limits')
 
-        #verify absolute limits
+        # verify absolute limits
         absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
         self.assertEqual(len(absolutes), 4)
         for limit in absolutes:
@@ -864,7 +864,7 @@ class LimitsXMLSerializationTest(test.TestCase):
             value = limit.get('value')
             self.assertEqual(value, str(fixture['limits']['absolute'][name]))
 
-        #verify rate limits
+        # verify rate limits
         rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
         self.assertEqual(len(rates), 2)
         for i, rate in enumerate(rates):
@@ -891,10 +891,10 @@ class LimitsXMLSerializationTest(test.TestCase):
         root = etree.XML(output)
         xmlutil.validate_schema(root, 'limits')
 
-        #verify absolute limits
+        # verify absolute limits
         absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
         self.assertEqual(len(absolutes), 0)
 
-        #verify rate limits
+        # verify rate limits
         rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
         self.assertEqual(len(rates), 0)
index 2fd62ca3707762107cab57ec913bd9bef01fb01f..6caf16433f72ffc63eb7e2c626bd8f08bd2d65f1 100644 (file)
@@ -534,19 +534,19 @@ class SnapshotMetaDataTest(test.TestCase):
         req.method = 'POST'
         req.headers["content-type"] = "application/json"
 
-        #test for long key
+        # test for long key
         data = {"metadata": {"a" * 260: "value1"}}
         req.body = jsonutils.dumps(data)
         self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                           self.controller.create, req, self.req_id, data)
 
-        #test for long value
+        # test for long value
         data = {"metadata": {"key": "v" * 260}}
         req.body = jsonutils.dumps(data)
         self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                           self.controller.create, req, self.req_id, data)
 
-        #test for empty key.
+        # test for empty key.
         data = {"metadata": {"": "value1"}}
         req.body = jsonutils.dumps(data)
         self.assertRaises(webob.exc.HTTPBadRequest,
index 43fa3a4e197cb72fa37f25aebe30914b5b7fdb7e..7cb05895bfe304ef79b6e7ea4bebe414fe175f10 100644 (file)
@@ -360,9 +360,9 @@ class SnapshotApiTest(test.TestCase):
             self.assertEqual(1, len(res['snapshots']))
             self.assertEqual(2, res['snapshots'][0]['id'])
 
-        #admin case
+        # admin case
         list_snapshots_with_limit_and_offset(is_admin=True)
-        #non_admin case
+        # non_admin case
         list_snapshots_with_limit_and_offset(is_admin=False)
 
     def test_admin_list_snapshots_all_tenants(self):
index ffc745df88b9158a328d4f76132dfbe5cb463f89..d4eb6e1c985a21a69a3697899a95b5dab036e3af 100644 (file)
@@ -520,19 +520,19 @@ class volumeMetaDataTest(test.TestCase):
         req.method = 'POST'
         req.headers["content-type"] = "application/json"
 
-        #test for long key
+        # test for long key
         data = {"metadata": {"a" * 260: "value1"}}
         req.body = jsonutils.dumps(data)
         self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                           self.controller.create, req, self.req_id, data)
 
-        #test for long value
+        # test for long value
         data = {"metadata": {"key": "v" * 260}}
         req.body = jsonutils.dumps(data)
         self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                           self.controller.create, req, self.req_id, data)
 
-        #test for empty key.
+        # test for empty key.
         data = {"metadata": {"": "value1"}}
         req.body = jsonutils.dumps(data)
         self.assertRaises(webob.exc.HTTPBadRequest,
index 1e84032f768bc64e1f153d1c376ecc09581cd208..d30ccffa5dce282fac923bad5bd5217974c9f7b6 100644 (file)
@@ -38,7 +38,7 @@ class BrickLvmTestCase(test.TestCase):
         self.configuration.volume_group_name = 'fake-vg'
         super(BrickLvmTestCase, self).setUp()
 
-        #Stub processutils.execute for static methods
+        # Stub processutils.execute for static methods
         self.stubs.Set(processutils, 'execute',
                        self.fake_execute)
         self.vg = brick.LVM(self.configuration.volume_group_name,
index 9a1bbdd27d9d89e0ed8acc9f8d6c16ccb1a73da6..ea0a5b458b148771fbaa056aae00d4123382eb8a 100644 (file)
@@ -25,12 +25,12 @@ class StubGlanceClient(object):
         _images = images or []
         map(lambda image: self.create(**image), _images)
 
-        #NOTE(bcwaldon): HACK to get client.images.* to work
+        # NOTE(bcwaldon): HACK to get client.images.* to work
         self.images = lambda: None
         for fn in ('list', 'get', 'data', 'create', 'update', 'delete'):
             setattr(self.images, fn, getattr(self, fn))
 
-    #TODO(bcwaldon): implement filters
+    # TODO(bcwaldon): implement filters
     def list(self, filters=None, marker=None, limit=30):
         if marker is None:
             index = 0
index 9b5a83fd7a7425a978f78c9bfdd73416cdec40bd..de1903eb86ebd4913ce14457eb5011f58e23aac9 100644 (file)
@@ -144,7 +144,7 @@ class _FakeImageService(object):
         self._imagedata = {}
         super(_FakeImageService, self).__init__()
 
-    #TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
+    # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
     def detail(self, context, **kwargs):
         """Return list of detailed image information."""
         return copy.deepcopy(self.images.values())
index 26b272a2ff2ea4871b89165f71c661f0e9da94b3..68b91dc916d09c497687e771e94a957a934bcd1b 100644 (file)
@@ -97,7 +97,6 @@ class TestGlanceImageService(test.TestCase):
 
     def setUp(self):
         super(TestGlanceImageService, self).setUp()
-        #fakes.stub_out_compute_api_snapshot(self.stubs)
 
         client = glance_stubs.StubGlanceClient()
         self.service = self._create_image_service(client)
index d82e81cc3530960c523bf3900342b0ed4be34725..3cff918ccda105630a818183b7d4b635ee17d10e 100644 (file)
@@ -527,7 +527,7 @@ class BackupTestCase(BaseBackupTest):
         export['backup_service'] = 'cinder.tests.backup.bad_service'
         imported_record = self._create_export_record_db_entry()
 
-        #Test the case where the additional hosts list is empty
+        # Test the case where the additional hosts list is empty
         backup_hosts = []
         self.assertRaises(exception.ServiceNotFound,
                           self.backup_mgr.import_record,
@@ -537,8 +537,8 @@ class BackupTestCase(BaseBackupTest):
                           export['backup_url'],
                           backup_hosts)
 
-        #Test that the import backup keeps calling other hosts to find a
-        #suitable host for the backup service
+        # Test that the import backup keeps calling other hosts to find a
+        # suitable host for the backup service
         backup_hosts = ['fake1', 'fake2']
         BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record'
         with mock.patch(BackupAPI_import) as _mock_backup_import:
index fbcf270bb70b4003886d296b4cd858388fb2dbb0..2e417afa01c00e0f019fc72d20c2077e4cf63809 100644 (file)
@@ -718,7 +718,6 @@ class BackupCephTestCase(test.TestCase):
             self.service.delete(self.backup)
             self.assertTrue(mock_del_backup_snap.called)
 
-        #self.assertFalse(self.mock_rbd.ImageNotFound.called)
         self.assertTrue(self.mock_rbd.RBD.return_value.list.called)
         self.assertTrue(self.mock_rbd.RBD.return_value.remove.called)
 
index 82e0462ec8646fc670d4a44ee9af6305286e5653..ab2c9a27df9e2f7c71b26b08566a01ab7a9082c2 100644 (file)
@@ -835,7 +835,6 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                      mock_init):
         context = {}
         volume = {'id': self.VOLUME.get(u'name')}
-        #self.driver.ensure_export(context, volume)
         self.assertRaises(exception.VolumeBackendAPIException,
                           self.driver.ensure_export,
                           context,
index 56a557ace675d098eac67509d099e3a2713054f5..960378bdacfd5642f219aa8b0efc52b718105a65 100644 (file)
@@ -891,7 +891,7 @@ class DriverTestCaseBase(test.TestCase):
         self.configuration.storage_vnx_pool_name = 'unit_test_pool'
         self.configuration.san_login = 'sysadmin'
         self.configuration.san_password = 'sysadmin'
-        #set the timeout to 0.012s = 0.0002 * 60 = 1.2ms
+        # set the timeout to 0.012s = 0.0002 * 60 = 1.2ms
         self.configuration.default_timeout = 0.0002
         self.configuration.initiator_auto_registration = True
         self.configuration.check_max_pool_luns_threshold = False
@@ -1010,9 +1010,9 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
                                     '-Deduplication',
                                     '-ThinProvisioning',
                                     '-FAST']
-        #case
+        # case
         self.driver.create_volume(self.testData.test_volume_with_type)
-        #verification
+        # verification
         expect_cmd = [
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
@@ -1046,10 +1046,10 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
                                     '-Deduplication',
                                     '-ThinProvisioning',
                                     '-FAST']
-        #case
+        # case
         self.driver.create_volume(self.testData.test_volume_with_type)
 
-        #verification
+        # verification
         expect_cmd = [
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
@@ -1082,10 +1082,10 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
                                     '-Deduplication',
                                     '-ThinProvisioning',
                                     '-FAST']
-        #case
+        # case
         self.driver.create_volume(self.testData.test_volume_with_type)
 
-        #verification
+        # verification
         expect_cmd = [
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
@@ -1112,10 +1112,10 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
                                     '-Deduplication',
                                     '-ThinProvisioning',
                                     '-FAST']
-        #case
+        # case
         self.driver.create_volume(self.testData.test_volume_with_type)
 
-        #verification
+        # verification
         expect_cmd = [
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
@@ -1268,7 +1268,7 @@ Time Remaining:  0 second(s)
         ret = self.driver.migrate_volume(None, self.testData.test_volume,
                                          fakehost)[0]
         self.assertTrue(ret)
-        #verification
+        # verification
         expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(1, 1),
                                 retry_disable=True,
                                 poll=True),
@@ -1317,7 +1317,7 @@ Time Remaining:  0 second(s)
         ret = self.driver.migrate_volume(None, self.testData.test_volume,
                                          fake_host)[0]
         self.assertTrue(ret)
-        #verification
+        # verification
         expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
                                 retry_disable=True,
                                 poll=True),
@@ -1364,7 +1364,7 @@ Time Remaining:  0 second(s)
         ret = self.driver.migrate_volume(None, self.testData.test_volume5,
                                          fakehost)[0]
         self.assertTrue(ret)
-        #verification
+        # verification
         expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(5, 5),
                                 retry_disable=True,
                                 poll=True),
@@ -1396,7 +1396,7 @@ Time Remaining:  0 second(s)
         ret = self.driver.migrate_volume(None, self.testData.test_volume,
                                          fakehost)[0]
         self.assertFalse(ret)
-        #verification
+        # verification
         expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
                                 retry_disable=True,
                                 poll=True)]
@@ -1405,11 +1405,11 @@ Time Remaining:  0 second(s)
     def test_create_destroy_volume_snapshot(self):
         fake_cli = self.driverSetup()
 
-        #case
+        # case
         self.driver.create_snapshot(self.testData.test_snapshot)
         self.driver.delete_snapshot(self.testData.test_snapshot)
 
-        #verification
+        # verification
         expect_cmd = [mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1'),
                                 poll=False),
                       mock.call(*self.testData.SNAP_DELETE_CMD('snapshot1'),
@@ -1715,12 +1715,12 @@ Time Remaining:  0 second(s)
         results = [FAKE_ERROR_RETURN]
         fake_cli = self.driverSetup(commands, results)
 
-        #case
+        # case
         self.assertRaises(EMCVnxCLICmdError,
                           self.driver.create_snapshot,
                           self.testData.test_failed_snapshot)
 
-        #verification
+        # verification
         expect_cmd = [
             mock.call(
                 *self.testData.SNAP_CREATE_CMD('failed_snapshot'),
@@ -1729,7 +1729,7 @@ Time Remaining:  0 second(s)
         fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_volume_from_snapshot(self):
-        #set up
+        # set up
         cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
         cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
         output_dest = self.testData.LUN_PROPERTY("vol2_dest")
@@ -2022,7 +2022,7 @@ Time Remaining:  0 second(s)
             self.testData.test_pool_name
         self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
         assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
-        #mock the command executor
+        # mock the command executor
         fake_command_execute = self.get_command_execute_simulator(
             commands, results)
         fake_cli = mock.MagicMock(side_effect=fake_command_execute)
@@ -2044,7 +2044,7 @@ Time Remaining:  0 second(s)
         self.configuration.storage_vnx_pool_name = invalid_pool_name
         self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
         assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
-        #mock the command executor
+        # mock the command executor
         fake_command_execute = self.get_command_execute_simulator(
             commands, results)
         fake_cli = mock.MagicMock(side_effect=fake_command_execute)
@@ -2073,7 +2073,7 @@ Time Remaining:  0 second(s)
         self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
         assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
 
-        #mock the command executor
+        # mock the command executor
         fake_command_execute = self.get_command_execute_simulator(
             commands, results)
         fake_cli = mock.MagicMock(side_effect=fake_command_execute)
@@ -2085,7 +2085,7 @@ Time Remaining:  0 second(s)
         expected = [mock.call(*get_lun_cmd, poll=True)]
         assert get_size == test_size
         fake_cli.assert_has_calls(expected)
-        #Test the function with invalid reference.
+        # Test the function with invalid reference.
         invaild_ref = {'fake': 'fake_ref'}
         self.assertRaises(exception.ManageExistingInvalidReference,
                           self.driver.manage_existing_get_size,
index af022965435f363ee5501403c5956a562cbe1e4d..45246f4c1fea8420e4d02763ebe912c1f63349e1 100644 (file)
@@ -3978,7 +3978,7 @@ class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase):
             mock_create_client.return_value = mock_client
             common = self.driver._login()
 
-            #Setup a single ISCSI IP
+            # Setup a single ISCSI IP
             iscsi_ips = ["10.10.220.253"]
             self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
 
@@ -4000,7 +4000,7 @@ class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase):
             mock_create_client.return_value = mock_client
             common = self.driver._login()
 
-            #Setup two ISCSI IPs
+            # Setup two ISCSI IPs
             iscsi_ips = ["10.10.220.252", "10.10.220.253"]
             self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
 
@@ -4024,7 +4024,7 @@ class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase):
             mock_create_client.return_value = mock_client
             common = self.driver._login()
 
-            #Setup two ISCSI IPs
+            # Setup two ISCSI IPs
             iscsi_ips = ["10.10.220.252", "10.10.220.253"]
             self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
 
@@ -4046,7 +4046,7 @@ class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase):
         mock_client.getPorts.return_value = PORTS1_RET
         mock_client.getVLUNs.return_value = VLUNS5_RET
 
-        #Setup two ISCSI IPs
+        # Setup two ISCSI IPs
         iscsi_ips = ["10.10.220.252", "10.10.220.253"]
         self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
 
index ca08eca85d64237f9c9280d1045d8517be9c529d..2893d38ca3c9dff94fe807461746e3f73716297c 100644 (file)
@@ -21,7 +21,7 @@ from cinder import test
 import cinder.volume.drivers.openvstorage as ovsvd
 
 
-#MOCKUPS
+# MOCKUPS
 MOCK_hostname = 'test-hostname'
 MOCK_mountpoint = '/mnt/test'
 MOCK_vdisk_guid = '0000'
index 3780949c0102cbc699e58e28c009e53e29522766..5ea48ed5ffec69d7e46240d54820bcf7a4c02c97 100644 (file)
@@ -31,7 +31,6 @@ VOLUMEUUID = 'a000000000000000000000000000001'
 INITIATOR = 'iqn.2013-08.org.debian:01:aaaaaaaa'
 DATA_IN_VOLUME = {'id': VOLUMEUUID}
 DATA_IN_CONNECTOR = {'initiator': INITIATOR}
-## dpl.getpool
 DATA_SERVER_INFO = 0, {
     'metadata': {'vendor': 'ProphetStor',
                  'version': '1.5'}}
@@ -67,7 +66,6 @@ DATA_POOLINFO = 0, {
     'objectType': 'application/cdmi-container',
     'percentComplete': 100}
 
-## dpl.assignvdev
 DATA_ASSIGNVDEV = 0, {
     'children': [],
     'childrenrange': '',
index 51425740fc4a83a836f03e89732479238b72b154..6150705efc368de288167c4df3aa9fffae4e55b3 100644 (file)
@@ -3032,7 +3032,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         # Make sure that the volumes have been created
         self._assert_vol_exists(volume['name'], True)
 
-        #Set up one WWPN that won't match and one that will.
+        # Set up one WWPN that won't match and one that will.
         self.driver._state['storage_nodes']['1']['WWPN'] = ['123456789ABCDEF0',
                                                             'AABBCCDDEEFF0010']
 
@@ -3066,7 +3066,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         # Make sure that the volumes have been created
         self._assert_vol_exists(volume['name'], True)
 
-        #Set up WWPNs that will not match what is available.
+        # Set up WWPNs that will not match what is available.
         self.driver._state['storage_nodes']['1']['WWPN'] = ['123456789ABCDEF0',
                                                             '123456789ABCDEF1']
 
@@ -3100,7 +3100,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         # Make sure that the volumes have been created
         self._assert_vol_exists(volume['name'], True)
 
-        #Set up one WWPN.
+        # Set up one WWPN.
         self.driver._state['storage_nodes']['1']['WWPN'] = ['AABBCCDDEEFF0012']
 
         wwpns = ['ff00000000000000', 'ff00000000000001']
index 183b063b136fbcd3e1f084ee7c8d1686496b3ab5..0922711de5918701c3f7bbf3fcf876ba22b32ed0 100644 (file)
@@ -123,17 +123,17 @@ class VolumeTransferTestCase(test.TestCase):
         self.assertEqual(len(ts), 0, 'Unexpected transfers listed.')
 
     def test_delete_transfer_with_deleted_volume(self):
-        #create a volume
+        # create a volume
         volume = utils.create_volume(self.ctxt, id='1',
                                      updated_at=self.updated_at)
-        #create a transfer
+        # create a transfer
         tx_api = transfer_api.API()
         transfer = tx_api.create(self.ctxt, volume['id'], 'Description')
         t = tx_api.get(self.ctxt, transfer['id'])
         self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id')
-        #force delete volume
+        # force delete volume
         db.volume_destroy(context.get_admin_context(), volume['id'])
-        #Make sure transfer has been deleted.
+        # Make sure transfer has been deleted.
         self.assertRaises(exception.TransferNotFound,
                           tx_api.get,
                           self.ctxt,
index 47240fcdae9720aeb496ca69b25cadcd50f28bf4..b6abc15a83a40d537e93d321a79d4f9f5256f23a 100644 (file)
@@ -283,7 +283,7 @@ class VolumeTypeTestCase(test.TestCase):
         self.assertDictMatch(expected, res)
 
     def test_volume_types_diff(self):
-        #type_ref 1 and 2 have the same extra_specs, while 3 has different
+        # type_ref 1 and 2 have the same extra_specs, while 3 has different
         keyvals1 = {"key1": "val1", "key2": "val2"}
         keyvals2 = {"key1": "val0", "key2": "val2"}
         type_ref1 = volume_types.create(self.ctxt, "type1", keyvals1)
@@ -300,7 +300,7 @@ class VolumeTypeTestCase(test.TestCase):
         self.assertEqual(same, False)
         self.assertEqual(diff['extra_specs']['key1'], ('val1', 'val0'))
 
-        #qos_ref 1 and 2 have the same specs, while 3 has different
+        # qos_ref 1 and 2 have the same specs, while 3 has different
         qos_keyvals1 = {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}
         qos_keyvals2 = {'k1': 'v0', 'k2': 'v2', 'k3': 'v3'}
         qos_ref1 = qos_specs.create(self.ctxt, 'qos-specs-1', qos_keyvals1)
index 37c1ee24131cf12b8c4fe2cc770c0795120aeb2d..ed6af574e9b737248aab38b50a2938c1449339a5 100644 (file)
@@ -683,7 +683,7 @@ class XIOISEDriverTestCase(object):
             raise exception.Invalid()
 
 #################################
-##        UNIT TESTS           ##
+#         UNIT TESTS            #
 #################################
     def test_do_setup(self, mock_req):
         self.setup_driver()
index c6d334644271a1ef21e2f5959c3bb0f01fc0b508..45ec04d467bd9e623264af0300b5546fd0e800e7 100644 (file)
@@ -193,7 +193,7 @@ class FakeRequest(object):
             if vol_name == vol:
                 attachments = params['attachments']
                 if srv in attachments:
-                    #already attached - ok
+                    # already attached - ok
                     return RUNTIME_VARS['good']
                 else:
                     attachments.append(srv)
@@ -247,7 +247,7 @@ class FakeRequest(object):
             if params['cg-name'] == cg_name:
                 snapshots = params['snapshots']
                 if snap_name in snapshots:
-                    #already attached
+                    # already attached
                     return RUNTIME_VARS['bad_volume']
                 else:
                     snapshots.append(snap_name)
index 4c2d905d1ad8fb76441d473aa7b29497f52b1df9..7e8cf21f4312e8f8e1f6c7be69931e990a395e5a 100644 (file)
@@ -444,7 +444,6 @@ class HDSNFSDriver(nfs.NfsDriver):
         nfs_info = self._get_nfs_info()
 
         for share in self.shares:
-            #export = share.split(':')[1]
             if share in nfs_info.keys():
                 LOG.info(_LI("share: %(share)s -> %(info)s"),
                          {'share': share, 'info': nfs_info[share]['path']})
index 2416acc786569d50b64baafbfb96744a4c9b012f..2e0895347969e45f8d89aa37a30d9c634b43d3aa 100644 (file)
@@ -70,7 +70,7 @@ class HuaweiVolumeDriver(object):
             'Huawei OceanStor %(product)s series storage arrays.')
             % {'protocol': protocol,
                'product': product})
-        #Map HVS to 18000
+        # Map HVS to 18000
         if product in MAPPING:
             LOG.warn(_LW("Product name %s is deprecated, update your "
                          "configuration to the new product name."), product)
index 334f5a2e1ec81066321c01d48c0df1715792bbec..a9719140c7db9a0f9d582d25a98ae71773ebb6aa 100644 (file)
@@ -36,7 +36,7 @@ from cinder.volume import driver
 LOG = logging.getLogger(__name__)
 
 nas_opts = [
-    #TODO(eharney): deprecate nas_ip and change this to nas_host
+    # TODO(eharney): deprecate nas_ip and change this to nas_host
     cfg.StrOpt('nas_ip',
                default='',
                help='IP address or Hostname of NAS system.'),
index 805cfe7522e1980c16c0092095d9e65505e6edab..e9e0e97212227282dc518aa0d98d6f320b4c593b 100644 (file)
@@ -824,7 +824,7 @@ class HP3PARCommon(object):
         qos_specs_id = volume_type.get('qos_specs_id')
         specs = volume_type.get('extra_specs')
 
-        #NOTE(kmartin): We prefer the qos_specs association
+        # NOTE(kmartin): We prefer the qos_specs association
         # and override any existing extra-specs settings
         # if present.
         if qos_specs_id is not None:
index aedec2b231d048800df045dc105dbb4c5f1accc7..21d46f9b1044aaeebf980bbe236436a4ae46a91f 100644 (file)
@@ -50,9 +50,9 @@ class SheepdogDriver(driver.VolumeDriver):
     def check_for_setup_error(self):
         """Return error if prerequisites aren't met."""
         try:
-            #NOTE(francois-charlier) Since 0.24 'collie cluster info -r'
-            #  gives short output, but for compatibility reason we won't
-            #  use it and just check if 'running' is in the output.
+            # NOTE(francois-charlier) Since 0.24 'collie cluster info -r'
+            # gives short output, but for compatibility reason we won't
+            # use it and just check if 'running' is in the output.
             (out, _err) = self._execute('collie', 'cluster', 'info')
             if 'status: running' not in out:
                 exception_message = (_("Sheepdog is not working: %s") % out)
index 5ca22f15a935c31316e0b29f5936111b320f706c..795ce1dc8a71d322d827f5b0ffcdf28691afe6ad 100644 (file)
@@ -231,7 +231,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
         # exist, this is expected as it signals that the image_id is missing.
         image_meta = self.image_service.show(context, image_id)
 
-        #check whether image is active
+        # check whether image is active
         if image_meta['status'] != 'active':
             msg = _('Image %(image_id)s is not active.')\
                 % {'image_id': image_id}
index d4ed6896273eed74851ab953551f90663e8016c9..8624a10f4d367568de18c44959fe0d1f23f017cc 100644 (file)
@@ -160,7 +160,7 @@ def get_default_volume_type():
         except exception.VolumeTypeNotFoundByName as e:
             # Couldn't find volume type with the name in default_volume_type
             # flag, record this issue and move on
-            #TODO(zhiteng) consider add notification to warn admin
+            # TODO(zhiteng) consider add notification to warn admin
             LOG.exception(_LE('Default volume type is not found,'
                           'please check default_volume_type config: %s') %
                           six.text_type(e))
diff --git a/tox.ini b/tox.ini
index 4d4850db6cbc01418009627dd828e48a22417a8f..c3589faf373dc8f356f872348eb2cfdb43689940 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -55,9 +55,6 @@ commands = python setup.py build_sphinx
 # E251 unexpected spaces around keyword / parameter equals
 # reason: no improvement in readability
 #
-# E265 block comment should start with '# '
-# reason: no improvement in readability
-#
 # H402 one line docstring needs punctuation
 # reason: removed in hacking (https://review.openstack.org/#/c/101497/)
 #
@@ -73,7 +70,7 @@ commands = python setup.py build_sphinx
 # H302,H405
 
 
-ignore = E251,E265,H302,H402,H405,H803,H904
+ignore = E251,H302,H402,H405,H803,H904
 exclude = .git,.venv,.tox,dist,tools,doc,common,*egg,build
 max-complexity=30