From 84afca21fd19d5f78259f0a820258645c1780749 Mon Sep 17 00:00:00 2001 From: Yuriy Nesenenko Date: Wed, 11 Feb 2015 13:19:40 +0200 Subject: [PATCH] Fix comments style according to the Hacking Rules According to the PEP8(E265) there should be at least one space before block comment. Change-Id: Ic51f80210becc375b30f0f4e9eeb54995775c817 Partial-Bug: #1407162 --- cinder/api/contrib/backups.py | 2 +- cinder/api/contrib/qos_specs_manage.py | 2 +- cinder/api/contrib/scheduler_stats.py | 2 +- cinder/api/middleware/sizelimit.py | 2 +- cinder/api/openstack/wsgi.py | 6 +-- cinder/api/v1/snapshots.py | 4 +- cinder/api/v1/volumes.py | 2 +- cinder/api/v2/snapshots.py | 4 +- cinder/api/views/qos_specs.py | 2 +- cinder/api/xmlutil.py | 10 ++--- cinder/backup/api.py | 6 +-- cinder/brick/initiator/connector.py | 14 +++--- cinder/db/sqlalchemy/api.py | 2 +- .../versions/021_add_default_quota_class.py | 6 +-- cinder/exception.py | 3 +- cinder/image/glance.py | 10 ++--- cinder/scheduler/filter_scheduler.py | 2 +- cinder/scheduler/weights/capacity.py | 2 +- cinder/tests/api/contrib/test_backups.py | 32 +++++++------- cinder/tests/api/extensions/foxinsocks.py | 4 +- cinder/tests/api/v1/test_limits.py | 8 ++-- cinder/tests/api/v1/test_snapshot_metadata.py | 6 +-- cinder/tests/api/v1/test_snapshots.py | 4 +- cinder/tests/api/v1/test_volume_metadata.py | 6 +-- cinder/tests/api/v1/test_volumes.py | 4 +- cinder/tests/api/v2/test_limits.py | 8 ++-- cinder/tests/api/v2/test_snapshot_metadata.py | 6 +-- cinder/tests/api/v2/test_snapshots.py | 4 +- cinder/tests/api/v2/test_volume_metadata.py | 6 +-- cinder/tests/brick/test_brick_lvm.py | 2 +- cinder/tests/glance/stubs.py | 4 +- cinder/tests/image/fake.py | 2 +- cinder/tests/image/test_glance.py | 1 - cinder/tests/test_backup.py | 6 +-- cinder/tests/test_backup_ceph.py | 1 - cinder/tests/test_dellsc.py | 1 - cinder/tests/test_emc_vnxdirect.py | 44 +++++++++---------- cinder/tests/test_hp3par.py | 8 ++-- cinder/tests/test_openvstorage.py | 2 +- cinder/tests/test_prophetstor_dpl.py | 2 - cinder/tests/test_storwize_svc.py | 6 +-- cinder/tests/test_volume_transfer.py | 8 ++-- cinder/tests/test_volume_types.py | 4 +- cinder/tests/test_xio.py | 2 +- cinder/tests/test_zadara.py | 4 +- cinder/volume/drivers/hds/nfs.py | 1 - cinder/volume/drivers/huawei/__init__.py | 2 +- cinder/volume/drivers/remotefs.py | 2 +- .../volume/drivers/san/hp/hp_3par_common.py | 2 +- cinder/volume/drivers/sheepdog.py | 6 +-- cinder/volume/flows/api/create_volume.py | 2 +- cinder/volume/volume_types.py | 2 +- tox.ini | 5 +-- 53 files changed, 139 insertions(+), 149 deletions(-) diff --git a/cinder/api/contrib/backups.py b/cinder/api/contrib/backups.py index 138b9ee21..0ac54da63 100644 --- a/cinder/api/contrib/backups.py +++ b/cinder/api/contrib/backups.py @@ -341,7 +341,7 @@ class BackupsController(wsgi.Controller): raise exc.HTTPBadRequest(explanation=msg) context = req.environ['cinder.context'] import_data = body['backup-record'] - #Verify that body elements are provided + # Verify that body elements are provided try: backup_service = import_data['backup_service'] backup_url = import_data['backup_url'] diff --git a/cinder/api/contrib/qos_specs_manage.py b/cinder/api/contrib/qos_specs_manage.py index 03481ed35..b0224009b 100644 --- a/cinder/api/contrib/qos_specs_manage.py +++ b/cinder/api/contrib/qos_specs_manage.py @@ -217,7 +217,7 @@ class QoSSpecsController(wsgi.Controller): force = req.params.get('force', None) - #convert string to bool type in strict manner + # Convert string to bool type in 
strict manner force = strutils.bool_from_string(force) LOG.debug("Delete qos_spec: %(id)s, force: %(force)s" % {'id': id, 'force': force}) diff --git a/cinder/api/contrib/scheduler_stats.py b/cinder/api/contrib/scheduler_stats.py index 9270b02e0..3c59e19e7 100644 --- a/cinder/api/contrib/scheduler_stats.py +++ b/cinder/api/contrib/scheduler_stats.py @@ -43,7 +43,7 @@ class SchedulerStatsController(wsgi.Controller): context = req.environ['cinder.context'] authorize(context, 'get_pools') - #TODO(zhiteng) Add filters support + # TODO(zhiteng) Add filters support detail = req.params.get('detail', False) pools = self.scheduler_api.get_pools(context, filters=None) diff --git a/cinder/api/middleware/sizelimit.py b/cinder/api/middleware/sizelimit.py index 58d450c5e..7a69826ba 100644 --- a/cinder/api/middleware/sizelimit.py +++ b/cinder/api/middleware/sizelimit.py @@ -26,7 +26,7 @@ from cinder.openstack.common import log as logging from cinder import wsgi -#default request size is 112k +# Default request size is 112k max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size', default=114688, help='Max size for body of a request') diff --git a/cinder/api/openstack/wsgi.py b/cinder/api/openstack/wsgi.py index 703f71243..2704d9298 100644 --- a/cinder/api/openstack/wsgi.py +++ b/cinder/api/openstack/wsgi.py @@ -442,7 +442,7 @@ class XMLDictSerializer(DictSerializer): self._add_xmlns(node, has_atom) return node.toxml('UTF-8') - #NOTE (ameade): the has_atom should be removed after all of the + # NOTE (ameade): the has_atom should be removed after all of the # xml serializers and view builders have been updated to the current # spec that required all responses include the xmlns:atom, the has_atom # flag is to prevent current tests from breaking @@ -462,7 +462,7 @@ class XMLDictSerializer(DictSerializer): if xmlns: result.setAttribute('xmlns', xmlns) - #TODO(bcwaldon): accomplish this without a type-check + # TODO(bcwaldon): accomplish this without a type-check if isinstance(data, list): collections = metadata.get('list_collections', {}) if nodename in collections: @@ -481,7 +481,7 @@ class XMLDictSerializer(DictSerializer): for item in data: node = self._to_xml_node(doc, metadata, singular, item) result.appendChild(node) - #TODO(bcwaldon): accomplish this without a type-check + # TODO(bcwaldon): accomplish this without a type-check elif isinstance(data, dict): collections = metadata.get('dict_collections', {}) if nodename in collections: diff --git a/cinder/api/v1/snapshots.py b/cinder/api/v1/snapshots.py index 59eb85607..400c1cbc9 100644 --- a/cinder/api/v1/snapshots.py +++ b/cinder/api/v1/snapshots.py @@ -140,12 +140,12 @@ class SnapshotsController(wsgi.Controller): """Returns a list of snapshots, transformed through entity_maker.""" context = req.environ['cinder.context'] - #pop out limit and offset , they are not search_opts + # pop out limit and offset , they are not search_opts search_opts = req.GET.copy() search_opts.pop('limit', None) search_opts.pop('offset', None) - #filter out invalid option + # filter out invalid option allowed_search_options = ('status', 'volume_id', 'display_name') utils.remove_invalid_filter_options(context, search_opts, allowed_search_options) diff --git a/cinder/api/v1/volumes.py b/cinder/api/v1/volumes.py index 723395cbc..4798f5f4d 100644 --- a/cinder/api/v1/volumes.py +++ b/cinder/api/v1/volumes.py @@ -265,7 +265,7 @@ class VolumeController(wsgi.Controller): def _items(self, req, entity_maker): """Returns a list of volumes, transformed through entity_maker.""" 
- #pop out limit and offset , they are not search_opts + # pop out limit and offset , they are not search_opts search_opts = req.GET.copy() search_opts.pop('limit', None) search_opts.pop('offset', None) diff --git a/cinder/api/v2/snapshots.py b/cinder/api/v2/snapshots.py index 27060c308..0549d0150 100644 --- a/cinder/api/v2/snapshots.py +++ b/cinder/api/v2/snapshots.py @@ -143,12 +143,12 @@ class SnapshotsController(wsgi.Controller): """Returns a list of snapshots, transformed through entity_maker.""" context = req.environ['cinder.context'] - #pop out limit and offset , they are not search_opts + # pop out limit and offset , they are not search_opts search_opts = req.GET.copy() search_opts.pop('limit', None) search_opts.pop('offset', None) - #filter out invalid option + # filter out invalid option allowed_search_options = ('status', 'volume_id', 'name') utils.remove_invalid_filter_options(context, search_opts, allowed_search_options) diff --git a/cinder/api/views/qos_specs.py b/cinder/api/views/qos_specs.py index cd83862dd..0241a12ef 100644 --- a/cinder/api/views/qos_specs.py +++ b/cinder/api/views/qos_specs.py @@ -43,7 +43,7 @@ class ViewBuilder(common.ViewBuilder): def detail(self, request, qos_spec): """Detailed view of a single qos_spec.""" - #TODO(zhiteng) Add associations to detailed view + # TODO(zhiteng) Add associations to detailed view return { 'qos_specs': qos_spec, 'links': self._get_links(request, diff --git a/cinder/api/xmlutil.py b/cinder/api/xmlutil.py index b52f72e46..76d102f5e 100644 --- a/cinder/api/xmlutil.py +++ b/cinder/api/xmlutil.py @@ -351,7 +351,7 @@ class TemplateElement(object): def getAttrib(self, obj): """Get attribute.""" tmpattrib = {} - #Now set up all the attributes... + # Now set up all the attributes... for key, value in self.attrib.items(): try: tmpattrib[key] = value(obj) @@ -393,7 +393,7 @@ class TemplateElement(object): tagnameList = self._splitTagName(tagname) insertIndex = 0 - #If parent is not none and has same tagname + # If parent is not none and has same tagname if parent is not None: for i in range(0, len(tagnameList)): tmpInsertPos = parent.find(tagnameList[i]) @@ -407,19 +407,19 @@ class TemplateElement(object): if insertIndex >= len(tagnameList): insertIndex = insertIndex - 1 - #Create root elem + # Create root elem elem = etree.Element(tagnameList[insertIndex], nsmap=nsmap) rootelem = elem subelem = elem - #Create subelem + # Create subelem for i in range((insertIndex + 1), len(tagnameList)): subelem = etree.SubElement(elem, tagnameList[i]) elem = subelem # If we have a parent, append the node to the parent if parent is not None: - #If we can merge this element, then insert + # If we can merge this element, then insert if insertIndex > 0: parent.insert(len(list(parent)), rootelem) else: diff --git a/cinder/backup/api.py b/cinder/backup/api.py index 89a1f3652..33cfb7e09 100644 --- a/cinder/backup/api.py +++ b/cinder/backup/api.py @@ -180,9 +180,9 @@ class API(base.Base): finally: QUOTAS.rollback(context, reservations) - #TODO(DuncanT): In future, when we have a generic local attach, - # this can go via the scheduler, which enables - # better load balancing and isolation of services + # TODO(DuncanT): In future, when we have a generic local attach, + # this can go via the scheduler, which enables + # better load balancing and isolation of services self.backup_rpcapi.create_backup(context, backup['host'], backup['id'], diff --git a/cinder/brick/initiator/connector.py b/cinder/brick/initiator/connector.py index ffed9c66c..5c0d53ef1 100644 --- 
a/cinder/brick/initiator/connector.py +++ b/cinder/brick/initiator/connector.py @@ -263,7 +263,7 @@ class ISCSIConnector(InitiatorConnector): device_info = {'type': 'block'} if self.use_multipath: - #multipath installed, discovering other targets if available + # multipath installed, discovering other targets if available for ip, iqn in self._discover_iscsi_portals(connection_properties): props = copy.deepcopy(connection_properties) props['target_portal'] = ip @@ -310,7 +310,7 @@ class ISCSIConnector(InitiatorConnector): host_device = next(dev for dev in host_devices if os.path.exists(dev)) if self.use_multipath: - #we use the multipath device instead of the single path device + # we use the multipath device instead of the single path device self._rescan_multipath() multipath_device = self._get_multipath_device_name(host_device) if multipath_device is not None: @@ -514,8 +514,8 @@ class ISCSIConnector(InitiatorConnector): "node.session.auth.password", connection_properties['auth_password']) - #duplicate logins crash iscsiadm after load, - #so we scan active sessions to see if the node is logged in. + # duplicate logins crash iscsiadm after load, + # so we scan active sessions to see if the node is logged in. out = self._run_iscsiadm_bare(["-m", "session"], run_as_root=True, check_exit_code=[0, 1, 21])[0] or "" @@ -536,8 +536,8 @@ class ISCSIConnector(InitiatorConnector): ("--login",), check_exit_code=[0, 255]) except putils.ProcessExecutionError as err: - #as this might be one of many paths, - #only set successful logins to startup automatically + # as this might be one of many paths, + # only set successful logins to startup automatically if err.exit_code in [15]: self._iscsiadm_update(connection_properties, "node.startup", @@ -855,7 +855,7 @@ class AoEConnector(InitiatorConnector): waiting_status = {'tries': 0} - #NOTE(jbr_): Device path is not always present immediately + # NOTE(jbr_): Device path is not always present immediately def _wait_for_discovery(aoe_path): if os.path.exists(aoe_path): raise loopingcall.LoopingCallDone diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py index 2ad9f744c..cbb6df13d 100644 --- a/cinder/db/sqlalchemy/api.py +++ b/cinder/db/sqlalchemy/api.py @@ -2424,7 +2424,7 @@ def qos_specs_get_all(context, inactive=False, filters=None): ] """ filters = filters or {} - #TODO(zhiteng) Add filters for 'consumer' + # TODO(zhiteng) Add filters for 'consumer' read_deleted = "yes" if inactive else "no" rows = model_query(context, models.QualityOfServiceSpecs, diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py b/cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py index b611fc26a..2ccbc197c 100644 --- a/cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py +++ b/cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py @@ -52,20 +52,20 @@ def upgrade(migrate_engine): return try: - #Set default volumes + # Set default volumes qci = quota_classes.insert() qci.execute({'created_at': CREATED_AT, 'class_name': CLASS_NAME, 'resource': 'volumes', 'hard_limit': CONF.quota_volumes, 'deleted': False, }) - #Set default snapshots + # Set default snapshots qci.execute({'created_at': CREATED_AT, 'class_name': CLASS_NAME, 'resource': 'snapshots', 'hard_limit': CONF.quota_snapshots, 'deleted': False, }) - #Set default gigabytes + # Set default gigabytes qci.execute({'created_at': CREATED_AT, 'class_name': CLASS_NAME, 'resource': 'gigabytes', diff --git a/cinder/exception.py 
b/cinder/exception.py index 37633f45f..d886663fb 100644 --- a/cinder/exception.py +++ b/cinder/exception.py @@ -389,7 +389,6 @@ class FileNotFound(NotFound): message = _("File %(file_path)s could not be found.") -#TODO(bcwaldon): EOL this exception! class Duplicate(CinderException): pass @@ -722,7 +721,7 @@ class BadHTTPResponseStatus(ZadaraException): message = _("Bad HTTP response status %(status)s") -#SolidFire +# SolidFire class SolidFireAPIException(VolumeBackendAPIException): message = _("Bad response from SolidFire API") diff --git a/cinder/image/glance.py b/cinder/image/glance.py index ef76aeb0a..7607daf87 100644 --- a/cinder/image/glance.py +++ b/cinder/image/glance.py @@ -309,16 +309,16 @@ class GlanceImageService(object): image_meta, data=None, purge_props=True): """Modify the given image with the new data.""" image_meta = self._translate_to_glance(image_meta) - #NOTE(dosaboy): see comment in bug 1210467 + # NOTE(dosaboy): see comment in bug 1210467 if CONF.glance_api_version == 1: image_meta['purge_props'] = purge_props - #NOTE(bcwaldon): id is not an editable field, but it is likely to be + # NOTE(bcwaldon): id is not an editable field, but it is likely to be # passed in by calling code. Let's be nice and ignore it. image_meta.pop('id', None) if data: image_meta['data'] = data try: - #NOTE(dosaboy): the v2 api separates update from upload + # NOTE(dosaboy): the v2 api separates update from upload if data and CONF.glance_api_version > 1: image_meta = self._client.call(context, 'upload', image_id, image_meta['data']) @@ -431,7 +431,7 @@ def _convert_to_string(metadata): def _extract_attributes(image): - #NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform + # NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform # a get(), resulting in a useless request back to glance. This list is # therefore sorted, with dependent attributes as the end # 'deleted_at' depends on 'deleted' @@ -513,7 +513,7 @@ def get_remote_image_service(context, image_href): :returns: a tuple of the form (image_service, image_id) """ - #NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a + # NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a # standalone image ID if '/' not in str(image_href): image_service = get_default_image_service() diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py index 583d3d119..c9dd604c9 100644 --- a/cinder/scheduler/filter_scheduler.py +++ b/cinder/scheduler/filter_scheduler.py @@ -169,7 +169,7 @@ class FilterScheduler(driver.Scheduler): return top_host.obj def get_pools(self, context, filters): - #TODO(zhiteng) Add filters support + # TODO(zhiteng) Add filters support return self.host_manager.get_pools(context) def _post_select_populate_filter_properties(self, filter_properties, diff --git a/cinder/scheduler/weights/capacity.py b/cinder/scheduler/weights/capacity.py index d5068e97d..88d322145 100644 --- a/cinder/scheduler/weights/capacity.py +++ b/cinder/scheduler/weights/capacity.py @@ -74,7 +74,7 @@ class CapacityWeigher(weights.BaseHostWeigher): total_space = host_state.total_capacity_gb if (free_space == 'infinite' or free_space == 'unknown' or total_space == 'infinite' or total_space == 'unknown'): - #(zhiteng) 'infinite' and 'unknown' are treated the same + # (zhiteng) 'infinite' and 'unknown' are treated the same # here, for sorting purpose. 
# As a partial fix for bug #1350638, 'infinite' and 'unknown' are diff --git a/cinder/tests/api/contrib/test_backups.py b/cinder/tests/api/contrib/test_backups.py index 55bf52077..8184bb9df 100644 --- a/cinder/tests/api/contrib/test_backups.py +++ b/cinder/tests/api/contrib/test_backups.py @@ -548,26 +548,26 @@ class BackupsAPITestCase(test.TestCase): test_host = 'test_host' alt_host = 'strange_host' empty_service = [] - #service host not match with volume's host + # service host not match with volume's host host_not_match = [{'availability_zone': "fake_az", 'host': alt_host, 'disabled': 0, 'updated_at': timeutils.utcnow()}] - #service az not match with volume's az + # service az not match with volume's az az_not_match = [{'availability_zone': "strange_az", 'host': test_host, 'disabled': 0, 'updated_at': timeutils.utcnow()}] - #service disabled + # service disabled disabled_service = [] - #dead service that last reported at 20th century + # dead service that last reported at 20th century dead_service = [{'availability_zone': "fake_az", 'host': alt_host, 'disabled': 0, 'updated_at': '1989-04-16 02:55:44'}] - #first service's host not match but second one works. + # first service's host not match but second one works. multi_services = [{'availability_zone': "fake_az", 'host': alt_host, 'disabled': 0, 'updated_at': timeutils.utcnow()}, {'availability_zone': "fake_az", 'host': test_host, 'disabled': 0, 'updated_at': timeutils.utcnow()}] - #Setup mock to run through the following service cases + # Setup mock to run through the following service cases _mock_service_get_all_by_topic.side_effect = [empty_service, host_not_match, az_not_match, @@ -579,32 +579,32 @@ class BackupsAPITestCase(test.TestCase): host=test_host)['id'] volume = self.volume_api.get(context.get_admin_context(), volume_id) - #test empty service + # test empty service self.assertEqual(self.backup_api._is_backup_service_enabled(volume, test_host), False) - #test host not match service + # test host not match service self.assertEqual(self.backup_api._is_backup_service_enabled(volume, test_host), False) - #test az not match service + # test az not match service self.assertEqual(self.backup_api._is_backup_service_enabled(volume, test_host), False) - #test disabled service + # test disabled service self.assertEqual(self.backup_api._is_backup_service_enabled(volume, test_host), False) - #test dead service + # test dead service self.assertEqual(self.backup_api._is_backup_service_enabled(volume, test_host), False) - #test multi services and the last service matches + # test multi services and the last service matches self.assertEqual(self.backup_api._is_backup_service_enabled(volume, test_host), True) @@ -1071,7 +1071,7 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual(export.item(0).getAttribute('backup_url'), backup_url) - #db.backup_destroy(context.get_admin_context(), backup_id) + # db.backup_destroy(context.get_admin_context(), backup_id) def test_export_record_with_bad_backup_id(self): @@ -1266,7 +1266,7 @@ class BackupsAPITestCase(test.TestCase): backup_service = 'fake' backup_url = 'fake' - #test with no backup_service + # test with no backup_service req = webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {'backup_url': backup_url}} req.body = json.dumps(body) @@ -1279,7 +1279,7 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual(res_dict['badRequest']['message'], 'Incorrect request body format.') - #test with no backup_url + # test with no backup_url req = 
webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {'backup_service': backup_service}} req.body = json.dumps(body) @@ -1293,7 +1293,7 @@ class BackupsAPITestCase(test.TestCase): self.assertEqual(res_dict['badRequest']['message'], 'Incorrect request body format.') - #test with no backup_url and backup_url + # test with no backup_url and backup_url req = webob.Request.blank('/v2/fake/backups/import_record') body = {'backup-record': {}} req.body = json.dumps(body) diff --git a/cinder/tests/api/extensions/foxinsocks.py b/cinder/tests/api/extensions/foxinsocks.py index fcc9635d3..f4711a381 100644 --- a/cinder/tests/api/extensions/foxinsocks.py +++ b/cinder/tests/api/extensions/foxinsocks.py @@ -45,7 +45,7 @@ class FoxInSocksServerControllerExtension(wsgi.Controller): class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): - #NOTE: This only handles JSON responses. + # NOTE: This only handles JSON responses. # You can use content type header to test for XML. resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing') @@ -53,7 +53,7 @@ class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller): class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): - #NOTE: This only handles JSON responses. + # NOTE: This only handles JSON responses. # You can use content type header to test for XML. resp_obj.obj['big_bands'] = 'Pig Bands!' diff --git a/cinder/tests/api/v1/test_limits.py b/cinder/tests/api/v1/test_limits.py index 92db58521..fe47ef391 100644 --- a/cinder/tests/api/v1/test_limits.py +++ b/cinder/tests/api/v1/test_limits.py @@ -855,7 +855,7 @@ class LimitsXMLSerializationTest(test.TestCase): root = etree.XML(output) xmlutil.validate_schema(root, 'limits') - #verify absolute limits + # verify absolute limits absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(len(absolutes), 4) for limit in absolutes: @@ -863,7 +863,7 @@ class LimitsXMLSerializationTest(test.TestCase): value = limit.get('value') self.assertEqual(value, str(fixture['limits']['absolute'][name])) - #verify rate limits + # verify rate limits rates = root.xpath('ns:rates/ns:rate', namespaces=NS) self.assertEqual(len(rates), 2) for i, rate in enumerate(rates): @@ -890,10 +890,10 @@ class LimitsXMLSerializationTest(test.TestCase): root = etree.XML(output) xmlutil.validate_schema(root, 'limits') - #verify absolute limits + # verify absolute limits absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(len(absolutes), 0) - #verify rate limits + # verify rate limits rates = root.xpath('ns:rates/ns:rate', namespaces=NS) self.assertEqual(len(rates), 0) diff --git a/cinder/tests/api/v1/test_snapshot_metadata.py b/cinder/tests/api/v1/test_snapshot_metadata.py index 89d1faf82..aa1f19006 100644 --- a/cinder/tests/api/v1/test_snapshot_metadata.py +++ b/cinder/tests/api/v1/test_snapshot_metadata.py @@ -536,19 +536,19 @@ class SnapshotMetaDataTest(test.TestCase): req.method = 'POST' req.headers["content-type"] = "application/json" - #test for long key + # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) - #test for long value + # test for long value data = {"metadata": {"key": "v" * 260}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, 
self.req_id, data) - #test for empty key. + # test for empty key. data = {"metadata": {"": "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPBadRequest, diff --git a/cinder/tests/api/v1/test_snapshots.py b/cinder/tests/api/v1/test_snapshots.py index c5b0b8c28..5da7a9890 100644 --- a/cinder/tests/api/v1/test_snapshots.py +++ b/cinder/tests/api/v1/test_snapshots.py @@ -349,9 +349,9 @@ class SnapshotApiTest(test.TestCase): self.assertEqual(1, len(res['snapshots'])) self.assertEqual(2, res['snapshots'][0]['id']) - #admin case + # admin case list_snapshots_with_limit_and_offset(is_admin=True) - #non_admin case + # non_admin case list_snapshots_with_limit_and_offset(is_admin=False) def test_admin_list_snapshots_all_tenants(self): diff --git a/cinder/tests/api/v1/test_volume_metadata.py b/cinder/tests/api/v1/test_volume_metadata.py index 5cb270c40..af70cfcf2 100644 --- a/cinder/tests/api/v1/test_volume_metadata.py +++ b/cinder/tests/api/v1/test_volume_metadata.py @@ -519,19 +519,19 @@ class volumeMetaDataTest(test.TestCase): req.method = 'POST' req.headers["content-type"] = "application/json" - #test for long key + # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) - #test for long value + # test for long value data = {"metadata": {"key": "v" * 260}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) - #test for empty key. + # test for empty key. data = {"metadata": {"": "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPBadRequest, diff --git a/cinder/tests/api/v1/test_volumes.py b/cinder/tests/api/v1/test_volumes.py index 3c16fe6cb..ef1226704 100644 --- a/cinder/tests/api/v1/test_volumes.py +++ b/cinder/tests/api/v1/test_volumes.py @@ -645,9 +645,9 @@ class VolumeApiTest(test.TestCase): self.assertEqual(len(volumes), 1) self.assertEqual(volumes[0]['id'], 2) - #admin case + # admin case volume_detail_limit_offset(is_admin=True) - #non_admin case + # non_admin case volume_detail_limit_offset(is_admin=False) def test_volume_show_with_admin_metadata(self): diff --git a/cinder/tests/api/v2/test_limits.py b/cinder/tests/api/v2/test_limits.py index 459bce894..0c775a07b 100644 --- a/cinder/tests/api/v2/test_limits.py +++ b/cinder/tests/api/v2/test_limits.py @@ -856,7 +856,7 @@ class LimitsXMLSerializationTest(test.TestCase): root = etree.XML(output) xmlutil.validate_schema(root, 'limits') - #verify absolute limits + # verify absolute limits absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(len(absolutes), 4) for limit in absolutes: @@ -864,7 +864,7 @@ class LimitsXMLSerializationTest(test.TestCase): value = limit.get('value') self.assertEqual(value, str(fixture['limits']['absolute'][name])) - #verify rate limits + # verify rate limits rates = root.xpath('ns:rates/ns:rate', namespaces=NS) self.assertEqual(len(rates), 2) for i, rate in enumerate(rates): @@ -891,10 +891,10 @@ class LimitsXMLSerializationTest(test.TestCase): root = etree.XML(output) xmlutil.validate_schema(root, 'limits') - #verify absolute limits + # verify absolute limits absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(len(absolutes), 0) - #verify rate limits + # verify rate limits rates = root.xpath('ns:rates/ns:rate', namespaces=NS) self.assertEqual(len(rates), 0) diff --git 
a/cinder/tests/api/v2/test_snapshot_metadata.py b/cinder/tests/api/v2/test_snapshot_metadata.py index 2fd62ca37..6caf16433 100644 --- a/cinder/tests/api/v2/test_snapshot_metadata.py +++ b/cinder/tests/api/v2/test_snapshot_metadata.py @@ -534,19 +534,19 @@ class SnapshotMetaDataTest(test.TestCase): req.method = 'POST' req.headers["content-type"] = "application/json" - #test for long key + # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) - #test for long value + # test for long value data = {"metadata": {"key": "v" * 260}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) - #test for empty key. + # test for empty key. data = {"metadata": {"": "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPBadRequest, diff --git a/cinder/tests/api/v2/test_snapshots.py b/cinder/tests/api/v2/test_snapshots.py index 43fa3a4e1..7cb05895b 100644 --- a/cinder/tests/api/v2/test_snapshots.py +++ b/cinder/tests/api/v2/test_snapshots.py @@ -360,9 +360,9 @@ class SnapshotApiTest(test.TestCase): self.assertEqual(1, len(res['snapshots'])) self.assertEqual(2, res['snapshots'][0]['id']) - #admin case + # admin case list_snapshots_with_limit_and_offset(is_admin=True) - #non_admin case + # non_admin case list_snapshots_with_limit_and_offset(is_admin=False) def test_admin_list_snapshots_all_tenants(self): diff --git a/cinder/tests/api/v2/test_volume_metadata.py b/cinder/tests/api/v2/test_volume_metadata.py index ffc745df8..d4eb6e1c9 100644 --- a/cinder/tests/api/v2/test_volume_metadata.py +++ b/cinder/tests/api/v2/test_volume_metadata.py @@ -520,19 +520,19 @@ class volumeMetaDataTest(test.TestCase): req.method = 'POST' req.headers["content-type"] = "application/json" - #test for long key + # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) - #test for long value + # test for long value data = {"metadata": {"key": "v" * 260}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, req, self.req_id, data) - #test for empty key. + # test for empty key. 
data = {"metadata": {"": "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPBadRequest, diff --git a/cinder/tests/brick/test_brick_lvm.py b/cinder/tests/brick/test_brick_lvm.py index 1e84032f7..d30ccffa5 100644 --- a/cinder/tests/brick/test_brick_lvm.py +++ b/cinder/tests/brick/test_brick_lvm.py @@ -38,7 +38,7 @@ class BrickLvmTestCase(test.TestCase): self.configuration.volume_group_name = 'fake-vg' super(BrickLvmTestCase, self).setUp() - #Stub processutils.execute for static methods + # Stub processutils.execute for static methods self.stubs.Set(processutils, 'execute', self.fake_execute) self.vg = brick.LVM(self.configuration.volume_group_name, diff --git a/cinder/tests/glance/stubs.py b/cinder/tests/glance/stubs.py index 9a1bbdd27..ea0a5b458 100644 --- a/cinder/tests/glance/stubs.py +++ b/cinder/tests/glance/stubs.py @@ -25,12 +25,12 @@ class StubGlanceClient(object): _images = images or [] map(lambda image: self.create(**image), _images) - #NOTE(bcwaldon): HACK to get client.images.* to work + # NOTE(bcwaldon): HACK to get client.images.* to work self.images = lambda: None for fn in ('list', 'get', 'data', 'create', 'update', 'delete'): setattr(self.images, fn, getattr(self, fn)) - #TODO(bcwaldon): implement filters + # TODO(bcwaldon): implement filters def list(self, filters=None, marker=None, limit=30): if marker is None: index = 0 diff --git a/cinder/tests/image/fake.py b/cinder/tests/image/fake.py index 9b5a83fd7..de1903eb8 100644 --- a/cinder/tests/image/fake.py +++ b/cinder/tests/image/fake.py @@ -144,7 +144,7 @@ class _FakeImageService(object): self._imagedata = {} super(_FakeImageService, self).__init__() - #TODO(bcwaldon): implement optional kwargs such as limit, sort_dir + # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir def detail(self, context, **kwargs): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) diff --git a/cinder/tests/image/test_glance.py b/cinder/tests/image/test_glance.py index 26b272a2f..68b91dc91 100644 --- a/cinder/tests/image/test_glance.py +++ b/cinder/tests/image/test_glance.py @@ -97,7 +97,6 @@ class TestGlanceImageService(test.TestCase): def setUp(self): super(TestGlanceImageService, self).setUp() - #fakes.stub_out_compute_api_snapshot(self.stubs) client = glance_stubs.StubGlanceClient() self.service = self._create_image_service(client) diff --git a/cinder/tests/test_backup.py b/cinder/tests/test_backup.py index d82e81cc3..3cff918cc 100644 --- a/cinder/tests/test_backup.py +++ b/cinder/tests/test_backup.py @@ -527,7 +527,7 @@ class BackupTestCase(BaseBackupTest): export['backup_service'] = 'cinder.tests.backup.bad_service' imported_record = self._create_export_record_db_entry() - #Test the case where the additional hosts list is empty + # Test the case where the additional hosts list is empty backup_hosts = [] self.assertRaises(exception.ServiceNotFound, self.backup_mgr.import_record, @@ -537,8 +537,8 @@ class BackupTestCase(BaseBackupTest): export['backup_url'], backup_hosts) - #Test that the import backup keeps calling other hosts to find a - #suitable host for the backup service + # Test that the import backup keeps calling other hosts to find a + # suitable host for the backup service backup_hosts = ['fake1', 'fake2'] BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record' with mock.patch(BackupAPI_import) as _mock_backup_import: diff --git a/cinder/tests/test_backup_ceph.py b/cinder/tests/test_backup_ceph.py index fbcf270bb..2e417afa0 100644 --- 
a/cinder/tests/test_backup_ceph.py +++ b/cinder/tests/test_backup_ceph.py @@ -718,7 +718,6 @@ class BackupCephTestCase(test.TestCase): self.service.delete(self.backup) self.assertTrue(mock_del_backup_snap.called) - #self.assertFalse(self.mock_rbd.ImageNotFound.called) self.assertTrue(self.mock_rbd.RBD.return_value.list.called) self.assertTrue(self.mock_rbd.RBD.return_value.remove.called) diff --git a/cinder/tests/test_dellsc.py b/cinder/tests/test_dellsc.py index 82e0462ec..ab2c9a27d 100644 --- a/cinder/tests/test_dellsc.py +++ b/cinder/tests/test_dellsc.py @@ -835,7 +835,6 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): mock_init): context = {} volume = {'id': self.VOLUME.get(u'name')} - #self.driver.ensure_export(context, volume) self.assertRaises(exception.VolumeBackendAPIException, self.driver.ensure_export, context, diff --git a/cinder/tests/test_emc_vnxdirect.py b/cinder/tests/test_emc_vnxdirect.py index 56a557ace..960378bda 100644 --- a/cinder/tests/test_emc_vnxdirect.py +++ b/cinder/tests/test_emc_vnxdirect.py @@ -891,7 +891,7 @@ class DriverTestCaseBase(test.TestCase): self.configuration.storage_vnx_pool_name = 'unit_test_pool' self.configuration.san_login = 'sysadmin' self.configuration.san_password = 'sysadmin' - #set the timeout to 0.012s = 0.0002 * 60 = 1.2ms + # set the timeout to 0.012s = 0.0002 * 60 = 1.2ms self.configuration.default_timeout = 0.0002 self.configuration.initiator_auto_registration = True self.configuration.check_max_pool_luns_threshold = False @@ -1010,9 +1010,9 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase): '-Deduplication', '-ThinProvisioning', '-FAST'] - #case + # case self.driver.create_volume(self.testData.test_volume_with_type) - #verification + # verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'vol_with_type', 1, @@ -1046,10 +1046,10 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase): '-Deduplication', '-ThinProvisioning', '-FAST'] - #case + # case self.driver.create_volume(self.testData.test_volume_with_type) - #verification + # verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'vol_with_type', 1, @@ -1082,10 +1082,10 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase): '-Deduplication', '-ThinProvisioning', '-FAST'] - #case + # case self.driver.create_volume(self.testData.test_volume_with_type) - #verification + # verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'vol_with_type', 1, @@ -1112,10 +1112,10 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase): '-Deduplication', '-ThinProvisioning', '-FAST'] - #case + # case self.driver.create_volume(self.testData.test_volume_with_type) - #verification + # verification expect_cmd = [ mock.call(*self.testData.LUN_CREATION_CMD( 'vol_with_type', 1, @@ -1268,7 +1268,7 @@ Time Remaining: 0 second(s) ret = self.driver.migrate_volume(None, self.testData.test_volume, fakehost)[0] self.assertTrue(ret) - #verification + # verification expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(1, 1), retry_disable=True, poll=True), @@ -1317,7 +1317,7 @@ Time Remaining: 0 second(s) ret = self.driver.migrate_volume(None, self.testData.test_volume, fake_host)[0] self.assertTrue(ret) - #verification + # verification expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True, poll=True), @@ -1364,7 +1364,7 @@ Time Remaining: 0 second(s) ret = self.driver.migrate_volume(None, self.testData.test_volume5, fakehost)[0] self.assertTrue(ret) - #verification + # verification expect_cmd = 
[mock.call(*self.testData.MIGRATION_CMD(5, 5), retry_disable=True, poll=True), @@ -1396,7 +1396,7 @@ Time Remaining: 0 second(s) ret = self.driver.migrate_volume(None, self.testData.test_volume, fakehost)[0] self.assertFalse(ret) - #verification + # verification expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True, poll=True)] @@ -1405,11 +1405,11 @@ Time Remaining: 0 second(s) def test_create_destroy_volume_snapshot(self): fake_cli = self.driverSetup() - #case + # case self.driver.create_snapshot(self.testData.test_snapshot) self.driver.delete_snapshot(self.testData.test_snapshot) - #verification + # verification expect_cmd = [mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1'), poll=False), mock.call(*self.testData.SNAP_DELETE_CMD('snapshot1'), @@ -1715,12 +1715,12 @@ Time Remaining: 0 second(s) results = [FAKE_ERROR_RETURN] fake_cli = self.driverSetup(commands, results) - #case + # case self.assertRaises(EMCVnxCLICmdError, self.driver.create_snapshot, self.testData.test_failed_snapshot) - #verification + # verification expect_cmd = [ mock.call( *self.testData.SNAP_CREATE_CMD('failed_snapshot'), @@ -1729,7 +1729,7 @@ Time Remaining: 0 second(s) fake_cli.assert_has_calls(expect_cmd) def test_create_volume_from_snapshot(self): - #set up + # set up cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest") cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest") output_dest = self.testData.LUN_PROPERTY("vol2_dest") @@ -2022,7 +2022,7 @@ Time Remaining: 0 second(s) self.testData.test_pool_name self.driver = EMCCLIISCSIDriver(configuration=self.configuration) assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool) - #mock the command executor + # mock the command executor fake_command_execute = self.get_command_execute_simulator( commands, results) fake_cli = mock.MagicMock(side_effect=fake_command_execute) @@ -2044,7 +2044,7 @@ Time Remaining: 0 second(s) self.configuration.storage_vnx_pool_name = invalid_pool_name self.driver = EMCCLIISCSIDriver(configuration=self.configuration) assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool) - #mock the command executor + # mock the command executor fake_command_execute = self.get_command_execute_simulator( commands, results) fake_cli = mock.MagicMock(side_effect=fake_command_execute) @@ -2073,7 +2073,7 @@ Time Remaining: 0 second(s) self.driver = EMCCLIISCSIDriver(configuration=self.configuration) assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool) - #mock the command executor + # mock the command executor fake_command_execute = self.get_command_execute_simulator( commands, results) fake_cli = mock.MagicMock(side_effect=fake_command_execute) @@ -2085,7 +2085,7 @@ Time Remaining: 0 second(s) expected = [mock.call(*get_lun_cmd, poll=True)] assert get_size == test_size fake_cli.assert_has_calls(expected) - #Test the function with invalid reference. + # Test the function with invalid reference. 
invaild_ref = {'fake': 'fake_ref'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, diff --git a/cinder/tests/test_hp3par.py b/cinder/tests/test_hp3par.py index af0229654..45246f4c1 100644 --- a/cinder/tests/test_hp3par.py +++ b/cinder/tests/test_hp3par.py @@ -3978,7 +3978,7 @@ class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase): mock_create_client.return_value = mock_client common = self.driver._login() - #Setup a single ISCSI IP + # Setup a single ISCSI IP iscsi_ips = ["10.10.220.253"] self.driver.configuration.hp3par_iscsi_ips = iscsi_ips @@ -4000,7 +4000,7 @@ class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase): mock_create_client.return_value = mock_client common = self.driver._login() - #Setup two ISCSI IPs + # Setup two ISCSI IPs iscsi_ips = ["10.10.220.252", "10.10.220.253"] self.driver.configuration.hp3par_iscsi_ips = iscsi_ips @@ -4024,7 +4024,7 @@ class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase): mock_create_client.return_value = mock_client common = self.driver._login() - #Setup two ISCSI IPs + # Setup two ISCSI IPs iscsi_ips = ["10.10.220.252", "10.10.220.253"] self.driver.configuration.hp3par_iscsi_ips = iscsi_ips @@ -4046,7 +4046,7 @@ class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase): mock_client.getPorts.return_value = PORTS1_RET mock_client.getVLUNs.return_value = VLUNS5_RET - #Setup two ISCSI IPs + # Setup two ISCSI IPs iscsi_ips = ["10.10.220.252", "10.10.220.253"] self.driver.configuration.hp3par_iscsi_ips = iscsi_ips diff --git a/cinder/tests/test_openvstorage.py b/cinder/tests/test_openvstorage.py index ca08eca85..2893d38ca 100644 --- a/cinder/tests/test_openvstorage.py +++ b/cinder/tests/test_openvstorage.py @@ -21,7 +21,7 @@ from cinder import test import cinder.volume.drivers.openvstorage as ovsvd -#MOCKUPS +# MOCKUPS MOCK_hostname = 'test-hostname' MOCK_mountpoint = '/mnt/test' MOCK_vdisk_guid = '0000' diff --git a/cinder/tests/test_prophetstor_dpl.py b/cinder/tests/test_prophetstor_dpl.py index 3780949c0..5ea48ed5f 100644 --- a/cinder/tests/test_prophetstor_dpl.py +++ b/cinder/tests/test_prophetstor_dpl.py @@ -31,7 +31,6 @@ VOLUMEUUID = 'a000000000000000000000000000001' INITIATOR = 'iqn.2013-08.org.debian:01:aaaaaaaa' DATA_IN_VOLUME = {'id': VOLUMEUUID} DATA_IN_CONNECTOR = {'initiator': INITIATOR} -## dpl.getpool DATA_SERVER_INFO = 0, { 'metadata': {'vendor': 'ProphetStor', 'version': '1.5'}} @@ -67,7 +66,6 @@ DATA_POOLINFO = 0, { 'objectType': 'application/cdmi-container', 'percentComplete': 100} -## dpl.assignvdev DATA_ASSIGNVDEV = 0, { 'children': [], 'childrenrange': '', diff --git a/cinder/tests/test_storwize_svc.py b/cinder/tests/test_storwize_svc.py index 51425740f..6150705ef 100644 --- a/cinder/tests/test_storwize_svc.py +++ b/cinder/tests/test_storwize_svc.py @@ -3032,7 +3032,7 @@ class StorwizeSVCDriverTestCase(test.TestCase): # Make sure that the volumes have been created self._assert_vol_exists(volume['name'], True) - #Set up one WWPN that won't match and one that will. + # Set up one WWPN that won't match and one that will. self.driver._state['storage_nodes']['1']['WWPN'] = ['123456789ABCDEF0', 'AABBCCDDEEFF0010'] @@ -3066,7 +3066,7 @@ class StorwizeSVCDriverTestCase(test.TestCase): # Make sure that the volumes have been created self._assert_vol_exists(volume['name'], True) - #Set up WWPNs that will not match what is available. + # Set up WWPNs that will not match what is available. 
self.driver._state['storage_nodes']['1']['WWPN'] = ['123456789ABCDEF0', '123456789ABCDEF1'] @@ -3100,7 +3100,7 @@ class StorwizeSVCDriverTestCase(test.TestCase): # Make sure that the volumes have been created self._assert_vol_exists(volume['name'], True) - #Set up one WWPN. + # Set up one WWPN. self.driver._state['storage_nodes']['1']['WWPN'] = ['AABBCCDDEEFF0012'] wwpns = ['ff00000000000000', 'ff00000000000001'] diff --git a/cinder/tests/test_volume_transfer.py b/cinder/tests/test_volume_transfer.py index 183b063b1..0922711de 100644 --- a/cinder/tests/test_volume_transfer.py +++ b/cinder/tests/test_volume_transfer.py @@ -123,17 +123,17 @@ class VolumeTransferTestCase(test.TestCase): self.assertEqual(len(ts), 0, 'Unexpected transfers listed.') def test_delete_transfer_with_deleted_volume(self): - #create a volume + # create a volume volume = utils.create_volume(self.ctxt, id='1', updated_at=self.updated_at) - #create a transfer + # create a transfer tx_api = transfer_api.API() transfer = tx_api.create(self.ctxt, volume['id'], 'Description') t = tx_api.get(self.ctxt, transfer['id']) self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id') - #force delete volume + # force delete volume db.volume_destroy(context.get_admin_context(), volume['id']) - #Make sure transfer has been deleted. + # Make sure transfer has been deleted. self.assertRaises(exception.TransferNotFound, tx_api.get, self.ctxt, diff --git a/cinder/tests/test_volume_types.py b/cinder/tests/test_volume_types.py index 47240fcda..b6abc15a8 100644 --- a/cinder/tests/test_volume_types.py +++ b/cinder/tests/test_volume_types.py @@ -283,7 +283,7 @@ class VolumeTypeTestCase(test.TestCase): self.assertDictMatch(expected, res) def test_volume_types_diff(self): - #type_ref 1 and 2 have the same extra_specs, while 3 has different + # type_ref 1 and 2 have the same extra_specs, while 3 has different keyvals1 = {"key1": "val1", "key2": "val2"} keyvals2 = {"key1": "val0", "key2": "val2"} type_ref1 = volume_types.create(self.ctxt, "type1", keyvals1) @@ -300,7 +300,7 @@ class VolumeTypeTestCase(test.TestCase): self.assertEqual(same, False) self.assertEqual(diff['extra_specs']['key1'], ('val1', 'val0')) - #qos_ref 1 and 2 have the same specs, while 3 has different + # qos_ref 1 and 2 have the same specs, while 3 has different qos_keyvals1 = {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'} qos_keyvals2 = {'k1': 'v0', 'k2': 'v2', 'k3': 'v3'} qos_ref1 = qos_specs.create(self.ctxt, 'qos-specs-1', qos_keyvals1) diff --git a/cinder/tests/test_xio.py b/cinder/tests/test_xio.py index 37c1ee241..ed6af574e 100644 --- a/cinder/tests/test_xio.py +++ b/cinder/tests/test_xio.py @@ -683,7 +683,7 @@ class XIOISEDriverTestCase(object): raise exception.Invalid() ################################# -## UNIT TESTS ## +# UNIT TESTS # ################################# def test_do_setup(self, mock_req): self.setup_driver() diff --git a/cinder/tests/test_zadara.py b/cinder/tests/test_zadara.py index c6d334644..45ec04d46 100644 --- a/cinder/tests/test_zadara.py +++ b/cinder/tests/test_zadara.py @@ -193,7 +193,7 @@ class FakeRequest(object): if vol_name == vol: attachments = params['attachments'] if srv in attachments: - #already attached - ok + # already attached - ok return RUNTIME_VARS['good'] else: attachments.append(srv) @@ -247,7 +247,7 @@ class FakeRequest(object): if params['cg-name'] == cg_name: snapshots = params['snapshots'] if snap_name in snapshots: - #already attached + # already attached return RUNTIME_VARS['bad_volume'] else: snapshots.append(snap_name) diff 
--git a/cinder/volume/drivers/hds/nfs.py b/cinder/volume/drivers/hds/nfs.py index 4c2d905d1..7e8cf21f4 100644 --- a/cinder/volume/drivers/hds/nfs.py +++ b/cinder/volume/drivers/hds/nfs.py @@ -444,7 +444,6 @@ class HDSNFSDriver(nfs.NfsDriver): nfs_info = self._get_nfs_info() for share in self.shares: - #export = share.split(':')[1] if share in nfs_info.keys(): LOG.info(_LI("share: %(share)s -> %(info)s"), {'share': share, 'info': nfs_info[share]['path']}) diff --git a/cinder/volume/drivers/huawei/__init__.py b/cinder/volume/drivers/huawei/__init__.py index 2416acc78..2e0895347 100644 --- a/cinder/volume/drivers/huawei/__init__.py +++ b/cinder/volume/drivers/huawei/__init__.py @@ -70,7 +70,7 @@ class HuaweiVolumeDriver(object): 'Huawei OceanStor %(product)s series storage arrays.') % {'protocol': protocol, 'product': product}) - #Map HVS to 18000 + # Map HVS to 18000 if product in MAPPING: LOG.warn(_LW("Product name %s is deprecated, update your " "configuration to the new product name."), product) diff --git a/cinder/volume/drivers/remotefs.py b/cinder/volume/drivers/remotefs.py index 334f5a2e1..a9719140c 100644 --- a/cinder/volume/drivers/remotefs.py +++ b/cinder/volume/drivers/remotefs.py @@ -36,7 +36,7 @@ from cinder.volume import driver LOG = logging.getLogger(__name__) nas_opts = [ - #TODO(eharney): deprecate nas_ip and change this to nas_host + # TODO(eharney): deprecate nas_ip and change this to nas_host cfg.StrOpt('nas_ip', default='', help='IP address or Hostname of NAS system.'), diff --git a/cinder/volume/drivers/san/hp/hp_3par_common.py b/cinder/volume/drivers/san/hp/hp_3par_common.py index 805cfe752..e9e0e9721 100644 --- a/cinder/volume/drivers/san/hp/hp_3par_common.py +++ b/cinder/volume/drivers/san/hp/hp_3par_common.py @@ -824,7 +824,7 @@ class HP3PARCommon(object): qos_specs_id = volume_type.get('qos_specs_id') specs = volume_type.get('extra_specs') - #NOTE(kmartin): We prefer the qos_specs association + # NOTE(kmartin): We prefer the qos_specs association # and override any existing extra-specs settings # if present. if qos_specs_id is not None: diff --git a/cinder/volume/drivers/sheepdog.py b/cinder/volume/drivers/sheepdog.py index aedec2b23..21d46f9b1 100644 --- a/cinder/volume/drivers/sheepdog.py +++ b/cinder/volume/drivers/sheepdog.py @@ -50,9 +50,9 @@ class SheepdogDriver(driver.VolumeDriver): def check_for_setup_error(self): """Return error if prerequisites aren't met.""" try: - #NOTE(francois-charlier) Since 0.24 'collie cluster info -r' - # gives short output, but for compatibility reason we won't - # use it and just check if 'running' is in the output. + # NOTE(francois-charlier) Since 0.24 'collie cluster info -r' + # gives short output, but for compatibility reason we won't + # use it and just check if 'running' is in the output. (out, _err) = self._execute('collie', 'cluster', 'info') if 'status: running' not in out: exception_message = (_("Sheepdog is not working: %s") % out) diff --git a/cinder/volume/flows/api/create_volume.py b/cinder/volume/flows/api/create_volume.py index 5ca22f15a..795ce1dc8 100644 --- a/cinder/volume/flows/api/create_volume.py +++ b/cinder/volume/flows/api/create_volume.py @@ -231,7 +231,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask): # exist, this is expected as it signals that the image_id is missing. 
image_meta = self.image_service.show(context, image_id) - #check whether image is active + # check whether image is active if image_meta['status'] != 'active': msg = _('Image %(image_id)s is not active.')\ % {'image_id': image_id} diff --git a/cinder/volume/volume_types.py b/cinder/volume/volume_types.py index d4ed68962..8624a10f4 100644 --- a/cinder/volume/volume_types.py +++ b/cinder/volume/volume_types.py @@ -160,7 +160,7 @@ def get_default_volume_type(): except exception.VolumeTypeNotFoundByName as e: # Couldn't find volume type with the name in default_volume_type # flag, record this issue and move on - #TODO(zhiteng) consider add notification to warn admin + # TODO(zhiteng) consider add notification to warn admin LOG.exception(_LE('Default volume type is not found,' 'please check default_volume_type config: %s') % six.text_type(e)) diff --git a/tox.ini b/tox.ini index 4d4850db6..c3589faf3 100644 --- a/tox.ini +++ b/tox.ini @@ -55,9 +55,6 @@ commands = python setup.py build_sphinx # E251 unexpected spaces around keyword / parameter equals # reason: no improvement in readability # -# E265 block comment should start with '# ' -# reason: no improvement in readability -# # H402 one line docstring needs punctuation # reason: removed in hacking (https://review.openstack.org/#/c/101497/) # @@ -73,7 +70,7 @@ commands = python setup.py build_sphinx # H302,H405 -ignore = E251,E265,H302,H402,H405,H803,H904 +ignore = E251,H302,H402,H405,H803,H904 exclude = .git,.venv,.tox,dist,tools,doc,common,*egg,build max-complexity=30 -- 2.45.2
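
As an aside for readers unfamiliar with the rule this change enforces: pycodestyle's E265 check requires a block comment to start with '#' followed by a space, which is exactly the transformation applied throughout the hunks above. The short Python sketch below illustrates the violation and the compliant form; the function and parameter names are hypothetical and only loosely mirror the qos_specs_manage.py hunk, so treat it as an illustration rather than code from the Cinder tree.

    #comment with no space after the hash   -> reported as E265
    # comment starting with '# '            -> accepted


    def is_force_requested(params):
        # Convert the 'force' query parameter to a bool in a strict
        # manner; strutils is deliberately not used so the sketch stays
        # self-contained.
        value = str(params.get('force', 'false')).strip().lower()
        if value in ('1', 't', 'true', 'on', 'y', 'yes'):
            return True
        if value in ('0', 'f', 'false', 'off', 'n', 'no'):
            return False
        raise ValueError('Unrecognized boolean value: %r' % value)

With E265 dropped from the ignore list in tox.ini, any remaining offenders can be found locally with, for example, flake8 --select=E265 cinder/.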