- [N329] LOG.exception and LOG.error messages require translations `_LE()`.
- [N330] LOG.warning messages require translations `_LW()`.
- [N333] Ensure that oslo namespaces are used for namespaced libraries.
+- [N336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs (see the example below).
- [C301] timeutils.utcnow() from oslo_utils should be used instead of datetime.now().
- [C302] six.text_type should be used instead of unicode.
- [C303] Ensure that there are no 'print()' statements in code that is being committed.
- [C306] timeutils.strtime() must not be used (deprecated).
- [C307] LOG.warn is deprecated. Enforce use of LOG.warning.
-
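
A hypothetical snippet (variable names are illustrative) showing what the new
N336 check flags and what it still allows::

    # Flagged by N336: dict constructor fed a sequence of key-value pairs
    counts = dict((name, 0) for name in names)
    counts = dict([(name, 0) for name in names])

    # Preferred: dict comprehension
    counts = {name: 0 for name in names}

    # Not flagged: keyword arguments or an empty constructor
    defaults = dict(limit=10, offset=0)
    empty = dict()
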
General
-------
- Use 'raise' instead of 'raise e' to preserve the original traceback of the exception being re-raised::
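
    # Illustrative example (hypothetical helpers):
    try:
        do_something()
    except Exception as e:
        cleanup()
        raise e    # BAD: can lose the original traceback

    try:
        do_something()
    except Exception:
        cleanup()
        raise      # GOOD: re-raises with the original traceback intact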
if usages:
return values
else:
- return dict((k, v['limit']) for k, v in values.items())
+ return {k: v['limit'] for k, v in values.items()}
@wsgi.serializers(xml=QuotaTemplate)
def show(self, req, id):
context = req.environ['cinder.context']
quotas = QUOTAS.get_project_quotas(context, context.project_id,
usages=False)
- abs_limits = dict((k, v['limit']) for k, v in quotas.items())
+ abs_limits = {k: v['limit'] for k, v in quotas.items()}
rate_limits = req.environ.get("cinder.limits", [])
builder = self._get_view_builder(req)
60 * 60 * 24: "DAY",
}
- UNIT_MAP = dict([(v, k) for k, v in UNITS.items()])
+ UNIT_MAP = {v: k for k, v in UNITS.items()}
def __init__(self, verb, uri, regex, value, unit):
"""Initialize a new `Limit`.
if vol.get('volume_metadata'):
metadata = vol.get('volume_metadata')
- d['metadata'] = dict((item['key'], item['value']) for item in metadata)
+ d['metadata'] = {item['key']: item['value'] for item in metadata}
# avoid circular ref when vol is a Volume instance
elif vol.get('metadata') and isinstance(vol.get('metadata'), dict):
d['metadata'] = vol['metadata']
context = req.environ['cinder.context']
quotas = QUOTAS.get_project_quotas(context, context.project_id,
usages=False)
- abs_limits = dict((k, v['limit']) for k, v in quotas.items())
+ abs_limits = {k: v['limit'] for k, v in quotas.items()}
rate_limits = req.environ.get("cinder.limits", [])
builder = self._get_view_builder(req)
60 * 60 * 24: "DAY",
}
- UNIT_MAP = dict([(v, k) for k, v in UNITS.items()])
+ UNIT_MAP = {v: k for k, v in UNITS.items()}
def __init__(self, verb, uri, regex, value, unit):
"""Initialize a new `Limit`.
"""Retrieve the metadata of the volume object."""
if volume.get('volume_metadata'):
metadata = volume.get('volume_metadata')
- return dict((item['key'], item['value']) for item in metadata)
+ return {item['key']: item['value'] for item in metadata}
# avoid circular ref when vol is a Volume instance
elif volume.get('metadata') and isinstance(volume.get('metadata'),
dict):
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
- extra_specs = dict([(x['key'], x['value'])
- for x in inst_type_query['extra_specs']])
+ extra_specs = {x['key']: x['value']
+ for x in inst_type_query['extra_specs']}
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
filter_by(project_id=project_id).\
with_lockmode('update').\
all()
- return dict((row.resource, row) for row in rows)
+ return {row.resource: row for row in rows}
@require_context
LOG.warning(_LW("Change will make usage less than 0 for the following "
"resources: %s"), unders)
if overs:
- usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
- for k, v in usages.items())
+ usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'])
+ for k, v in usages.items()}
raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
usages=usages)
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
no_audit_log = re.compile(r"(.)*LOG\.audit(.)*")
no_print_statements = re.compile(r"\s*print\s*\(.+\).*")
+dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
# NOTE(jsbryant): When other oslo libraries switch over to non-namespaced
# imports, we will need to add them to the regex below.
yield (0, msg)
+def dict_constructor_with_list_copy(logical_line):
+ msg = ("N336: Must use a dict comprehension instead of a dict constructor "
+ "with a sequence of key-value pairs.")
+ if dict_constructor_with_list_copy_re.match(logical_line):
+ yield (0, msg)
+
+
def factory(register):
register(no_vi_headers)
register(no_translate_debug_logs)
register(check_no_log_audit)
register(check_no_contextlib_nested)
register(no_log_warn)
+ register(dict_constructor_with_list_copy)
else:
sync_filt = lambda x: not hasattr(x, 'sync')
desired = set(keys)
- sub_resources = dict((k, v) for k, v in resources.items()
- if k in desired and sync_filt(v))
+ sub_resources = {k: v for k, v in resources.items()
+ if k in desired and sync_filt(v)}
# Make sure we accounted for all of them...
if len(keys) != len(sub_resources):
project_id,
context.quota_class, usages=False)
- return dict((k, v['limit']) for k, v in quotas.items())
+ return {k: v['limit'] for k, v in quotas.items()}
def limit_check(self, context, resources, values, project_id=None):
"""Check simple quota limits.
self.absolute_limits = {}
def stub_get_project_quotas(context, project_id, usages=True):
- return dict((k, dict(limit=v))
- for k, v in self.absolute_limits.items())
+ return {k: dict(limit=v) for k, v in self.absolute_limits.items()}
self.stubs.Set(cinder.quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
self.absolute_limits = {}
def stub_get_project_quotas(context, project_id, usages=True):
- return dict((k, dict(limit=v))
- for k, v in self.absolute_limits.items())
+ return {k: dict(limit=v) for k, v in self.absolute_limits.items()}
self.stubs.Set(cinder.quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
- return dict([(k, v) for k, v in obj.iteritems()
- if k not in ignored_keys])
+ return {k: v for k, v in obj.iteritems()
+ if k not in ignored_keys}
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
obj1 = self._dict_from_object(obj1, ignored_keys)
# metadata is a dict, compare the 'key' and 'value' of each
if key == 'volume_metadata':
self.assertEqual(len(val1), len(val2))
- val1_dict = dict((x.key, x.value) for x in val1)
- val2_dict = dict((x.key, x.value) for x in val2)
+ val1_dict = {x.key: x.value for x in val1}
+ val2_dict = {x.key: x.value for x in val2}
self.assertDictMatch(val1_dict, val2_dict)
else:
self.assertEqual(val1, val2)
step = str(step)
return val + step
- return [dict([(k, compose(v, i)) for k, v in values.items()])
+ return [{k: compose(v, i) for k, v in values.items()}
for i in range(1, 4)]
def test_volume_type_encryption_create(self):
step = str(step)
return val + step
- return [dict([(k, compose(v, i)) for k, v in base_values.items()])
+ return [{k: compose(v, i) for k, v in base_values.items()}
for i in range(1, 4)]
def test_backup_create(self):
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_gpfs_change_attributes')
def test_set_volume_attributes(self, mock_change_attributes, mock_mkfs):
- metadata = [dict([('key', 'data_pool_name'), ('value', 'test')]),
- dict([('key', 'replicas'), ('value', 'test')]),
- dict([('key', 'dio'), ('value', 'test')]),
- dict([('key', 'write_affinity_depth'), ('value', 'test')]),
- dict([('key', 'block_group_factor'), ('value', 'test')]),
- dict([('key', 'write_affinity_failure_group'),
- ('value', 'test')]),
- dict([('key', 'test'),
- ('value', 'test')]),
- dict([('key', 'fstype'),
- ('value', 'test')]),
- dict([('key', 'fslabel'),
- ('value', 'test')]),
- dict([('key', 'test'),
- ('value', 'test')])]
+ metadata = [{'key': 'data_pool_name', 'value': 'test'},
+ {'key': 'replicas', 'value': 'test'},
+ {'key': 'dio', 'value': 'test'},
+ {'key': 'write_affinity_depth', 'value': 'test'},
+ {'key': 'block_group_factor', 'value': 'test'},
+ {'key': 'write_affinity_failure_group', 'value': 'test'},
+ {'key': 'test', 'value': 'test'},
+ {'key': 'fstype', 'value': 'test'},
+ {'key': 'fslabel', 'value': 'test'},
+ {'key': 'test', 'value': 'test'}]
self.driver._set_volume_attributes('', '', metadata)
self.assertEqual(1, len(list(checks.check_no_print_statements(
"print ('My print with space')",
"cinder/volume/anotherFile.py", False))))
+
+ def test_dict_constructor_with_list_copy(self):
+ self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
+ " dict([(i, connect_info[i])"))))
+
+ self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
+ " attrs = dict([(k, _from_json(v))"))))
+
+ self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
+ " type_names = dict((value, key) for key, value in"))))
+
+ self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
+ " dict((value, key) for key, value in"))))
+
+ self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
+ "foo(param=dict((k, v) for k, v in bar.items()))"))))
+
+ self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
+ " dict([[i,i] for i in range(3)])"))))
+
+ self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
+ " dd = dict([i,i] for i in range(3))"))))
+
+ self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
+ " dict()"))))
+
+ self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
+ " create_kwargs = dict(snapshot=snapshot,"))))
+
+ self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
+ " self._render_dict(xml, data_el, data.__dict__)"))))
quota_class=None, defaults=True,
usages=True):
self.calls.append('get_project_quotas')
- return dict((k, dict(limit=v.default))
- for k, v in resources.items())
+ return {k: dict(limit=v.default) for k, v in resources.items()}
self.stubs.Set(self.driver, 'get_project_quotas',
fake_get_project_quotas)
# ensure that volume's glance metadata is copied
# to snapshot's glance metadata
self.assertEqual(len(vol_glance_meta), len(snap_glance_meta))
- vol_glance_dict = dict((x.key, x.value) for x in vol_glance_meta)
- snap_glance_dict = dict((x.key, x.value) for x in snap_glance_meta)
+ vol_glance_dict = {x.key: x.value for x in vol_glance_meta}
+ snap_glance_dict = {x.key: x.value for x in snap_glance_meta}
self.assertDictMatch(vol_glance_dict, snap_glance_dict)
# ensure that snapshot's status is changed to 'available'
db.volume_glance_metadata_copy_to_volume(self.ctxt, vol2['id'],
snapshot['id'])
metadata = db.volume_glance_metadata_get(self.ctxt, vol2['id'])
- metadata = dict([(m['key'], m['value']) for m in metadata])
+ metadata = {m['key']: m['value'] for m in metadata}
self.assertEqual(metadata, {'m1': 'v1'})
def test_volume_snapshot_glance_metadata_get_nonexistent(self):
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume image-metadata completed successfully."),
resource=volume)
- return dict(
- (meta_entry.key, meta_entry.value) for meta_entry in db_data
- )
+ return {meta_entry.key: meta_entry.value for meta_entry in db_data}
def _check_volume_availability(self, volume, force):
"""Check if the volume can be used."""
custom_property_set = (set(volume_image_metadata).difference
(set(glance_core_properties)))
if custom_property_set:
- metadata.update(dict(properties=dict((custom_property,
- volume_image_metadata
- [custom_property])
- for custom_property
- in custom_property_set)))
+ properties = {custom_property:
+ volume_image_metadata[custom_property]
+ for custom_property in custom_property_set}
+ metadata.update(dict(properties=properties))
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
host_name = connector['host']
if isinstance(host_name, six.text_type):
- unicode_host_name_filter = dict((ord(six.text_type(char)), u'-')
- for char in invalid_ch_in_host)
+ unicode_host_name_filter = {ord(six.text_type(char)): u'-'
+ for char in invalid_ch_in_host}
host_name = host_name.translate(unicode_host_name_filter)
elif isinstance(host_name, str):
string_host_name_filter = string.maketrans(
(_('_get_hdr_dic: attribute headers and values do not match.\n '
'Headers: %(header)s\n Values: %(row)s.')
% {'header': six.text_type(header), 'row': six.text_type(row)}))
- dic = dict((a, v) for a, v in map(None, attributes, values))
+ dic = {a: v for a, v in map(None, attributes, values)}
return dic
def _get_conn_fc_wwpns(self):