Replace dict.iteritems() with dict.items() for Python 3 compatibility.

Also replace six.iteritems(obj) with obj.items().

The iteritems() method of Python 2 dictionaries was removed in Python 3,
where items() returns a view rather than a list. As discussed on the
openstack-dev mailing list, iteritems() must be replaced with items()
and six.iteritems() should not be used: in OpenStack, the overhead of
creating a temporary list with dict.items() on Python 2 is negligible.

Blueprint cinder-python3
Change-Id: Ic3d8fd6b71d2c9f21929b0d6bf68c8f84a5e2567
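
A minimal sketch of the pattern applied throughout this change; the
dictionary and its contents are illustrative only, not taken from the
Cinder code:

    # Python 2 only: iteritems() returns an iterator over the dict's
    # items and no longer exists in Python 3:
    #     for key, value in conf.iteritems():
    #         print('%s = %s' % (key, value))

    # Portable form used in this change: items() returns a temporary
    # list on Python 2 (a negligible copy for dicts of this size) and a
    # lazy view on Python 3, and iterates identically on both.
    conf = {'volume_backend_name': 'lvm', 'iscsi_helper': 'tgtadm'}
    for key, value in conf.items():
        print('%s = %s' % (key, value))

    # Reassigning existing keys while iterating, as a few hunks below
    # do, is safe on both versions; adding or removing keys during
    # iteration would instead need list(conf.items()) on Python 3.
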
def update(self, req, id, body):
authorize(req.environ['cinder.context'])
update_values = {}
- for raw_key, raw_val in body.iteritems():
+ for raw_key, raw_val in body.items():
key = raw_key.lower().strip()
val = raw_val.lower().strip()
if key == "status":
raise webob.exc.HTTPBadRequest(explanation=explanation)
update_setters = {'status': self._set_enabled_status}
result = {}
- for key, value in update_values.iteritems():
+ for key, value in update_values.items():
result.update(update_setters[key](req, id, value))
return result
}
used_limits = {}
- for display_name, single_quota in quota_map.iteritems():
+ for display_name, single_quota in quota_map.items():
if single_quota in quotas:
used_limits[display_name] = quotas[single_quota]['in_use']
@wsgi.serializers(xml=ExtensionsTemplate)
def index(self, req):
extensions = []
- for _alias, ext in self.extension_manager.extensions.iteritems():
+ for _alias, ext in self.extension_manager.extensions.items():
extensions.append(self._translate(ext))
return dict(extensions=extensions)
search_opts.pop('limit', None)
search_opts.pop('offset', None)
- for k, v in search_opts.iteritems():
+ for k, v in search_opts.items():
try:
search_opts[k] = ast.literal_eval(v)
except (ValueError, SyntaxError):
filters['display_name'] = filters['name']
del filters['name']
- for k, v in filters.iteritems():
+ for k, v in filters.items():
try:
filters[k] = ast.literal_eval(v)
except (ValueError, SyntaxError):
"injected_file_content_bytes": ["maxPersonalitySize"],
}
limits = {}
- for name, value in absolute_limits.iteritems():
+ for name, value in absolute_limits.items():
if name in limit_names and value is not None:
for name in limit_names[name]:
limits[name] = value
if sep is not None and key is not None and len(val.strip()) > 0:
parsed_attrs[key] = val.strip()
- for ckey, cval in check_attrs.iteritems():
+ for ckey, cval in check_attrs.items():
if ckey not in parsed_attrs:
return False
elif exact_match and parsed_attrs[ckey] != cval:
if param:
print('%s = %s' % (param, CONF.get(param)))
else:
- for key, value in CONF.iteritems():
+ for key, value in CONF.items():
print('%s = %s' % (key, value))
results = []
not_found = object()
for cgsnapshot in cgsnapshots:
- for opt, value in search_opts.iteritems():
+ for opt, value in search_opts.items():
if cgsnapshot.get(opt, not_found) != value:
break
else:
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
- for k, v in metadata_dict.iteritems():
+ for k, v in metadata_dict.items():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
def iscsi_target_create_safe(context, values):
iscsi_target_ref = models.IscsiTarget()
- for (key, value) in values.iteritems():
+ for (key, value) in values.items():
iscsi_target_ref[key] = value
session = get_session()
filter_dict = {}
# Iterate over all filters, special case the filter if necessary
- for key, value in filters.iteritems():
+ for key, value in filters.items():
if key == 'metadata':
# model.VolumeMetadata defines the backref to Volumes as
# 'volume_metadata' or 'volume_admin_metadata', use those as
# column attribute keys
col_attr = getattr(models.Volume, 'volume_metadata')
col_ad_attr = getattr(models.Volume, 'volume_admin_metadata')
- for k, v in value.iteritems():
+ for k, v in value.items():
query = query.filter(or_(col_attr.any(key=k, value=v),
col_ad_attr.any(key=k, value=v)))
elif isinstance(value, (list, tuple, set, frozenset)):
if delete:
original_metadata = _volume_x_metadata_get(context, volume_id,
model, session=session)
- for meta_key, meta_value in original_metadata.iteritems():
+ for meta_key, meta_value in original_metadata.items():
if meta_key not in metadata:
meta_ref = _volume_x_metadata_get_item(context, volume_id,
meta_key, model,
if delete:
original_metadata = _snapshot_metadata_get(context, snapshot_id,
session)
- for meta_key, meta_value in original_metadata.iteritems():
+ for meta_key, meta_value in original_metadata.items():
if meta_key not in metadata:
meta_ref = _snapshot_metadata_get_item(context,
snapshot_id,
session = get_session()
with session.begin():
spec_ref = None
- for key, value in specs.iteritems():
+ for key, value in specs.items():
try:
spec_ref = _volume_type_extra_specs_get_item(
context, volume_type_id, key, session)
specs_root.save(session=session)
# Insert all specification entries for QoS specs
- for k, v in values['qos_specs'].iteritems():
+ for k, v in values['qos_specs'].items():
item = dict(key=k, value=v, specs_id=specs_id)
item['id'] = str(uuid.uuid4())
spec_entry = models.QualityOfServiceSpecs()
except AttributeError:
pass
- for k, v in self.kwargs.iteritems():
+ for k, v in self.kwargs.items():
if isinstance(v, Exception):
self.kwargs[k] = six.text_type(v)
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
- for name, value in kwargs.iteritems():
+ for name, value in kwargs.items():
LOG.error(_LE("%(name)s: %(value)s"),
{'name': name, 'value': value})
if CONF.fatal_exception_format_errors:
'availability_zone': volume_ref.get('availability_zone'),
'volume_type_id': volume_type_id,
},
- 'volume_type': list(dict(vol_type).iteritems()),
+ 'volume_type': list(dict(vol_type).items()),
}
def execute(self, context, request_spec, volume_id, snapshot_id,
def flags(self, **kw):
"""Override CONF variables for a test."""
- for k, v in kw.iteritems():
+ for k, v in kw.items():
self.override_config(k, v)
def log_level(self, level):
}
limits = {}
- for display_name, q in quota_map.iteritems():
+ for display_name, q in quota_map.items():
limits[q] = {'limit': 2,
'in_use': 1}
self.controller.index(fake_req, res)
abs_limits = res.obj['limits']['absolute']
- for used_limit, value in abs_limits.iteritems():
+ for used_limit, value in abs_limits.items():
self.assertEqual(value,
limits[quota_map[used_limit]]['in_use'])
if filters is None or filters['is_public'] is None:
return VOLUME_TYPES
res = {}
- for k, v in VOLUME_TYPES.iteritems():
+ for k, v in VOLUME_TYPES.items():
if filters['is_public'] and _has_type_access(k, context.project_id):
res.update({k: v})
continue
def __init__(self, **kwargs):
FakeToken.id_count += 1
self.id = FakeToken.id_count
- for k, v in kwargs.iteritems():
+ for k, v in kwargs.items():
setattr(self, k, v)
if entry['action'] != action:
continue
match = True
- for k, v in kwargs.iteritems():
+ for k, v in kwargs.items():
if entry.get(k) != v:
match = False
break
class FakeHostState(host_manager.HostState):
def __init__(self, host, attribute_dict):
super(FakeHostState, self).__init__(host)
- for (key, val) in attribute_dict.iteritems():
+ for (key, val) in attribute_dict.items():
setattr(self, key, val)
def list(self, detailed=True, search_opts=None):
matching = list(self._servers)
if search_opts:
- for opt, val in search_opts.iteritems():
+ for opt, val in search_opts.items():
matching = [m for m in matching
if getattr(m, opt, None) == val]
return matching
def test_config_commands_list(self):
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
expected_out = ''
- for key, value in CONF.iteritems():
+ for key, value in CONF.items():
expected_out += '%s = %s' % (key, value) + '\n'
config_cmds = cinder_manage.ConfigCommands()
self.assertEqual(
len(obj1), len(obj2),
"Keys mismatch: %s" % str(set(obj1.keys()) ^ set(obj2.keys())))
- for key, value in obj1.iteritems():
+ for key, value in obj1.items():
self.assertEqual(value, obj2[key])
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
def test_service_create(self):
service = self._create_service({})
self.assertFalse(service['id'] is None)
- for key, value in self._get_base_values().iteritems():
+ for key, value in self._get_base_values().items():
self.assertEqual(value, service[key])
def test_service_destroy(self):
}
db.service_update(self.ctxt, service['id'], new_values)
updated_service = db.service_get(self.ctxt, service['id'])
- for key, value in new_values.iteritems():
+ for key, value in new_values.items():
self.assertEqual(value, updated_service[key])
def test_service_update_not_found_exception(self):
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'gigabytes')
expected = {'resource': 'gigabytes', 'project_id': 'p1',
'in_use': 0, 'reserved': 2, 'total': 2}
- for key, value in expected.iteritems():
+ for key, value in expected.items():
self.assertEqual(value, quota_usage[key], key)
def test_quota_usage_get_all_by_project(self):
def _reset_flags(self):
self.driver.configuration.local_conf.reset()
- for k, v in self._def_flags.iteritems():
+ for k, v in self._def_flags.items():
self._set_flag(k, v)
def _generate_vol_info(self,
def _reset_flags(self):
self.driver.configuration.local_conf.reset()
- for k, v in self._def_flags.iteritems():
+ for k, v in self._def_flags.items():
self._set_flag(k, v)
def _generate_vol_info(self,
def create_consistencygroup(self, ctxt, group):
- volumes = [volume for k, volume in self.volumes.iteritems()
+ volumes = [volume for k, volume in self.volumes.items()
if volume['consistencygroup_id'] == group['id']]
if volumes:
volumes.append(volume)
# Delete snapshots in consistency group
- self.snapshots = {k: snap for k, snap in self.snapshots.iteritems()
+ self.snapshots = {k: snap for k, snap in self.snapshots.items()
if not(snap.get('consistencygroup_id', None)
== group.get('id', None))}
# Delete volume in consistency group
- self.volumes = {k: vol for k, vol in self.volumes.iteritems()
+ self.volumes = {k: vol for k, vol in self.volumes.items()
if not(vol.get('consistencygroup_id', None)
== group.get('id', None))}
snapshots.append(snapshot)
# Delete snapshots in consistency group
- self.snapshots = {k: snap for k, snap in self.snapshots.iteritems()
+ self.snapshots = {k: snap for k, snap in self.snapshots.items()
if not(snap.get('consistencygroup_id', None)
== cgsnapshot.get('cgsnapshot_id', None))}
def _reset_flags(self):
self._driver.configuration.local_conf.reset()
- for k, v in self._def_flags.iteritems():
+ for k, v in self._def_flags.items():
self._set_flag(k, v)
def test_check_for_setup_error(self):
if not headers:
headers = {}
req_str = '%s %s HTTP/1.1\r\n' % (method, path)
- for key, value in headers.iteritems():
+ for key, value in headers.items():
req_str += "%s: %s\r\n" % (key, value)
if data:
req_str += '\r\n%s' % data
if not headers:
headers = {}
req_str = '%s %s HTTP/1.1\r\n' % (method, path)
- for key, value in headers.iteritems():
+ for key, value in headers.items():
req_str += "%s: %s\r\n" % (key, value)
if data:
req_str += '\r\n%s' % data
if not headers:
headers = {}
req_str = '%s %s HTTP/1.1\r\n' % (method, path)
- for key, value in headers.iteritems():
+ for key, value in headers.items():
req_str += "%s: %s\r\n" % (key, value)
if data:
req_str += '\r\n%s' % data
filter_key = kwargs['filtervalue'].split('=')[0]
filter_value = kwargs['filtervalue'].split('=')[1]
to_delete = []
- for k, v in self._fcmappings_list.iteritems():
+ for k, v in self._fcmappings_list.items():
if str(v[filter_key]) == filter_value:
source = self._volumes_list[v['source']]
target = self._volumes_list[v['target']]
params = ['name', 'warning', 'udid',
'autoexpand', 'easytier', 'primary']
- for key, value in kwargs.iteritems():
+ for key, value in kwargs.items():
if key == 'easytier':
vol['easy_tier'] = value
continue
def _reset_flags(self):
self.driver.configuration.local_conf.reset()
- for k, v in self._def_flags.iteritems():
+ for k, v in self._def_flags.items():
self._set_flag(k, v)
def _assert_vol_exists(self, name, exists):
for idx in range(len(opts_list)):
attrs = self._create_test_vol(opts_list[idx])
- for k, v in chck_list[idx].iteritems():
+ for k, v in chck_list[idx].items():
try:
if k[0] == '-':
k = k[1:]
ret = self.driver.initialize_connection(volume1, self._connector)
self.assertEqual(ret['driver_volume_type'],
expected[protocol]['driver_volume_type'])
- for k, v in expected[protocol]['data'].iteritems():
+ for k, v in expected[protocol]['data'].items():
self.assertEqual(ret['data'][k], v)
# Initialize again, should notice it and do nothing
ret = self.driver.initialize_connection(volume1, self._connector)
self.assertEqual(ret['driver_volume_type'],
expected[protocol]['driver_volume_type'])
- for k, v in expected[protocol]['data'].iteritems():
+ for k, v in expected[protocol]['data'].items():
self.assertEqual(ret['data'][k], v)
# Try to delete the 1st volume (should fail because it is mapped)
self.assertEqual(
ret['driver_volume_type'],
expected_fc_npiv['driver_volume_type'])
- for k, v in expected_fc_npiv['data'].iteritems():
+ for k, v in expected_fc_npiv['data'].items():
self.assertEqual(ret['data'][k], v)
self._set_flag('storwize_svc_npiv_compatibility_mode',
False)
metadata = db.volume_glance_metadata_get(ctxt, 2)
self.assertEqual(len(metadata), 3)
for expected, meta in zip(expected_metadata_2, metadata):
- for key, value in expected.iteritems():
+ for key, value in expected.items():
self.assertEqual(meta[key], value)
self.assertRaises(exception.GlanceMetadataExists,
self.assertEqual(self.vol_type1_description, new['description'])
- for k, v in self.vol_type1_specs.iteritems():
+ for k, v in self.vol_type1_specs.items():
self.assertEqual(v, new['extra_specs'][k],
'one of fields does not match')
self.addCleanup(db.volume_type_destroy, context.get_admin_context(),
self.vol_type1['id'])
self.volume_type1_id = ref.id
- for k, v in self.vol_type1_specs.iteritems():
+ for k, v in self.vol_type1_specs.items():
self.vol_type1_specs[k] = str(v)
self.vol_type2_noextra = dict(name="TEST: Volume type without extra")
self.assertEqual(resp.status_int, exception_type.code, resp.body)
if hasattr(exception_type, 'headers'):
- for (key, value) in exception_type.headers.iteritems():
+ for (key, value) in exception_type.headers.items():
self.assertIn(key, resp.headers)
self.assertEqual(resp.headers[key], value)
import mock
-import six
from cinder import test
from cinder.volume.drivers.netapp.dataontap import block_7mode
def _get_local_functions(self, obj):
"""Get function names of an object without superclass functions."""
- return set([key for key, value in six.iteritems(type(obj).__dict__)
+ return set([key for key, value in type(obj).__dict__.items()
if callable(value)])
registry = na_common.NETAPP_UNIFIED_DRIVER_REGISTRY
for family in six.iterkeys(registry):
- for protocol, full_class_name in six.iteritems(registry[family]):
+ for protocol, full_class_name in registry[family].items():
driver = na_common.NetAppDriver.create_driver(
family, protocol, **kwargs)
self.assertEqual(full_class_name, get_full_class_name(driver))
pretty_keys = kwargs.pop("pretty_keys", True)
exclusive_options = {}
- for (k, v) in kwargs.iteritems():
+ for (k, v) in kwargs.items():
if v is not None:
exclusive_options[k] = True
for item in orig_meta:
if item['key'] in visible_admin_meta.keys():
item['value'] = visible_admin_meta.pop(item['key'])
- for key, value in visible_admin_meta.iteritems():
+ for key, value in visible_admin_meta.items():
orig_meta.append({'key': key, 'value': value})
volume['volume_metadata'] = orig_meta
# avoid circular ref when vol is a Volume instance
results = []
not_found = object()
for snapshot in snapshots:
- for opt, value in search_opts.iteritems():
+ for opt, value in search_opts.items():
if snapshot.get(opt, not_found) != value:
break
else:
if not metadata:
metadata = {}
- for k, v in metadata.iteritems():
+ for k, v in metadata.items():
if len(k) == 0:
msg = _("Metadata property key blank.")
LOG.warning(msg)
used_devices = self._get_used_devices()
total_size = 0
free_size = 0
- for device, size in dict_of_devices_sizes.iteritems():
+ for device, size in dict_of_devices_sizes.items():
if device not in used_devices:
free_size += size
total_size += size
error_msg = ""
# error_data is a single key value dict
- for key, value in error_data.iteritems():
+ for key, value in error_data.items():
error_msg = value.get('errortext')
return error_msg
# Nothing to override
return default_dict
- for key, value in default_dict.iteritems():
+ for key, value in default_dict.items():
# Fill the user dict with default options based on condition
if filtered_user_dict.get(key) is None and value is not None:
filtered_user_dict[key] = value
if type_id:
volume_type = volume_types.get_volume_type(ctxt, type_id)
specs = volume_type.get('extra_specs')
- for key, value in specs.iteritems():
+ for key, value in specs.items():
specs[key] = value
return specs
kvs = specs
LOG.info(_LI('The QoS specs are: %s.'), kvs)
- for key, value in kvs.iteritems():
+ for key, value in kvs.items():
if key in huawei_valid_keys:
qos[key.upper()] = value
def _check_qos_high_priority(self, qos):
"""Check QoS priority."""
- for key, value in qos.iteritems():
+ for key, value in qos.items():
if (key.find('MIN') == 0) or (key.find('LATENCY') == 0):
return True
# Get preferred node and other nodes in I/O group
preferred_node_entry = None
io_group_nodes = []
- for k, node in self._storage_nodes.iteritems():
+ for k, node in self._storage_nodes.items():
if vdisk_params['protocol'] != node['protocol']:
continue
if node['id'] == preferred_node:
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
specs = volume_type.get('extra_specs')
- for k, value in specs.iteritems():
+ for k, value in specs.items():
# Get the scope, if using scope format
key_split = k.split(':')
if len(key_split) == 1:
# For each node, check what connection modes it supports. Delete any
# nodes that do not support any types (may be partially configured).
to_delete = []
- for k, node in self._storage_nodes.iteritems():
+ for k, node in self._storage_nodes.items():
if not node['WWPN']:
to_delete.append(k)
# For each node, check what connection modes it supports. Delete any
# nodes that do not support any types (may be partially configured).
to_delete = []
- for k, node in self._state['storage_nodes'].iteritems():
+ for k, node in self._state['storage_nodes'].items():
if ((len(node['ipv4']) or len(node['ipv6']))
and len(node['iscsi_name'])):
node['enabled_protocols'].append('iSCSI')
def _get_opts_from_specs(self, opts, specs):
qos = {}
- for k, value in specs.iteritems():
+ for k, value in specs.items():
# Get the scope, if using scope format
key_split = k.split(':')
if len(key_split) == 1:
def add_vdisk_qos(self, vdisk, qos):
"""Add the QoS configuration to the volume."""
- for key, value in qos.iteritems():
+ for key, value in qos.items():
if key in self.svc_qos_keys.keys():
param = self.svc_qos_keys[key]['param']
self.ssh.chvdisk(vdisk, ['-' + param, str(value)])
available in the QoS configuration, the value is taken from it;
if not, the value will be set to default.
"""
- for key, value in self.svc_qos_keys.iteritems():
+ for key, value in self.svc_qos_keys.items():
param = value['param']
if key in qos.keys():
# If the value is set in QoS, take the value from
def disable_vdisk_qos(self, vdisk, qos):
"""Disable the QoS."""
- for key, value in qos.iteritems():
+ for key, value in qos.items():
if key in self.svc_qos_keys.keys():
param = self.svc_qos_keys[key]['param']
# Take the default value.
return
normalized_qos_keys = [key.lower() for key in QOS_KEYS]
keylist = []
- for key, value in six.iteritems(qos_spec):
+ for key, value in qos_spec.items():
lower_case_key = key.lower()
if lower_case_key not in normalized_qos_keys:
msg = _('Unrecognized QOS keyword: "%s"') % key
self.configuration.nas_secure_file_permissions,
'nas_secure_file_operations':
self.configuration.nas_secure_file_operations}
- for opt_name, opt_value in secure_options.iteritems():
+ for opt_name, opt_value in secure_options.items():
if opt_value not in valid_secure_opts:
err_parms = {'name': opt_name, 'value': opt_value}
msg = _("NAS config '%(name)s=%(value)s' invalid. Must be "
new_base_file = base_file_img_info.backing_file
base_id = None
- for key, value in snap_info.iteritems():
+ for key, value in snap_info.items():
if value == base_file and key != 'active':
base_id = key
break
else:
kvs = specs
- for key, value in kvs.iteritems():
+ for key, value in kvs.items():
if 'qos:' in key:
fields = key.split(':')
key = fields[1]
def _get_keys_by_volume_type(self, volume_type):
hp3par_keys = {}
specs = volume_type.get('extra_specs')
- for key, value in specs.iteritems():
+ for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
# identify key (nsp) of least used nsp
current_smallest_count = sys.maxint
- for (nsp, count) in nsp_counts.iteritems():
+ for (nsp, count) in nsp_counts.items():
if count < current_smallest_count:
current_least_used_nsp = nsp
current_smallest_count = count
def _get_lh_extra_specs(self, extra_specs, valid_keys):
"""Get LeftHand extra_specs (valid_keys only)."""
extra_specs_of_interest = {}
- for key, value in extra_specs.iteritems():
+ for key, value in extra_specs.items():
if key in valid_keys:
extra_specs_of_interest[key] = value
return extra_specs_of_interest
def _map_extra_specs(self, extra_specs):
"""Map the extra spec key/values to LeftHand key/values."""
client_options = {}
- for key, value in extra_specs.iteritems():
+ for key, value in extra_specs.items():
# map extra spec key to lh client option key
client_key = extra_specs_key_map[key]
# map extra spec value to lh client option value
# only set the ones that have changed
changed_extra_specs = {}
- for key, value in lh_extra_specs.iteritems():
+ for key, value in lh_extra_specs.items():
(old, new) = diff['extra_specs'][key]
if old != new:
changed_extra_specs[key] = value
else:
options_dict['username'] = 'guest'
named_options = ','.join("%s=%s" % (key, val) for (key, val)
- in options_dict.iteritems())
+ in options_dict.items())
options_list = ','.join(options_list)
flags = '-o ' + ','.join([named_options, options_list])
else:
kvs = specs
- for key, value in kvs.iteritems():
+ for key, value in kvs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
used = 0
free = 0
agSize = 512 * units.Mi
- for (id, desc) in dl.iteritems():
+ for (id, desc) in dl.items():
if desc.generationLeft != -1:
continue
total += desc.agCount * agSize
templ = self.configuration.storpool_template
repl = self.configuration.storpool_replication
if diff['extra_specs']:
- for (k, v) in diff['extra_specs'].iteritems():
+ for (k, v) in diff['extra_specs'].items():
if k == 'volume_backend_name':
if v[0] != v[1]:
# Retype of a volume backend not supported yet,
volume_type = volume_types.get_volume_type(ctxt, type_id)
extra_specs = volume_type.get('extra_specs')
# Parse out RAID, pool and affinity values
- for key, value in extra_specs.iteritems():
+ for key, value in extra_specs.items():
subkey = ''
if ':' in key:
fields = key.split(':')
else:
kvs = volume_type.get('extra_specs')
# Parse out min, max and burst values
- for key, value in kvs.iteritems():
+ for key, value in kvs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if not metadata:
metadata = {}
- for (k, v) in metadata.iteritems():
+ for (k, v) in metadata.items():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warning(msg)
LOG.debug("Searching by: %s", search_opts)
def _check_specs_match(qos_specs, searchdict):
- for k, v in searchdict.iteritems():
+ for k, v in searchdict.items():
if ((k not in qos_specs['specs'].keys() or
qos_specs['specs'][k] != v)):
return False
filter_mapping = {'qos_specs': _check_specs_match}
result = {}
- for name, args in qos_specs.iteritems():
+ for name, args in qos_specs.items():
# go over all filters in the list
- for opt, values in search_opts.iteritems():
+ for opt, values in search_opts.items():
try:
filter_func = filter_mapping[opt]
except KeyError:
LOG.debug("Searching by: %s" % search_opts)
def _check_extra_specs_match(vol_type, searchdict):
- for k, v in searchdict.iteritems():
+ for k, v in searchdict.items():
if (k not in vol_type['extra_specs'].keys()
or vol_type['extra_specs'][k] != v):
return False
filter_mapping = {'extra_specs': _check_extra_specs_match}
result = {}
- for type_name, type_args in vol_types.iteritems():
+ for type_name, type_args in vol_types.items():
# go over all filters in the list
- for opt, values in search_opts.iteritems():
+ for opt, values in search_opts.items():
try:
filter_func = filter_mapping[opt]
except KeyError:
dict1 = {}
if dict2 is None:
dict2 = {}
- for k, v in dict1.iteritems():
+ for k, v in dict1.items():
res[k] = (v, dict2.get(k))
if k not in dict2 or res[k][0] != res[k][1]:
equal = False
- for k, v in dict2.iteritems():
+ for k, v in dict2.items():
res[k] = (dict1.get(k), v)
if k not in dict1 or res[k][0] != res[k][1]:
equal = False
resp = req.get_response(self.application)
print(('*' * 40) + ' RESPONSE HEADERS') # noqa
- for (key, value) in resp.headers.iteritems():
+ for (key, value) in resp.headers.items():
print(key, '=', value) # noqa
print() # noqa