From 3fabed9d64696a204263fd7e00b65115a8ecce87 Mon Sep 17 00:00:00 2001
From: Victor Stinner
Date: Fri, 12 Jun 2015 15:21:09 +0200
Subject: [PATCH] Replace dict.itervalues() with dict.values()

This change adds Python 3 compatibility to the modified code. The
itervalues() method of Python 2 dictionaries was renamed to values()
in Python 3.

As discussed on the openstack-dev mailing list, itervalues() must be
replaced with values(); six.itervalues() should not be used. In
OpenStack, the overhead of creating a temporary list with dict.values()
on Python 2 is negligible.

Blueprint cinder-python3
Change-Id: Ibcf4597aeef1835f08bee2d6831205c428884c31
---
 cinder/backup/manager.py                  |  2 +-
 cinder/db/sqlalchemy/api.py               |  2 +-
 cinder/scheduler/host_manager.py          |  2 +-
 cinder/tests/unit/test_ibm_flashsystem.py | 18 ++++----
 cinder/tests/unit/test_storwize_svc.py    | 42 +++++++++----------
 cinder/volume/drivers/datera.py           |  2 +-
 cinder/volume/drivers/emc/scaleio.py      |  2 +-
 .../drivers/ibm/storwize_svc/__init__.py  |  4 +-
 cinder/volume/throttling.py               |  2 +-
 9 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py
index 73756fb90..c360ccd17 100644
--- a/cinder/backup/manager.py
+++ b/cinder/backup/manager.py
@@ -189,7 +189,7 @@ class BackupManager(manager.SchedulerDependentManager):
         """
         ctxt = context.get_admin_context()
-        for mgr in self.volume_managers.itervalues():
+        for mgr in self.volume_managers.values():
             self._init_volume_driver(ctxt, mgr.driver)
         LOG.info(_LI("Cleaning up incomplete backup operations."))
diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index b3c00fe89..f0cd78e3a 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -3623,7 +3623,7 @@ def purge_deleted_rows(context, age_in_days):
     metadata.bind = engine
     tables = []
-    for model_class in models.__dict__.itervalues():
+    for model_class in models.__dict__.values():
         if hasattr(model_class, "__tablename__") \
                 and hasattr(model_class, "deleted"):
             tables.append(model_class.__tablename__)
diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py
index 77faf1d2c..1e4a5765e 100644
--- a/cinder/scheduler/host_manager.py
+++ b/cinder/scheduler/host_manager.py
@@ -533,7 +533,7 @@ class HostManager(object):
                 pool_key = '.'.join([host, pool.pool_name])
                 all_pools[pool_key] = pool
-        return all_pools.itervalues()
+        return all_pools.values()
     def get_pools(self, context):
         """Returns a dict of all pools on all hosts HostManager knows about."""
diff --git a/cinder/tests/unit/test_ibm_flashsystem.py b/cinder/tests/unit/test_ibm_flashsystem.py
index 929e101c3..b829042a2 100644
--- a/cinder/tests/unit/test_ibm_flashsystem.py
+++ b/cinder/tests/unit/test_ibm_flashsystem.py
@@ -63,7 +63,7 @@ class FlashSystemManagementSimulator(object):
     @staticmethod
     def _find_unused_id(d):
         ids = []
-        for v in d.itervalues():
+        for v in d.values():
             ids.append(int(v['id']))
         ids.sort()
         for index, n in enumerate(ids):
@@ -148,7 +148,7 @@ class FlashSystemManagementSimulator(object):
         if 'obj' not in kwargs:
             rows = []
             rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status'])
-            for host in self._hosts_list.itervalues():
+            for host in self._hosts_list.values():
                 rows.append([host['id'], host['host_name'], '1', '1',
                              'degraded'])
             if len(rows) > 1:
@@ -197,7 +197,7 @@ class FlashSystemManagementSimulator(object):
             rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name',
                          'vdisk_UID'])
-            for mapping in self._mappings_list.itervalues():
+            for mapping in self._mappings_list.values():
                if (host_name == '') or (mapping['host'] == host_name):
                    volume = self._volumes_list[mapping['vol']]
                    rows.append([mapping['id'], mapping['host'],
@@ -417,7 +417,7 @@ class FlashSystemManagementSimulator(object):
                      'vdisk_UID', 'IO_group_id', 'IO_group_name'])
         mappings_found = 0
-        for mapping in self._mappings_list.itervalues():
+        for mapping in self._mappings_list.values():
             if (mapping['vol'] == vdisk_name):
                 mappings_found += 1
                 volume = self._volumes_list[mapping['vol']]
@@ -502,7 +502,7 @@ class FlashSystemManagementSimulator(object):
         host_info[added_key].append(added_val)
-        for v in self._hosts_list.itervalues():
+        for v in self._hosts_list.values():
             if v['id'] == host_info['id']:
                 continue
             for port in v[added_key]:
@@ -563,7 +563,7 @@ class FlashSystemManagementSimulator(object):
         if host_name not in self._hosts_list:
             return self._errors['CMMVC50000']
-        for v in self._mappings_list.itervalues():
+        for v in self._mappings_list.values():
             if (v['host'] == host_name):
                 return self._errors['CMMVC50000']
@@ -593,12 +593,12 @@ class FlashSystemManagementSimulator(object):
         if mapping_info['vol'] in self._mappings_list:
             return self._errors['CMMVC50000']
-        for v in self._mappings_list.itervalues():
+        for v in self._mappings_list.values():
             if ((v['host'] == mapping_info['host']) and
                     (v['lun'] == mapping_info['lun'])):
                 return self._errors['CMMVC50000']
-        for v in self._mappings_list.itervalues():
+        for v in self._mappings_list.values():
             if (v['lun'] == mapping_info['lun']) and ('force' not in kwargs):
                 return self._errors['CMMVC50000']
@@ -615,7 +615,7 @@ class FlashSystemManagementSimulator(object):
         vdisk = kwargs['obj'].strip('\'\"')
         mapping_ids = []
-        for v in self._mappings_list.itervalues():
+        for v in self._mappings_list.values():
             if v['vol'] == vdisk:
                 mapping_ids.append(v['id'])
         if not mapping_ids:
diff --git a/cinder/tests/unit/test_storwize_svc.py b/cinder/tests/unit/test_storwize_svc.py
index cd23c887a..32ec0ff5c 100644
--- a/cinder/tests/unit/test_storwize_svc.py
+++ b/cinder/tests/unit/test_storwize_svc.py
@@ -203,7 +203,7 @@ class StorwizeSVCManagementSimulator(object):
     @staticmethod
     def _find_unused_id(d):
         ids = []
-        for v in d.itervalues():
+        for v in d.values():
             ids.append(int(v['id']))
         ids.sort()
         for index, n in enumerate(ids):
@@ -534,9 +534,9 @@ port_speed!N/A
         target_wwpn = kwargs['wwpn'] if 'wwpn' in kwargs else None
         host_infos = []
-        for hv in self._hosts_list.itervalues():
+        for hv in self._hosts_list.values():
             if (not host_name) or (hv['host_name'] == host_name):
-                for mv in self._mappings_list.itervalues():
+                for mv in self._mappings_list.values():
                     if mv['host'] == hv['host_name']:
                         if not target_wwpn or target_wwpn in hv['wwpns']:
                             host_infos.append(hv)
@@ -653,10 +653,10 @@ port_speed!N/A
             return self._errors['CMMVC5753E']
         if not force:
-            for mapping in self._mappings_list.itervalues():
+            for mapping in self._mappings_list.values():
                 if mapping['vol'] == vol_name:
                     return self._errors['CMMVC5840E']
-            for fcmap in self._fcmappings_list.itervalues():
+            for fcmap in self._fcmappings_list.values():
                 if ((fcmap['source'] == vol_name) or
                         (fcmap['target'] == vol_name)):
                     return self._errors['CMMVC5840E']
@@ -688,7 +688,7 @@ port_speed!N/A
             'fc_name': '',
             'fc_map_count': '0',
         }
-        for fcmap in self._fcmappings_list.itervalues():
+        for fcmap in self._fcmappings_list.values():
             if ((fcmap['source'] == vol_name) or
                     (fcmap['target'] == vol_name)):
                 ret_vals['fc_id'] = fcmap['id']
@@ -705,7 +705,7 @@ port_speed!N/A
                          'RC_name', 'vdisk_UID', 'fc_map_count', 'copy_count',
                         'fast_write_state', 'se_copy_count', 'RC_change'])
-            for vol in self._volumes_list.itervalues():
+            for vol in self._volumes_list.values():
                 if (('filtervalue' not in kwargs) or
                         (kwargs['filtervalue'] == 'name=' + vol['name']) or
                         (kwargs['filtervalue'] == 'vdisk_UID=' + vol['uid'])):
@@ -773,7 +773,7 @@ port_speed!N/A
             rows.append(['mirror_write_priority', 'latency'])
             rows.append(['RC_change', 'no'])
-            for copy in vol['copies'].itervalues():
+            for copy in vol['copies'].values():
                 rows.append(['copy_id', copy['id']])
                 rows.append(['status', copy['status']])
                 rows.append(['primary', copy['primary']])
@@ -820,7 +820,7 @@ port_speed!N/A
         host_info[added_key].append(added_val)
-        for v in self._hosts_list.itervalues():
+        for v in self._hosts_list.values():
             if v['id'] == host_info['id']:
                 continue
             for port in v[added_key]:
@@ -893,7 +893,7 @@ port_speed!N/A
         if host_name not in self._hosts_list:
             return self._errors['CMMVC5753E']
-        for v in self._mappings_list.itervalues():
+        for v in self._mappings_list.values():
             if (v['host'] == host_name):
                 return self._errors['CMMVC5871E']
@@ -907,7 +907,7 @@ port_speed!N/A
         rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status'])
         found = False
-        for host in self._hosts_list.itervalues():
+        for host in self._hosts_list.values():
             filterstr = 'name=' + host['host_name']
             if (('filtervalue' not in kwargs) or
                     (kwargs['filtervalue'] == filterstr)):
@@ -959,7 +959,7 @@ port_speed!N/A
         rows.append(['type', 'id', 'name', 'iscsi_auth_method',
                      'iscsi_chap_secret'])
-        for host in self._hosts_list.itervalues():
+        for host in self._hosts_list.values():
             method = 'none'
             secret = ''
             if 'chapsecret' in host:
@@ -995,12 +995,12 @@ port_speed!N/A
         if mapping_info['vol'] in self._mappings_list:
             return self._errors['CMMVC6071E']
-        for v in self._mappings_list.itervalues():
+        for v in self._mappings_list.values():
             if ((v['host'] == mapping_info['host']) and
                     (v['lun'] == mapping_info['lun'])):
                 return self._errors['CMMVC5879E']
-        for v in self._mappings_list.itervalues():
+        for v in self._mappings_list.values():
             if (v['lun'] == mapping_info['lun']) and ('force' not in kwargs):
                 return self._errors['CMMVC6071E']
@@ -1019,7 +1019,7 @@ port_speed!N/A
         vol = kwargs['obj'].strip('\'\"')
         mapping_ids = []
-        for v in self._mappings_list.itervalues():
+        for v in self._mappings_list.values():
             if v['vol'] == vol:
                 mapping_ids.append(v['id'])
         if not mapping_ids:
@@ -1046,7 +1046,7 @@ port_speed!N/A
         rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name',
                      'vdisk_UID'])
-        for mapping in self._mappings_list.itervalues():
+        for mapping in self._mappings_list.values():
             if (host_name == '') or (mapping['host'] == host_name):
                 volume = self._volumes_list[mapping['vol']]
                 rows.append([mapping['id'], mapping['host'],
@@ -1067,7 +1067,7 @@ port_speed!N/A
         rows.append(['id name', 'SCSI_id', 'host_id', 'host_name',
                      'vdisk_UID', 'IO_group_id', 'IO_group_name'])
-        for mapping in self._mappings_list.itervalues():
+        for mapping in self._mappings_list.values():
             if (mapping['vol'] == vdisk_name):
                 mappings_found += 1
                 volume = self._volumes_list[mapping['vol']]
@@ -1220,7 +1220,7 @@ port_speed!N/A
         vdisk = kwargs['obj']
         rows = []
         rows.append(['id', 'name'])
-        for v in self._fcmappings_list.itervalues():
+        for v in self._fcmappings_list.values():
             if v['source'] == vdisk or v['target'] == vdisk:
                 rows.append([v['id'], v['name']])
         return self._print_info_cmd(rows=rows, **kwargs)
@@ -1369,7 +1369,7 @@ port_speed!N/A
         if 'obj' not in kwargs:
             rows.append(['id', 'name', 'status' 'start_time'])
-            for fcconsistgrp in self._fcconsistgrp_list.itervalues():
+            for fcconsistgrp in self._fcconsistgrp_list.values():
                 rows.append([fcconsistgrp['id'],
                              fcconsistgrp['name'],
                              fcconsistgrp['status'],
@@ -1482,7 +1482,7 @@ port_speed!N/A
                          'primary', 'mdisk_grp_id', 'mdisk_grp_name',
                          'capacity', 'type', 'se_copy', 'easy_tier',
                          'easy_tier_status', 'compressed_copy'])
-            for copy in vol['copies'].itervalues():
+            for copy in vol['copies'].values():
                 rows.append([vol['id'], vol['name'], copy['id'],
                              copy['status'], copy['sync'], copy['primary'],
                              copy['mdisk_grp_id'], copy['mdisk_grp_name'],
@@ -1612,7 +1612,7 @@ port_speed!N/A
         rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'progress',
                      'estimated_completion_time'])
         copy_found = False
-        for copy in vol['copies'].itervalues():
+        for copy in vol['copies'].values():
             if not copy_id or copy_id == copy['id']:
                 copy_found = True
                 row = [vol['id'], name, copy['id']]
diff --git a/cinder/volume/drivers/datera.py b/cinder/volume/drivers/datera.py
index 7d024060a..244f544b7 100644
--- a/cinder/volume/drivers/datera.py
+++ b/cinder/volume/drivers/datera.py
@@ -182,7 +182,7 @@ class DateraDriver(san.SanISCSIDriver):
             # NOTE(thingee): Refer to the Datera test for a stub of what this
            # looks like. We're just going to pull the first IP that the Datera
            # cluster makes available for the portal.
-            iqn = next(export['targetIds'].itervalues())['ids'][0]['id']
+            iqn = next(iter(export['targetIds'].values()))['ids'][0]['id']
         else:
             export = self._issue_api_request(
                 'export_configs',
diff --git a/cinder/volume/drivers/emc/scaleio.py b/cinder/volume/drivers/emc/scaleio.py
index 23b3c5a23..b8f071add 100644
--- a/cinder/volume/drivers/emc/scaleio.py
+++ b/cinder/volume/drivers/emc/scaleio.py
@@ -835,7 +835,7 @@ class ScaleIODriver(driver.VolumeDriver):
             verify=verify_cert)
         response = r.json()
         LOG.info(_LI("Query capacity stats response: %s."), response)
-        for res in response.itervalues():
+        for res in response.values():
             capacityInUse = res['capacityInUseInKb']
             capacityLimit = res['capacityLimitInKb']
             total_capacity_gb = capacityLimit / units.Mi
diff --git a/cinder/volume/drivers/ibm/storwize_svc/__init__.py b/cinder/volume/drivers/ibm/storwize_svc/__init__.py
index 5bd324f3a..ff14e7ee1 100644
--- a/cinder/volume/drivers/ibm/storwize_svc/__init__.py
+++ b/cinder/volume/drivers/ibm/storwize_svc/__init__.py
@@ -394,7 +394,7 @@ class StorwizeSVCDriver(san.SanDriver):
         # Get preferred node and other nodes in I/O group
         preferred_node_entry = None
         io_group_nodes = []
-        for node in self._state['storage_nodes'].itervalues():
+        for node in self._state['storage_nodes'].values():
             if vol_opts['protocol'] not in node['enabled_protocols']:
                 continue
             if node['id'] == preferred_node:
@@ -454,7 +454,7 @@ class StorwizeSVCDriver(san.SanDriver):
                 LOG.error(msg)
                 raise exception.VolumeBackendAPIException(data=msg)
             else:
-                for node in self._state['storage_nodes'].itervalues():
+                for node in self._state['storage_nodes'].values():
                     conn_wwpns.extend(node['WWPN'])
                 if not vol_opts['multipath']:
diff --git a/cinder/volume/throttling.py b/cinder/volume/throttling.py
index 860aba99b..95a8c17d6 100644
--- a/cinder/volume/throttling.py
+++ b/cinder/volume/throttling.py
@@ -87,7 +87,7 @@ class BlkioCgroup(Throttle):
                             'device \'%(device)s\'.'), {'device': dev})
     def _set_limits(self, rw, devs):
-        total = sum(devs.itervalues())
+        total = sum(devs.values())
         for dev in devs:
             self._limit_bps(rw, dev, self.bps_limit * devs[dev] / total)
-- 
2.45.2
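
For reviewers porting similar call sites, here is a minimal standalone
sketch of the behaviour this change relies on. The snippet and its sample
dictionary are illustrative only and are not part of the patch above:
dict.values() is iterable on both Python 2 (a list) and Python 3 (a view),
so plain for loops need no further adjustment, but next() requires an
iterator, which is why call sites like the datera.py hunk also wrap the
result in iter().

    # Illustrative sketch only; the dictionary contents are invented.
    targets = {'tid-1': {'ids': [{'id': 'iqn.2015-06.example:target'}]}}

    # dict.values() is iterable on Python 2 (list) and Python 3 (view),
    # so replacing itervalues() with values() keeps loops working as-is.
    for target in targets.values():
        print(target['ids'][0]['id'])

    # next() needs an iterator, not a list or a view, so code that used
    # next(d.itervalues()) needs an explicit iter() around d.values().
    first = next(iter(targets.values()))
    print(first['ids'][0]['id'])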