From d499f57d25cdb0c835746ed88cbdcfbf3c1757e3 Mon Sep 17 00:00:00 2001
From: Mike Mason
Date: Fri, 17 Oct 2014 10:46:26 +0000
Subject: [PATCH] Amend unused variables to assist pylint testing

Amending unused variables with an underscore prefix to prevent them
being picked up in pylint testing, for consistency, and for general
housekeeping. A change to pylintrc is also required to enforce the rule.

Closes-Bug: #1268062
Change-Id: I80c2cbdc52d6f37823fae90d0096836166412643
---
 cinder/backup/drivers/swift.py                |  4 +-
 cinder/brick/initiator/linuxfc.py             |  6 +--
 cinder/brick/initiator/linuxscsi.py           | 10 ++---
 cinder/brick/iscsi/iscsi.py                   | 12 ++---
 cinder/brick/local_dev/lvm.py                 | 32 +++++++-------
 cinder/brick/remotefs/remotefs.py             |  2 +-
 cinder/common/config.py                       |  2 +-
 cinder/db/sqlalchemy/api.py                   |  9 ++--
 cinder/image/glance.py                        |  4 +-
 cinder/image/image_utils.py                   |  4 +-
 cinder/tests/api/openstack/test_wsgi.py       | 19 ++++----
 cinder/tests/brick/test_brick_connector.py    |  4 +-
 cinder/tests/image/test_glance.py             |  4 +-
 cinder/tests/test_backup_ceph.py              |  8 ++--
 cinder/tests/test_backup_swift.py             |  2 +-
 cinder/tests/test_glusterfs.py                |  6 +--
 cinder/tests/test_gpfs.py                     |  6 +--
 cinder/tests/test_migrations.py               | 44 +++++++++----------
 cinder/tests/test_quota.py                    |  2 +-
 cinder/tests/test_storwize_svc.py             | 30 ++++++-------
 cinder/tests/test_zadara.py                   |  6 +--
 cinder/volume/drivers/block_device.py         |  4 +-
 cinder/volume/drivers/hds/hds.py              |  2 +-
 cinder/volume/drivers/netapp/iscsi.py         | 16 +++----
 cinder/volume/drivers/netapp/nfs.py           |  7 +--
 cinder/volume/drivers/nfs.py                  |  2 +-
 cinder/volume/drivers/rbd.py                  | 16 +++----
 .../volume/drivers/san/hp/hp_3par_common.py   |  6 +--
 cinder/volume/drivers/san/hp/hp_3par_fc.py    |  4 +-
 cinder/volume/drivers/sheepdog.py             |  2 +-
 cinder/volume/drivers/solidfire.py            |  6 +--
 .../volume/drivers/vmware/read_write_util.py  |  4 +-
 cinder/volume/drivers/vmware/vmdk.py          | 10 ++---
 cinder/volume/drivers/zadara.py               |  2 +-
 pylintrc                                      |  4 ++
 35 files changed, 153 insertions(+), 148 deletions(-)

diff --git a/cinder/backup/drivers/swift.py b/cinder/backup/drivers/swift.py
index 1c1035ef2..fdee8c88b 100644
--- a/cinder/backup/drivers/swift.py
+++ b/cinder/backup/drivers/swift.py
@@ -248,7 +248,7 @@ class SwiftBackupDriver(BackupDriver):
         LOG.debug('_read_metadata started, container name: %(container)s, '
                   'metadata filename: %(filename)s' %
                   {'container': container, 'filename': filename})
-        (resp, body) = self.conn.get_object(container, filename)
+        (_resp, body) = self.conn.get_object(container, filename)
         metadata = json.loads(body)
         LOG.debug('_read_metadata finished (%s)' % metadata)
         return metadata
@@ -428,7 +428,7 @@ class SwiftBackupDriver(BackupDriver):
                       'volume_id': volume_id,
                   })
         try:
-            (resp, body) = self.conn.get_object(container, object_name)
+            (_resp, body) = self.conn.get_object(container, object_name)
         except socket.error as err:
             raise exception.SwiftConnectionFailed(reason=err)
         compression_algorithm = metadata_object[object_name]['compression']
diff --git a/cinder/brick/initiator/linuxfc.py b/cinder/brick/initiator/linuxfc.py
index 391f747b6..edd9e7c52 100644
--- a/cinder/brick/initiator/linuxfc.py
+++ b/cinder/brick/initiator/linuxfc.py
@@ -39,9 +39,9 @@ class LinuxFibreChannel(linuxscsi.LinuxSCSI):
         """Get the Fibre Channel HBA information."""
         out = None
         try:
-            out, err = self._execute('systool', '-c', 'fc_host', '-v',
-                                     run_as_root=True,
-                                     root_helper=self._root_helper)
+            out, _err = self._execute('systool', '-c', 'fc_host', '-v',
+                                      run_as_root=True,
+                                      root_helper=self._root_helper)
         except putils.ProcessExecutionError as exc:
             # This handles the case where rootwrap is used
             # and systool is not installed
diff --git a/cinder/brick/initiator/linuxscsi.py b/cinder/brick/initiator/linuxscsi.py
index 1b3b87390..9ee74ae3f 100644
--- a/cinder/brick/initiator/linuxscsi.py
+++ b/cinder/brick/initiator/linuxscsi.py
@@ -65,8 +65,8 @@ class LinuxSCSI(executor.Executor):
         self.echo_scsi_command(path, "1")

     def get_device_info(self, device):
-        (out, err) = self._execute('sg_scan', device, run_as_root=True,
-                                   root_helper=self._root_helper)
+        (out, _err) = self._execute('sg_scan', device, run_as_root=True,
+                                    root_helper=self._root_helper)
         dev_info = {'device': device, 'host': None,
                     'channel': None, 'id': None, 'lun': None}
         if out:
@@ -135,9 +135,9 @@ class LinuxSCSI(executor.Executor):
         devices = []
         out = None
         try:
-            (out, err) = self._execute('multipath', '-l', device,
-                                       run_as_root=True,
-                                       root_helper=self._root_helper)
+            (out, _err) = self._execute('multipath', '-l', device,
+                                        run_as_root=True,
+                                        root_helper=self._root_helper)
         except putils.ProcessExecutionError as exc:
             LOG.warn(_("multipath call failed exit (%(code)s)")
                      % {'code': exc.exit_code})
diff --git a/cinder/brick/iscsi/iscsi.py b/cinder/brick/iscsi/iscsi.py
index e94a3438e..4703be835 100644
--- a/cinder/brick/iscsi/iscsi.py
+++ b/cinder/brick/iscsi/iscsi.py
@@ -104,7 +104,7 @@ class TgtAdm(TargetAdmin):
         self.volumes_dir = volumes_dir

     def _get_target(self, iqn):
-        (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
+        (out, _err) = self._execute('tgt-admin', '--show', run_as_root=True)
         lines = out.split('\n')
         for line in lines:
             if iqn in line:
@@ -119,7 +119,7 @@ class TgtAdm(TargetAdmin):
         capture = False
         target_info = []

-        (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
+        (out, _err) = self._execute('tgt-admin', '--show', run_as_root=True)
         lines = out.split('\n')

         for line in lines:
@@ -478,9 +478,9 @@ class LioAdm(TargetAdmin):
             raise

     def _get_target(self, iqn):
-        (out, err) = self._execute('cinder-rtstool',
-                                   'get-targets',
-                                   run_as_root=True)
+        (out, _err) = self._execute('cinder-rtstool',
+                                    'get-targets',
+                                    run_as_root=True)
         lines = out.split('\n')
         for line in lines:
             if iqn in line:
@@ -561,7 +561,7 @@ class LioAdm(TargetAdmin):
     def initialize_connection(self, volume, connector):
         volume_iqn = volume['provider_location'].split(' ')[1]

-        (auth_method, auth_user, auth_pass) = \
+        (_auth_method, auth_user, auth_pass) = \
             volume['provider_auth'].split(' ', 3)

         # Add initiator iqns to target ACL
diff --git a/cinder/brick/local_dev/lvm.py b/cinder/brick/local_dev/lvm.py
index 04174c2f0..205c2db03 100644
--- a/cinder/brick/local_dev/lvm.py
+++ b/cinder/brick/local_dev/lvm.py
@@ -101,7 +101,7 @@ class LVM(executor.Executor):
         """
         exists = False
-        (out, err) = self._execute(
+        (out, _err) = self._execute(
             'env', 'LC_ALL=C', 'vgs', '--noheadings', '-o', 'name',
             self.vg_name, root_helper=self._root_helper, run_as_root=True)
@@ -117,8 +117,8 @@ class LVM(executor.Executor):
         self._execute(*cmd, root_helper=self._root_helper, run_as_root=True)

     def _get_vg_uuid(self):
-        (out, err) = self._execute('env', 'LC_ALL=C', 'vgs', '--noheadings',
-                                   '-o uuid', self.vg_name)
+        (out, _err) = self._execute('env', 'LC_ALL=C', 'vgs', '--noheadings',
+                                    '-o uuid', self.vg_name)
         if out is not None:
             return out.split()
         else:
@@ -171,9 +171,9 @@ class LVM(executor.Executor):
         """
         cmd = ['env', 'LC_ALL=C', 'vgs', '--version']
-        (out, err) = putils.execute(*cmd,
-                                    root_helper=root_helper,
-                                    run_as_root=True)
+        (out, _err) = putils.execute(*cmd,
+                                     root_helper=root_helper,
+                                     run_as_root=True)
         lines = out.split('\n')

         for line in lines:
@@ -249,9 +249,9 @@ class LVM(executor.Executor):
             cmd.append(vg_name)

         lvs_start = time.time()
-        (out, err) = putils.execute(*cmd,
-                                    root_helper=root_helper,
-                                    run_as_root=True)
+        (out, _err) = putils.execute(*cmd,
+                                     root_helper=root_helper,
+                                     run_as_root=True)
         total_time = time.time() - lvs_start
         if total_time > 60:
             LOG.warning(_('Took %s seconds to get logical volumes.'),
@@ -300,9 +300,9 @@ class LVM(executor.Executor):
                '--separator', ':',
                '--nosuffix']

-        (out, err) = putils.execute(*cmd,
-                                    root_helper=root_helper,
-                                    run_as_root=True)
+        (out, _err) = putils.execute(*cmd,
+                                     root_helper=root_helper,
+                                     run_as_root=True)

         pvs = out.split()
         if vg_name is not None:
@@ -344,9 +344,9 @@ class LVM(executor.Executor):
             cmd.append(vg_name)

         start_vgs = time.time()
-        (out, err) = putils.execute(*cmd,
-                                    root_helper=root_helper,
-                                    run_as_root=True)
+        (out, _err) = putils.execute(*cmd,
+                                     root_helper=root_helper,
+                                     run_as_root=True)
         total_time = time.time() - start_vgs
         if total_time > 60:
             LOG.warning(_('Took %s seconds to get volume groups.'), total_time)
@@ -618,7 +618,7 @@ class LVM(executor.Executor):
                       run_as_root=True)

     def lv_has_snapshot(self, name):
-        out, err = self._execute(
+        out, _err = self._execute(
             'env', 'LC_ALL=C', 'lvdisplay', '--noheading', '-C', '-o', 'Attr',
             '%s/%s' % (self.vg_name, name), root_helper=self._root_helper,
             run_as_root=True)
diff --git a/cinder/brick/remotefs/remotefs.py b/cinder/brick/remotefs/remotefs.py
index b20ebb96a..ce60e2c4d 100644
--- a/cinder/brick/remotefs/remotefs.py
+++ b/cinder/brick/remotefs/remotefs.py
@@ -77,7 +77,7 @@ class RemoteFsClient(object):
                                 self._get_hash_str(device_name))

     def _read_mounts(self):
-        (out, err) = self._execute('mount', check_exit_code=0)
+        (out, _err) = self._execute('mount', check_exit_code=0)
         lines = out.split('\n')
         mounts = {}
         for line in lines:
diff --git a/cinder/common/config.py b/cinder/common/config.py
index 0327712b4..a78640d20 100644
--- a/cinder/common/config.py
+++ b/cinder/common/config.py
@@ -47,7 +47,7 @@ def _get_my_ip():
     try:
         csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
         csock.connect(('8.8.8.8', 80))
-        (addr, port) = csock.getsockname()
+        (addr, _port) = csock.getsockname()
         csock.close()
         return addr
     except socket.error:
diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index ff3ed181f..da249da26 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -252,7 +252,7 @@ def model_query(context, *args, **kwargs):

 def _sync_volumes(context, project_id, session, volume_type_id=None,
                   volume_type_name=None):
-    (volumes, gigs) = _volume_data_get_for_project(
+    (volumes, _gigs) = _volume_data_get_for_project(
         context, project_id, volume_type_id=volume_type_id, session=session)
     key = 'volumes'
     if volume_type_name:
@@ -262,7 +262,7 @@ def _sync_volumes(context, project_id, session, volume_type_id=None,

 def _sync_snapshots(context, project_id, session, volume_type_id=None,
                     volume_type_name=None):
-    (snapshots, gigs) = _snapshot_data_get_for_project(
+    (snapshots, _gigs) = _snapshot_data_get_for_project(
         context, project_id, volume_type_id=volume_type_id, session=session)
     key = 'snapshots'
     if volume_type_name:
@@ -272,7 +272,7 @@ def _sync_snapshots(context, project_id, session, volume_type_id=None,

 def _sync_backups(context, project_id, session, volume_type_id=None,
                   volume_type_name=None):
-    (backups, gigs) = _backup_data_get_for_project(
+    (backups, _gigs) = _backup_data_get_for_project(
         context, project_id, volume_type_id=volume_type_id, session=session)
     key = 'backups'
     return {key: backups}
@@ -3026,7 +3026,8 @@ def consistencygroup_create(context, values):
 def consistencygroup_update(context, consistencygroup_id, values):
     session = get_session()
     with session.begin():
-        result = model_query(context, models.ConsistencyGroup, project_only=True).\
+        result = model_query(context, models.ConsistencyGroup,
+                             project_only=True).\
             filter_by(id=consistencygroup_id).\
             first()
diff --git a/cinder/image/glance.py b/cinder/image/glance.py
index 19876ed19..76a4788fd 100644
--- a/cinder/image/glance.py
+++ b/cinder/image/glance.py
@@ -466,14 +466,14 @@ def _remove_read_only(image_meta):

 def _reraise_translated_image_exception(image_id):
     """Transform the exception for the image but keep its traceback intact."""
-    exc_type, exc_value, exc_trace = sys.exc_info()
+    _exc_type, exc_value, exc_trace = sys.exc_info()
     new_exc = _translate_image_exception(image_id, exc_value)
     raise new_exc, None, exc_trace


 def _reraise_translated_exception():
     """Transform the exception but keep its traceback intact."""
-    exc_type, exc_value, exc_trace = sys.exc_info()
+    _exc_type, exc_value, exc_trace = sys.exc_info()
     new_exc = _translate_plain_exception(exc_value)
     raise new_exc, None, exc_trace
diff --git a/cinder/image/image_utils.py b/cinder/image/image_utils.py
index a3bea1a22..3d280a3ca 100644
--- a/cinder/image/image_utils.py
+++ b/cinder/image/image_utils.py
@@ -57,7 +57,7 @@ def qemu_img_info(path, run_as_root=True):
     cmd = ('env', 'LC_ALL=C', 'qemu-img', 'info', path)
     if os.name == 'nt':
         cmd = cmd[2:]
-    out, err = utils.execute(*cmd, run_as_root=run_as_root)
+    out, _err = utils.execute(*cmd, run_as_root=run_as_root)
     return imageutils.QemuImgInfo(out)


@@ -363,7 +363,7 @@ def fix_vhd_chain(vhd_chain):

 def get_vhd_size(vhd_path):
-    out, err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v')
+    out, _err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v')
     return int(out)
diff --git a/cinder/tests/api/openstack/test_wsgi.py b/cinder/tests/api/openstack/test_wsgi.py
index 254306439..67e57e717 100644
--- a/cinder/tests/api/openstack/test_wsgi.py
+++ b/cinder/tests/api/openstack/test_wsgi.py
@@ -336,7 +336,7 @@ class ResourceTest(test.TestCase):
         controller = Controller()
         resource = wsgi.Resource(controller)
-        method, extensions = resource.get_method(None, 'index', None, '')
+        method, _extensions = resource.get_method(None, 'index', None, '')
         actual = resource.dispatch(method, None, {'pants': 'off'})
         expected = 'off'
         self.assertEqual(actual, expected)
@@ -359,9 +359,9 @@ class ResourceTest(test.TestCase):
         controller = Controller()
         resource = wsgi.Resource(controller)
-        method, extensions = resource.get_method(None, 'action',
-                                                 'application/json',
-                                                 '{"fooAction": true}')
+        method, _extensions = resource.get_method(None, 'action',
+                                                  'application/json',
+                                                  '{"fooAction": true}')
         self.assertEqual(controller._action_foo, method)

     def test_get_method_action_xml(self):
@@ -372,9 +372,8 @@ class ResourceTest(test.TestCase):
         controller = Controller()
         resource = wsgi.Resource(controller)
-        method, extensions = resource.get_method(None, 'action',
-                                                 'application/xml',
-                                                 '<fooAction>true</fooAction>')
+        method, _extensions = resource.get_method(
+            None, 'action', 'application/xml', '<fooAction>true</fooAction>')
         self.assertEqual(controller._action_foo, method)

     def test_get_method_action_bad_body(self):
@@ -407,9 +406,9 @@ class ResourceTest(test.TestCase):
         controller = Controller()
         resource = wsgi.Resource(controller)
-        method, extensions = resource.get_method(None, 'action',
-                                                 'application/xml',
-                                                 'truetruepool-00000001 """
-        for (_vol_name, params) in RUNTIME_VARS['volumes']:
            if params['cg-name'] == cg_name:
                snapshots = params['snapshots']
        resp = header
diff --git a/cinder/volume/drivers/block_device.py b/cinder/volume/drivers/block_device.py
index 35e16bfbf..95f5b4fc9 100644
--- a/cinder/volume/drivers/block_device.py
+++ b/cinder/volume/drivers/block_device.py
@@ -190,8 +190,8 @@ class BlockDeviceDriver(driver.ISCSIDriver):
         return used_devices

     def _get_device_size(self, dev_path):
-        out, err = self._execute('blockdev', '--getsz', dev_path,
-                                 run_as_root=True)
+        out, _err = self._execute('blockdev', '--getsz', dev_path,
+                                  run_as_root=True)
         size_in_m = int(out)
         return size_in_m / 2048
diff --git a/cinder/volume/drivers/hds/hds.py b/cinder/volume/drivers/hds/hds.py
index f08498f54..0e9c3b5df 100644
--- a/cinder/volume/drivers/hds/hds.py
+++ b/cinder/volume/drivers/hds/hds.py
@@ -385,7 +385,7 @@ class HUSDriver(driver.ISCSIDriver):
         info = _loc_info(prov_loc)
         (arid, lun) = info['id_lu']
         if 'tgt' in info.keys():  # connected?
-            (_portal, iqn, loc, ctl, port) = info['tgt']
+            (_portal, iqn, _loc, ctl, port) = info['tgt']
             self.bend.del_iscsi_conn(self.config['hus_cmd'],
                                      HDS_VERSION,
                                      self.config['mgmt_ip0'],
diff --git a/cinder/volume/drivers/netapp/iscsi.py b/cinder/volume/drivers/netapp/iscsi.py
index c518e4e4b..80b4e0a9c 100644
--- a/cinder/volume/drivers/netapp/iscsi.py
+++ b/cinder/volume/drivers/netapp/iscsi.py
@@ -422,7 +422,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
         for lun in api_luns:
             meta_dict = self._create_lun_meta(lun)
             path = lun.get_child_content('path')
-            (rest, splitter, name) = path.rpartition('/')
+            (_rest, _splitter, name) = path.rpartition('/')
             handle = self._create_lun_handle(meta_dict)
             size = lun.get_child_content('size')
             discovered_lun = NetAppLun(handle, name,
@@ -460,7 +460,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
             msg_fmt = {'code': code, 'message': message}
             exc_info = sys.exc_info()
             LOG.warn(msg % msg_fmt)
-            (igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator)
+            (_igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator)
             if lun_id is not None:
                 return lun_id
             else:
@@ -468,7 +468,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):

     def _unmap_lun(self, path, initiator):
         """Unmaps a lun from given initiator."""
-        (igroup_name, lun_id) = self._find_mapped_lun_igroup(path, initiator)
+        (igroup_name, _lun_id) = self._find_mapped_lun_igroup(path, initiator)
         lun_unmap = NaElement.create_node_with_children(
             'lun-unmap',
             **{'path': path, 'initiator-group': igroup_name})
@@ -988,7 +988,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
         zbc = block_count
         if z_calls == 0:
             z_calls = 1
-        for call in range(0, z_calls):
+        for _call in range(0, z_calls):
             if zbc > z_limit:
                 block_count = z_limit
                 zbc -= z_limit
@@ -1003,7 +1003,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
             block_ranges = NaElement("block-ranges")
             segments = int(math.ceil(block_count / float(bc_limit)))
             bc = block_count
-            for segment in range(0, segments):
+            for _segment in range(0, segments):
                 if bc > bc_limit:
                     block_count = bc_limit
                     bc -= bc_limit
@@ -1353,7 +1353,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
         """Clone LUN with the given handle to the new name."""
         metadata = self._get_lun_attr(name, 'metadata')
         path = metadata['Path']
-        (parent, splitter, name) = path.rpartition('/')
+        (parent, _splitter, name) = path.rpartition('/')
         clone_path = '%s/%s' % (parent, new_name)
         # zAPI can only handle 2^24 blocks per range
         bc_limit = 2 ** 24  # 8GB
@@ -1364,7 +1364,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
         zbc = block_count
         if z_calls == 0:
             z_calls = 1
-        for call in range(0, z_calls):
+        for _call in range(0, z_calls):
             if zbc > z_limit:
                 block_count = z_limit
                 zbc -= z_limit
@@ -1380,7 +1380,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
             bc_limit = 2 ** 24  # 8GB
             segments = int(math.ceil(block_count / float(bc_limit)))
             bc = block_count
-            for segment in range(0, segments):
+            for _segment in range(0, segments):
                 if bc > bc_limit:
                     block_count = bc_limit
                     bc -= bc_limit
diff --git a/cinder/volume/drivers/netapp/nfs.py b/cinder/volume/drivers/netapp/nfs.py
index 4e6131e6d..4140e49fc 100644
--- a/cinder/volume/drivers/netapp/nfs.py
+++ b/cinder/volume/drivers/netapp/nfs.py
@@ -300,7 +300,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
             self.configuration.thres_avl_size_perc_stop
         for share in getattr(self, '_mounted_shares', []):
             try:
-                total_size, total_avl, total_alc =\
+                total_size, total_avl, _total_alc =\
                     self._get_capacity_info(share)
                 avl_percent = int((total_avl / total_size) * 100)
                 if avl_percent <= thres_size_perc_start:
@@ -636,7 +636,8 @@ class NetAppNFSDriver(nfs.NfsDriver):

     def _check_share_can_hold_size(self, share, size):
         """Checks if volume can hold image with size."""
-        tot_size, tot_available, tot_allocated = self._get_capacity_info(share)
+        _tot_size, tot_available, _tot_allocated = self._get_capacity_info(
+            share)
         if tot_available < size:
             msg = _("Container size smaller than required file size.")
             raise exception.VolumeDriverException(msg)
@@ -1415,7 +1416,7 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):

     def _clone_volume(self, volume_name, clone_name, volume_id, share=None):
         """Clones mounted volume with NetApp filer."""
-        (host_ip, export_path) = self._get_export_ip_path(volume_id, share)
+        (_host_ip, export_path) = self._get_export_ip_path(volume_id, share)
         storage_path = self._get_actual_path_for_export(export_path)
         target_path = '%s/%s' % (storage_path, clone_name)
         (clone_id, vol_uuid) = self._start_clone('%s/%s' % (storage_path,
diff --git a/cinder/volume/drivers/nfs.py b/cinder/volume/drivers/nfs.py
index cc993e277..d2aef63cc 100644
--- a/cinder/volume/drivers/nfs.py
+++ b/cinder/volume/drivers/nfs.py
@@ -168,7 +168,7 @@ class NfsDriver(remotefs.RemoteFSDriver):
         for nfs_share in self._mounted_shares:
             if not self._is_share_eligible(nfs_share, volume_size_in_gib):
                 continue
-            total_size, total_available, total_allocated = \
+            _total_size, _total_available, total_allocated = \
                 self._get_capacity_info(nfs_share)
             if target_share is not None:
                 if target_share_reserved > total_allocated:
diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py
index a55c1f666..2da6499f0 100644
--- a/cinder/volume/drivers/rbd.py
+++ b/cinder/volume/drivers/rbd.py
@@ -386,8 +386,8 @@ class RBDDriver(driver.VolumeDriver):
         """
         parent_volume = self.rbd.Image(client.ioctx, volume_name)
         try:
-            pool, parent, snap = self._get_clone_info(parent_volume,
-                                                      volume_name)
+            _pool, parent, _snap = self._get_clone_info(parent_volume,
+                                                        volume_name)
         finally:
             parent_volume.close()

@@ -440,8 +440,8 @@ class RBDDriver(driver.VolumeDriver):
         try:
             # First flatten source volume if required.
             if flatten_parent:
-                pool, parent, snap = self._get_clone_info(src_volume,
-                                                          src_name)
+                _pool, parent, snap = self._get_clone_info(src_volume,
+                                                           src_name)
                 # Flatten source volume
                 LOG.debug("flattening source volume %s" % (src_name))
                 src_volume.flatten()
@@ -639,9 +639,9 @@ class RBDDriver(driver.VolumeDriver):
                 raise exception.VolumeIsBusy(volume_name=volume_name)

             # Determine if this volume is itself a clone
-            pool, parent, parent_snap = self._get_clone_info(rbd_image,
-                                                             volume_name,
-                                                             clone_snap)
+            _pool, parent, parent_snap = self._get_clone_info(rbd_image,
+                                                              volume_name,
+                                                              clone_snap)
         finally:
             rbd_image.close()

@@ -780,7 +780,7 @@ class RBDDriver(driver.VolumeDriver):
         if image_location is None or not self._is_cloneable(
                 image_location, image_meta):
             return ({}, False)
-        prefix, pool, image, snapshot = self._parse_location(image_location)
+        _prefix, pool, image, snapshot = self._parse_location(image_location)
         self._clone(volume, pool, image, snapshot)
         self._resize(volume)
         return {'provider_location': None}, True
diff --git a/cinder/volume/drivers/san/hp/hp_3par_common.py b/cinder/volume/drivers/san/hp/hp_3par_common.py
index b93e204fd..c3388d246 100644
--- a/cinder/volume/drivers/san/hp/hp_3par_common.py
+++ b/cinder/volume/drivers/san/hp/hp_3par_common.py
@@ -1274,7 +1274,7 @@ class HP3PARCommon(object):

         type_id = volume.get('volume_type_id', None)

-        hp3par_keys, qos, volume_type, vvs_name = self.get_type_info(
+        hp3par_keys, qos, _volume_type, vvs_name = self.get_type_info(
             type_id)
         name = volume.get('display_name', None)
@@ -1633,7 +1633,7 @@ class HP3PARCommon(object):
                     " to %(new_cpg)s") %
                   {'volume_name': volume_name,
                    'old_cpg': old_cpg, 'new_cpg': new_cpg})
-        response, body = self.client.modifyVolume(
+        _response, body = self.client.modifyVolume(
             volume_name,
             {'action': 6,
              'tuneOperation': 1,
@@ -1696,7 +1696,7 @@ class HP3PARCommon(object):
             self.validate_persona(new_persona)

         if host is not None:
-            (host_type, host_id, host_cpg) = (
+            (host_type, host_id, _host_cpg) = (
                 host['capabilities']['location_info']).split(':')

             if not (host_type == 'HP3PARDriver'):
diff --git a/cinder/volume/drivers/san/hp/hp_3par_fc.py b/cinder/volume/drivers/san/hp/hp_3par_fc.py
index 1c5adadac..7d289604a 100644
--- a/cinder/volume/drivers/san/hp/hp_3par_fc.py
+++ b/cinder/volume/drivers/san/hp/hp_3par_fc.py
@@ -258,7 +258,7 @@ class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver):
             LOG.info(_("Need to remove FC Zone, building initiator "
                        "target map"))

-            target_wwns, init_targ_map, numPaths = \
+            target_wwns, init_targ_map, _numPaths = \
                 self._build_initiator_target_map(connector)

             info['data'] = {'target_wwn': target_wwns,
@@ -296,7 +296,7 @@ class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver):
                         init_targ_map[initiator] += fabric['target_port_wwn_list']
                         init_targ_map[initiator] = list(set(
                             init_targ_map[initiator]))
-                        for target in init_targ_map[initiator]:
+                        for _target in init_targ_map[initiator]:
                             numPaths += 1
                 target_wwns = list(set(target_wwns))
             else:
diff --git a/cinder/volume/drivers/sheepdog.py b/cinder/volume/drivers/sheepdog.py
index e05796ee5..2af9c4ccf 100644
--- a/cinder/volume/drivers/sheepdog.py
+++ b/cinder/volume/drivers/sheepdog.py
@@ -55,7 +55,7 @@ class SheepdogDriver(driver.VolumeDriver):
             #NOTE(francois-charlier) Since 0.24 'collie cluster info -r'
             #  gives short output, but for compatibility reason we won't
             #  use it and just check if 'running' is in the output.
-            (out, err) = self._execute('collie', 'cluster', 'info')
+            (out, _err) = self._execute('collie', 'cluster', 'info')
             if 'status: running' not in out:
                 exception_message = (_("Sheepdog is not working: %s") % out)
                 raise exception.VolumeBackendAPIException(
diff --git a/cinder/volume/drivers/solidfire.py b/cinder/volume/drivers/solidfire.py
index be56afb7c..aa8abbfa6 100644
--- a/cinder/volume/drivers/solidfire.py
+++ b/cinder/volume/drivers/solidfire.py
@@ -532,7 +532,7 @@ class SolidFireDriver(SanISCSIDriver):

     def create_cloned_volume(self, volume, src_vref):
         """Create a clone of an existing volume."""
-        (data, sfaccount, model) = self._do_clone_volume(
+        (_data, _sfaccount, model) = self._do_clone_volume(
             src_vref['id'],
             src_vref['project_id'],
             volume)
@@ -605,14 +605,14 @@ class SolidFireDriver(SanISCSIDriver):
            restore at which time we'll rework this appropriately.

         """
-        (data, sfaccount, model) = self._do_clone_volume(
+        (_data, _sfaccount, _model) = self._do_clone_volume(
             snapshot['volume_id'],
             snapshot['project_id'],
             snapshot)

     def create_volume_from_snapshot(self, volume, snapshot):
         """Create a volume from the specified snapshot."""
-        (data, sfaccount, model) = self._do_clone_volume(
+        (_data, _sfaccount, model) = self._do_clone_volume(
             snapshot['id'],
             snapshot['project_id'],
             volume)
diff --git a/cinder/volume/drivers/vmware/read_write_util.py b/cinder/volume/drivers/vmware/read_write_util.py
index a43489b67..718d1914c 100644
--- a/cinder/volume/drivers/vmware/read_write_util.py
+++ b/cinder/volume/drivers/vmware/read_write_util.py
@@ -148,7 +148,7 @@ class VMwareHTTPWriteFile(VMwareHTTPFile):
         param_list = {'dcPath': data_center_name, 'dsName': datastore_name}
         base_url = base_url + '?' + urllib.urlencode(param_list)
         _urlparse = urlparse.urlparse(base_url)
-        scheme, netloc, path, params, query, fragment = _urlparse
+        scheme, netloc, path, _params, query, _fragment = _urlparse
         if scheme == 'http':
             conn = httplib.HTTPConnection(netloc)
         elif scheme == 'https':
@@ -211,7 +211,7 @@ class VMwareHTTPWriteVmdk(VMwareHTTPFile):
         # Prepare the http connection to the vmdk url
         cookies = session.vim.client.options.transport.cookiejar
         _urlparse = urlparse.urlparse(url)
-        scheme, netloc, path, params, query, fragment = _urlparse
+        scheme, netloc, path, _params, query, _fragment = _urlparse
         if scheme == 'http':
             conn = httplib.HTTPConnection(netloc)
         elif scheme == 'https':
diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py
index a5c77efbe..50f92080c 100644
--- a/cinder/volume/drivers/vmware/vmdk.py
+++ b/cinder/volume/drivers/vmware/vmdk.py
@@ -1127,7 +1127,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):

         if disk_conversion:
             # Clone the temporary backing for disk type conversion.
-            (host, rp, folder, summary) = self._select_ds_for_volume(
+            (host, _rp, _folder, summary) = self._select_ds_for_volume(
                 volume)
             datastore = summary.datastore
             LOG.debug("Cloning temporary backing: %s for disk type "
@@ -1163,7 +1163,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         """
         try:
             # find host in which to create the volume
-            (host, rp, folder, summary) = self._select_ds_for_volume(volume)
+            (_host, rp, folder, summary) = self._select_ds_for_volume(volume)
         except error_util.VimException as excep:
             err_msg = (_("Exception in _select_ds_for_volume: "
                          "%s."), excep)
@@ -1646,7 +1646,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                   {'name': name,
                    'path': tmp_file_path})

-        (host, rp, folder, summary) = self._select_ds_for_volume(volume)
+        (_host, rp, folder, summary) = self._select_ds_for_volume(volume)

         LOG.debug("Selected datastore: %(ds)s for backing: %(name)s.",
                   {'ds': summary.name, 'name': name})
@@ -1708,7 +1708,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         renamed = False
         try:
             # Find datastore for clone.
-            (host, rp, folder, summary) = self._select_ds_for_volume(volume)
+            (_host, _rp, _folder, summary) = self._select_ds_for_volume(volume)
             datastore = summary.datastore
             disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
@@ -1981,7 +1981,7 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         datastore = None
         if not clone_type == volumeops.LINKED_CLONE_TYPE:
             # Pick a datastore where to create the full clone under any host
-            (host, rp, folder, summary) = self._select_ds_for_volume(volume)
+            (_host, _rp, _folder, summary) = self._select_ds_for_volume(volume)
             datastore = summary.datastore
         clone = self.volumeops.clone_backing(volume['name'], backing,
                                              snapshot, clone_type, datastore)
diff --git a/cinder/volume/drivers/zadara.py b/cinder/volume/drivers/zadara.py
index c2385c205..4a10ea75b 100644
--- a/cinder/volume/drivers/zadara.py
+++ b/cinder/volume/drivers/zadara.py
@@ -323,7 +323,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):

     def _get_vpsa_volume_name(self, name):
         """Return VPSA's name for the volume."""
-        (vol_name, size) = self._get_vpsa_volume_name_and_size(name)
+        (vol_name, _size) = self._get_vpsa_volume_name_and_size(name)
         return vol_name

     def _get_volume_cg_name(self, name):
diff --git a/pylintrc b/pylintrc
index a7021ded5..b8ffa2ab3 100644
--- a/pylintrc
+++ b/pylintrc
@@ -29,3 +29,7 @@ no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
 max-public-methods=100
 min-public-methods=0
 max-args=6
+
+[Variables]
+
+dummy-variables-rgx=_
-- 
2.45.2
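
Illustration only, not part of the patch: a minimal sketch of the convention
this change enforces. The function and variable names below are hypothetical;
the assumption is that with dummy-variables-rgx=_ in pylintrc, pylint treats
local names beginning with an underscore as intentionally unused and so does
not report them as unused variables.

    # Hypothetical example of the renaming pattern used throughout this patch.

    # Before: pylint reports W0612 (unused-variable) for 'err'.
    def get_version_before(execute):
        out, err = execute('lvm', 'version')  # 'err' is never used
        return out

    # After: the underscore prefix marks 'err' as intentionally unused, so a
    # pylintrc containing 'dummy-variables-rgx=_' should no longer flag it.
    def get_version_after(execute):
        out, _err = execute('lvm', 'version')
        return out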