From: Mike Mason <mikemason010@gmail.com> Date: Fri, 17 Oct 2014 10:46:26 +0000 (+0000) Subject: Amend unused variables to assist pylint testing X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=d499f57d25cdb0c835746ed88cbdcfbf3c1757e3;p=openstack-build%2Fcinder-build.git Amend unused variables to assist pylint testing Amending unused variables with a prefix of an underscore to prevent them being picked up in pylint testing, consistency, and for general housekeeping. Change to pylintrc also required to enforce the rule. Closes-bug #1268062 Change-Id: I80c2cbdc52d6f37823fae90d0096836166412643 --- diff --git a/cinder/backup/drivers/swift.py b/cinder/backup/drivers/swift.py index 1c1035ef2..fdee8c88b 100644 --- a/cinder/backup/drivers/swift.py +++ b/cinder/backup/drivers/swift.py @@ -248,7 +248,7 @@ class SwiftBackupDriver(BackupDriver): LOG.debug('_read_metadata started, container name: %(container)s, ' 'metadata filename: %(filename)s' % {'container': container, 'filename': filename}) - (resp, body) = self.conn.get_object(container, filename) + (_resp, body) = self.conn.get_object(container, filename) metadata = json.loads(body) LOG.debug('_read_metadata finished (%s)' % metadata) return metadata @@ -428,7 +428,7 @@ class SwiftBackupDriver(BackupDriver): 'volume_id': volume_id, }) try: - (resp, body) = self.conn.get_object(container, object_name) + (_resp, body) = self.conn.get_object(container, object_name) except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) compression_algorithm = metadata_object[object_name]['compression'] diff --git a/cinder/brick/initiator/linuxfc.py b/cinder/brick/initiator/linuxfc.py index 391f747b6..edd9e7c52 100644 --- a/cinder/brick/initiator/linuxfc.py +++ b/cinder/brick/initiator/linuxfc.py @@ -39,9 +39,9 @@ class LinuxFibreChannel(linuxscsi.LinuxSCSI): """Get the Fibre Channel HBA information.""" out = None try: - out, err = self._execute('systool', '-c', 'fc_host', '-v', - run_as_root=True, 
- root_helper=self._root_helper) + out, _err = self._execute('systool', '-c', 'fc_host', '-v', + run_as_root=True, + root_helper=self._root_helper) except putils.ProcessExecutionError as exc: # This handles the case where rootwrap is used # and systool is not installed diff --git a/cinder/brick/initiator/linuxscsi.py b/cinder/brick/initiator/linuxscsi.py index 1b3b87390..9ee74ae3f 100644 --- a/cinder/brick/initiator/linuxscsi.py +++ b/cinder/brick/initiator/linuxscsi.py @@ -65,8 +65,8 @@ class LinuxSCSI(executor.Executor): self.echo_scsi_command(path, "1") def get_device_info(self, device): - (out, err) = self._execute('sg_scan', device, run_as_root=True, - root_helper=self._root_helper) + (out, _err) = self._execute('sg_scan', device, run_as_root=True, + root_helper=self._root_helper) dev_info = {'device': device, 'host': None, 'channel': None, 'id': None, 'lun': None} if out: @@ -135,9 +135,9 @@ class LinuxSCSI(executor.Executor): devices = [] out = None try: - (out, err) = self._execute('multipath', '-l', device, - run_as_root=True, - root_helper=self._root_helper) + (out, _err) = self._execute('multipath', '-l', device, + run_as_root=True, + root_helper=self._root_helper) except putils.ProcessExecutionError as exc: LOG.warn(_("multipath call failed exit (%(code)s)") % {'code': exc.exit_code}) diff --git a/cinder/brick/iscsi/iscsi.py b/cinder/brick/iscsi/iscsi.py index e94a3438e..4703be835 100644 --- a/cinder/brick/iscsi/iscsi.py +++ b/cinder/brick/iscsi/iscsi.py @@ -104,7 +104,7 @@ class TgtAdm(TargetAdmin): self.volumes_dir = volumes_dir def _get_target(self, iqn): - (out, err) = self._execute('tgt-admin', '--show', run_as_root=True) + (out, _err) = self._execute('tgt-admin', '--show', run_as_root=True) lines = out.split('\n') for line in lines: if iqn in line: @@ -119,7 +119,7 @@ class TgtAdm(TargetAdmin): capture = False target_info = [] - (out, err) = self._execute('tgt-admin', '--show', run_as_root=True) + (out, _err) = self._execute('tgt-admin', '--show', 
run_as_root=True) lines = out.split('\n') for line in lines: @@ -478,9 +478,9 @@ class LioAdm(TargetAdmin): raise def _get_target(self, iqn): - (out, err) = self._execute('cinder-rtstool', - 'get-targets', - run_as_root=True) + (out, _err) = self._execute('cinder-rtstool', + 'get-targets', + run_as_root=True) lines = out.split('\n') for line in lines: if iqn in line: @@ -561,7 +561,7 @@ class LioAdm(TargetAdmin): def initialize_connection(self, volume, connector): volume_iqn = volume['provider_location'].split(' ')[1] - (auth_method, auth_user, auth_pass) = \ + (_auth_method, auth_user, auth_pass) = \ volume['provider_auth'].split(' ', 3) # Add initiator iqns to target ACL diff --git a/cinder/brick/local_dev/lvm.py b/cinder/brick/local_dev/lvm.py index 04174c2f0..205c2db03 100644 --- a/cinder/brick/local_dev/lvm.py +++ b/cinder/brick/local_dev/lvm.py @@ -101,7 +101,7 @@ class LVM(executor.Executor): """ exists = False - (out, err) = self._execute( + (out, _err) = self._execute( 'env', 'LC_ALL=C', 'vgs', '--noheadings', '-o', 'name', self.vg_name, root_helper=self._root_helper, run_as_root=True) @@ -117,8 +117,8 @@ class LVM(executor.Executor): self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) def _get_vg_uuid(self): - (out, err) = self._execute('env', 'LC_ALL=C', 'vgs', '--noheadings', - '-o uuid', self.vg_name) + (out, _err) = self._execute('env', 'LC_ALL=C', 'vgs', '--noheadings', + '-o uuid', self.vg_name) if out is not None: return out.split() else: @@ -171,9 +171,9 @@ class LVM(executor.Executor): """ cmd = ['env', 'LC_ALL=C', 'vgs', '--version'] - (out, err) = putils.execute(*cmd, - root_helper=root_helper, - run_as_root=True) + (out, _err) = putils.execute(*cmd, + root_helper=root_helper, + run_as_root=True) lines = out.split('\n') for line in lines: @@ -249,9 +249,9 @@ class LVM(executor.Executor): cmd.append(vg_name) lvs_start = time.time() - (out, err) = putils.execute(*cmd, - root_helper=root_helper, - run_as_root=True) + (out, _err) = 
putils.execute(*cmd, + root_helper=root_helper, + run_as_root=True) total_time = time.time() - lvs_start if total_time > 60: LOG.warning(_('Took %s seconds to get logical volumes.'), @@ -300,9 +300,9 @@ class LVM(executor.Executor): '--separator', ':', '--nosuffix'] - (out, err) = putils.execute(*cmd, - root_helper=root_helper, - run_as_root=True) + (out, _err) = putils.execute(*cmd, + root_helper=root_helper, + run_as_root=True) pvs = out.split() if vg_name is not None: @@ -344,9 +344,9 @@ class LVM(executor.Executor): cmd.append(vg_name) start_vgs = time.time() - (out, err) = putils.execute(*cmd, - root_helper=root_helper, - run_as_root=True) + (out, _err) = putils.execute(*cmd, + root_helper=root_helper, + run_as_root=True) total_time = time.time() - start_vgs if total_time > 60: LOG.warning(_('Took %s seconds to get volume groups.'), total_time) @@ -618,7 +618,7 @@ class LVM(executor.Executor): run_as_root=True) def lv_has_snapshot(self, name): - out, err = self._execute( + out, _err = self._execute( 'env', 'LC_ALL=C', 'lvdisplay', '--noheading', '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name), root_helper=self._root_helper, run_as_root=True) diff --git a/cinder/brick/remotefs/remotefs.py b/cinder/brick/remotefs/remotefs.py index b20ebb96a..ce60e2c4d 100644 --- a/cinder/brick/remotefs/remotefs.py +++ b/cinder/brick/remotefs/remotefs.py @@ -77,7 +77,7 @@ class RemoteFsClient(object): self._get_hash_str(device_name)) def _read_mounts(self): - (out, err) = self._execute('mount', check_exit_code=0) + (out, _err) = self._execute('mount', check_exit_code=0) lines = out.split('\n') mounts = {} for line in lines: diff --git a/cinder/common/config.py b/cinder/common/config.py index 0327712b4..a78640d20 100644 --- a/cinder/common/config.py +++ b/cinder/common/config.py @@ -47,7 +47,7 @@ def _get_my_ip(): try: csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) csock.connect(('8.8.8.8', 80)) - (addr, port) = csock.getsockname() + (addr, _port) = 
csock.getsockname() csock.close() return addr except socket.error: diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py index ff3ed181f..da249da26 100644 --- a/cinder/db/sqlalchemy/api.py +++ b/cinder/db/sqlalchemy/api.py @@ -252,7 +252,7 @@ def model_query(context, *args, **kwargs): def _sync_volumes(context, project_id, session, volume_type_id=None, volume_type_name=None): - (volumes, gigs) = _volume_data_get_for_project( + (volumes, _gigs) = _volume_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'volumes' if volume_type_name: @@ -262,7 +262,7 @@ def _sync_volumes(context, project_id, session, volume_type_id=None, def _sync_snapshots(context, project_id, session, volume_type_id=None, volume_type_name=None): - (snapshots, gigs) = _snapshot_data_get_for_project( + (snapshots, _gigs) = _snapshot_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'snapshots' if volume_type_name: @@ -272,7 +272,7 @@ def _sync_snapshots(context, project_id, session, volume_type_id=None, def _sync_backups(context, project_id, session, volume_type_id=None, volume_type_name=None): - (backups, gigs) = _backup_data_get_for_project( + (backups, _gigs) = _backup_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'backups' return {key: backups} @@ -3026,7 +3026,8 @@ def consistencygroup_create(context, values): def consistencygroup_update(context, consistencygroup_id, values): session = get_session() with session.begin(): - result = model_query(context, models.ConsistencyGroup, project_only=True).\ + result = model_query(context, models.ConsistencyGroup, + project_only=True).\ filter_by(id=consistencygroup_id).\ first() diff --git a/cinder/image/glance.py b/cinder/image/glance.py index 19876ed19..76a4788fd 100644 --- a/cinder/image/glance.py +++ b/cinder/image/glance.py @@ -466,14 +466,14 @@ def _remove_read_only(image_meta): def 
_reraise_translated_image_exception(image_id): """Transform the exception for the image but keep its traceback intact.""" - exc_type, exc_value, exc_trace = sys.exc_info() + _exc_type, exc_value, exc_trace = sys.exc_info() new_exc = _translate_image_exception(image_id, exc_value) raise new_exc, None, exc_trace def _reraise_translated_exception(): """Transform the exception but keep its traceback intact.""" - exc_type, exc_value, exc_trace = sys.exc_info() + _exc_type, exc_value, exc_trace = sys.exc_info() new_exc = _translate_plain_exception(exc_value) raise new_exc, None, exc_trace diff --git a/cinder/image/image_utils.py b/cinder/image/image_utils.py index a3bea1a22..3d280a3ca 100644 --- a/cinder/image/image_utils.py +++ b/cinder/image/image_utils.py @@ -57,7 +57,7 @@ def qemu_img_info(path, run_as_root=True): cmd = ('env', 'LC_ALL=C', 'qemu-img', 'info', path) if os.name == 'nt': cmd = cmd[2:] - out, err = utils.execute(*cmd, run_as_root=run_as_root) + out, _err = utils.execute(*cmd, run_as_root=run_as_root) return imageutils.QemuImgInfo(out) @@ -363,7 +363,7 @@ def fix_vhd_chain(vhd_chain): def get_vhd_size(vhd_path): - out, err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v') + out, _err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v') return int(out) diff --git a/cinder/tests/api/openstack/test_wsgi.py b/cinder/tests/api/openstack/test_wsgi.py index 254306439..67e57e717 100644 --- a/cinder/tests/api/openstack/test_wsgi.py +++ b/cinder/tests/api/openstack/test_wsgi.py @@ -336,7 +336,7 @@ class ResourceTest(test.TestCase): controller = Controller() resource = wsgi.Resource(controller) - method, extensions = resource.get_method(None, 'index', None, '') + method, _extensions = resource.get_method(None, 'index', None, '') actual = resource.dispatch(method, None, {'pants': 'off'}) expected = 'off' self.assertEqual(actual, expected) @@ -359,9 +359,9 @@ class ResourceTest(test.TestCase): controller = Controller() resource = 
wsgi.Resource(controller) - method, extensions = resource.get_method(None, 'action', - 'application/json', - '{"fooAction": true}') + method, _extensions = resource.get_method(None, 'action', + 'application/json', + '{"fooAction": true}') self.assertEqual(controller._action_foo, method) def test_get_method_action_xml(self): @@ -372,9 +372,8 @@ class ResourceTest(test.TestCase): controller = Controller() resource = wsgi.Resource(controller) - method, extensions = resource.get_method(None, 'action', - 'application/xml', - '<fooAction>true</fooAction>') + method, _extensions = resource.get_method( + None, 'action', 'application/xml', '<fooAction>true</fooAction>') self.assertEqual(controller._action_foo, method) def test_get_method_action_bad_body(self): @@ -407,9 +406,9 @@ class ResourceTest(test.TestCase): controller = Controller() resource = wsgi.Resource(controller) - method, extensions = resource.get_method(None, 'action', - 'application/xml', - '<fooAction>true</fooAction') + method, _extensions = resource.get_method(None, 'action', + 'application/xml', + '<fooAction>true</fooAction') self.assertEqual(controller.action, method) def test_get_action_args(self): diff --git a/cinder/tests/brick/test_brick_connector.py b/cinder/tests/brick/test_brick_connector.py index db87184a7..5f5978b99 100644 --- a/cinder/tests/brick/test_brick_connector.py +++ b/cinder/tests/brick/test_brick_connector.py @@ -553,14 +553,14 @@ class AoEConnectorTestCase(ConnectorTestCase): self.assertDictMatch(volume_info, expected_info) def test_connect_volume_could_not_discover_path(self): - aoe_device, aoe_path = self.connector._get_aoe_info( + _aoe_device, aoe_path = self.connector._get_aoe_info( self.connection_properties) number_of_calls = 4 self._mock_path_exists(aoe_path, [False] * (number_of_calls + 1)) self.mox.StubOutWithMock(self.connector, '_execute') - for i in xrange(number_of_calls): + for _i in xrange(number_of_calls): self.connector._execute('aoe-discover', run_as_root=True, 
root_helper='sudo', diff --git a/cinder/tests/image/test_glance.py b/cinder/tests/image/test_glance.py index e5f9ec138..b58037363 100644 --- a/cinder/tests/image/test_glance.py +++ b/cinder/tests/image/test_glance.py @@ -522,8 +522,8 @@ class TestGlanceImageService(test.TestCase): def test_glance_client_image_id(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] - (service, same_id) = glance.get_remote_image_service(self.context, - image_id) + (_service, same_id) = glance.get_remote_image_service(self.context, + image_id) self.assertEqual(same_id, image_id) def test_glance_client_image_ref(self): diff --git a/cinder/tests/test_backup_ceph.py b/cinder/tests/test_backup_ceph.py index 75fc6cc11..f0821fa31 100644 --- a/cinder/tests/test_backup_ceph.py +++ b/cinder/tests/test_backup_ceph.py @@ -177,7 +177,7 @@ class BackupCephTestCase(test.TestCase): # Create a file with some data in it. self.volume_file = tempfile.NamedTemporaryFile() self.addCleanup(self.volume_file.close) - for i in xrange(0, self.num_chunks): + for _i in xrange(0, self.num_chunks): data = os.urandom(self.chunk_size) self.checksum.update(data) self.volume_file.write(data) @@ -286,7 +286,7 @@ class BackupCephTestCase(test.TestCase): checksum = hashlib.sha256() test_file.seek(0) - for c in xrange(0, self.num_chunks): + for _c in xrange(0, self.num_chunks): checksum.update(test_file.read(self.chunk_size)) # Ensure the files are equal @@ -357,7 +357,7 @@ class BackupCephTestCase(test.TestCase): checksum = hashlib.sha256() test_file.seek(0) - for c in xrange(0, self.num_chunks): + for _c in xrange(0, self.num_chunks): checksum.update(test_file.read(self.chunk_size)) # Ensure the files are equal @@ -639,7 +639,7 @@ class BackupCephTestCase(test.TestCase): checksum = hashlib.sha256() test_file.seek(0) - for c in xrange(0, self.num_chunks): + for _c in xrange(0, self.num_chunks): checksum.update(test_file.read(self.chunk_size)) # Ensure the 
files are equal diff --git a/cinder/tests/test_backup_swift.py b/cinder/tests/test_backup_swift.py index 87d1e018c..1c2c48be6 100644 --- a/cinder/tests/test_backup_swift.py +++ b/cinder/tests/test_backup_swift.py @@ -80,7 +80,7 @@ class BackupSwiftTestCase(test.TestCase): self._create_volume_db_entry() self.volume_file = tempfile.NamedTemporaryFile() self.addCleanup(self.volume_file.close) - for i in xrange(0, 128): + for _i in xrange(0, 128): self.volume_file.write(os.urandom(1024)) def test_backup_swift_url(self): diff --git a/cinder/tests/test_glusterfs.py b/cinder/tests/test_glusterfs.py index 0f93e3f8d..1ec3c8b68 100644 --- a/cinder/tests/test_glusterfs.py +++ b/cinder/tests/test_glusterfs.py @@ -790,7 +790,7 @@ class GlusterFsDriverTestCase(test.TestCase): mock.patch.object(self._driver, '_load_shares_config'), mock.patch.object(self._driver, '_do_umount'), mock.patch.object(glusterfs, 'LOG') - ) as (mock_load_shares_config, mock_do_umount, mock_logger): + ) as (_mock_load_shares_config, mock_do_umount, mock_logger): mock_do_umount.side_effect = Exception() self._driver._unmount_shares() @@ -805,7 +805,7 @@ class GlusterFsDriverTestCase(test.TestCase): with contextlib.nested( mock.patch.object(self._driver, '_load_shares_config'), mock.patch.object(self._driver, '_do_umount') - ) as (mock_load_shares_config, mock_do_umount): + ) as (_mock_load_shares_config, mock_do_umount): self._driver._unmount_shares() self.assertTrue(mock_do_umount.called) @@ -819,7 +819,7 @@ class GlusterFsDriverTestCase(test.TestCase): with contextlib.nested( mock.patch.object(self._driver, '_load_shares_config'), mock.patch.object(self._driver, '_do_umount') - ) as (mock_load_shares_config, mock_do_umount): + ) as (_mock_load_shares_config, mock_do_umount): self._driver._unmount_shares() mock_do_umount.assert_any_call(True, diff --git a/cinder/tests/test_gpfs.py b/cinder/tests/test_gpfs.py index fec6ad85c..a4b0ada9b 100644 --- a/cinder/tests/test_gpfs.py +++ b/cinder/tests/test_gpfs.py 
@@ -1462,9 +1462,9 @@ class GPFSDriverTestCase(test.TestCase): volume_types.get_volume_type(ctxt, old_type_ref['id']) new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) - diff, equal = volume_types.volume_types_diff(ctxt, - old_type_ref['id'], - new_type_ref['id']) + diff, _equal = volume_types.volume_types_diff(ctxt, + old_type_ref['id'], + new_type_ref['id']) volume = {} volume['name'] = 'test' diff --git a/cinder/tests/test_migrations.py b/cinder/tests/test_migrations.py index e03caa62d..fef77e34f 100644 --- a/cinder/tests/test_migrations.py +++ b/cinder/tests/test_migrations.py @@ -237,7 +237,7 @@ class TestMigrations(test.TestCase): Walks all version scripts for each tested database, ensuring that there are no errors in the version scripts for each engine """ - for key, engine in self.engines.items(): + for _key, engine in self.engines.items(): self._walk_versions(engine, self.snake_walk) def test_mysql_connect_fail(self): @@ -469,7 +469,7 @@ class TestMigrations(test.TestCase): def test_migration_005(self): """Test that adding source_volid column works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -485,7 +485,7 @@ class TestMigrations(test.TestCase): sqlalchemy.types.VARCHAR) def _metadatas(self, upgrade_to, downgrade_to=None): - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -536,7 +536,7 @@ class TestMigrations(test.TestCase): def test_migration_008(self): """Test that adding and removing the backups table works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -598,7 +598,7 @@ class 
TestMigrations(test.TestCase): def test_migration_009(self): """Test adding snapshot_metadata table works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -640,7 +640,7 @@ class TestMigrations(test.TestCase): def test_migration_010(self): """Test adding transfers table works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -683,7 +683,7 @@ class TestMigrations(test.TestCase): def test_migration_011(self): """Test adding transfers table works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -727,7 +727,7 @@ class TestMigrations(test.TestCase): def test_migration_012(self): """Test that adding attached_host column works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -753,7 +753,7 @@ class TestMigrations(test.TestCase): def test_migration_013(self): """Test that adding provider_geometry column works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -779,7 +779,7 @@ class TestMigrations(test.TestCase): def test_migration_014(self): """Test that adding _name_id column works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -805,7 +805,7 @@ class 
TestMigrations(test.TestCase): def test_migration_015(self): """Test removing migrations table works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -821,7 +821,7 @@ class TestMigrations(test.TestCase): def test_migration_016(self): """Test that dropping xen storage manager tables works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -849,7 +849,7 @@ class TestMigrations(test.TestCase): """Test that added encryption information works correctly.""" # upgrade schema - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -902,7 +902,7 @@ class TestMigrations(test.TestCase): def test_migration_018(self): """Test that added qos_specs table works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -940,7 +940,7 @@ class TestMigrations(test.TestCase): def test_migration_019(self): """Test that adding migration_status column works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -966,7 +966,7 @@ class TestMigrations(test.TestCase): def test_migration_020(self): """Test adding volume_admin_metadata table works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -1006,7 +1006,7 @@ 
class TestMigrations(test.TestCase): def test_migration_021(self): """Test adding default data for quota classes works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -1037,7 +1037,7 @@ class TestMigrations(test.TestCase): def test_migration_022(self): """Test that adding disabled_reason column works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -1063,7 +1063,7 @@ class TestMigrations(test.TestCase): def test_migration_023(self): """Test that adding reservations index works correctly.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -1096,7 +1096,7 @@ class TestMigrations(test.TestCase): def test_migration_024(self): """Test adding replication columns to volume table.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -1129,7 +1129,7 @@ class TestMigrations(test.TestCase): def test_migration_025(self): """Test adding table and columns for consistencygroups.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.db_initial_version()) @@ -1264,7 +1264,7 @@ class TestMigrations(test.TestCase): def test_migration_026(self): """Test adding default data for consistencygroups quota class.""" - for (key, engine) in self.engines.items(): + for (_key, engine) in self.engines.items(): migration_api.version_control(engine, TestMigrations.REPOSITORY, 
migration.db_initial_version()) diff --git a/cinder/tests/test_quota.py b/cinder/tests/test_quota.py index 3c9945f3e..94d403710 100644 --- a/cinder/tests/test_quota.py +++ b/cinder/tests/test_quota.py @@ -98,7 +98,7 @@ class QuotaIntegrationTestCase(test.TestCase): def test_too_many_volumes(self): volume_ids = [] - for i in range(CONF.quota_volumes): + for _i in range(CONF.quota_volumes): vol_ref = self._create_volume() volume_ids.append(vol_ref['id']) self.assertRaises(exception.VolumeLimitExceeded, diff --git a/cinder/tests/test_storwize_svc.py b/cinder/tests/test_storwize_svc.py index b6838179f..d75396ea7 100644 --- a/cinder/tests/test_storwize_svc.py +++ b/cinder/tests/test_storwize_svc.py @@ -2565,8 +2565,8 @@ class StorwizeSVCDriverTestCase(test.TestCase): old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) - diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], - new_type_ref['id']) + diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], + new_type_ref['id']) volume = self._generate_vol_info(None, None) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) @@ -2655,8 +2655,8 @@ class StorwizeSVCDriverTestCase(test.TestCase): old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) - diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], - new_type_ref['id']) + diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], + new_type_ref['id']) volume = self._generate_vol_info(None, None) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) @@ -2688,8 +2688,8 @@ class StorwizeSVCDriverTestCase(test.TestCase): old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) - diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], - new_type_ref['id']) + diff, 
_equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], + new_type_ref['id']) volume = self._generate_vol_info(None, None) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) @@ -3084,9 +3084,9 @@ class StorwizeSVCDriverTestCase(test.TestCase): disable_type = self._create_replication_volume_type(False) enable_type = self._create_replication_volume_type(True) - diff, equal = volume_types.volume_types_diff(ctxt, - disable_type['id'], - enable_type['id']) + diff, _equal = volume_types.volume_types_diff(ctxt, + disable_type['id'], + enable_type['id']) volume = self._generate_vol_info(None, None) volume['host'] = host @@ -3131,9 +3131,9 @@ class StorwizeSVCDriverTestCase(test.TestCase): self.assertIsNone(model_update) enable_type = self._create_replication_volume_type(True) - diff, equal = volume_types.volume_types_diff(ctxt, - None, - enable_type['id']) + diff, _equal = volume_types.volume_types_diff(ctxt, + None, + enable_type['id']) # Enable replica self.driver.retype(ctxt, volume, enable_type, diff, host) @@ -3245,8 +3245,8 @@ class StorwizeSVCDriverTestCase(test.TestCase): the vdisk_UID parameter and returns it. Returns None if the specified vdisk does not exist. """ - vdisk_properties, err = self.sim._cmd_lsvdisk(obj=vdisk_name, - delim='!') + vdisk_properties, _err = self.sim._cmd_lsvdisk(obj=vdisk_name, + delim='!') # Iterate through each row until we find the vdisk_UID entry for row in vdisk_properties.split('\n'): @@ -3299,7 +3299,7 @@ class StorwizeSVCDriverTestCase(test.TestCase): # Create a volume as a way of getting a vdisk created, and find out the # UID of that vdisk. - volume, uid = self._create_volume_and_return_uid('manage_test') + _volume, uid = self._create_volume_and_return_uid('manage_test') # Descriptor of the Cinder volume that we want to own the vdisk # referenced by uid. 
diff --git a/cinder/tests/test_zadara.py b/cinder/tests/test_zadara.py index f4693b484..c6d334644 100644 --- a/cinder/tests/test_zadara.py +++ b/cinder/tests/test_zadara.py @@ -243,7 +243,7 @@ class FakeRequest(object): cg_name = self.url.split('/')[3] snap_name = params['display_name'] - for (vol_name, params) in RUNTIME_VARS['volumes']: + for (_vol_name, params) in RUNTIME_VARS['volumes']: if params['cg-name'] == cg_name: snapshots = params['snapshots'] if snap_name in snapshots: @@ -258,7 +258,7 @@ class FakeRequest(object): def _delete_snapshot(self): snap = self.url.split('/')[3].split('.')[0] - for (vol_name, params) in RUNTIME_VARS['volumes']: + for (_vol_name, params) in RUNTIME_VARS['volumes']: if snap in params['snapshots']: params['snapshots'].remove(snap) return RUNTIME_VARS['good'] @@ -431,7 +431,7 @@ class FakeRequest(object): <pool-name>pool-00000001</pool-name> </snapshot>""" - for (vol_name, params) in RUNTIME_VARS['volumes']: + for (_vol_name, params) in RUNTIME_VARS['volumes']: if params['cg-name'] == cg_name: snapshots = params['snapshots'] resp = header diff --git a/cinder/volume/drivers/block_device.py b/cinder/volume/drivers/block_device.py index 35e16bfbf..95f5b4fc9 100644 --- a/cinder/volume/drivers/block_device.py +++ b/cinder/volume/drivers/block_device.py @@ -190,8 +190,8 @@ class BlockDeviceDriver(driver.ISCSIDriver): return used_devices def _get_device_size(self, dev_path): - out, err = self._execute('blockdev', '--getsz', dev_path, - run_as_root=True) + out, _err = self._execute('blockdev', '--getsz', dev_path, + run_as_root=True) size_in_m = int(out) return size_in_m / 2048 diff --git a/cinder/volume/drivers/hds/hds.py b/cinder/volume/drivers/hds/hds.py index f08498f54..0e9c3b5df 100644 --- a/cinder/volume/drivers/hds/hds.py +++ b/cinder/volume/drivers/hds/hds.py @@ -385,7 +385,7 @@ class HUSDriver(driver.ISCSIDriver): info = _loc_info(prov_loc) (arid, lun) = info['id_lu'] if 'tgt' in info.keys(): # connected? 
- (_portal, iqn, loc, ctl, port) = info['tgt'] + (_portal, iqn, _loc, ctl, port) = info['tgt'] self.bend.del_iscsi_conn(self.config['hus_cmd'], HDS_VERSION, self.config['mgmt_ip0'], diff --git a/cinder/volume/drivers/netapp/iscsi.py b/cinder/volume/drivers/netapp/iscsi.py index c518e4e4b..80b4e0a9c 100644 --- a/cinder/volume/drivers/netapp/iscsi.py +++ b/cinder/volume/drivers/netapp/iscsi.py @@ -422,7 +422,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver): for lun in api_luns: meta_dict = self._create_lun_meta(lun) path = lun.get_child_content('path') - (rest, splitter, name) = path.rpartition('/') + (_rest, _splitter, name) = path.rpartition('/') handle = self._create_lun_handle(meta_dict) size = lun.get_child_content('size') discovered_lun = NetAppLun(handle, name, @@ -460,7 +460,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver): msg_fmt = {'code': code, 'message': message} exc_info = sys.exc_info() LOG.warn(msg % msg_fmt) - (igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator) + (_igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator) if lun_id is not None: return lun_id else: @@ -468,7 +468,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver): def _unmap_lun(self, path, initiator): """Unmaps a lun from given initiator.""" - (igroup_name, lun_id) = self._find_mapped_lun_igroup(path, initiator) + (igroup_name, _lun_id) = self._find_mapped_lun_igroup(path, initiator) lun_unmap = NaElement.create_node_with_children( 'lun-unmap', **{'path': path, 'initiator-group': igroup_name}) @@ -988,7 +988,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver): zbc = block_count if z_calls == 0: z_calls = 1 - for call in range(0, z_calls): + for _call in range(0, z_calls): if zbc > z_limit: block_count = z_limit zbc -= z_limit @@ -1003,7 +1003,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver): block_ranges = NaElement("block-ranges") segments = int(math.ceil(block_count / float(bc_limit))) bc = block_count - for segment in 
range(0, segments): + for _segment in range(0, segments): if bc > bc_limit: block_count = bc_limit bc -= bc_limit @@ -1353,7 +1353,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver): """Clone LUN with the given handle to the new name.""" metadata = self._get_lun_attr(name, 'metadata') path = metadata['Path'] - (parent, splitter, name) = path.rpartition('/') + (parent, _splitter, name) = path.rpartition('/') clone_path = '%s/%s' % (parent, new_name) # zAPI can only handle 2^24 blocks per range bc_limit = 2 ** 24 # 8GB @@ -1364,7 +1364,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver): zbc = block_count if z_calls == 0: z_calls = 1 - for call in range(0, z_calls): + for _call in range(0, z_calls): if zbc > z_limit: block_count = z_limit zbc -= z_limit @@ -1380,7 +1380,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver): bc_limit = 2 ** 24 # 8GB segments = int(math.ceil(block_count / float(bc_limit))) bc = block_count - for segment in range(0, segments): + for _segment in range(0, segments): if bc > bc_limit: block_count = bc_limit bc -= bc_limit diff --git a/cinder/volume/drivers/netapp/nfs.py b/cinder/volume/drivers/netapp/nfs.py index 4e6131e6d..4140e49fc 100644 --- a/cinder/volume/drivers/netapp/nfs.py +++ b/cinder/volume/drivers/netapp/nfs.py @@ -300,7 +300,7 @@ class NetAppNFSDriver(nfs.NfsDriver): self.configuration.thres_avl_size_perc_stop for share in getattr(self, '_mounted_shares', []): try: - total_size, total_avl, total_alc =\ + total_size, total_avl, _total_alc =\ self._get_capacity_info(share) avl_percent = int((total_avl / total_size) * 100) if avl_percent <= thres_size_perc_start: @@ -636,7 +636,8 @@ class NetAppNFSDriver(nfs.NfsDriver): def _check_share_can_hold_size(self, share, size): """Checks if volume can hold image with size.""" - tot_size, tot_available, tot_allocated = self._get_capacity_info(share) + _tot_size, tot_available, _tot_allocated = self._get_capacity_info( + share) if tot_available < size: 
msg = _("Container size smaller than required file size.") raise exception.VolumeDriverException(msg) @@ -1415,7 +1416,7 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver): def _clone_volume(self, volume_name, clone_name, volume_id, share=None): """Clones mounted volume with NetApp filer.""" - (host_ip, export_path) = self._get_export_ip_path(volume_id, share) + (_host_ip, export_path) = self._get_export_ip_path(volume_id, share) storage_path = self._get_actual_path_for_export(export_path) target_path = '%s/%s' % (storage_path, clone_name) (clone_id, vol_uuid) = self._start_clone('%s/%s' % (storage_path, diff --git a/cinder/volume/drivers/nfs.py b/cinder/volume/drivers/nfs.py index cc993e277..d2aef63cc 100644 --- a/cinder/volume/drivers/nfs.py +++ b/cinder/volume/drivers/nfs.py @@ -168,7 +168,7 @@ class NfsDriver(remotefs.RemoteFSDriver): for nfs_share in self._mounted_shares: if not self._is_share_eligible(nfs_share, volume_size_in_gib): continue - total_size, total_available, total_allocated = \ + _total_size, _total_available, total_allocated = \ self._get_capacity_info(nfs_share) if target_share is not None: if target_share_reserved > total_allocated: diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py index a55c1f666..2da6499f0 100644 --- a/cinder/volume/drivers/rbd.py +++ b/cinder/volume/drivers/rbd.py @@ -386,8 +386,8 @@ class RBDDriver(driver.VolumeDriver): """ parent_volume = self.rbd.Image(client.ioctx, volume_name) try: - pool, parent, snap = self._get_clone_info(parent_volume, - volume_name) + _pool, parent, _snap = self._get_clone_info(parent_volume, + volume_name) finally: parent_volume.close() @@ -440,8 +440,8 @@ class RBDDriver(driver.VolumeDriver): try: # First flatten source volume if required. 
if flatten_parent: - pool, parent, snap = self._get_clone_info(src_volume, - src_name) + _pool, parent, snap = self._get_clone_info(src_volume, + src_name) # Flatten source volume LOG.debug("flattening source volume %s" % (src_name)) src_volume.flatten() @@ -639,9 +639,9 @@ class RBDDriver(driver.VolumeDriver): raise exception.VolumeIsBusy(volume_name=volume_name) # Determine if this volume is itself a clone - pool, parent, parent_snap = self._get_clone_info(rbd_image, - volume_name, - clone_snap) + _pool, parent, parent_snap = self._get_clone_info(rbd_image, + volume_name, + clone_snap) finally: rbd_image.close() @@ -780,7 +780,7 @@ class RBDDriver(driver.VolumeDriver): if image_location is None or not self._is_cloneable( image_location, image_meta): return ({}, False) - prefix, pool, image, snapshot = self._parse_location(image_location) + _prefix, pool, image, snapshot = self._parse_location(image_location) self._clone(volume, pool, image, snapshot) self._resize(volume) return {'provider_location': None}, True diff --git a/cinder/volume/drivers/san/hp/hp_3par_common.py b/cinder/volume/drivers/san/hp/hp_3par_common.py index b93e204fd..c3388d246 100644 --- a/cinder/volume/drivers/san/hp/hp_3par_common.py +++ b/cinder/volume/drivers/san/hp/hp_3par_common.py @@ -1274,7 +1274,7 @@ class HP3PARCommon(object): type_id = volume.get('volume_type_id', None) - hp3par_keys, qos, volume_type, vvs_name = self.get_type_info( + hp3par_keys, qos, _volume_type, vvs_name = self.get_type_info( type_id) name = volume.get('display_name', None) @@ -1633,7 +1633,7 @@ class HP3PARCommon(object): " to %(new_cpg)s") % {'volume_name': volume_name, 'old_cpg': old_cpg, 'new_cpg': new_cpg}) - response, body = self.client.modifyVolume( + _response, body = self.client.modifyVolume( volume_name, {'action': 6, 'tuneOperation': 1, @@ -1696,7 +1696,7 @@ class HP3PARCommon(object): self.validate_persona(new_persona) if host is not None: - (host_type, host_id, host_cpg) = ( + (host_type, host_id, 
_host_cpg) = ( host['capabilities']['location_info']).split(':') if not (host_type == 'HP3PARDriver'): diff --git a/cinder/volume/drivers/san/hp/hp_3par_fc.py b/cinder/volume/drivers/san/hp/hp_3par_fc.py index 1c5adadac..7d289604a 100644 --- a/cinder/volume/drivers/san/hp/hp_3par_fc.py +++ b/cinder/volume/drivers/san/hp/hp_3par_fc.py @@ -258,7 +258,7 @@ class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver): LOG.info(_("Need to remove FC Zone, building initiator " "target map")) - target_wwns, init_targ_map, numPaths = \ + target_wwns, init_targ_map, _numPaths = \ self._build_initiator_target_map(connector) info['data'] = {'target_wwn': target_wwns, @@ -296,7 +296,7 @@ class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver): init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) - for target in init_targ_map[initiator]: + for _target in init_targ_map[initiator]: numPaths += 1 target_wwns = list(set(target_wwns)) else: diff --git a/cinder/volume/drivers/sheepdog.py b/cinder/volume/drivers/sheepdog.py index e05796ee5..2af9c4ccf 100644 --- a/cinder/volume/drivers/sheepdog.py +++ b/cinder/volume/drivers/sheepdog.py @@ -55,7 +55,7 @@ class SheepdogDriver(driver.VolumeDriver): #NOTE(francois-charlier) Since 0.24 'collie cluster info -r' # gives short output, but for compatibility reason we won't # use it and just check if 'running' is in the output. 
- (out, err) = self._execute('collie', 'cluster', 'info') + (out, _err) = self._execute('collie', 'cluster', 'info') if 'status: running' not in out: exception_message = (_("Sheepdog is not working: %s") % out) raise exception.VolumeBackendAPIException( diff --git a/cinder/volume/drivers/solidfire.py b/cinder/volume/drivers/solidfire.py index be56afb7c..aa8abbfa6 100644 --- a/cinder/volume/drivers/solidfire.py +++ b/cinder/volume/drivers/solidfire.py @@ -532,7 +532,7 @@ class SolidFireDriver(SanISCSIDriver): def create_cloned_volume(self, volume, src_vref): """Create a clone of an existing volume.""" - (data, sfaccount, model) = self._do_clone_volume( + (_data, _sfaccount, model) = self._do_clone_volume( src_vref['id'], src_vref['project_id'], volume) @@ -605,14 +605,14 @@ class SolidFireDriver(SanISCSIDriver): restore at which time we'll rework this appropriately. """ - (data, sfaccount, model) = self._do_clone_volume( + (_data, _sfaccount, _model) = self._do_clone_volume( snapshot['volume_id'], snapshot['project_id'], snapshot) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from the specified snapshot.""" - (data, sfaccount, model) = self._do_clone_volume( + (_data, _sfaccount, model) = self._do_clone_volume( snapshot['id'], snapshot['project_id'], volume) diff --git a/cinder/volume/drivers/vmware/read_write_util.py b/cinder/volume/drivers/vmware/read_write_util.py index a43489b67..718d1914c 100644 --- a/cinder/volume/drivers/vmware/read_write_util.py +++ b/cinder/volume/drivers/vmware/read_write_util.py @@ -148,7 +148,7 @@ class VMwareHTTPWriteFile(VMwareHTTPFile): param_list = {'dcPath': data_center_name, 'dsName': datastore_name} base_url = base_url + '?' 
+ urllib.urlencode(param_list) _urlparse = urlparse.urlparse(base_url) - scheme, netloc, path, params, query, fragment = _urlparse + scheme, netloc, path, _params, query, _fragment = _urlparse if scheme == 'http': conn = httplib.HTTPConnection(netloc) elif scheme == 'https': @@ -211,7 +211,7 @@ class VMwareHTTPWriteVmdk(VMwareHTTPFile): # Prepare the http connection to the vmdk url cookies = session.vim.client.options.transport.cookiejar _urlparse = urlparse.urlparse(url) - scheme, netloc, path, params, query, fragment = _urlparse + scheme, netloc, path, _params, query, _fragment = _urlparse if scheme == 'http': conn = httplib.HTTPConnection(netloc) elif scheme == 'https': diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py index a5c77efbe..50f92080c 100644 --- a/cinder/volume/drivers/vmware/vmdk.py +++ b/cinder/volume/drivers/vmware/vmdk.py @@ -1127,7 +1127,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver): if disk_conversion: # Clone the temporary backing for disk type conversion. 
- (host, rp, folder, summary) = self._select_ds_for_volume( + (host, _rp, _folder, summary) = self._select_ds_for_volume( volume) datastore = summary.datastore LOG.debug("Cloning temporary backing: %s for disk type " @@ -1163,7 +1163,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver): """ try: # find host in which to create the volume - (host, rp, folder, summary) = self._select_ds_for_volume(volume) + (_host, rp, folder, summary) = self._select_ds_for_volume(volume) except error_util.VimException as excep: err_msg = (_("Exception in _select_ds_for_volume: " "%s."), excep) @@ -1646,7 +1646,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver): {'name': name, 'path': tmp_file_path}) - (host, rp, folder, summary) = self._select_ds_for_volume(volume) + (_host, rp, folder, summary) = self._select_ds_for_volume(volume) LOG.debug("Selected datastore: %(ds)s for backing: %(name)s.", {'ds': summary.name, 'name': name}) @@ -1708,7 +1708,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver): renamed = False try: # Find datastore for clone. 
- (host, rp, folder, summary) = self._select_ds_for_volume(volume) + (_host, _rp, _folder, summary) = self._select_ds_for_volume(volume) datastore = summary.datastore disk_type = VMwareEsxVmdkDriver._get_disk_type(volume) @@ -1981,7 +1981,7 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver): datastore = None if not clone_type == volumeops.LINKED_CLONE_TYPE: # Pick a datastore where to create the full clone under any host - (host, rp, folder, summary) = self._select_ds_for_volume(volume) + (_host, _rp, _folder, summary) = self._select_ds_for_volume(volume) datastore = summary.datastore clone = self.volumeops.clone_backing(volume['name'], backing, snapshot, clone_type, datastore) diff --git a/cinder/volume/drivers/zadara.py b/cinder/volume/drivers/zadara.py index c2385c205..4a10ea75b 100644 --- a/cinder/volume/drivers/zadara.py +++ b/cinder/volume/drivers/zadara.py @@ -323,7 +323,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver): def _get_vpsa_volume_name(self, name): """Return VPSA's name for the volume.""" - (vol_name, size) = self._get_vpsa_volume_name_and_size(name) + (vol_name, _size) = self._get_vpsa_volume_name_and_size(name) return vol_name def _get_volume_cg_name(self, name): diff --git a/pylintrc b/pylintrc index a7021ded5..b8ffa2ab3 100644 --- a/pylintrc +++ b/pylintrc @@ -29,3 +29,7 @@ no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ max-public-methods=100 min-public-methods=0 max-args=6 + +[Variables] + +dummy-variables-rgx=_