]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Amend unused variables to assist pylint testing
authorMike Mason <mikemason010@gmail.com>
Fri, 17 Oct 2014 10:46:26 +0000 (10:46 +0000)
committerMike Mason <mikemason010@gmail.com>
Thu, 30 Oct 2014 16:23:57 +0000 (16:23 +0000)
Amending unused variables with a prefix of an underscore to prevent them
being picked up in pylint testing, consistency, and for general housekeeping.
Change to pylintrc also required to enforce the rule.

Closes-Bug: #1268062

Change-Id: I80c2cbdc52d6f37823fae90d0096836166412643

35 files changed:
cinder/backup/drivers/swift.py
cinder/brick/initiator/linuxfc.py
cinder/brick/initiator/linuxscsi.py
cinder/brick/iscsi/iscsi.py
cinder/brick/local_dev/lvm.py
cinder/brick/remotefs/remotefs.py
cinder/common/config.py
cinder/db/sqlalchemy/api.py
cinder/image/glance.py
cinder/image/image_utils.py
cinder/tests/api/openstack/test_wsgi.py
cinder/tests/brick/test_brick_connector.py
cinder/tests/image/test_glance.py
cinder/tests/test_backup_ceph.py
cinder/tests/test_backup_swift.py
cinder/tests/test_glusterfs.py
cinder/tests/test_gpfs.py
cinder/tests/test_migrations.py
cinder/tests/test_quota.py
cinder/tests/test_storwize_svc.py
cinder/tests/test_zadara.py
cinder/volume/drivers/block_device.py
cinder/volume/drivers/hds/hds.py
cinder/volume/drivers/netapp/iscsi.py
cinder/volume/drivers/netapp/nfs.py
cinder/volume/drivers/nfs.py
cinder/volume/drivers/rbd.py
cinder/volume/drivers/san/hp/hp_3par_common.py
cinder/volume/drivers/san/hp/hp_3par_fc.py
cinder/volume/drivers/sheepdog.py
cinder/volume/drivers/solidfire.py
cinder/volume/drivers/vmware/read_write_util.py
cinder/volume/drivers/vmware/vmdk.py
cinder/volume/drivers/zadara.py
pylintrc

index 1c1035ef23c0e115341ef9ebfbab3a6f9a92e38a..fdee8c88b16824cb36e2bb3481c562dcb6ba024a 100644 (file)
@@ -248,7 +248,7 @@ class SwiftBackupDriver(BackupDriver):
         LOG.debug('_read_metadata started, container name: %(container)s, '
                   'metadata filename: %(filename)s' %
                   {'container': container, 'filename': filename})
-        (resp, body) = self.conn.get_object(container, filename)
+        (_resp, body) = self.conn.get_object(container, filename)
         metadata = json.loads(body)
         LOG.debug('_read_metadata finished (%s)' % metadata)
         return metadata
@@ -428,7 +428,7 @@ class SwiftBackupDriver(BackupDriver):
                           'volume_id': volume_id,
                       })
             try:
-                (resp, body) = self.conn.get_object(container, object_name)
+                (_resp, body) = self.conn.get_object(container, object_name)
             except socket.error as err:
                 raise exception.SwiftConnectionFailed(reason=err)
             compression_algorithm = metadata_object[object_name]['compression']
index 391f747b65bb920cc0efe74b2df3bddacc957b80..edd9e7c52ac474a55a2ebe34c92b4ecd81580661 100644 (file)
@@ -39,9 +39,9 @@ class LinuxFibreChannel(linuxscsi.LinuxSCSI):
         """Get the Fibre Channel HBA information."""
         out = None
         try:
-            out, err = self._execute('systool', '-c', 'fc_host', '-v',
-                                     run_as_root=True,
-                                     root_helper=self._root_helper)
+            out, _err = self._execute('systool', '-c', 'fc_host', '-v',
+                                      run_as_root=True,
+                                      root_helper=self._root_helper)
         except putils.ProcessExecutionError as exc:
             # This handles the case where rootwrap is used
             # and systool is not installed
index 1b3b87390300af9c09cf20df215d3aae48655c5d..9ee74ae3f010c0822ec57db356dded4c8b4b2564 100644 (file)
@@ -65,8 +65,8 @@ class LinuxSCSI(executor.Executor):
             self.echo_scsi_command(path, "1")
 
     def get_device_info(self, device):
-        (out, err) = self._execute('sg_scan', device, run_as_root=True,
-                                   root_helper=self._root_helper)
+        (out, _err) = self._execute('sg_scan', device, run_as_root=True,
+                                    root_helper=self._root_helper)
         dev_info = {'device': device, 'host': None,
                     'channel': None, 'id': None, 'lun': None}
         if out:
@@ -135,9 +135,9 @@ class LinuxSCSI(executor.Executor):
         devices = []
         out = None
         try:
-            (out, err) = self._execute('multipath', '-l', device,
-                                       run_as_root=True,
-                                       root_helper=self._root_helper)
+            (out, _err) = self._execute('multipath', '-l', device,
+                                        run_as_root=True,
+                                        root_helper=self._root_helper)
         except putils.ProcessExecutionError as exc:
             LOG.warn(_("multipath call failed exit (%(code)s)")
                      % {'code': exc.exit_code})
index e94a3438e594879042d52043a9d2726459e081af..4703be8351a3e04caf2300dcf63c6432b9e1fbe2 100644 (file)
@@ -104,7 +104,7 @@ class TgtAdm(TargetAdmin):
         self.volumes_dir = volumes_dir
 
     def _get_target(self, iqn):
-        (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
+        (out, _err) = self._execute('tgt-admin', '--show', run_as_root=True)
         lines = out.split('\n')
         for line in lines:
             if iqn in line:
@@ -119,7 +119,7 @@ class TgtAdm(TargetAdmin):
         capture = False
         target_info = []
 
-        (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
+        (out, _err) = self._execute('tgt-admin', '--show', run_as_root=True)
         lines = out.split('\n')
 
         for line in lines:
@@ -478,9 +478,9 @@ class LioAdm(TargetAdmin):
             raise
 
     def _get_target(self, iqn):
-        (out, err) = self._execute('cinder-rtstool',
-                                   'get-targets',
-                                   run_as_root=True)
+        (out, _err) = self._execute('cinder-rtstool',
+                                    'get-targets',
+                                    run_as_root=True)
         lines = out.split('\n')
         for line in lines:
             if iqn in line:
@@ -561,7 +561,7 @@ class LioAdm(TargetAdmin):
     def initialize_connection(self, volume, connector):
         volume_iqn = volume['provider_location'].split(' ')[1]
 
-        (auth_method, auth_user, auth_pass) = \
+        (_auth_method, auth_user, auth_pass) = \
             volume['provider_auth'].split(' ', 3)
 
         # Add initiator iqns to target ACL
index 04174c2f005d243919b76be5677d5bad80a1646b..205c2db03a4ba75fbfc9507750327e3d42088d60 100644 (file)
@@ -101,7 +101,7 @@ class LVM(executor.Executor):
 
         """
         exists = False
-        (out, err) = self._execute(
+        (out, _err) = self._execute(
             'env', 'LC_ALL=C', 'vgs', '--noheadings', '-o', 'name',
             self.vg_name, root_helper=self._root_helper, run_as_root=True)
 
@@ -117,8 +117,8 @@ class LVM(executor.Executor):
         self._execute(*cmd, root_helper=self._root_helper, run_as_root=True)
 
     def _get_vg_uuid(self):
-        (out, err) = self._execute('env', 'LC_ALL=C', 'vgs', '--noheadings',
-                                   '-o uuid', self.vg_name)
+        (out, _err) = self._execute('env', 'LC_ALL=C', 'vgs', '--noheadings',
+                                    '-o uuid', self.vg_name)
         if out is not None:
             return out.split()
         else:
@@ -171,9 +171,9 @@ class LVM(executor.Executor):
         """
 
         cmd = ['env', 'LC_ALL=C', 'vgs', '--version']
-        (out, err) = putils.execute(*cmd,
-                                    root_helper=root_helper,
-                                    run_as_root=True)
+        (out, _err) = putils.execute(*cmd,
+                                     root_helper=root_helper,
+                                     run_as_root=True)
         lines = out.split('\n')
 
         for line in lines:
@@ -249,9 +249,9 @@ class LVM(executor.Executor):
             cmd.append(vg_name)
 
         lvs_start = time.time()
-        (out, err) = putils.execute(*cmd,
-                                    root_helper=root_helper,
-                                    run_as_root=True)
+        (out, _err) = putils.execute(*cmd,
+                                     root_helper=root_helper,
+                                     run_as_root=True)
         total_time = time.time() - lvs_start
         if total_time > 60:
             LOG.warning(_('Took %s seconds to get logical volumes.'),
@@ -300,9 +300,9 @@ class LVM(executor.Executor):
                '--separator', ':',
                '--nosuffix']
 
-        (out, err) = putils.execute(*cmd,
-                                    root_helper=root_helper,
-                                    run_as_root=True)
+        (out, _err) = putils.execute(*cmd,
+                                     root_helper=root_helper,
+                                     run_as_root=True)
 
         pvs = out.split()
         if vg_name is not None:
@@ -344,9 +344,9 @@ class LVM(executor.Executor):
             cmd.append(vg_name)
 
         start_vgs = time.time()
-        (out, err) = putils.execute(*cmd,
-                                    root_helper=root_helper,
-                                    run_as_root=True)
+        (out, _err) = putils.execute(*cmd,
+                                     root_helper=root_helper,
+                                     run_as_root=True)
         total_time = time.time() - start_vgs
         if total_time > 60:
             LOG.warning(_('Took %s seconds to get volume groups.'), total_time)
@@ -618,7 +618,7 @@ class LVM(executor.Executor):
                       run_as_root=True)
 
     def lv_has_snapshot(self, name):
-        out, err = self._execute(
+        out, _err = self._execute(
             'env', 'LC_ALL=C', 'lvdisplay', '--noheading',
             '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name),
             root_helper=self._root_helper, run_as_root=True)
index b20ebb96aba4a78ae3dda49d8d05dd1a7492831f..ce60e2c4d3c30c81c47ab2fb950bb56bcef36885 100644 (file)
@@ -77,7 +77,7 @@ class RemoteFsClient(object):
                             self._get_hash_str(device_name))
 
     def _read_mounts(self):
-        (out, err) = self._execute('mount', check_exit_code=0)
+        (out, _err) = self._execute('mount', check_exit_code=0)
         lines = out.split('\n')
         mounts = {}
         for line in lines:
index 0327712b426a6cfaf19126a3d87e6a3392499569..a78640d20bb232873cc912eb3b69f05d12abeaf1 100644 (file)
@@ -47,7 +47,7 @@ def _get_my_ip():
     try:
         csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
         csock.connect(('8.8.8.8', 80))
-        (addr, port) = csock.getsockname()
+        (addr, _port) = csock.getsockname()
         csock.close()
         return addr
     except socket.error:
index ff3ed181fc19817c2f77afd71361b5ad9a3206c5..da249da26c7f3c99ddf7792b7d3d8c7c698d11aa 100644 (file)
@@ -252,7 +252,7 @@ def model_query(context, *args, **kwargs):
 
 def _sync_volumes(context, project_id, session, volume_type_id=None,
                   volume_type_name=None):
-    (volumes, gigs) = _volume_data_get_for_project(
+    (volumes, _gigs) = _volume_data_get_for_project(
         context, project_id, volume_type_id=volume_type_id, session=session)
     key = 'volumes'
     if volume_type_name:
@@ -262,7 +262,7 @@ def _sync_volumes(context, project_id, session, volume_type_id=None,
 
 def _sync_snapshots(context, project_id, session, volume_type_id=None,
                     volume_type_name=None):
-    (snapshots, gigs) = _snapshot_data_get_for_project(
+    (snapshots, _gigs) = _snapshot_data_get_for_project(
         context, project_id, volume_type_id=volume_type_id, session=session)
     key = 'snapshots'
     if volume_type_name:
@@ -272,7 +272,7 @@ def _sync_snapshots(context, project_id, session, volume_type_id=None,
 
 def _sync_backups(context, project_id, session, volume_type_id=None,
                   volume_type_name=None):
-    (backups, gigs) = _backup_data_get_for_project(
+    (backups, _gigs) = _backup_data_get_for_project(
         context, project_id, volume_type_id=volume_type_id, session=session)
     key = 'backups'
     return {key: backups}
@@ -3026,7 +3026,8 @@ def consistencygroup_create(context, values):
 def consistencygroup_update(context, consistencygroup_id, values):
     session = get_session()
     with session.begin():
-        result = model_query(context, models.ConsistencyGroup, project_only=True).\
+        result = model_query(context, models.ConsistencyGroup,
+                             project_only=True).\
             filter_by(id=consistencygroup_id).\
             first()
 
index 19876ed197d4c817c8b1b77542bdf2414d2a6c71..76a4788fd1903ba16b7c4e7443838391317a12a2 100644 (file)
@@ -466,14 +466,14 @@ def _remove_read_only(image_meta):
 
 def _reraise_translated_image_exception(image_id):
     """Transform the exception for the image but keep its traceback intact."""
-    exc_type, exc_value, exc_trace = sys.exc_info()
+    _exc_type, exc_value, exc_trace = sys.exc_info()
     new_exc = _translate_image_exception(image_id, exc_value)
     raise new_exc, None, exc_trace
 
 
 def _reraise_translated_exception():
     """Transform the exception but keep its traceback intact."""
-    exc_type, exc_value, exc_trace = sys.exc_info()
+    _exc_type, exc_value, exc_trace = sys.exc_info()
     new_exc = _translate_plain_exception(exc_value)
     raise new_exc, None, exc_trace
 
index a3bea1a2222958612899412a2e835a94b0fef61c..3d280a3ca41c4bdb80bf5f3a17ec2ecfcdc18181 100644 (file)
@@ -57,7 +57,7 @@ def qemu_img_info(path, run_as_root=True):
     cmd = ('env', 'LC_ALL=C', 'qemu-img', 'info', path)
     if os.name == 'nt':
         cmd = cmd[2:]
-    out, err = utils.execute(*cmd, run_as_root=run_as_root)
+    out, _err = utils.execute(*cmd, run_as_root=run_as_root)
     return imageutils.QemuImgInfo(out)
 
 
@@ -363,7 +363,7 @@ def fix_vhd_chain(vhd_chain):
 
 
 def get_vhd_size(vhd_path):
-    out, err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v')
+    out, _err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v')
     return int(out)
 
 
index 254306439e4df13fea347acb870438545b49595f..67e57e71707062ca06f2b5a3fedaf7d4d507a55b 100644 (file)
@@ -336,7 +336,7 @@ class ResourceTest(test.TestCase):
 
         controller = Controller()
         resource = wsgi.Resource(controller)
-        method, extensions = resource.get_method(None, 'index', None, '')
+        method, _extensions = resource.get_method(None, 'index', None, '')
         actual = resource.dispatch(method, None, {'pants': 'off'})
         expected = 'off'
         self.assertEqual(actual, expected)
@@ -359,9 +359,9 @@ class ResourceTest(test.TestCase):
 
         controller = Controller()
         resource = wsgi.Resource(controller)
-        method, extensions = resource.get_method(None, 'action',
-                                                 'application/json',
-                                                 '{"fooAction": true}')
+        method, _extensions = resource.get_method(None, 'action',
+                                                  'application/json',
+                                                  '{"fooAction": true}')
         self.assertEqual(controller._action_foo, method)
 
     def test_get_method_action_xml(self):
@@ -372,9 +372,8 @@ class ResourceTest(test.TestCase):
 
         controller = Controller()
         resource = wsgi.Resource(controller)
-        method, extensions = resource.get_method(None, 'action',
-                                                 'application/xml',
-                                                 '<fooAction>true</fooAction>')
+        method, _extensions = resource.get_method(
+            None, 'action', 'application/xml', '<fooAction>true</fooAction>')
         self.assertEqual(controller._action_foo, method)
 
     def test_get_method_action_bad_body(self):
@@ -407,9 +406,9 @@ class ResourceTest(test.TestCase):
 
         controller = Controller()
         resource = wsgi.Resource(controller)
-        method, extensions = resource.get_method(None, 'action',
-                                                 'application/xml',
-                                                 '<fooAction>true</fooAction')
+        method, _extensions = resource.get_method(None, 'action',
+                                                  'application/xml',
+                                                  '<fooAction>true</fooAction')
         self.assertEqual(controller.action, method)
 
     def test_get_action_args(self):
index db87184a78d0d94e01d2f97626d2ec382bc84ea1..5f5978b993b314e6dc308eed11bf580033310b18 100644 (file)
@@ -553,14 +553,14 @@ class AoEConnectorTestCase(ConnectorTestCase):
         self.assertDictMatch(volume_info, expected_info)
 
     def test_connect_volume_could_not_discover_path(self):
-        aoe_device, aoe_path = self.connector._get_aoe_info(
+        _aoe_device, aoe_path = self.connector._get_aoe_info(
             self.connection_properties)
 
         number_of_calls = 4
         self._mock_path_exists(aoe_path, [False] * (number_of_calls + 1))
         self.mox.StubOutWithMock(self.connector, '_execute')
 
-        for i in xrange(number_of_calls):
+        for _i in xrange(number_of_calls):
             self.connector._execute('aoe-discover',
                                     run_as_root=True,
                                     root_helper='sudo',
index e5f9ec138138f1f2b3fb73a4c8f57071085cd8ab..b58037363548eaadb61c6b237d7c3850617e7de4 100644 (file)
@@ -522,8 +522,8 @@ class TestGlanceImageService(test.TestCase):
     def test_glance_client_image_id(self):
         fixture = self._make_fixture(name='test image')
         image_id = self.service.create(self.context, fixture)['id']
-        (service, same_id) = glance.get_remote_image_service(self.context,
-                                                             image_id)
+        (_service, same_id) = glance.get_remote_image_service(self.context,
+                                                              image_id)
         self.assertEqual(same_id, image_id)
 
     def test_glance_client_image_ref(self):
index 75fc6cc1179392fc03e6192e659693f6e106476a..f0821fa315e138419dd9c9bc2a5ec4ebf942c178 100644 (file)
@@ -177,7 +177,7 @@ class BackupCephTestCase(test.TestCase):
         # Create a file with some data in it.
         self.volume_file = tempfile.NamedTemporaryFile()
         self.addCleanup(self.volume_file.close)
-        for i in xrange(0, self.num_chunks):
+        for _i in xrange(0, self.num_chunks):
             data = os.urandom(self.chunk_size)
             self.checksum.update(data)
             self.volume_file.write(data)
@@ -286,7 +286,7 @@ class BackupCephTestCase(test.TestCase):
 
             checksum = hashlib.sha256()
             test_file.seek(0)
-            for c in xrange(0, self.num_chunks):
+            for _c in xrange(0, self.num_chunks):
                 checksum.update(test_file.read(self.chunk_size))
 
             # Ensure the files are equal
@@ -357,7 +357,7 @@ class BackupCephTestCase(test.TestCase):
 
             checksum = hashlib.sha256()
             test_file.seek(0)
-            for c in xrange(0, self.num_chunks):
+            for _c in xrange(0, self.num_chunks):
                 checksum.update(test_file.read(self.chunk_size))
 
             # Ensure the files are equal
@@ -639,7 +639,7 @@ class BackupCephTestCase(test.TestCase):
 
                     checksum = hashlib.sha256()
                     test_file.seek(0)
-                    for c in xrange(0, self.num_chunks):
+                    for _c in xrange(0, self.num_chunks):
                         checksum.update(test_file.read(self.chunk_size))
 
                     # Ensure the files are equal
index 87d1e018cd8b941d8ac0d76e6b54d0eb7b55c145..1c2c48be60e8bec907a092b25580c2c5085643ce 100644 (file)
@@ -80,7 +80,7 @@ class BackupSwiftTestCase(test.TestCase):
         self._create_volume_db_entry()
         self.volume_file = tempfile.NamedTemporaryFile()
         self.addCleanup(self.volume_file.close)
-        for i in xrange(0, 128):
+        for _i in xrange(0, 128):
             self.volume_file.write(os.urandom(1024))
 
     def test_backup_swift_url(self):
index 0f93e3f8d2d88ae5ba878ed4000a0ca5b379a317..1ec3c8b686d31279e6c609be3c7704616d4585d9 100644 (file)
@@ -790,7 +790,7 @@ class GlusterFsDriverTestCase(test.TestCase):
             mock.patch.object(self._driver, '_load_shares_config'),
             mock.patch.object(self._driver, '_do_umount'),
             mock.patch.object(glusterfs, 'LOG')
-        ) as (mock_load_shares_config, mock_do_umount, mock_logger):
+        ) as (_mock_load_shares_config, mock_do_umount, mock_logger):
             mock_do_umount.side_effect = Exception()
 
             self._driver._unmount_shares()
@@ -805,7 +805,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         with contextlib.nested(
             mock.patch.object(self._driver, '_load_shares_config'),
             mock.patch.object(self._driver, '_do_umount')
-        ) as (mock_load_shares_config, mock_do_umount):
+        ) as (_mock_load_shares_config, mock_do_umount):
             self._driver._unmount_shares()
 
             self.assertTrue(mock_do_umount.called)
@@ -819,7 +819,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         with contextlib.nested(
             mock.patch.object(self._driver, '_load_shares_config'),
             mock.patch.object(self._driver, '_do_umount')
-        ) as (mock_load_shares_config, mock_do_umount):
+        ) as (_mock_load_shares_config, mock_do_umount):
             self._driver._unmount_shares()
 
             mock_do_umount.assert_any_call(True,
index fec6ad85c0da44d42582b8c81f152b9addcada9b..a4b0ada9b538655fe58bcebad4f1fb7ac6bdffec 100644 (file)
@@ -1462,9 +1462,9 @@ class GPFSDriverTestCase(test.TestCase):
         volume_types.get_volume_type(ctxt, old_type_ref['id'])
         new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
 
-        diff, equal = volume_types.volume_types_diff(ctxt,
-                                                     old_type_ref['id'],
-                                                     new_type_ref['id'])
+        diff, _equal = volume_types.volume_types_diff(ctxt,
+                                                      old_type_ref['id'],
+                                                      new_type_ref['id'])
 
         volume = {}
         volume['name'] = 'test'
index e03caa62d13f43c21efce95a93300bdfb762404f..fef77e34fe16e59cbc28bade18fa464e89e5832e 100644 (file)
@@ -237,7 +237,7 @@ class TestMigrations(test.TestCase):
         Walks all version scripts for each tested database, ensuring
         that there are no errors in the version scripts for each engine
         """
-        for key, engine in self.engines.items():
+        for _key, engine in self.engines.items():
             self._walk_versions(engine, self.snake_walk)
 
     def test_mysql_connect_fail(self):
@@ -469,7 +469,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_005(self):
         """Test that adding source_volid column works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -485,7 +485,7 @@ class TestMigrations(test.TestCase):
                                   sqlalchemy.types.VARCHAR)
 
     def _metadatas(self, upgrade_to, downgrade_to=None):
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -536,7 +536,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_008(self):
         """Test that adding and removing the backups table works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -598,7 +598,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_009(self):
         """Test adding snapshot_metadata table works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -640,7 +640,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_010(self):
         """Test adding transfers table works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -683,7 +683,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_011(self):
         """Test adding transfers table works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -727,7 +727,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_012(self):
         """Test that adding attached_host column works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -753,7 +753,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_013(self):
         """Test that adding provider_geometry column works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -779,7 +779,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_014(self):
         """Test that adding _name_id column works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -805,7 +805,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_015(self):
         """Test removing migrations table works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -821,7 +821,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_016(self):
         """Test that dropping xen storage manager tables works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -849,7 +849,7 @@ class TestMigrations(test.TestCase):
         """Test that added encryption information works correctly."""
 
         # upgrade schema
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -902,7 +902,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_018(self):
         """Test that added qos_specs table works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -940,7 +940,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_019(self):
         """Test that adding migration_status column works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -966,7 +966,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_020(self):
         """Test adding volume_admin_metadata table works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -1006,7 +1006,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_021(self):
         """Test adding default data for quota classes works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -1037,7 +1037,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_022(self):
         """Test that adding disabled_reason column works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -1063,7 +1063,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_023(self):
         """Test that adding reservations index works correctly."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -1096,7 +1096,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_024(self):
         """Test adding replication columns to volume table."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -1129,7 +1129,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_025(self):
         """Test adding table and columns for consistencygroups."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
@@ -1264,7 +1264,7 @@ class TestMigrations(test.TestCase):
 
     def test_migration_026(self):
         """Test adding default data for consistencygroups quota class."""
-        for (key, engine) in self.engines.items():
+        for (_key, engine) in self.engines.items():
             migration_api.version_control(engine,
                                           TestMigrations.REPOSITORY,
                                           migration.db_initial_version())
index 3c9945f3ec1bde5560359ed18138a9e0ff5a1224..94d40371073cd76dc0d6e329dce259f5c9c36cb6 100644 (file)
@@ -98,7 +98,7 @@ class QuotaIntegrationTestCase(test.TestCase):
 
     def test_too_many_volumes(self):
         volume_ids = []
-        for i in range(CONF.quota_volumes):
+        for _i in range(CONF.quota_volumes):
             vol_ref = self._create_volume()
             volume_ids.append(vol_ref['id'])
         self.assertRaises(exception.VolumeLimitExceeded,
index b6838179f310b739358669369b4f006bafd67f23..d75396ea72f1898f380128ad46e157183de60584 100644 (file)
@@ -2565,8 +2565,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
         new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
 
-        diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
-                                                     new_type_ref['id'])
+        diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
+                                                      new_type_ref['id'])
 
         volume = self._generate_vol_info(None, None)
         old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
@@ -2655,8 +2655,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
         new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
 
-        diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
-                                                     new_type_ref['id'])
+        diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
+                                                      new_type_ref['id'])
 
         volume = self._generate_vol_info(None, None)
         old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
@@ -2688,8 +2688,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
         new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
 
-        diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
-                                                     new_type_ref['id'])
+        diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
+                                                      new_type_ref['id'])
 
         volume = self._generate_vol_info(None, None)
         old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
@@ -3084,9 +3084,9 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         disable_type = self._create_replication_volume_type(False)
         enable_type = self._create_replication_volume_type(True)
 
-        diff, equal = volume_types.volume_types_diff(ctxt,
-                                                     disable_type['id'],
-                                                     enable_type['id'])
+        diff, _equal = volume_types.volume_types_diff(ctxt,
+                                                      disable_type['id'],
+                                                      enable_type['id'])
 
         volume = self._generate_vol_info(None, None)
         volume['host'] = host
@@ -3131,9 +3131,9 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         self.assertIsNone(model_update)
 
         enable_type = self._create_replication_volume_type(True)
-        diff, equal = volume_types.volume_types_diff(ctxt,
-                                                     None,
-                                                     enable_type['id'])
+        diff, _equal = volume_types.volume_types_diff(ctxt,
+                                                      None,
+                                                      enable_type['id'])
 
         # Enable replica
         self.driver.retype(ctxt, volume, enable_type, diff, host)
@@ -3245,8 +3245,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         the vdisk_UID parameter and returns it.
         Returns None if the specified vdisk does not exist.
         """
-        vdisk_properties, err = self.sim._cmd_lsvdisk(obj=vdisk_name,
-                                                      delim='!')
+        vdisk_properties, _err = self.sim._cmd_lsvdisk(obj=vdisk_name,
+                                                       delim='!')
 
         # Iterate through each row until we find the vdisk_UID entry
         for row in vdisk_properties.split('\n'):
@@ -3299,7 +3299,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
 
         # Create a volume as a way of getting a vdisk created, and find out the
         # UID of that vdisk.
-        volume, uid = self._create_volume_and_return_uid('manage_test')
+        _volume, uid = self._create_volume_and_return_uid('manage_test')
 
         # Descriptor of the Cinder volume that we want to own the vdisk
         # referenced by uid.
index f4693b4842ef52ae59fe12a9252eac588133d9f5..c6d334644271a1ef21e2f5959c3bb0f01fc0b508 100644 (file)
@@ -243,7 +243,7 @@ class FakeRequest(object):
         cg_name = self.url.split('/')[3]
         snap_name = params['display_name']
 
-        for (vol_name, params) in RUNTIME_VARS['volumes']:
+        for (_vol_name, params) in RUNTIME_VARS['volumes']:
             if params['cg-name'] == cg_name:
                 snapshots = params['snapshots']
                 if snap_name in snapshots:
@@ -258,7 +258,7 @@ class FakeRequest(object):
     def _delete_snapshot(self):
         snap = self.url.split('/')[3].split('.')[0]
 
-        for (vol_name, params) in RUNTIME_VARS['volumes']:
+        for (_vol_name, params) in RUNTIME_VARS['volumes']:
             if snap in params['snapshots']:
                 params['snapshots'].remove(snap)
                 return RUNTIME_VARS['good']
@@ -431,7 +431,7 @@ class FakeRequest(object):
                     <pool-name>pool-00000001</pool-name>
                 </snapshot>"""
 
-        for (vol_name, params) in RUNTIME_VARS['volumes']:
+        for (_vol_name, params) in RUNTIME_VARS['volumes']:
             if params['cg-name'] == cg_name:
                 snapshots = params['snapshots']
                 resp = header
index 35e16bfbf35a62802c2f77fc6d9aa750091b342f..95f5b4fc9e7636babc5d15a7d301728d8d447f38 100644 (file)
@@ -190,8 +190,8 @@ class BlockDeviceDriver(driver.ISCSIDriver):
         return used_devices
 
     def _get_device_size(self, dev_path):
-        out, err = self._execute('blockdev', '--getsz', dev_path,
-                                 run_as_root=True)
+        out, _err = self._execute('blockdev', '--getsz', dev_path,
+                                  run_as_root=True)
         size_in_m = int(out)
         return size_in_m / 2048
 
index f08498f54dd57e3d7ec2237beb6edc67a7a810d5..0e9c3b5df8b09ff9d5dbc1ec1be4b9825d978713 100644 (file)
@@ -385,7 +385,7 @@ class HUSDriver(driver.ISCSIDriver):
         info = _loc_info(prov_loc)
         (arid, lun) = info['id_lu']
         if 'tgt' in info.keys():  # connected?
-            (_portal, iqn, loc, ctl, port) = info['tgt']
+            (_portal, iqn, _loc, ctl, port) = info['tgt']
             self.bend.del_iscsi_conn(self.config['hus_cmd'],
                                      HDS_VERSION,
                                      self.config['mgmt_ip0'],
index c518e4e4b71fd6037b773479804adfc9602cde54..80b4e0a9c07952041674063922d89c1360bb8f6e 100644 (file)
@@ -422,7 +422,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
         for lun in api_luns:
             meta_dict = self._create_lun_meta(lun)
             path = lun.get_child_content('path')
-            (rest, splitter, name) = path.rpartition('/')
+            (_rest, _splitter, name) = path.rpartition('/')
             handle = self._create_lun_handle(meta_dict)
             size = lun.get_child_content('size')
             discovered_lun = NetAppLun(handle, name,
@@ -460,7 +460,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
             msg_fmt = {'code': code, 'message': message}
             exc_info = sys.exc_info()
             LOG.warn(msg % msg_fmt)
-            (igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator)
+            (_igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator)
             if lun_id is not None:
                 return lun_id
             else:
@@ -468,7 +468,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
 
     def _unmap_lun(self, path, initiator):
         """Unmaps a lun from given initiator."""
-        (igroup_name, lun_id) = self._find_mapped_lun_igroup(path, initiator)
+        (igroup_name, _lun_id) = self._find_mapped_lun_igroup(path, initiator)
         lun_unmap = NaElement.create_node_with_children(
             'lun-unmap',
             **{'path': path, 'initiator-group': igroup_name})
@@ -988,7 +988,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
         zbc = block_count
         if z_calls == 0:
             z_calls = 1
-        for call in range(0, z_calls):
+        for _call in range(0, z_calls):
             if zbc > z_limit:
                 block_count = z_limit
                 zbc -= z_limit
@@ -1003,7 +1003,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
                 block_ranges = NaElement("block-ranges")
                 segments = int(math.ceil(block_count / float(bc_limit)))
                 bc = block_count
-                for segment in range(0, segments):
+                for _segment in range(0, segments):
                     if bc > bc_limit:
                         block_count = bc_limit
                         bc -= bc_limit
@@ -1353,7 +1353,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
         """Clone LUN with the given handle to the new name."""
         metadata = self._get_lun_attr(name, 'metadata')
         path = metadata['Path']
-        (parent, splitter, name) = path.rpartition('/')
+        (parent, _splitter, name) = path.rpartition('/')
         clone_path = '%s/%s' % (parent, new_name)
         # zAPI can only handle 2^24 blocks per range
         bc_limit = 2 ** 24  # 8GB
@@ -1364,7 +1364,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
         zbc = block_count
         if z_calls == 0:
             z_calls = 1
-        for call in range(0, z_calls):
+        for _call in range(0, z_calls):
             if zbc > z_limit:
                 block_count = z_limit
                 zbc -= z_limit
@@ -1380,7 +1380,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
                 bc_limit = 2 ** 24  # 8GB
                 segments = int(math.ceil(block_count / float(bc_limit)))
                 bc = block_count
-                for segment in range(0, segments):
+                for _segment in range(0, segments):
                     if bc > bc_limit:
                         block_count = bc_limit
                         bc -= bc_limit
index 4e6131e6d90f4e604394153c11a3e36d9e5766e2..4140e49fcf562499f416783504f86cfe365e2fa7 100644 (file)
@@ -300,7 +300,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
                 self.configuration.thres_avl_size_perc_stop
             for share in getattr(self, '_mounted_shares', []):
                 try:
-                    total_size, total_avl, total_alc =\
+                    total_size, total_avl, _total_alc =\
                         self._get_capacity_info(share)
                     avl_percent = int((total_avl / total_size) * 100)
                     if avl_percent <= thres_size_perc_start:
@@ -636,7 +636,8 @@ class NetAppNFSDriver(nfs.NfsDriver):
 
     def _check_share_can_hold_size(self, share, size):
         """Checks if volume can hold image with size."""
-        tot_size, tot_available, tot_allocated = self._get_capacity_info(share)
+        _tot_size, tot_available, _tot_allocated = self._get_capacity_info(
+            share)
         if tot_available < size:
             msg = _("Container size smaller than required file size.")
             raise exception.VolumeDriverException(msg)
@@ -1415,7 +1416,7 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
     def _clone_volume(self, volume_name, clone_name,
                       volume_id, share=None):
         """Clones mounted volume with NetApp filer."""
-        (host_ip, export_path) = self._get_export_ip_path(volume_id, share)
+        (_host_ip, export_path) = self._get_export_ip_path(volume_id, share)
         storage_path = self._get_actual_path_for_export(export_path)
         target_path = '%s/%s' % (storage_path, clone_name)
         (clone_id, vol_uuid) = self._start_clone('%s/%s' % (storage_path,
index cc993e277ef1e4d6544d96d45560a64a64e5d6df..d2aef63cc2080929c2ef0d69d7d834d55139b4c9 100644 (file)
@@ -168,7 +168,7 @@ class NfsDriver(remotefs.RemoteFSDriver):
         for nfs_share in self._mounted_shares:
             if not self._is_share_eligible(nfs_share, volume_size_in_gib):
                 continue
-            total_size, total_available, total_allocated = \
+            _total_size, _total_available, total_allocated = \
                 self._get_capacity_info(nfs_share)
             if target_share is not None:
                 if target_share_reserved > total_allocated:
index a55c1f666683e5ed545916e7933a8002f4828790..2da6499f0d3fa2d5caaf9e352567f5aba5973b2b 100644 (file)
@@ -386,8 +386,8 @@ class RBDDriver(driver.VolumeDriver):
         """
         parent_volume = self.rbd.Image(client.ioctx, volume_name)
         try:
-            pool, parent, snap = self._get_clone_info(parent_volume,
-                                                      volume_name)
+            _pool, parent, _snap = self._get_clone_info(parent_volume,
+                                                        volume_name)
         finally:
             parent_volume.close()
 
@@ -440,8 +440,8 @@ class RBDDriver(driver.VolumeDriver):
             try:
                 # First flatten source volume if required.
                 if flatten_parent:
-                    pool, parent, snap = self._get_clone_info(src_volume,
-                                                              src_name)
+                    _pool, parent, snap = self._get_clone_info(src_volume,
+                                                               src_name)
                     # Flatten source volume
                     LOG.debug("flattening source volume %s" % (src_name))
                     src_volume.flatten()
@@ -639,9 +639,9 @@ class RBDDriver(driver.VolumeDriver):
                     raise exception.VolumeIsBusy(volume_name=volume_name)
 
                 # Determine if this volume is itself a clone
-                pool, parent, parent_snap = self._get_clone_info(rbd_image,
-                                                                 volume_name,
-                                                                 clone_snap)
+                _pool, parent, parent_snap = self._get_clone_info(rbd_image,
+                                                                  volume_name,
+                                                                  clone_snap)
             finally:
                 rbd_image.close()
 
@@ -780,7 +780,7 @@ class RBDDriver(driver.VolumeDriver):
         if image_location is None or not self._is_cloneable(
                 image_location, image_meta):
             return ({}, False)
-        prefix, pool, image, snapshot = self._parse_location(image_location)
+        _prefix, pool, image, snapshot = self._parse_location(image_location)
         self._clone(volume, pool, image, snapshot)
         self._resize(volume)
         return {'provider_location': None}, True
index b93e204fd85f6293c214df00705872a98bb05193..c3388d24645987b05501b83ed9be3c313bffb543 100644 (file)
@@ -1274,7 +1274,7 @@ class HP3PARCommon(object):
 
             type_id = volume.get('volume_type_id', None)
 
-            hp3par_keys, qos, volume_type, vvs_name = self.get_type_info(
+            hp3par_keys, qos, _volume_type, vvs_name = self.get_type_info(
                 type_id)
 
             name = volume.get('display_name', None)
@@ -1633,7 +1633,7 @@ class HP3PARCommon(object):
                            " to %(new_cpg)s") %
                          {'volume_name': volume_name,
                           'old_cpg': old_cpg, 'new_cpg': new_cpg})
-                response, body = self.client.modifyVolume(
+                _response, body = self.client.modifyVolume(
                     volume_name,
                     {'action': 6,
                      'tuneOperation': 1,
@@ -1696,7 +1696,7 @@ class HP3PARCommon(object):
             self.validate_persona(new_persona)
 
         if host is not None:
-            (host_type, host_id, host_cpg) = (
+            (host_type, host_id, _host_cpg) = (
                 host['capabilities']['location_info']).split(':')
 
             if not (host_type == 'HP3PARDriver'):
index 1c5adadac84646cad914ee2f7ea55d1d07a1e6a6..7d289604acf15de6f367c69fa2358a1cab3b354c 100644 (file)
@@ -258,7 +258,7 @@ class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver):
                 LOG.info(_("Need to remove FC Zone, building initiator "
                          "target map"))
 
-                target_wwns, init_targ_map, numPaths = \
+                target_wwns, init_targ_map, _numPaths = \
                     self._build_initiator_target_map(connector)
 
                 info['data'] = {'target_wwn': target_wwns,
@@ -296,7 +296,7 @@ class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver):
                     init_targ_map[initiator] += fabric['target_port_wwn_list']
                     init_targ_map[initiator] = list(set(
                         init_targ_map[initiator]))
-                    for target in init_targ_map[initiator]:
+                    for _target in init_targ_map[initiator]:
                         numPaths += 1
             target_wwns = list(set(target_wwns))
         else:
index e05796ee5b4132e1af757bfc20baf08d190d5e93..2af9c4ccfeb7283fe1233d18288dfb15d0d677ee 100644 (file)
@@ -55,7 +55,7 @@ class SheepdogDriver(driver.VolumeDriver):
             #NOTE(francois-charlier) Since 0.24 'collie cluster info -r'
             #  gives short output, but for compatibility reason we won't
             #  use it and just check if 'running' is in the output.
-            (out, err) = self._execute('collie', 'cluster', 'info')
+            (out, _err) = self._execute('collie', 'cluster', 'info')
             if 'status: running' not in out:
                 exception_message = (_("Sheepdog is not working: %s") % out)
                 raise exception.VolumeBackendAPIException(
index be56afb7c0ff3d3ea3276d5621974466790a7578..aa8abbfa6f87e85024faa224b6472393bd498e1e 100644 (file)
@@ -532,7 +532,7 @@ class SolidFireDriver(SanISCSIDriver):
 
     def create_cloned_volume(self, volume, src_vref):
         """Create a clone of an existing volume."""
-        (data, sfaccount, model) = self._do_clone_volume(
+        (_data, _sfaccount, model) = self._do_clone_volume(
             src_vref['id'],
             src_vref['project_id'],
             volume)
@@ -605,14 +605,14 @@ class SolidFireDriver(SanISCSIDriver):
         restore at which time we'll rework this appropriately.
 
         """
-        (data, sfaccount, model) = self._do_clone_volume(
+        (_data, _sfaccount, _model) = self._do_clone_volume(
             snapshot['volume_id'],
             snapshot['project_id'],
             snapshot)
 
     def create_volume_from_snapshot(self, volume, snapshot):
         """Create a volume from the specified snapshot."""
-        (data, sfaccount, model) = self._do_clone_volume(
+        (_data, _sfaccount, model) = self._do_clone_volume(
             snapshot['id'],
             snapshot['project_id'],
             volume)
index a43489b67c09fee7b400467456fa87a826c271ad..718d1914c7139028797b9fecc935d1027232e9e1 100644 (file)
@@ -148,7 +148,7 @@ class VMwareHTTPWriteFile(VMwareHTTPFile):
         param_list = {'dcPath': data_center_name, 'dsName': datastore_name}
         base_url = base_url + '?' + urllib.urlencode(param_list)
         _urlparse = urlparse.urlparse(base_url)
-        scheme, netloc, path, params, query, fragment = _urlparse
+        scheme, netloc, path, _params, query, _fragment = _urlparse
         if scheme == 'http':
             conn = httplib.HTTPConnection(netloc)
         elif scheme == 'https':
@@ -211,7 +211,7 @@ class VMwareHTTPWriteVmdk(VMwareHTTPFile):
         # Prepare the http connection to the vmdk url
         cookies = session.vim.client.options.transport.cookiejar
         _urlparse = urlparse.urlparse(url)
-        scheme, netloc, path, params, query, fragment = _urlparse
+        scheme, netloc, path, _params, query, _fragment = _urlparse
         if scheme == 'http':
             conn = httplib.HTTPConnection(netloc)
         elif scheme == 'https':
index a5c77efbed7bddf5c0783c29a1bf13353c810137..50f92080cb5b112abd902f6ed33c9ff49339c3d7 100644 (file)
@@ -1127,7 +1127,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
 
             if disk_conversion:
                 # Clone the temporary backing for disk type conversion.
-                (host, rp, folder, summary) = self._select_ds_for_volume(
+                (host, _rp, _folder, summary) = self._select_ds_for_volume(
                     volume)
                 datastore = summary.datastore
                 LOG.debug("Cloning temporary backing: %s for disk type "
@@ -1163,7 +1163,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         """
         try:
             # find host in which to create the volume
-            (host, rp, folder, summary) = self._select_ds_for_volume(volume)
+            (_host, rp, folder, summary) = self._select_ds_for_volume(volume)
         except error_util.VimException as excep:
             err_msg = (_("Exception in _select_ds_for_volume: "
                          "%s."), excep)
@@ -1646,7 +1646,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                   {'name': name,
                    'path': tmp_file_path})
 
-        (host, rp, folder, summary) = self._select_ds_for_volume(volume)
+        (_host, rp, folder, summary) = self._select_ds_for_volume(volume)
         LOG.debug("Selected datastore: %(ds)s for backing: %(name)s.",
                   {'ds': summary.name,
                    'name': name})
@@ -1708,7 +1708,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         renamed = False
         try:
             # Find datastore for clone.
-            (host, rp, folder, summary) = self._select_ds_for_volume(volume)
+            (_host, _rp, _folder, summary) = self._select_ds_for_volume(volume)
             datastore = summary.datastore
 
             disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
@@ -1981,7 +1981,7 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         datastore = None
         if not clone_type == volumeops.LINKED_CLONE_TYPE:
             # Pick a datastore where to create the full clone under any host
-            (host, rp, folder, summary) = self._select_ds_for_volume(volume)
+            (_host, _rp, _folder, summary) = self._select_ds_for_volume(volume)
             datastore = summary.datastore
         clone = self.volumeops.clone_backing(volume['name'], backing,
                                              snapshot, clone_type, datastore)
index c2385c20544f247a31479c82326fff5e1cf9ca81..4a10ea75bd98d6b6b4195ca9f7c51879730eff5a 100644 (file)
@@ -323,7 +323,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
 
     def _get_vpsa_volume_name(self, name):
         """Return VPSA's name for the volume."""
-        (vol_name, size) = self._get_vpsa_volume_name_and_size(name)
+        (vol_name, _size) = self._get_vpsa_volume_name_and_size(name)
         return vol_name
 
     def _get_volume_cg_name(self, name):
index a7021ded5863a45ede6227a91c438cd8b6e349da..b8ffa2ab308985e95487704b984db70ac6c11c66 100644 (file)
--- a/pylintrc
+++ b/pylintrc
@@ -29,3 +29,7 @@ no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
 max-public-methods=100
 min-public-methods=0
 max-args=6
+
+[Variables]
+
+dummy-variables-rgx=_