review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Replace xrange() with six.moves.range()
author: Victor Stinner <vstinner@redhat.com>
Mon, 25 May 2015 15:08:00 +0000 (17:08 +0200)
committer: Victor Stinner <vstinner@redhat.com>
Tue, 16 Jun 2015 08:46:40 +0000 (10:46 +0200)
Add "from six.moves import range" to replace the builtin range()
function with six.moves.range() to always create an iterator, instead of
creating a temporary list.

Replace "xrange" with "range".

Don't add the import for ranges of 1024 items or less.

Blueprint cinder-python3
Change-Id: If618b4e810e444f7eb6592bb2398805e9d14d548

24 files changed:
cinder/backup/drivers/ceph.py
cinder/common/sqlalchemyutils.py
cinder/image/glance.py
cinder/tests/unit/api/openstack/test_wsgi.py
cinder/tests/unit/api/v1/test_limits.py
cinder/tests/unit/api/v2/test_limits.py
cinder/tests/unit/api/v2/test_volumes.py
cinder/tests/unit/backup/drivers/test_backup_nfs.py
cinder/tests/unit/scheduler/test_chance_weigher.py
cinder/tests/unit/scheduler/test_host_manager.py
cinder/tests/unit/test_backup_ceph.py
cinder/tests/unit/test_backup_swift.py
cinder/tests/unit/test_db_api.py
cinder/tests/unit/test_utils.py
cinder/tests/unit/test_volume.py
cinder/volume/drivers/emc/emc_vnx_cli.py
cinder/volume/drivers/eqlx.py
cinder/volume/drivers/netapp/eseries/host_mapper.py
cinder/volume/drivers/srb.py
cinder/volume/drivers/violin/v6000_common.py
cinder/volume/drivers/violin/v6000_fcp.py
cinder/volume/drivers/violin/v6000_iscsi.py
cinder/volume/utils.py
doc/ext/cinder_todo.py

index 001c39a17ef497bc2f62d016bda8c1b6934b633b..62de904099a7bb688de8d89b996f0d2c3c47f13e 100644 (file)
@@ -54,6 +54,7 @@ from oslo_log import log as logging
 from oslo_utils import encodeutils
 from oslo_utils import excutils
 from oslo_utils import units
+from six.moves import range
 
 from cinder.backup import driver
 from cinder import exception
@@ -284,7 +285,7 @@ class CephBackupDriver(driver.BackupDriver):
             else:
                 zeroes = '\0' * length
                 chunks = int(length / self.chunk_size)
-                for chunk in xrange(0, chunks):
+                for chunk in range(0, chunks):
                     LOG.debug("Writing zeroes chunk %d", chunk)
                     volume.write(zeroes)
                     volume.flush()
@@ -306,7 +307,7 @@ class CephBackupDriver(driver.BackupDriver):
         LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred",
                   {'chunks': chunks, 'bytes': self.chunk_size})
 
-        for chunk in xrange(0, chunks):
+        for chunk in range(0, chunks):
             before = time.time()
             data = src.read(self.chunk_size)
             # If we have reach end of source, discard any extraneous bytes from
index 4ff6711d2bfddd9bd995a97e6683e2f68c9397da..4ff438ce365ac0e0e0190072d32475bf8689d8d4 100644 (file)
@@ -19,6 +19,7 @@
 """Implementation of paginate query."""
 
 from oslo_log import log as logging
+from six.moves import range
 import sqlalchemy
 
 from cinder import exception
@@ -100,9 +101,9 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
 
         # Build up an array of sort criteria as in the docstring
         criteria_list = []
-        for i in xrange(0, len(sort_keys)):
+        for i in range(0, len(sort_keys)):
             crit_attrs = []
-            for j in xrange(0, i):
+            for j in range(0, i):
                 model_attr = getattr(model, sort_keys[j])
                 crit_attrs.append((model_attr == marker_values[j]))
 
index 6825b1e6ec942e328cb81474015598314a891318..893e1ab3c88ebd0d2d250e16b628ca16580001fc 100644 (file)
@@ -32,6 +32,7 @@ from oslo_log import log as logging
 from oslo_serialization import jsonutils
 from oslo_utils import timeutils
 import six
+from six.moves import range
 from six.moves import urllib
 
 from cinder import exception
@@ -169,7 +170,7 @@ class GlanceClientWrapper(object):
                       glanceclient.exc.CommunicationError)
         num_attempts = 1 + CONF.glance_num_retries
 
-        for attempt in xrange(1, num_attempts + 1):
+        for attempt in range(1, num_attempts + 1):
             client = self.client or self._create_onetime_client(context,
                                                                 version)
             try:
index 82f49ea78245699ed913cbfcc69b0f3632083530..bcf52a5eee919e1cdbece954cce8e9526882b07e 100644 (file)
@@ -114,7 +114,7 @@ class RequestTest(test.TestCase):
         self.assertIsNone(request.cached_resource_by_id('r-0'))
 
         resources = []
-        for x in xrange(3):
+        for x in range(3):
             resources.append({'id': 'r-%s' % x})
 
         # Cache an empty list of resources using the default name
@@ -161,7 +161,7 @@ class RequestTest(test.TestCase):
 
         r = wsgi.Request.blank('/foo')
         resources = []
-        for x in xrange(3):
+        for x in range(3):
             resources.append({'id': 'id%s' % x})
 
         # Store 2
index 9d0f31c334eec7ea5fe79c4c0f757f82c97c4b17..d87cb424fc5f524a454fa8beb2761a15a1c2d014 100644 (file)
@@ -23,6 +23,7 @@ from lxml import etree
 from oslo_serialization import jsonutils
 import six
 from six.moves import http_client
+from six.moves import range
 import webob
 
 from cinder.api.v1 import limits
@@ -401,7 +402,7 @@ class LimiterTest(BaseLimitTestSuite):
 
     def _check(self, num, verb, url, username=None):
         """Check and yield results from checks."""
-        for x in xrange(num):
+        for x in range(num):
             yield self.limiter.check_for_delay(verb, url, username)[0]
 
     def _check_sum(self, num, verb, url, username=None):
index 9d529107992fde62f801cc85f41008b2418b1ba6..9f3a02755292d5134b9ae1198388921b5a637529 100644 (file)
@@ -23,6 +23,7 @@ from lxml import etree
 from oslo_serialization import jsonutils
 import six
 from six.moves import http_client
+from six.moves import range
 import webob
 
 from cinder.api.v2 import limits
@@ -406,7 +407,7 @@ class LimiterTest(BaseLimitTestSuite):
 
     def _check(self, num, verb, url, username=None):
         """Check and yield results from checks."""
-        for x in xrange(num):
+        for x in range(num):
             yield self.limiter.check_for_delay(verb, url, username)[0]
 
     def _check_sum(self, num, verb, url, username=None):
index 941300667c7eaf0d8911bc5502b3305184685f3f..bf675d2d7e27fa17136649b4e1bbe3661010a3a1 100644 (file)
@@ -21,6 +21,7 @@ import mock
 from oslo_config import cfg
 from oslo_utils import timeutils
 import six
+from six.moves import range
 from six.moves import urllib
 import webob
 
@@ -1118,7 +1119,7 @@ class VolumeApiTest(test.TestCase):
                                 filters=None,
                                 viewable_admin_meta=False):
             vols = [stubs.stub_volume(i)
-                    for i in xrange(CONF.osapi_max_limit)]
+                    for i in range(CONF.osapi_max_limit)]
             if limit is None or limit >= len(vols):
                 return vols
             return vols[:limit]
@@ -1136,7 +1137,7 @@ class VolumeApiTest(test.TestCase):
                                  filters=None,
                                  viewable_admin_meta=False):
             vols = [stubs.stub_volume(i)
-                    for i in xrange(100)]
+                    for i in range(100)]
             if limit is None or limit >= len(vols):
                 return vols
             return vols[:limit]
@@ -1154,7 +1155,7 @@ class VolumeApiTest(test.TestCase):
                                  filters=None,
                                  viewable_admin_meta=False):
             vols = [stubs.stub_volume(i)
-                    for i in xrange(CONF.osapi_max_limit + 100)]
+                    for i in range(CONF.osapi_max_limit + 100)]
             if limit is None or limit >= len(vols):
                 return vols
             return vols[:limit]
index 183dd59f9e81e0ed8dc8131c00ee51df0478de46..8c340fca0a08526a86dba56a359b6d1993235fcb 100644 (file)
@@ -289,7 +289,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
                          mock.Mock(return_value=mock_remotefsclient))
         # Remove tempdir.
         self.addCleanup(shutil.rmtree, self.temp_dir)
-        for _i in xrange(0, 128):
+        for _i in range(0, 128):
             self.volume_file.write(os.urandom(1024))
 
     def test_backup_uncompressed(self):
index 6c47daab06a254a9c44eaa6341479bc663459926..e80a66d3f09966505830e944c6b5f9e64c9e659e 100644 (file)
@@ -62,6 +62,6 @@ class ChanceWeigherTestCase(test.TestCase):
         # the ChanceWeigher
         hm = host_manager.HostManager()
         fake_hosts = [host_manager.HostState('fake_host%s' % x)
-                      for x in xrange(1, 5)]
+                      for x in range(1, 5)]
         weighed_hosts = hm.get_weighed_hosts(fake_hosts, {}, 'ChanceWeigher')
         self.assertEqual(4, len(weighed_hosts))
index 101151b7afc22d2bd1b3c98fad494faaebb6061e..61284590e8bd9558c0c0b7d4af3baa1ad548dfe1 100644 (file)
@@ -46,7 +46,7 @@ class HostManagerTestCase(test.TestCase):
         super(HostManagerTestCase, self).setUp()
         self.host_manager = host_manager.HostManager()
         self.fake_hosts = [host_manager.HostState('fake_host%s' % x)
-                           for x in xrange(1, 5)]
+                           for x in range(1, 5)]
 
     def test_choose_host_filters_not_found(self):
         self.flags(scheduler_default_filters='FakeFilterClass3')
@@ -254,7 +254,7 @@ class HostManagerTestCase(test.TestCase):
         # Get host_state_map and make sure we have the first 4 hosts
         host_state_map = self.host_manager.host_state_map
         self.assertEqual(len(host_state_map), 3)
-        for i in xrange(3):
+        for i in range(3):
             volume_node = services[i]
             host = volume_node['host']
             self.assertEqual(host_state_map[host].service, volume_node)
@@ -280,7 +280,7 @@ class HostManagerTestCase(test.TestCase):
         # down, host4 is missing capabilities)
         host_state_map = self.host_manager.host_state_map
         self.assertEqual(len(host_state_map), 2)
-        for i in xrange(2):
+        for i in range(2):
             volume_node = services[i]
             host = volume_node['host']
             self.assertEqual(host_state_map[host].service,
index f65f3a48780a02977dcaba94de5fd56432d24a8f..5e8e83d4f39fcebd6392fb3b5244dc8973e95249 100644 (file)
@@ -24,6 +24,7 @@ from oslo_concurrency import processutils
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
 import six
+from six.moves import range
 
 from cinder.backup import driver
 from cinder.backup.drivers import ceph
@@ -171,7 +172,7 @@ class BackupCephTestCase(test.TestCase):
         # Create a file with some data in it.
         self.volume_file = tempfile.NamedTemporaryFile()
         self.addCleanup(self.volume_file.close)
-        for _i in xrange(0, self.num_chunks):
+        for _i in range(0, self.num_chunks):
             data = os.urandom(self.chunk_size)
             self.checksum.update(data)
             self.volume_file.write(data)
@@ -285,7 +286,7 @@ class BackupCephTestCase(test.TestCase):
 
             checksum = hashlib.sha256()
             test_file.seek(0)
-            for _c in xrange(0, self.num_chunks):
+            for _c in range(0, self.num_chunks):
                 checksum.update(test_file.read(self.chunk_size))
 
             # Ensure the files are equal
@@ -350,7 +351,7 @@ class BackupCephTestCase(test.TestCase):
 
             checksum = hashlib.sha256()
             test_file.seek(0)
-            for _c in xrange(0, self.num_chunks):
+            for _c in range(0, self.num_chunks):
                 checksum.update(test_file.read(self.chunk_size))
 
             # Ensure the files are equal
@@ -627,7 +628,7 @@ class BackupCephTestCase(test.TestCase):
 
                     checksum = hashlib.sha256()
                     test_file.seek(0)
-                    for _c in xrange(0, self.num_chunks):
+                    for _c in range(0, self.num_chunks):
                         checksum.update(test_file.read(self.chunk_size))
 
                     # Ensure the files are equal
index 3f0afe3b1599ba3584c36025b2d9a14804995764..01840f8e5dbb438327a9313683960b10a7df3cec 100644 (file)
@@ -90,7 +90,7 @@ class BackupSwiftTestCase(test.TestCase):
         self.addCleanup(self.volume_file.close)
         # Remove tempdir.
         self.addCleanup(shutil.rmtree, self.temp_dir)
-        for _i in xrange(0, 128):
+        for _i in range(0, 128):
             self.volume_file.write(os.urandom(1024))
 
     def test_backup_swift_url(self):
index 2fc79fb2c14cee9b3d7d91610934488a45ff4000..74cb105b7e57d117c830c500bce8749686cf2fac 100644 (file)
@@ -268,34 +268,34 @@ class DBAPIVolumeTestCase(BaseTest):
         self.assertEqual(attachment['attached_host'], host_name)
 
     def test_volume_data_get_for_host(self):
-        for i in xrange(THREE):
-            for j in xrange(THREE):
+        for i in range(THREE):
+            for j in range(THREE):
                 db.volume_create(self.ctxt, {'host': 'h%d' % i,
                                              'size': ONE_HUNDREDS})
-        for i in xrange(THREE):
+        for i in range(THREE):
             self.assertEqual((THREE, THREE_HUNDREDS),
                              db.volume_data_get_for_host(
                                  self.ctxt, 'h%d' % i))
 
     def test_volume_data_get_for_host_for_multi_backend(self):
-        for i in xrange(THREE):
-            for j in xrange(THREE):
+        for i in range(THREE):
+            for j in range(THREE):
                 db.volume_create(self.ctxt, {'host':
                                              'h%d@lvmdriver-1#lvmdriver-1' % i,
                                              'size': ONE_HUNDREDS})
-        for i in xrange(THREE):
+        for i in range(THREE):
             self.assertEqual((THREE, THREE_HUNDREDS),
                              db.volume_data_get_for_host(
                                  self.ctxt, 'h%d@lvmdriver-1' % i))
 
     def test_volume_data_get_for_project(self):
-        for i in xrange(THREE):
-            for j in xrange(THREE):
+        for i in range(THREE):
+            for j in range(THREE):
                 db.volume_create(self.ctxt, {'project_id': 'p%d' % i,
                                              'size': ONE_HUNDREDS,
                                              'host': 'h-%d-%d' % (i, j),
                                              })
-        for i in xrange(THREE):
+        for i in range(THREE):
             self.assertEqual((THREE, THREE_HUNDREDS),
                              db.volume_data_get_for_project(
                                  self.ctxt, 'p%d' % i))
@@ -349,7 +349,7 @@ class DBAPIVolumeTestCase(BaseTest):
     def test_volume_get_all(self):
         volumes = [db.volume_create(self.ctxt,
                    {'host': 'h%d' % i, 'size': i})
-                   for i in xrange(3)]
+                   for i in range(3)]
         self._assertEqualListsOfObjects(volumes, db.volume_get_all(
                                         self.ctxt, None, None, ['host'], None))
 
@@ -366,10 +366,10 @@ class DBAPIVolumeTestCase(BaseTest):
 
     def test_volume_get_all_by_host(self):
         volumes = []
-        for i in xrange(3):
+        for i in range(3):
             volumes.append([db.volume_create(self.ctxt, {'host': 'h%d' % i})
-                            for j in xrange(3)])
-        for i in xrange(3):
+                            for j in range(3)])
+        for i in range(3):
             self._assertEqualListsOfObjects(volumes[i],
                                             db.volume_get_all_by_host(
                                             self.ctxt, 'h%d' % i))
@@ -377,7 +377,7 @@ class DBAPIVolumeTestCase(BaseTest):
     def test_volume_get_all_by_host_with_pools(self):
         volumes = []
         vol_on_host_wo_pool = [db.volume_create(self.ctxt, {'host': 'foo'})
-                               for j in xrange(3)]
+                               for j in range(3)]
         vol_on_host_w_pool = [db.volume_create(
             self.ctxt, {'host': 'foo#pool0'})]
         volumes.append((vol_on_host_wo_pool +
@@ -424,10 +424,10 @@ class DBAPIVolumeTestCase(BaseTest):
 
     def test_volume_get_all_by_group(self):
         volumes = []
-        for i in xrange(3):
+        for i in range(3):
             volumes.append([db.volume_create(self.ctxt, {
-                'consistencygroup_id': 'g%d' % i}) for j in xrange(3)])
-        for i in xrange(3):
+                'consistencygroup_id': 'g%d' % i}) for j in range(3)])
+        for i in range(3):
             self._assertEqualListsOfObjects(volumes[i],
                                             db.volume_get_all_by_group(
                                             self.ctxt, 'g%d' % i))
@@ -462,10 +462,10 @@ class DBAPIVolumeTestCase(BaseTest):
 
     def test_volume_get_all_by_project(self):
         volumes = []
-        for i in xrange(3):
+        for i in range(3):
             volumes.append([db.volume_create(self.ctxt, {
-                'project_id': 'p%d' % i}) for j in xrange(3)])
-        for i in xrange(3):
+                'project_id': 'p%d' % i}) for j in range(3)])
+        for i in range(3):
             self._assertEqualListsOfObjects(volumes[i],
                                             db.volume_get_all_by_project(
                                             self.ctxt, 'p%d' % i, None,
@@ -567,21 +567,21 @@ class DBAPIVolumeTestCase(BaseTest):
                                       {'project_id': 'g1',
                                        'display_name': 'name_%d' % i,
                                        'size': 1})
-                     for i in xrange(2)])
+                     for i in range(2)])
         vols.extend([db.volume_create(self.ctxt,
                                       {'project_id': 'g1',
                                        'display_name': 'name_%d' % i,
                                        'size': 2})
-                     for i in xrange(2)])
+                     for i in range(2)])
         vols.extend([db.volume_create(self.ctxt,
                                       {'project_id': 'g1',
                                        'display_name': 'name_%d' % i})
-                     for i in xrange(2)])
+                     for i in range(2)])
         vols.extend([db.volume_create(self.ctxt,
                                       {'project_id': 'g2',
                                        'display_name': 'name_%d' % i,
                                        'size': 1})
-                     for i in xrange(2)])
+                     for i in range(2)])
 
         # By project, filter on size and name
         filters = {'size': '1'}
index 4631a723b9f7d10574457331ee6838027e8d0444..ecb8d7c384c0939abacb24dd7ea1aad1ca204004 100644 (file)
@@ -25,6 +25,7 @@ from oslo_config import cfg
 from oslo_utils import timeutils
 import paramiko
 import six
+from six.moves import range
 
 import cinder
 from cinder import exception
@@ -1479,7 +1480,7 @@ class TestRetryDecorator(test.TestCase):
 
             expected_sleep_arg = []
 
-            for i in xrange(retries):
+            for i in range(retries):
                 if i > 0:
                     interval *= backoff_rate
                     expected_sleep_arg.append(float(interval))
index 7b7fedfb91734065b0c6b54be8d4756ad2455a57..651a8174b7567d67e37324acaaff1456c7772304 100644 (file)
@@ -2790,7 +2790,7 @@ class VolumeTestCase(BaseVolumeTestCase):
 
         # FIXME(jdg): What is this actually testing?
         # We never call the internal _check method?
-        for _index in xrange(100):
+        for _index in range(100):
             tests_utils.create_volume(self.context, **self.volume_params)
         for volume_id in volume_ids:
             self.volume.delete_volume(self.context, volume_id)
@@ -5832,7 +5832,7 @@ class ISCSITestCase(DriverTestCase):
     def _attach_volume(self):
         """Attach volumes to an instance."""
         volume_id_list = []
-        for index in xrange(3):
+        for index in range(3):
             vol = {}
             vol['size'] = 0
             vol_ref = db.volume_create(self.context, vol)
index 88dfa60e7b7b1184ed998f55628e2480dfc502c8..f9702101bcfcca52faccd425ecfa5521d32d2776 100644 (file)
@@ -31,6 +31,7 @@ from oslo_serialization import jsonutils as json
 from oslo_utils import excutils
 from oslo_utils import timeutils
 import six
+from six.moves import range
 import taskflow.engines
 from taskflow.patterns import linear_flow
 from taskflow import task
@@ -1694,7 +1695,7 @@ class EMCVnxCliBase(object):
             LOG.info(_LI("initiator_auto_registration: False. "
                          "Initiator auto registration is not enabled. "
                          "Please register initiator manually."))
-        self.hlu_set = set(xrange(1, self.max_luns_per_sg + 1))
+        self.hlu_set = set(range(1, self.max_luns_per_sg + 1))
         self._client = CommandLineHelper(self.configuration)
         conf_pools = self.configuration.safe_get("storage_vnx_pool_names")
         self.storage_pools = self._get_managed_storage_pools(conf_pools)
index 3a5843ea51d1273c3e27d77cacbb61d32d9836cc..622982bda2aa5daa540806be2003f9be347e2b1d 100644 (file)
@@ -25,6 +25,7 @@ from oslo_concurrency import processutils
 from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import excutils
+from six.moves import range
 
 from cinder import exception
 from cinder.i18n import _, _LE, _LW, _LI
@@ -344,7 +345,7 @@ class DellEQLSanISCSIDriver(san.SanISCSIDriver):
         """
         lines = [line for line in out if line != '']
         # Every record has 2 lines
-        for i in xrange(0, len(lines), 2):
+        for i in range(0, len(lines), 2):
             try:
                 int(lines[i][0])
                 # sanity check
index e464da451088daea6f8595512c90759f31d0d059..507531fa05f7249fb0b953b8c774f12e83b388f4 100644 (file)
@@ -21,6 +21,7 @@ import collections
 import random
 
 from oslo_log import log as logging
+from six.moves import range
 
 from cinder import exception
 from cinder.i18n import _
index 1ef094959e8e95fb626413185e8b9e718a430563..30305a677ea3cde9d9e144eb87c387f9f9bd4f08 100644 (file)
@@ -32,6 +32,7 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 from oslo_utils import units
 import six
+from six.moves import range
 
 from cinder.brick.local_dev import lvm
 from cinder import exception
@@ -84,7 +85,7 @@ class retry(object):
             sleep_time = self._sleep_factor
             exc_info = None
 
-            for attempt in xrange(self._count):
+            for attempt in range(self._count):
                 if attempt != 0:
                     LOG.warning(_LW('Retrying failed call to %(func)s, '
                                     'attempt %(attempt)i.'),
index d308e9190b40e29f3115f9eae91a00f1e99890fb..c14dbfd9a5a4f8c79fc0b0515051b191da84dcfe 100644 (file)
@@ -536,7 +536,7 @@ class V6000Common(object):
             LOG.debug("Entering _wait_for_export_config loop: state=%s.",
                       state)
 
-            for node_id in xrange(2):
+            for node_id in range(2):
                 resp = mg_conns[node_id].basic.get_node_values(bn)
                 if state and len(resp.keys()):
                     status[node_id] = True
index cf4dc9cf9e5055c3f82795ee86d106edaec24ae3..1d9ecb1c0b0f1cb4304c8a670747586f7760b76d 100644 (file)
@@ -37,6 +37,7 @@ driver documentation for more information.
 
 from oslo_log import log as logging
 from oslo_utils import units
+from six.moves import range
 
 from cinder import context
 from cinder.db.sqlalchemy import models
@@ -505,7 +506,7 @@ class V6000FCDriver(driver.FibreChannelDriver):
         output = []
         for w in wwns:
             output.append('wwn.{0}'.format(
-                ':'.join(w[x:x + 2] for x in xrange(0, len(w), 2))))
+                ':'.join(w[x:x + 2] for x in range(0, len(w), 2))))
         return output
 
     def _convert_wwns_vmem_to_openstack(self, wwns):
index c711417e4d7bb8eb168c8953d43928ec00d5f92c..908d2ae309bc0aacf6b51ebfef29619e5a861f4c 100644 (file)
@@ -588,7 +588,7 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
             LOG.debug("Entering _wait_for_targetstate loop: target=%s.",
                       target_name)
 
-            for node_id in xrange(2):
+            for node_id in range(2):
                 resp = mg_conns[node_id].basic.get_node_values(bn)
                 if len(resp.keys()):
                     status[node_id] = True
index aa14e7fc99e71f58d743be835df76596d86fda3d..dbfa4ee2a7b630d873b8900fcef15270b0d36793 100644 (file)
@@ -24,6 +24,7 @@ from oslo_log import log as logging
 from oslo_utils import strutils
 from oslo_utils import timeutils
 from oslo_utils import units
+from six.moves import range
 
 from cinder.brick.local_dev import lvm as brick_lvm
 from cinder import db
@@ -443,7 +444,7 @@ def generate_password(length=16, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
 
     # then fill with random characters from all symbol groups
     symbols = ''.join(symbolgroups)
-    password.extend([random.choice(symbols) for _i in xrange(length)])
+    password.extend([random.choice(symbols) for _i in range(length)])
 
     # finally shuffle to ensure first x characters aren't from a
     # predictable group
index af2cfda56142d17c7a36163d044fd851debeabf2..bdd64532a3e761935a5353b915867f7788fafa71 100644 (file)
@@ -24,7 +24,7 @@ def process_todo_nodes(app, doctree, fromdocname):
     # remove the item that was added in the constructor, since I'm tired of
     # reading through docutils for the proper way to construct an empty list
     lists = []
-    for i in xrange(5):
+    for i in range(5):
         lists.append(nodes.bullet_list("", nodes.Text('', '')))
         lists[i].remove(lists[i][0])
         lists[i]['classes'].append('todo_list')