]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Fix bad-indentation pylint issues
authorRajesh Tailor <rajesh.tailor@nttdata.com>
Mon, 8 Jun 2015 11:52:49 +0000 (04:52 -0700)
committerRajesh Tailor <rajesh.tailor@nttdata.com>
Mon, 28 Sep 2015 06:04:26 +0000 (23:04 -0700)
Fixed bad indentation (pylint warning W0311) issues in the cinder project.

Closes-Bug: 1462992
Change-Id: Idf3c036b1826ed5c0efa2e13ddf289e6880323a6

24 files changed:
cinder/api/contrib/quotas.py
cinder/backup/manager.py
cinder/consistencygroup/api.py
cinder/exception.py
cinder/tests/unit/api/contrib/test_volume_type_access.py
cinder/tests/unit/test_glusterfs.py
cinder/tests/unit/test_gpfs.py
cinder/tests/unit/test_rbd.py
cinder/tests/unit/test_utils.py
cinder/tests/unit/test_vmware_volumeops.py
cinder/volume/drivers/datera.py
cinder/volume/drivers/emc/emc_vmax_common.py
cinder/volume/drivers/hitachi/hnas_backend.py
cinder/volume/drivers/huawei/smartx.py
cinder/volume/drivers/netapp/dataontap/block_7mode.py
cinder/volume/drivers/netapp/dataontap/nfs_7mode.py
cinder/volume/drivers/netapp/utils.py
cinder/volume/drivers/san/hp/hp_3par_common.py
cinder/volume/drivers/scality.py
cinder/volume/drivers/vmware/vmdk.py
cinder/volume/drivers/zfssa/zfssaiscsi.py
cinder/volume/flows/manager/create_volume.py
cinder/volume/manager.py
cinder/volume/targets/tgt.py

index c6614071bd2e9512981c5ba8855e3bb467db856e..ff4faee39f74d03bb75aab95bee0a70b72287e3c 100644 (file)
@@ -390,11 +390,11 @@ class QuotaSetsController(wsgi.Controller):
         # If the project which is being deleted has allocated part of its quota
         # to its subprojects, then subprojects' quotas should be deleted first.
         for key, value in project_quotas.items():
-                if 'allocated' in project_quotas[key].keys():
-                    if project_quotas[key]['allocated'] != 0:
-                        msg = _("About to delete child projects having "
-                                "non-zero quota. This should not be performed")
-                        raise webob.exc.HTTPBadRequest(explanation=msg)
+            if 'allocated' in project_quotas[key].keys():
+                if project_quotas[key]['allocated'] != 0:
+                    msg = _("About to delete child projects having "
+                            "non-zero quota. This should not be performed")
+                    raise webob.exc.HTTPBadRequest(explanation=msg)
 
         if parent_id:
             # Get the children of the project which the token is scoped to in
index 47ee1d7bc30b0296cde62384dedf38d3798b2e4e..ca238583a6f98b938c96a0e3aaf954166c0b78a4 100644 (file)
@@ -281,14 +281,14 @@ class BackupManager(manager.SchedulerDependentManager):
         for attachment in attachments:
             if (attachment['attached_host'] == self.host and
                     attachment['instance_uuid'] is None):
-                        try:
-                            mgr.detach_volume(ctxt, volume['id'],
-                                              attachment['id'])
-                        except Exception:
-                            LOG.exception(_LE("Detach attachment %(attach_id)s"
-                                              " failed."),
-                                          {'attach_id': attachment['id']},
-                                          resource=volume)
+                try:
+                    mgr.detach_volume(ctxt, volume['id'],
+                                      attachment['id'])
+                except Exception:
+                    LOG.exception(_LE("Detach attachment %(attach_id)s"
+                                      " failed."),
+                                  {'attach_id': attachment['id']},
+                                  resource=volume)
 
     def _cleanup_temp_volumes_snapshots_for_one_backup(self, ctxt, backup):
         # NOTE(xyang): If the service crashes or gets restarted during the
index ebf4d8cc85f236508e55a1ccea7d41fc0c553b10..5df29bf42d38a84f3d3fdd77561bf884a25aaa6b 100644 (file)
@@ -531,11 +531,11 @@ class API(base.Base):
 
         if (not name and not description and not add_volumes_new and
                 not remove_volumes_new):
-                msg = (_("Cannot update consistency group %(group_id)s "
-                         "because no valid name, description, add_volumes, "
-                         "or remove_volumes were provided.") %
-                       {'group_id': group.id})
-                raise exception.InvalidConsistencyGroup(reason=msg)
+            msg = (_("Cannot update consistency group %(group_id)s "
+                     "because no valid name, description, add_volumes, "
+                     "or remove_volumes were provided.") %
+                   {'group_id': group.id})
+            raise exception.InvalidConsistencyGroup(reason=msg)
 
         fields = {'updated_at': timeutils.utcnow()}
 
index 5e0715e0e21e3a905883d7af16e3ef2d7ab307e8..adaa7554000d4e7f66ac4d1235b49d82cadd7823 100644 (file)
@@ -932,9 +932,9 @@ class ViolinBackendErrNotFound(CinderException):
 
 # ZFSSA NFS driver exception.
 class WebDAVClientError(CinderException):
-        message = _("The WebDAV request failed. Reason: %(msg)s, "
-                    "Return code/reason: %(code)s, Source Volume: %(src)s, "
-                    "Destination Volume: %(dst)s, Method: %(method)s.")
+    message = _("The WebDAV request failed. Reason: %(msg)s, "
+                "Return code/reason: %(code)s, Source Volume: %(src)s, "
+                "Destination Volume: %(dst)s, Method: %(method)s.")
 
 
 # XtremIO Drivers
index 270686868183d1f766c17562ac366d80de19b6de..14d265ccb70b0db297770222fdc298d9ce44d9fe 100644 (file)
@@ -63,7 +63,7 @@ def _has_type_access(type_id, project_id):
     for access in ACCESS_LIST:
         if access['volume_type_id'] == type_id and \
            access['project_id'] == project_id:
-                return True
+            return True
     return False
 
 
index 510a7cbbdb469f971cb785091e940a2b85b029dc..c24d19d95ffd19f58ec80dc23ffb389cdcb056f9 100644 (file)
@@ -874,7 +874,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         drv = self._driver
 
         with mock.patch.object(drv, '_execute') as mock_execute,\
-                mock.patch.object(drv, '_ensure_share_mounted') as \
+            mock.patch.object(drv, '_ensure_share_mounted') as \
                 mock_ensure_share_mounted:
             volume = DumbVolume()
             volume['id'] = self.VOLUME_UUID
@@ -890,23 +890,23 @@ class GlusterFsDriverTestCase(test.TestCase):
         drv = self._driver
 
         with mock.patch.object(drv, '_read_file') as mock_read_file:
-                hashed = drv._get_hash_str(self.TEST_EXPORT1)
-                volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
-                                                   hashed,
-                                                   self.VOLUME_UUID)
-                info_path = '%s%s' % (volume_path, '.info')
+            hashed = drv._get_hash_str(self.TEST_EXPORT1)
+            volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
+                                               hashed,
+                                               self.VOLUME_UUID)
+            info_path = '%s%s' % (volume_path, '.info')
 
-                mock_read_file.return_value = '{"%(id)s": "volume-%(id)s"}' %\
-                    {'id': self.VOLUME_UUID}
+            mock_read_file.return_value = '{"%(id)s": "volume-%(id)s"}' %\
+                {'id': self.VOLUME_UUID}
 
-                volume = DumbVolume()
-                volume['id'] = self.VOLUME_UUID
-                volume['name'] = 'volume-%s' % self.VOLUME_UUID
+            volume = DumbVolume()
+            volume['id'] = self.VOLUME_UUID
+            volume['name'] = 'volume-%s' % self.VOLUME_UUID
 
-                info = drv._read_info_file(info_path)
+            info = drv._read_info_file(info_path)
 
-                self.assertEqual('volume-%s' % self.VOLUME_UUID,
-                                 info[self.VOLUME_UUID])
+            self.assertEqual('volume-%s' % self.VOLUME_UUID,
+                             info[self.VOLUME_UUID])
 
     def test_extend_volume(self):
         drv = self._driver
index 23698e25ed335e686ed450c10d1b949223afa917..e47ef0ead7597a8a2f9dea148b3b20da0a4caa21 100644 (file)
@@ -83,11 +83,11 @@ class GPFSDriverTestCase(test.TestCase):
         CONF.gpfs_images_dir = self.images_dir
 
     def _cleanup(self, images_dir, volumes_path):
-            try:
-                os.rmdir(images_dir)
-                os.rmdir(volumes_path)
-            except OSError:
-                pass
+        try:
+            os.rmdir(images_dir)
+            os.rmdir(volumes_path)
+        except OSError:
+            pass
 
     def test_different(self):
         self.assertTrue(gpfs._different((True, False)))
index dbcd6fa121b8b855845c9a235a8511266164b058..a3d2b68e1d4eb51c7d041be0889ba34ec491372a 100644 (file)
@@ -1236,27 +1236,27 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
             mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
             mock.patch.object(self.volume.driver, '_resize') \
                 as mock_resize:
-                mock_is_cloneable.side_effect = cloneable_side_effect
-                image_loc = ('rbd://bee/bi/bo/bum',
-                             [{'url': 'rbd://bee/bi/bo/bum'},
-                              {'url': 'rbd://fee/fi/fo/fum'}])
-                volume = {'name': 'vol1'}
-                image_meta = mock.sentinel.image_meta
-                image_service = mock.sentinel.image_service
-
-                actual = driver.clone_image(self.context,
-                                            volume,
-                                            image_loc,
-                                            image_meta,
-                                            image_service)
-
-                self.assertEqual(expected, actual)
-                self.assertEqual(2, mock_is_cloneable.call_count)
-                mock_clone.assert_called_once_with(volume,
-                                                   'fi', 'fo', 'fum')
-                mock_is_cloneable.assert_called_with('rbd://fee/fi/fo/fum',
-                                                     image_meta)
-                mock_resize.assert_called_once_with(volume)
+            mock_is_cloneable.side_effect = cloneable_side_effect
+            image_loc = ('rbd://bee/bi/bo/bum',
+                         [{'url': 'rbd://bee/bi/bo/bum'},
+                          {'url': 'rbd://fee/fi/fo/fum'}])
+            volume = {'name': 'vol1'}
+            image_meta = mock.sentinel.image_meta
+            image_service = mock.sentinel.image_service
+
+            actual = driver.clone_image(self.context,
+                                        volume,
+                                        image_loc,
+                                        image_meta,
+                                        image_service)
+
+            self.assertEqual(expected, actual)
+            self.assertEqual(2, mock_is_cloneable.call_count)
+            mock_clone.assert_called_once_with(volume,
+                                               'fi', 'fo', 'fum')
+            mock_is_cloneable.assert_called_with('rbd://fee/fi/fo/fum',
+                                                 image_meta)
+            mock_resize.assert_called_once_with(volume)
 
     def test_clone_multilocation_failure(self):
         expected = ({}, False)
@@ -1267,24 +1267,24 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
             mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
             mock.patch.object(self.volume.driver, '_resize') \
                 as mock_resize:
-                image_loc = ('rbd://bee/bi/bo/bum',
-                             [{'url': 'rbd://bee/bi/bo/bum'},
-                              {'url': 'rbd://fee/fi/fo/fum'}])
-
-                volume = {'name': 'vol1'}
-                image_meta = mock.sentinel.image_meta
-                image_service = mock.sentinel.image_service
-                actual = driver.clone_image(self.context,
-                                            volume,
-                                            image_loc,
-                                            image_meta,
-                                            image_service)
-
-                self.assertEqual(expected, actual)
-                self.assertEqual(2, mock_is_cloneable.call_count)
-                mock_is_cloneable.assert_any_call('rbd://bee/bi/bo/bum',
-                                                  image_meta)
-                mock_is_cloneable.assert_any_call('rbd://fee/fi/fo/fum',
-                                                  image_meta)
-                self.assertFalse(mock_clone.called)
-                self.assertFalse(mock_resize.called)
+            image_loc = ('rbd://bee/bi/bo/bum',
+                         [{'url': 'rbd://bee/bi/bo/bum'},
+                          {'url': 'rbd://fee/fi/fo/fum'}])
+
+            volume = {'name': 'vol1'}
+            image_meta = mock.sentinel.image_meta
+            image_service = mock.sentinel.image_service
+            actual = driver.clone_image(self.context,
+                                        volume,
+                                        image_loc,
+                                        image_meta,
+                                        image_service)
+
+            self.assertEqual(expected, actual)
+            self.assertEqual(2, mock_is_cloneable.call_count)
+            mock_is_cloneable.assert_any_call('rbd://bee/bi/bo/bum',
+                                              image_meta)
+            mock_is_cloneable.assert_any_call('rbd://fee/fi/fo/fum',
+                                              image_meta)
+            self.assertFalse(mock_clone.called)
+            self.assertFalse(mock_resize.called)
index 8ac4d4fb4c768a20940dca8e6a69238d1dffe268..2fa980937b9f8150477e8425a786e82002c8741e 100644 (file)
@@ -1376,7 +1376,7 @@ class IsBlkDeviceTestCase(test.TestCase):
 
 
 class WrongException(Exception):
-        pass
+    pass
 
 
 class TestRetryDecorator(test.TestCase):
index f21712623cdf97c3de54f156b0734ab8769485ef..d1b98c823aa33e736e2d502275d92a2153d5e612 100644 (file)
@@ -945,7 +945,7 @@ class VolumeOpsTestCase(test.TestCase):
                 del obj.eagerlyScrub
             elif (type == "ns0:VirtualMachineRelocateSpec" and
                   delete_disk_attribute):
-                    del obj.disk
+                del obj.disk
             else:
                 pass
             return obj
index e221dd65258017cc2529290a1233dec2acefb35b..d710a6a59bc8a7a2766cce522ff544286182dd64 100644 (file)
@@ -59,25 +59,25 @@ CONF.register_opts(d_opts)
 
 
 def _authenticated(func):
-        """Ensure the driver is authenticated to make a request.
+    """Ensure the driver is authenticated to make a request.
 
-        In do_setup() we fetch an auth token and store it. If that expires when
-        we do API request, we'll fetch a new one.
-        """
-        def func_wrapper(self, *args, **kwargs):
-            try:
-                return func(self, *args, **kwargs)
-            except exception.NotAuthorized:
-                # Prevent recursion loop. After the self arg is the
-                # resource_type arg from _issue_api_request(). If attempt to
-                # login failed, we should just give up.
-                if args[0] == 'login':
-                    raise
-
-                # Token might've expired, get a new one, try again.
-                self._login()
-                return func(self, *args, **kwargs)
-        return func_wrapper
+    In do_setup() we fetch an auth token and store it. If that expires when
+    we do API request, we'll fetch a new one.
+    """
+    def func_wrapper(self, *args, **kwargs):
+        try:
+            return func(self, *args, **kwargs)
+        except exception.NotAuthorized:
+            # Prevent recursion loop. After the self arg is the
+            # resource_type arg from _issue_api_request(). If attempt to
+            # login failed, we should just give up.
+            if args[0] == 'login':
+                raise
+
+            # Token might've expired, get a new one, try again.
+            self._login()
+            return func(self, *args, **kwargs)
+    return func_wrapper
 
 
 class DateraDriver(san.SanISCSIDriver):
index 35ff88afcf67634cc4568921ddc0dd4c91ffd816..880552138a9221db92fa2a77adfb95b913ef5769 100644 (file)
@@ -3830,7 +3830,7 @@ class EMCVMAXCommon(object):
                         self.conn, storageConfigservice,
                         memberInstanceNames, None, extraSpecs)
                     for volumeRef in volumes:
-                            volumeRef['status'] = 'deleted'
+                        volumeRef['status'] = 'deleted'
             except Exception:
                 for volumeRef in volumes:
                     volumeRef['status'] = 'error_deleting'
index a0374549b355aeb8805d33406ac977886aba82d2..ac841924a8dfc5f5aa8130719c86e1b5928b3855 100644 (file)
@@ -126,7 +126,7 @@ class HnasBackend(object):
        """
         if (self.drv_configs['ssh_enabled'] == 'True' and
                 self.drv_configs['cluster_admin_ip0'] is not None):
-                util = 'SMU ' + cmd
+            util = 'SMU ' + cmd
         else:
             out, err = utils.execute(cmd,
                                      "-version",
index fd408d7b5441dee529cb1f0e970e91eee6ffea15..bd6b504e12d3c564b36f099abdc099befe556b74 100644 (file)
@@ -134,7 +134,7 @@ class SmartX(object):
             else:
                 opts['LUNType'] = 1
         if opts['thick_provisioning_support'] == 'true':
-                opts['LUNType'] = 0
+            opts['LUNType'] = 0
 
         return opts
 
index c93fdab17451d2fd87c88d223e33cf415e9e7308..cf3a749aa650ad718e21bfc8ce85b3d1d9fc33e0 100644 (file)
@@ -169,11 +169,11 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
                         initiator_info.get_child_content('initiator-name'))
 
                 if initiator_set == initiator_set_for_igroup:
-                        igroup = initiator_group_info.get_child_content(
-                            'initiator-group-name')
-                        lun_id = initiator_group_info.get_child_content(
-                            'lun-id')
-                        return igroup, lun_id
+                    igroup = initiator_group_info.get_child_content(
+                        'initiator-group-name')
+                    lun_id = initiator_group_info.get_child_content(
+                        'lun-id')
+                    return igroup, lun_id
 
         return None, None
 
index 3e776f225af1e74e813160509bcebd51fdf2d6be..b572a1b04de89e8b3aed9dcf900a47e9ab25fecb 100644 (file)
@@ -191,9 +191,9 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
                           " on this storage family and ontap version.")))
         volume_type = na_utils.get_volume_type_from_volume(volume)
         if volume_type and 'qos_spec_id' in volume_type:
-                raise exception.ManageExistingVolumeTypeMismatch(
-                    reason=_("QoS specs are not supported"
-                             " on this storage family and ONTAP version."))
+            raise exception.ManageExistingVolumeTypeMismatch(
+                reason=_("QoS specs are not supported"
+                         " on this storage family and ONTAP version."))
 
     def _do_qos_for_volume(self, volume, extra_specs, cleanup=False):
         """Set QoS policy on backend from volume type information."""
index 0dbb1016c6f847162957e5f95f5fc4f2c706269d..60a6f4450ae61e908525526792b3cb64c8031a9e 100644 (file)
@@ -138,35 +138,35 @@ def round_down(value, precision):
 def log_extra_spec_warnings(extra_specs):
     for spec in (set(extra_specs.keys() if extra_specs else []) &
                  set(OBSOLETE_SSC_SPECS.keys())):
-            LOG.warning(_LW('Extra spec %(old)s is obsolete.  Use %(new)s '
-                            'instead.'), {'old': spec,
-                                          'new': OBSOLETE_SSC_SPECS[spec]})
+        LOG.warning(_LW('Extra spec %(old)s is obsolete.  Use %(new)s '
+                        'instead.'), {'old': spec,
+                                      'new': OBSOLETE_SSC_SPECS[spec]})
     for spec in (set(extra_specs.keys() if extra_specs else []) &
                  set(DEPRECATED_SSC_SPECS.keys())):
-            LOG.warning(_LW('Extra spec %(old)s is deprecated.  Use %(new)s '
-                            'instead.'), {'old': spec,
-                                          'new': DEPRECATED_SSC_SPECS[spec]})
+        LOG.warning(_LW('Extra spec %(old)s is deprecated.  Use %(new)s '
+                        'instead.'), {'old': spec,
+                                      'new': DEPRECATED_SSC_SPECS[spec]})
 
 
 def get_iscsi_connection_properties(lun_id, volume, iqn,
                                     address, port):
 
-        properties = {}
-        properties['target_discovered'] = False
-        properties['target_portal'] = '%s:%s' % (address, port)
-        properties['target_iqn'] = iqn
-        properties['target_lun'] = int(lun_id)
-        properties['volume_id'] = volume['id']
-        auth = volume['provider_auth']
-        if auth:
-            (auth_method, auth_username, auth_secret) = auth.split()
-            properties['auth_method'] = auth_method
-            properties['auth_username'] = auth_username
-            properties['auth_password'] = auth_secret
-        return {
-            'driver_volume_type': 'iscsi',
-            'data': properties,
-        }
+    properties = {}
+    properties['target_discovered'] = False
+    properties['target_portal'] = '%s:%s' % (address, port)
+    properties['target_iqn'] = iqn
+    properties['target_lun'] = int(lun_id)
+    properties['volume_id'] = volume['id']
+    auth = volume['provider_auth']
+    if auth:
+        (auth_method, auth_username, auth_secret) = auth.split()
+        properties['auth_method'] = auth_method
+        properties['auth_username'] = auth_username
+        properties['auth_password'] = auth_secret
+    return {
+        'driver_volume_type': 'iscsi',
+        'data': properties,
+    }
 
 
 def validate_qos_spec(qos_spec):
index 98afbcbc85a406b9bbbdb6b88408b966b36e4bb4..32d51e097f5700e599383d97b5efc3a6ad9254c6 100644 (file)
@@ -729,17 +729,17 @@ class HP3PARCommon(object):
                 if (not _convert_to_base and
                     isinstance(ex, hpexceptions.HTTPForbidden) and
                         ex.get_code() == 150):
-                        # Error code 150 means 'invalid operation: Cannot grow
-                        # this type of volume'.
-                        # Suppress raising this exception because we can
-                        # resolve it by converting it into a base volume.
-                        # Afterwards, extending the volume should succeed, or
-                        # fail with a different exception/error code.
-                        ex_ctxt.reraise = False
-                        model_update = self._extend_volume(
-                            volume, volume_name,
-                            growth_size_mib,
-                            _convert_to_base=True)
+                    # Error code 150 means 'invalid operation: Cannot grow
+                    # this type of volume'.
+                    # Suppress raising this exception because we can
+                    # resolve it by converting it into a base volume.
+                    # Afterwards, extending the volume should succeed, or
+                    # fail with a different exception/error code.
+                    ex_ctxt.reraise = False
+                    model_update = self._extend_volume(
+                        volume, volume_name,
+                        growth_size_mib,
+                        _convert_to_base=True)
                 else:
                     LOG.error(_LE("Error extending volume: %(vol)s. "
                                   "Exception: %(ex)s"),
index e14d0a6116cdd6ebeb00e2e4063727b7e88bdb2a..c339a7685eb06efe7688fc40bf0855c1f9cc4fc9 100644 (file)
@@ -125,7 +125,7 @@ class ScalityDriver(remotefs_drv.RemoteFSSnapDriver):
             parts = mount.split()
             if (parts[0].endswith('fuse') and
                     parts[1].rstrip('/') == mount_path):
-                        return True
+                return True
         return False
 
     @lockutils.synchronized('mount-sofs', 'cinder-sofs', external=True)
index 579caa4511c441833849fc8a478893f07adbb3f9..d7024033772b05e5257cb052bf554fa5eefdbb0c 100644 (file)
@@ -1634,13 +1634,13 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                        'backup_id': backup['id']})
             self._download_vmdk(context, volume, backing, tmp_file_path)
             with open(tmp_file_path, "rb") as tmp_file:
-                    LOG.debug("Calling backup service to backup file: %s.",
-                              tmp_file_path)
-                    backup_service.backup(backup, tmp_file)
-                    LOG.debug("Created backup: %(backup_id)s for volume: "
-                              "%(name)s.",
-                              {'backup_id': backup['id'],
-                               'name': volume['name']})
+                LOG.debug("Calling backup service to backup file: %s.",
+                          tmp_file_path)
+                backup_service.backup(backup, tmp_file)
+                LOG.debug("Created backup: %(backup_id)s for volume: "
+                          "%(name)s.",
+                          {'backup_id': backup['id'],
+                           'name': volume['name']})
 
     def _create_backing_from_stream_optimized_file(
             self, context, name, volume, tmp_file_path, file_size_bytes):
@@ -1787,35 +1787,35 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         tmp_vmdk_name = uuidutils.generate_uuid()
         with self._temporary_file(suffix=".vmdk",
                                   prefix=tmp_vmdk_name) as tmp_file_path:
-                LOG.debug("Using temporary file: %(tmp_path)s for restoring "
-                          "backup: %(backup_id)s.",
-                          {'tmp_path': tmp_file_path,
-                           'backup_id': backup['id']})
-                with open(tmp_file_path, "wb") as tmp_file:
-                    LOG.debug("Calling backup service to restore backup: "
-                              "%(backup_id)s to file: %(tmp_path)s.",
-                              {'backup_id': backup['id'],
-                               'tmp_path': tmp_file_path})
-                    backup_service.restore(backup, volume['id'], tmp_file)
-                    LOG.debug("Backup: %(backup_id)s restored to file: "
-                              "%(tmp_path)s.",
-                              {'backup_id': backup['id'],
-                               'tmp_path': tmp_file_path})
-                self._restore_backing(context, volume, backing, tmp_file_path,
-                                      backup['size'] * units.Gi)
-
-                if backup['size'] < volume['size']:
-                    # Current backing size is backup size.
-                    LOG.debug("Backup size: %(backup_size)d is less than "
-                              "volume size: %(vol_size)d; extending volume.",
-                              {'backup_size': backup['size'],
-                               'vol_size': volume['size']})
-                    self.extend_volume(volume, volume['size'])
-
-                LOG.debug("Backup: %(backup_id)s restored to volume: "
-                          "%(name)s.",
+            LOG.debug("Using temporary file: %(tmp_path)s for restoring "
+                      "backup: %(backup_id)s.",
+                      {'tmp_path': tmp_file_path,
+                       'backup_id': backup['id']})
+            with open(tmp_file_path, "wb") as tmp_file:
+                LOG.debug("Calling backup service to restore backup: "
+                          "%(backup_id)s to file: %(tmp_path)s.",
                           {'backup_id': backup['id'],
-                           'name': volume['name']})
+                           'tmp_path': tmp_file_path})
+                backup_service.restore(backup, volume['id'], tmp_file)
+                LOG.debug("Backup: %(backup_id)s restored to file: "
+                          "%(tmp_path)s.",
+                          {'backup_id': backup['id'],
+                           'tmp_path': tmp_file_path})
+            self._restore_backing(context, volume, backing, tmp_file_path,
+                                  backup['size'] * units.Gi)
+
+            if backup['size'] < volume['size']:
+                # Current backing size is backup size.
+                LOG.debug("Backup size: %(backup_size)d is less than "
+                          "volume size: %(vol_size)d; extending volume.",
+                          {'backup_size': backup['size'],
+                           'vol_size': volume['size']})
+                self.extend_volume(volume, volume['size'])
+
+            LOG.debug("Backup: %(backup_id)s restored to volume: "
+                      "%(name)s.",
+                      {'backup_id': backup['id'],
+                       'name': volume['name']})
 
 
 class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
index f446f924cab99fd4aa0bcba3e40bf8ac08d3cb28..389182b0bd55651bcc02f960a3e3b5112f510383 100644 (file)
@@ -324,7 +324,7 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver):
 
         if ('origin' in lun2del and
                 lun2del['origin']['project'] == lcfg.zfssa_cache_project):
-                self._check_origin(lun2del, volume['name'])
+            self._check_origin(lun2del, volume['name'])
 
     def create_snapshot(self, snapshot):
         """Creates a snapshot of a volume.
index 7a455861d9430ceac0466d5ef5f4ea2d75c8a5b4..4fdc354285d5c7ea8fd4e7dc92b6d18bb3f3b676 100644 (file)
@@ -772,9 +772,9 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                 # Update the newly created volume db entry before we clone it
                 # for the image-volume creation.
                 if model_update:
-                        volume_ref = self.db.volume_update(context,
-                                                           volume_ref['id'],
-                                                           model_update)
+                    volume_ref = self.db.volume_update(context,
+                                                       volume_ref['id'],
+                                                       model_update)
                 self.manager._create_image_cache_volume_entry(internal_context,
                                                               volume_ref,
                                                               image_id,
index 7a7d0cc189711f5e6efc1910b2225872a9986d2c..dfc7c20dd8eacd5da420d181150ef2934a11b545 100644 (file)
@@ -3077,10 +3077,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                             metadata['key']: metadata['value']
                             for metadata in volume.get('volume_metadata')}
                 elif key == 'admin_metadata':
-                        model_update_new[key] = {
-                            metadata['key']: metadata['value']
-                            for metadata in volume.get(
-                                'volume_admin_metadata')}
+                    model_update_new[key] = {
+                        metadata['key']: metadata['value']
+                        for metadata in volume.get('volume_admin_metadata')}
                 else:
                     model_update_new[key] = volume[key]
             self.db.volume_update(ctxt.elevated(), new_volume['id'],
index 5d6ac6132ca0dad089341eb2a30f9ca8bd97cb39..912c1eec3b73f4158b0173738a52891291c72dbb 100644 (file)
@@ -145,10 +145,10 @@ class TgtAdm(iscsi.ISCSITarget):
 
     @utils.retry(putils.ProcessExecutionError)
     def _do_tgt_update(self, name):
-            (out, err) = utils.execute('tgt-admin', '--update', name,
-                                       run_as_root=True)
-            LOG.debug("StdOut from tgt-admin --update: %s", out)
-            LOG.debug("StdErr from tgt-admin --update: %s", err)
+        (out, err) = utils.execute('tgt-admin', '--update', name,
+                                   run_as_root=True)
+        LOG.debug("StdOut from tgt-admin --update: %s", out)
+        LOG.debug("StdErr from tgt-admin --update: %s", err)
 
     def create_iscsi_target(self, name, tid, lun, path,
                             chap_auth=None, **kwargs):