From ca2be0a73e4a55783bfceb7f45f7f3796e359434 Mon Sep 17 00:00:00 2001
From: Rajesh Tailor <rajesh.tailor@nttdata.com>
Date: Mon, 8 Jun 2015 04:52:49 -0700
Subject: [PATCH] Fix bad-indentation pylint issues

Fixed bad-indentation (warning code W0311) issues in the cinder project.
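
For context, a minimal illustration of the pattern being cleaned up
(hypothetical snippet, not taken from the cinder tree). The body of the
"for" loop is indented one level deeper than pylint expects, so a run such
as "pylint --disable=all --enable=W0311 example.py" reports something like
"W0311: Bad indentation. Found 12 spaces, expected 8":

    def delete_child_quotas(project_quotas):
        for key in project_quotas:
                if project_quotas[key].get('allocated'):
                    raise ValueError("child project has non-zero quota")

The fix in every case is simply to dedent the offending block to the
expected level; no behavior changes.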

Closes-Bug: 1462992
Change-Id: Idf3c036b1826ed5c0efa2e13ddf289e6880323a6
---
 cinder/api/contrib/quotas.py                  | 10 +--
 cinder/backup/manager.py                      | 16 ++--
 cinder/consistencygroup/api.py                | 10 +--
 cinder/exception.py                           |  6 +-
 .../api/contrib/test_volume_type_access.py    |  2 +-
 cinder/tests/unit/test_glusterfs.py           | 28 +++----
 cinder/tests/unit/test_gpfs.py                | 10 +--
 cinder/tests/unit/test_rbd.py                 | 84 +++++++++----------
 cinder/tests/unit/test_utils.py               |  2 +-
 cinder/tests/unit/test_vmware_volumeops.py    |  2 +-
 cinder/volume/drivers/datera.py               | 36 ++++----
 cinder/volume/drivers/emc/emc_vmax_common.py  |  2 +-
 cinder/volume/drivers/hitachi/hnas_backend.py |  2 +-
 cinder/volume/drivers/huawei/smartx.py        |  2 +-
 .../drivers/netapp/dataontap/block_7mode.py   | 10 +--
 .../drivers/netapp/dataontap/nfs_7mode.py     |  6 +-
 cinder/volume/drivers/netapp/utils.py         | 44 +++++-----
 .../volume/drivers/san/hp/hp_3par_common.py   | 22 ++---
 cinder/volume/drivers/scality.py              |  2 +-
 cinder/volume/drivers/vmware/vmdk.py          | 70 ++++++++--------
 cinder/volume/drivers/zfssa/zfssaiscsi.py     |  2 +-
 cinder/volume/flows/manager/create_volume.py  |  6 +-
 cinder/volume/manager.py                      |  7 +-
 cinder/volume/targets/tgt.py                  |  8 +-
 24 files changed, 194 insertions(+), 195 deletions(-)

diff --git a/cinder/api/contrib/quotas.py b/cinder/api/contrib/quotas.py
index c6614071b..ff4faee39 100644
--- a/cinder/api/contrib/quotas.py
+++ b/cinder/api/contrib/quotas.py
@@ -390,11 +390,11 @@ class QuotaSetsController(wsgi.Controller):
         # If the project which is being deleted has allocated part of its quota
         # to its subprojects, then subprojects' quotas should be deleted first.
         for key, value in project_quotas.items():
-                if 'allocated' in project_quotas[key].keys():
-                    if project_quotas[key]['allocated'] != 0:
-                        msg = _("About to delete child projects having "
-                                "non-zero quota. This should not be performed")
-                        raise webob.exc.HTTPBadRequest(explanation=msg)
+            if 'allocated' in project_quotas[key].keys():
+                if project_quotas[key]['allocated'] != 0:
+                    msg = _("About to delete child projects having "
+                            "non-zero quota. This should not be performed")
+                    raise webob.exc.HTTPBadRequest(explanation=msg)
 
         if parent_id:
             # Get the children of the project which the token is scoped to in
diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py
index 47ee1d7bc..ca238583a 100644
--- a/cinder/backup/manager.py
+++ b/cinder/backup/manager.py
@@ -281,14 +281,14 @@ class BackupManager(manager.SchedulerDependentManager):
         for attachment in attachments:
             if (attachment['attached_host'] == self.host and
                     attachment['instance_uuid'] is None):
-                        try:
-                            mgr.detach_volume(ctxt, volume['id'],
-                                              attachment['id'])
-                        except Exception:
-                            LOG.exception(_LE("Detach attachment %(attach_id)s"
-                                              " failed."),
-                                          {'attach_id': attachment['id']},
-                                          resource=volume)
+                try:
+                    mgr.detach_volume(ctxt, volume['id'],
+                                      attachment['id'])
+                except Exception:
+                    LOG.exception(_LE("Detach attachment %(attach_id)s"
+                                      " failed."),
+                                  {'attach_id': attachment['id']},
+                                  resource=volume)
 
     def _cleanup_temp_volumes_snapshots_for_one_backup(self, ctxt, backup):
         # NOTE(xyang): If the service crashes or gets restarted during the
diff --git a/cinder/consistencygroup/api.py b/cinder/consistencygroup/api.py
index ebf4d8cc8..5df29bf42 100644
--- a/cinder/consistencygroup/api.py
+++ b/cinder/consistencygroup/api.py
@@ -531,11 +531,11 @@ class API(base.Base):
 
         if (not name and not description and not add_volumes_new and
                 not remove_volumes_new):
-                msg = (_("Cannot update consistency group %(group_id)s "
-                         "because no valid name, description, add_volumes, "
-                         "or remove_volumes were provided.") %
-                       {'group_id': group.id})
-                raise exception.InvalidConsistencyGroup(reason=msg)
+            msg = (_("Cannot update consistency group %(group_id)s "
+                     "because no valid name, description, add_volumes, "
+                     "or remove_volumes were provided.") %
+                   {'group_id': group.id})
+            raise exception.InvalidConsistencyGroup(reason=msg)
 
         fields = {'updated_at': timeutils.utcnow()}
 
diff --git a/cinder/exception.py b/cinder/exception.py
index 5e0715e0e..adaa75540 100644
--- a/cinder/exception.py
+++ b/cinder/exception.py
@@ -932,9 +932,9 @@ class ViolinBackendErrNotFound(CinderException):
 
 # ZFSSA NFS driver exception.
 class WebDAVClientError(CinderException):
-        message = _("The WebDAV request failed. Reason: %(msg)s, "
-                    "Return code/reason: %(code)s, Source Volume: %(src)s, "
-                    "Destination Volume: %(dst)s, Method: %(method)s.")
+    message = _("The WebDAV request failed. Reason: %(msg)s, "
+                "Return code/reason: %(code)s, Source Volume: %(src)s, "
+                "Destination Volume: %(dst)s, Method: %(method)s.")
 
 
 # XtremIO Drivers
diff --git a/cinder/tests/unit/api/contrib/test_volume_type_access.py b/cinder/tests/unit/api/contrib/test_volume_type_access.py
index 270686868..14d265ccb 100644
--- a/cinder/tests/unit/api/contrib/test_volume_type_access.py
+++ b/cinder/tests/unit/api/contrib/test_volume_type_access.py
@@ -63,7 +63,7 @@ def _has_type_access(type_id, project_id):
     for access in ACCESS_LIST:
         if access['volume_type_id'] == type_id and \
            access['project_id'] == project_id:
-                return True
+            return True
     return False
 
 
diff --git a/cinder/tests/unit/test_glusterfs.py b/cinder/tests/unit/test_glusterfs.py
index 510a7cbbd..c24d19d95 100644
--- a/cinder/tests/unit/test_glusterfs.py
+++ b/cinder/tests/unit/test_glusterfs.py
@@ -874,7 +874,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         drv = self._driver
 
         with mock.patch.object(drv, '_execute') as mock_execute,\
-                mock.patch.object(drv, '_ensure_share_mounted') as \
+            mock.patch.object(drv, '_ensure_share_mounted') as \
                 mock_ensure_share_mounted:
             volume = DumbVolume()
             volume['id'] = self.VOLUME_UUID
@@ -890,23 +890,23 @@ class GlusterFsDriverTestCase(test.TestCase):
         drv = self._driver
 
         with mock.patch.object(drv, '_read_file') as mock_read_file:
-                hashed = drv._get_hash_str(self.TEST_EXPORT1)
-                volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
-                                                   hashed,
-                                                   self.VOLUME_UUID)
-                info_path = '%s%s' % (volume_path, '.info')
+            hashed = drv._get_hash_str(self.TEST_EXPORT1)
+            volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
+                                               hashed,
+                                               self.VOLUME_UUID)
+            info_path = '%s%s' % (volume_path, '.info')
 
-                mock_read_file.return_value = '{"%(id)s": "volume-%(id)s"}' %\
-                    {'id': self.VOLUME_UUID}
+            mock_read_file.return_value = '{"%(id)s": "volume-%(id)s"}' %\
+                {'id': self.VOLUME_UUID}
 
-                volume = DumbVolume()
-                volume['id'] = self.VOLUME_UUID
-                volume['name'] = 'volume-%s' % self.VOLUME_UUID
+            volume = DumbVolume()
+            volume['id'] = self.VOLUME_UUID
+            volume['name'] = 'volume-%s' % self.VOLUME_UUID
 
-                info = drv._read_info_file(info_path)
+            info = drv._read_info_file(info_path)
 
-                self.assertEqual('volume-%s' % self.VOLUME_UUID,
-                                 info[self.VOLUME_UUID])
+            self.assertEqual('volume-%s' % self.VOLUME_UUID,
+                             info[self.VOLUME_UUID])
 
     def test_extend_volume(self):
         drv = self._driver
diff --git a/cinder/tests/unit/test_gpfs.py b/cinder/tests/unit/test_gpfs.py
index 23698e25e..e47ef0ead 100644
--- a/cinder/tests/unit/test_gpfs.py
+++ b/cinder/tests/unit/test_gpfs.py
@@ -83,11 +83,11 @@ class GPFSDriverTestCase(test.TestCase):
         CONF.gpfs_images_dir = self.images_dir
 
     def _cleanup(self, images_dir, volumes_path):
-            try:
-                os.rmdir(images_dir)
-                os.rmdir(volumes_path)
-            except OSError:
-                pass
+        try:
+            os.rmdir(images_dir)
+            os.rmdir(volumes_path)
+        except OSError:
+            pass
 
     def test_different(self):
         self.assertTrue(gpfs._different((True, False)))
diff --git a/cinder/tests/unit/test_rbd.py b/cinder/tests/unit/test_rbd.py
index dbcd6fa12..a3d2b68e1 100644
--- a/cinder/tests/unit/test_rbd.py
+++ b/cinder/tests/unit/test_rbd.py
@@ -1236,27 +1236,27 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
             mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
             mock.patch.object(self.volume.driver, '_resize') \
                 as mock_resize:
-                mock_is_cloneable.side_effect = cloneable_side_effect
-                image_loc = ('rbd://bee/bi/bo/bum',
-                             [{'url': 'rbd://bee/bi/bo/bum'},
-                              {'url': 'rbd://fee/fi/fo/fum'}])
-                volume = {'name': 'vol1'}
-                image_meta = mock.sentinel.image_meta
-                image_service = mock.sentinel.image_service
-
-                actual = driver.clone_image(self.context,
-                                            volume,
-                                            image_loc,
-                                            image_meta,
-                                            image_service)
-
-                self.assertEqual(expected, actual)
-                self.assertEqual(2, mock_is_cloneable.call_count)
-                mock_clone.assert_called_once_with(volume,
-                                                   'fi', 'fo', 'fum')
-                mock_is_cloneable.assert_called_with('rbd://fee/fi/fo/fum',
-                                                     image_meta)
-                mock_resize.assert_called_once_with(volume)
+            mock_is_cloneable.side_effect = cloneable_side_effect
+            image_loc = ('rbd://bee/bi/bo/bum',
+                         [{'url': 'rbd://bee/bi/bo/bum'},
+                          {'url': 'rbd://fee/fi/fo/fum'}])
+            volume = {'name': 'vol1'}
+            image_meta = mock.sentinel.image_meta
+            image_service = mock.sentinel.image_service
+
+            actual = driver.clone_image(self.context,
+                                        volume,
+                                        image_loc,
+                                        image_meta,
+                                        image_service)
+
+            self.assertEqual(expected, actual)
+            self.assertEqual(2, mock_is_cloneable.call_count)
+            mock_clone.assert_called_once_with(volume,
+                                               'fi', 'fo', 'fum')
+            mock_is_cloneable.assert_called_with('rbd://fee/fi/fo/fum',
+                                                 image_meta)
+            mock_resize.assert_called_once_with(volume)
 
     def test_clone_multilocation_failure(self):
         expected = ({}, False)
@@ -1267,24 +1267,24 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
             mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
             mock.patch.object(self.volume.driver, '_resize') \
                 as mock_resize:
-                image_loc = ('rbd://bee/bi/bo/bum',
-                             [{'url': 'rbd://bee/bi/bo/bum'},
-                              {'url': 'rbd://fee/fi/fo/fum'}])
-
-                volume = {'name': 'vol1'}
-                image_meta = mock.sentinel.image_meta
-                image_service = mock.sentinel.image_service
-                actual = driver.clone_image(self.context,
-                                            volume,
-                                            image_loc,
-                                            image_meta,
-                                            image_service)
-
-                self.assertEqual(expected, actual)
-                self.assertEqual(2, mock_is_cloneable.call_count)
-                mock_is_cloneable.assert_any_call('rbd://bee/bi/bo/bum',
-                                                  image_meta)
-                mock_is_cloneable.assert_any_call('rbd://fee/fi/fo/fum',
-                                                  image_meta)
-                self.assertFalse(mock_clone.called)
-                self.assertFalse(mock_resize.called)
+            image_loc = ('rbd://bee/bi/bo/bum',
+                         [{'url': 'rbd://bee/bi/bo/bum'},
+                          {'url': 'rbd://fee/fi/fo/fum'}])
+
+            volume = {'name': 'vol1'}
+            image_meta = mock.sentinel.image_meta
+            image_service = mock.sentinel.image_service
+            actual = driver.clone_image(self.context,
+                                        volume,
+                                        image_loc,
+                                        image_meta,
+                                        image_service)
+
+            self.assertEqual(expected, actual)
+            self.assertEqual(2, mock_is_cloneable.call_count)
+            mock_is_cloneable.assert_any_call('rbd://bee/bi/bo/bum',
+                                              image_meta)
+            mock_is_cloneable.assert_any_call('rbd://fee/fi/fo/fum',
+                                              image_meta)
+            self.assertFalse(mock_clone.called)
+            self.assertFalse(mock_resize.called)
diff --git a/cinder/tests/unit/test_utils.py b/cinder/tests/unit/test_utils.py
index 8ac4d4fb4..2fa980937 100644
--- a/cinder/tests/unit/test_utils.py
+++ b/cinder/tests/unit/test_utils.py
@@ -1376,7 +1376,7 @@ class IsBlkDeviceTestCase(test.TestCase):
 
 
 class WrongException(Exception):
-        pass
+    pass
 
 
 class TestRetryDecorator(test.TestCase):
diff --git a/cinder/tests/unit/test_vmware_volumeops.py b/cinder/tests/unit/test_vmware_volumeops.py
index f21712623..d1b98c823 100644
--- a/cinder/tests/unit/test_vmware_volumeops.py
+++ b/cinder/tests/unit/test_vmware_volumeops.py
@@ -945,7 +945,7 @@ class VolumeOpsTestCase(test.TestCase):
                 del obj.eagerlyScrub
             elif (type == "ns0:VirtualMachineRelocateSpec" and
                   delete_disk_attribute):
-                    del obj.disk
+                del obj.disk
             else:
                 pass
             return obj
diff --git a/cinder/volume/drivers/datera.py b/cinder/volume/drivers/datera.py
index e221dd652..d710a6a59 100644
--- a/cinder/volume/drivers/datera.py
+++ b/cinder/volume/drivers/datera.py
@@ -59,25 +59,25 @@ CONF.register_opts(d_opts)
 
 
 def _authenticated(func):
-        """Ensure the driver is authenticated to make a request.
+    """Ensure the driver is authenticated to make a request.
 
-        In do_setup() we fetch an auth token and store it. If that expires when
-        we do API request, we'll fetch a new one.
-        """
-        def func_wrapper(self, *args, **kwargs):
-            try:
-                return func(self, *args, **kwargs)
-            except exception.NotAuthorized:
-                # Prevent recursion loop. After the self arg is the
-                # resource_type arg from _issue_api_request(). If attempt to
-                # login failed, we should just give up.
-                if args[0] == 'login':
-                    raise
-
-                # Token might've expired, get a new one, try again.
-                self._login()
-                return func(self, *args, **kwargs)
-        return func_wrapper
+    In do_setup() we fetch an auth token and store it. If that expires when
+    we do API request, we'll fetch a new one.
+    """
+    def func_wrapper(self, *args, **kwargs):
+        try:
+            return func(self, *args, **kwargs)
+        except exception.NotAuthorized:
+            # Prevent recursion loop. After the self arg is the
+            # resource_type arg from _issue_api_request(). If attempt to
+            # login failed, we should just give up.
+            if args[0] == 'login':
+                raise
+
+            # Token might've expired, get a new one, try again.
+            self._login()
+            return func(self, *args, **kwargs)
+    return func_wrapper
 
 
 class DateraDriver(san.SanISCSIDriver):
diff --git a/cinder/volume/drivers/emc/emc_vmax_common.py b/cinder/volume/drivers/emc/emc_vmax_common.py
index 35ff88afc..880552138 100644
--- a/cinder/volume/drivers/emc/emc_vmax_common.py
+++ b/cinder/volume/drivers/emc/emc_vmax_common.py
@@ -3830,7 +3830,7 @@ class EMCVMAXCommon(object):
                         self.conn, storageConfigservice,
                         memberInstanceNames, None, extraSpecs)
                     for volumeRef in volumes:
-                            volumeRef['status'] = 'deleted'
+                        volumeRef['status'] = 'deleted'
             except Exception:
                 for volumeRef in volumes:
                     volumeRef['status'] = 'error_deleting'
diff --git a/cinder/volume/drivers/hitachi/hnas_backend.py b/cinder/volume/drivers/hitachi/hnas_backend.py
index a0374549b..ac841924a 100644
--- a/cinder/volume/drivers/hitachi/hnas_backend.py
+++ b/cinder/volume/drivers/hitachi/hnas_backend.py
@@ -126,7 +126,7 @@ class HnasBackend(object):
        """
         if (self.drv_configs['ssh_enabled'] == 'True' and
                 self.drv_configs['cluster_admin_ip0'] is not None):
-                util = 'SMU ' + cmd
+            util = 'SMU ' + cmd
         else:
             out, err = utils.execute(cmd,
                                      "-version",
diff --git a/cinder/volume/drivers/huawei/smartx.py b/cinder/volume/drivers/huawei/smartx.py
index fd408d7b5..bd6b504e1 100644
--- a/cinder/volume/drivers/huawei/smartx.py
+++ b/cinder/volume/drivers/huawei/smartx.py
@@ -134,7 +134,7 @@ class SmartX(object):
             else:
                 opts['LUNType'] = 1
         if opts['thick_provisioning_support'] == 'true':
-                opts['LUNType'] = 0
+            opts['LUNType'] = 0
 
         return opts
 
diff --git a/cinder/volume/drivers/netapp/dataontap/block_7mode.py b/cinder/volume/drivers/netapp/dataontap/block_7mode.py
index c93fdab17..cf3a749aa 100644
--- a/cinder/volume/drivers/netapp/dataontap/block_7mode.py
+++ b/cinder/volume/drivers/netapp/dataontap/block_7mode.py
@@ -169,11 +169,11 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
                         initiator_info.get_child_content('initiator-name'))
 
                 if initiator_set == initiator_set_for_igroup:
-                        igroup = initiator_group_info.get_child_content(
-                            'initiator-group-name')
-                        lun_id = initiator_group_info.get_child_content(
-                            'lun-id')
-                        return igroup, lun_id
+                    igroup = initiator_group_info.get_child_content(
+                        'initiator-group-name')
+                    lun_id = initiator_group_info.get_child_content(
+                        'lun-id')
+                    return igroup, lun_id
 
         return None, None
 
diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py b/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py
index 3e776f225..b572a1b04 100644
--- a/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py
+++ b/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py
@@ -191,9 +191,9 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
                           " on this storage family and ontap version.")))
         volume_type = na_utils.get_volume_type_from_volume(volume)
         if volume_type and 'qos_spec_id' in volume_type:
-                raise exception.ManageExistingVolumeTypeMismatch(
-                    reason=_("QoS specs are not supported"
-                             " on this storage family and ONTAP version."))
+            raise exception.ManageExistingVolumeTypeMismatch(
+                reason=_("QoS specs are not supported"
+                         " on this storage family and ONTAP version."))
 
     def _do_qos_for_volume(self, volume, extra_specs, cleanup=False):
         """Set QoS policy on backend from volume type information."""
diff --git a/cinder/volume/drivers/netapp/utils.py b/cinder/volume/drivers/netapp/utils.py
index 0dbb1016c..60a6f4450 100644
--- a/cinder/volume/drivers/netapp/utils.py
+++ b/cinder/volume/drivers/netapp/utils.py
@@ -138,35 +138,35 @@ def round_down(value, precision):
 def log_extra_spec_warnings(extra_specs):
     for spec in (set(extra_specs.keys() if extra_specs else []) &
                  set(OBSOLETE_SSC_SPECS.keys())):
-            LOG.warning(_LW('Extra spec %(old)s is obsolete.  Use %(new)s '
-                            'instead.'), {'old': spec,
-                                          'new': OBSOLETE_SSC_SPECS[spec]})
+        LOG.warning(_LW('Extra spec %(old)s is obsolete.  Use %(new)s '
+                        'instead.'), {'old': spec,
+                                      'new': OBSOLETE_SSC_SPECS[spec]})
     for spec in (set(extra_specs.keys() if extra_specs else []) &
                  set(DEPRECATED_SSC_SPECS.keys())):
-            LOG.warning(_LW('Extra spec %(old)s is deprecated.  Use %(new)s '
-                            'instead.'), {'old': spec,
-                                          'new': DEPRECATED_SSC_SPECS[spec]})
+        LOG.warning(_LW('Extra spec %(old)s is deprecated.  Use %(new)s '
+                        'instead.'), {'old': spec,
+                                      'new': DEPRECATED_SSC_SPECS[spec]})
 
 
 def get_iscsi_connection_properties(lun_id, volume, iqn,
                                     address, port):
 
-        properties = {}
-        properties['target_discovered'] = False
-        properties['target_portal'] = '%s:%s' % (address, port)
-        properties['target_iqn'] = iqn
-        properties['target_lun'] = int(lun_id)
-        properties['volume_id'] = volume['id']
-        auth = volume['provider_auth']
-        if auth:
-            (auth_method, auth_username, auth_secret) = auth.split()
-            properties['auth_method'] = auth_method
-            properties['auth_username'] = auth_username
-            properties['auth_password'] = auth_secret
-        return {
-            'driver_volume_type': 'iscsi',
-            'data': properties,
-        }
+    properties = {}
+    properties['target_discovered'] = False
+    properties['target_portal'] = '%s:%s' % (address, port)
+    properties['target_iqn'] = iqn
+    properties['target_lun'] = int(lun_id)
+    properties['volume_id'] = volume['id']
+    auth = volume['provider_auth']
+    if auth:
+        (auth_method, auth_username, auth_secret) = auth.split()
+        properties['auth_method'] = auth_method
+        properties['auth_username'] = auth_username
+        properties['auth_password'] = auth_secret
+    return {
+        'driver_volume_type': 'iscsi',
+        'data': properties,
+    }
 
 
 def validate_qos_spec(qos_spec):
diff --git a/cinder/volume/drivers/san/hp/hp_3par_common.py b/cinder/volume/drivers/san/hp/hp_3par_common.py
index 98afbcbc8..32d51e097 100644
--- a/cinder/volume/drivers/san/hp/hp_3par_common.py
+++ b/cinder/volume/drivers/san/hp/hp_3par_common.py
@@ -729,17 +729,17 @@ class HP3PARCommon(object):
                 if (not _convert_to_base and
                     isinstance(ex, hpexceptions.HTTPForbidden) and
                         ex.get_code() == 150):
-                        # Error code 150 means 'invalid operation: Cannot grow
-                        # this type of volume'.
-                        # Suppress raising this exception because we can
-                        # resolve it by converting it into a base volume.
-                        # Afterwards, extending the volume should succeed, or
-                        # fail with a different exception/error code.
-                        ex_ctxt.reraise = False
-                        model_update = self._extend_volume(
-                            volume, volume_name,
-                            growth_size_mib,
-                            _convert_to_base=True)
+                    # Error code 150 means 'invalid operation: Cannot grow
+                    # this type of volume'.
+                    # Suppress raising this exception because we can
+                    # resolve it by converting it into a base volume.
+                    # Afterwards, extending the volume should succeed, or
+                    # fail with a different exception/error code.
+                    ex_ctxt.reraise = False
+                    model_update = self._extend_volume(
+                        volume, volume_name,
+                        growth_size_mib,
+                        _convert_to_base=True)
                 else:
                     LOG.error(_LE("Error extending volume: %(vol)s. "
                                   "Exception: %(ex)s"),
diff --git a/cinder/volume/drivers/scality.py b/cinder/volume/drivers/scality.py
index e14d0a611..c339a7685 100644
--- a/cinder/volume/drivers/scality.py
+++ b/cinder/volume/drivers/scality.py
@@ -125,7 +125,7 @@ class ScalityDriver(remotefs_drv.RemoteFSSnapDriver):
             parts = mount.split()
             if (parts[0].endswith('fuse') and
                     parts[1].rstrip('/') == mount_path):
-                        return True
+                return True
         return False
 
     @lockutils.synchronized('mount-sofs', 'cinder-sofs', external=True)
diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py
index 579caa451..d70240337 100644
--- a/cinder/volume/drivers/vmware/vmdk.py
+++ b/cinder/volume/drivers/vmware/vmdk.py
@@ -1634,13 +1634,13 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                        'backup_id': backup['id']})
             self._download_vmdk(context, volume, backing, tmp_file_path)
             with open(tmp_file_path, "rb") as tmp_file:
-                    LOG.debug("Calling backup service to backup file: %s.",
-                              tmp_file_path)
-                    backup_service.backup(backup, tmp_file)
-                    LOG.debug("Created backup: %(backup_id)s for volume: "
-                              "%(name)s.",
-                              {'backup_id': backup['id'],
-                               'name': volume['name']})
+                LOG.debug("Calling backup service to backup file: %s.",
+                          tmp_file_path)
+                backup_service.backup(backup, tmp_file)
+                LOG.debug("Created backup: %(backup_id)s for volume: "
+                          "%(name)s.",
+                          {'backup_id': backup['id'],
+                           'name': volume['name']})
 
     def _create_backing_from_stream_optimized_file(
             self, context, name, volume, tmp_file_path, file_size_bytes):
@@ -1787,35 +1787,35 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         tmp_vmdk_name = uuidutils.generate_uuid()
         with self._temporary_file(suffix=".vmdk",
                                   prefix=tmp_vmdk_name) as tmp_file_path:
-                LOG.debug("Using temporary file: %(tmp_path)s for restoring "
-                          "backup: %(backup_id)s.",
-                          {'tmp_path': tmp_file_path,
-                           'backup_id': backup['id']})
-                with open(tmp_file_path, "wb") as tmp_file:
-                    LOG.debug("Calling backup service to restore backup: "
-                              "%(backup_id)s to file: %(tmp_path)s.",
-                              {'backup_id': backup['id'],
-                               'tmp_path': tmp_file_path})
-                    backup_service.restore(backup, volume['id'], tmp_file)
-                    LOG.debug("Backup: %(backup_id)s restored to file: "
-                              "%(tmp_path)s.",
-                              {'backup_id': backup['id'],
-                               'tmp_path': tmp_file_path})
-                self._restore_backing(context, volume, backing, tmp_file_path,
-                                      backup['size'] * units.Gi)
-
-                if backup['size'] < volume['size']:
-                    # Current backing size is backup size.
-                    LOG.debug("Backup size: %(backup_size)d is less than "
-                              "volume size: %(vol_size)d; extending volume.",
-                              {'backup_size': backup['size'],
-                               'vol_size': volume['size']})
-                    self.extend_volume(volume, volume['size'])
-
-                LOG.debug("Backup: %(backup_id)s restored to volume: "
-                          "%(name)s.",
+            LOG.debug("Using temporary file: %(tmp_path)s for restoring "
+                      "backup: %(backup_id)s.",
+                      {'tmp_path': tmp_file_path,
+                       'backup_id': backup['id']})
+            with open(tmp_file_path, "wb") as tmp_file:
+                LOG.debug("Calling backup service to restore backup: "
+                          "%(backup_id)s to file: %(tmp_path)s.",
                           {'backup_id': backup['id'],
-                           'name': volume['name']})
+                           'tmp_path': tmp_file_path})
+                backup_service.restore(backup, volume['id'], tmp_file)
+                LOG.debug("Backup: %(backup_id)s restored to file: "
+                          "%(tmp_path)s.",
+                          {'backup_id': backup['id'],
+                           'tmp_path': tmp_file_path})
+            self._restore_backing(context, volume, backing, tmp_file_path,
+                                  backup['size'] * units.Gi)
+
+            if backup['size'] < volume['size']:
+                # Current backing size is backup size.
+                LOG.debug("Backup size: %(backup_size)d is less than "
+                          "volume size: %(vol_size)d; extending volume.",
+                          {'backup_size': backup['size'],
+                           'vol_size': volume['size']})
+                self.extend_volume(volume, volume['size'])
+
+            LOG.debug("Backup: %(backup_id)s restored to volume: "
+                      "%(name)s.",
+                      {'backup_id': backup['id'],
+                       'name': volume['name']})
 
 
 class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
diff --git a/cinder/volume/drivers/zfssa/zfssaiscsi.py b/cinder/volume/drivers/zfssa/zfssaiscsi.py
index f446f924c..389182b0b 100644
--- a/cinder/volume/drivers/zfssa/zfssaiscsi.py
+++ b/cinder/volume/drivers/zfssa/zfssaiscsi.py
@@ -324,7 +324,7 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver):
 
         if ('origin' in lun2del and
                 lun2del['origin']['project'] == lcfg.zfssa_cache_project):
-                self._check_origin(lun2del, volume['name'])
+            self._check_origin(lun2del, volume['name'])
 
     def create_snapshot(self, snapshot):
         """Creates a snapshot of a volume.
diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py
index 7a455861d..4fdc35428 100644
--- a/cinder/volume/flows/manager/create_volume.py
+++ b/cinder/volume/flows/manager/create_volume.py
@@ -772,9 +772,9 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                 # Update the newly created volume db entry before we clone it
                 # for the image-volume creation.
                 if model_update:
-                        volume_ref = self.db.volume_update(context,
-                                                           volume_ref['id'],
-                                                           model_update)
+                    volume_ref = self.db.volume_update(context,
+                                                       volume_ref['id'],
+                                                       model_update)
                 self.manager._create_image_cache_volume_entry(internal_context,
                                                               volume_ref,
                                                               image_id,
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 7a7d0cc18..dfc7c20dd 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -3077,10 +3077,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                             metadata['key']: metadata['value']
                             for metadata in volume.get('volume_metadata')}
                 elif key == 'admin_metadata':
-                        model_update_new[key] = {
-                            metadata['key']: metadata['value']
-                            for metadata in volume.get(
-                                'volume_admin_metadata')}
+                    model_update_new[key] = {
+                        metadata['key']: metadata['value']
+                        for metadata in volume.get('volume_admin_metadata')}
                 else:
                     model_update_new[key] = volume[key]
             self.db.volume_update(ctxt.elevated(), new_volume['id'],
diff --git a/cinder/volume/targets/tgt.py b/cinder/volume/targets/tgt.py
index 5d6ac6132..912c1eec3 100644
--- a/cinder/volume/targets/tgt.py
+++ b/cinder/volume/targets/tgt.py
@@ -145,10 +145,10 @@ class TgtAdm(iscsi.ISCSITarget):
 
     @utils.retry(putils.ProcessExecutionError)
     def _do_tgt_update(self, name):
-            (out, err) = utils.execute('tgt-admin', '--update', name,
-                                       run_as_root=True)
-            LOG.debug("StdOut from tgt-admin --update: %s", out)
-            LOG.debug("StdErr from tgt-admin --update: %s", err)
+        (out, err) = utils.execute('tgt-admin', '--update', name,
+                                   run_as_root=True)
+        LOG.debug("StdOut from tgt-admin --update: %s", out)
+        LOG.debug("StdErr from tgt-admin --update: %s", err)
 
     def create_iscsi_target(self, name, tid, lun, path,
                             chap_auth=None, **kwargs):
-- 
2.45.2