review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Fix bad indentation in netapp and san.hp volume drivers
author: Danny Al-Gaaf <danny.al-gaaf@bisect.de>
Wed, 13 Aug 2014 07:49:06 +0000 (09:49 +0200)
committer: Danny Al-Gaaf <danny.al-gaaf@bisect.de>
Wed, 13 Aug 2014 07:49:06 +0000 (09:49 +0200)
Closes-Bug: #1356223

Change-Id: I4e9fb3c6b3e098998f41e266b2cfc51a4af1fc65
Signed-off-by: Danny Al-Gaaf <danny.al-gaaf@bisect.de>
cinder/volume/drivers/netapp/common.py
cinder/volume/drivers/netapp/eseries/client.py
cinder/volume/drivers/netapp/iscsi.py
cinder/volume/drivers/netapp/nfs.py
cinder/volume/drivers/netapp/ssc_utils.py
cinder/volume/drivers/netapp/utils.py
cinder/volume/drivers/san/hp/hp_3par_common.py

index d2e04804bdcefdd8ca39c01ed479977ec03b88d8..938a218c087a85bd87b6891dec778d0a51e44170 100644 (file)
@@ -143,8 +143,8 @@ class NetAppDriverFactory(object):
     def check_netapp_driver(location):
         """Checks if the driver requested is a netapp driver."""
         if location.find(".netapp.") == -1:
-                raise exception.InvalidInput(
-                    reason=_("Only loading netapp drivers supported."))
+            raise exception.InvalidInput(
+                reason=_("Only loading netapp drivers supported."))
 
 
 class Deprecated(driver.VolumeDriver):
index 112fb423be7f8d1fed164caf2d558241b580211e..09ac9986c2ecbb99b2ef227b997f7a4ccab1c91a 100644 (file)
@@ -124,13 +124,13 @@ class RestClient(WebserviceClient):
                   " verify: %(v)s, kwargs: %(k)s." % (params))
         url = self._get_resource_url(path, use_system, **kwargs)
         if self._content_type == 'json':
-                headers = {'Accept': 'application/json',
-                           'Content-Type': 'application/json'}
-                data = json.dumps(data) if data else None
-                res = self.invoke_service(method, url, data=data,
-                                          headers=headers,
-                                          timeout=timeout, verify=verify)
-                return res.json() if res.text else None
+            headers = {'Accept': 'application/json',
+                       'Content-Type': 'application/json'}
+            data = json.dumps(data) if data else None
+            res = self.invoke_service(method, url, data=data,
+                                      headers=headers,
+                                      timeout=timeout, verify=verify)
+            return res.json() if res.text else None
         else:
             raise exception.NetAppDriverException(
                 _("Content type not supported."))
index c7988c4353f916eb76d83c1f1f10584b5a8135ab..761c79d46e953a36bc8aa8e693dff109e3260edc 100644 (file)
@@ -1223,7 +1223,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
                     if avl_vol['name'] in self.volume_list:
                         return avl_vol
                 elif self._get_vol_option(avl_vol['name'], 'root') != 'true':
-                        return avl_vol
+                    return avl_vol
         return None
 
     def _get_igroup_by_initiator(self, initiator):
index 1a16415caa5afa7eede73dabfd5c22ab8a3d9a21..d16fd7568864385ad45bdb1d5bddc42ffc68cf80 100644 (file)
@@ -272,13 +272,13 @@ class NetAppNFSDriver(nfs.NfsDriver):
     def _spawn_clean_cache_job(self):
         """Spawns a clean task if not running."""
         if getattr(self, 'cleaning', None):
-                LOG.debug('Image cache cleaning in progress. Returning... ')
-                return
+            LOG.debug('Image cache cleaning in progress. Returning... ')
+            return
         else:
-                #set cleaning to True
-                self.cleaning = True
-                t = Timer(0, self._clean_image_cache)
-                t.start()
+            #set cleaning to True
+            self.cleaning = True
+            t = Timer(0, self._clean_image_cache)
+            t.start()
 
     def _clean_image_cache(self):
         """Clean the image cache files in cache of space crunch."""
@@ -352,9 +352,9 @@ class NetAppNFSDriver(nfs.NfsDriver):
                             return True
                         return False
                     if _do_delete():
-                            bytes_to_free = bytes_to_free - int(f[1])
-                            if bytes_to_free <= 0:
-                                return
+                        bytes_to_free = bytes_to_free - int(f[1])
+                        if bytes_to_free <= 0:
+                            return
 
     def _delete_file(self, path):
         """Delete file from disk and return result as boolean."""
index c6daa5bbbb7d168735f27b30132722825a64355a..d70a8dbd3ca507f44753061ff699ca4f7f09d3cd 100644 (file)
@@ -418,44 +418,44 @@ def refresh_cluster_stale_ssc(*args, **kwargs):
 
         @utils.synchronized(lock_pr)
         def refresh_stale_ssc():
-                stale_vols = backend._update_stale_vols(reset=True)
-                LOG.info(_('Running stale ssc refresh job for %(server)s'
-                           ' and vserver %(vs)s')
-                         % {'server': na_server, 'vs': vserver})
-                # refreshing single volumes can create inconsistency
-                # hence doing manipulations on copy
-                ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
-                refresh_vols = set()
-                expired_vols = set()
-                for vol in stale_vols:
-                    name = vol.id['name']
-                    res = get_cluster_vols_with_ssc(na_server, vserver, name)
-                    if res:
-                        refresh_vols.add(res.pop())
-                    else:
-                        expired_vols.add(vol)
-                for vol in refresh_vols:
-                    for k in ssc_vols_copy:
-                        vol_set = ssc_vols_copy[k]
-                        vol_set.discard(vol)
-                        if k == "mirrored" and vol.mirror.get('mirrored'):
-                            vol_set.add(vol)
-                        if k == "dedup" and vol.sis.get('dedup'):
-                            vol_set.add(vol)
-                        if k == "compression" and vol.sis.get('compression'):
-                            vol_set.add(vol)
-                        if k == "thin" and vol.space.get('thin_provisioned'):
-                            vol_set.add(vol)
-                        if k == "all":
-                            vol_set.add(vol)
-                for vol in expired_vols:
-                    for k in ssc_vols_copy:
-                        vol_set = ssc_vols_copy[k]
-                        vol_set.discard(vol)
-                backend.refresh_ssc_vols(ssc_vols_copy)
-                LOG.info(_('Successfully completed stale refresh job for'
-                           ' %(server)s and vserver %(vs)s')
-                         % {'server': na_server, 'vs': vserver})
+            stale_vols = backend._update_stale_vols(reset=True)
+            LOG.info(_('Running stale ssc refresh job for %(server)s'
+                       ' and vserver %(vs)s')
+                     % {'server': na_server, 'vs': vserver})
+            # refreshing single volumes can create inconsistency
+            # hence doing manipulations on copy
+            ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
+            refresh_vols = set()
+            expired_vols = set()
+            for vol in stale_vols:
+                name = vol.id['name']
+                res = get_cluster_vols_with_ssc(na_server, vserver, name)
+                if res:
+                    refresh_vols.add(res.pop())
+                else:
+                    expired_vols.add(vol)
+            for vol in refresh_vols:
+                for k in ssc_vols_copy:
+                    vol_set = ssc_vols_copy[k]
+                    vol_set.discard(vol)
+                    if k == "mirrored" and vol.mirror.get('mirrored'):
+                        vol_set.add(vol)
+                    if k == "dedup" and vol.sis.get('dedup'):
+                        vol_set.add(vol)
+                    if k == "compression" and vol.sis.get('compression'):
+                        vol_set.add(vol)
+                    if k == "thin" and vol.space.get('thin_provisioned'):
+                        vol_set.add(vol)
+                    if k == "all":
+                        vol_set.add(vol)
+            for vol in expired_vols:
+                for k in ssc_vols_copy:
+                    vol_set = ssc_vols_copy[k]
+                    vol_set.discard(vol)
+            backend.refresh_ssc_vols(ssc_vols_copy)
+            LOG.info(_('Successfully completed stale refresh job for'
+                       ' %(server)s and vserver %(vs)s')
+                     % {'server': na_server, 'vs': vserver})
 
         refresh_stale_ssc()
     finally:
@@ -503,8 +503,8 @@ def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
         raise exception.InvalidInput(reason=_("Backend server not NaServer."))
     delta_secs = getattr(backend, 'ssc_run_delta_secs', 1800)
     if getattr(backend, 'ssc_job_running', None):
-            LOG.warn(_('ssc job in progress. Returning... '))
-            return
+        LOG.warn(_('ssc job in progress. Returning... '))
+        return
     elif (getattr(backend, 'ssc_run_time', None) is None or
           (backend.ssc_run_time and
            timeutils.is_newer_than(backend.ssc_run_time, delta_secs))):
@@ -515,8 +515,8 @@ def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
                       args=[backend, na_server, vserver])
             t.start()
     elif getattr(backend, 'refresh_stale_running', None):
-            LOG.warn(_('refresh stale ssc job in progress. Returning... '))
-            return
+        LOG.warn(_('refresh stale ssc job in progress. Returning... '))
+        return
     else:
         if backend.stale_vols:
             if synchronous:
index eda599ea91a516126da7bfe086c78f5b7123f66c..7b74c3e7391d1b467ecf9805a2b7bd53d60ecb0e 100644 (file)
@@ -194,32 +194,32 @@ def invoke_api(na_server, api_name, api_family='cm', query=None,
 def create_api_request(api_name, query=None, des_result=None,
                        additional_elems=None, is_iter=False,
                        record_step=50, tag=None):
-        """Creates a NetApp api request.
-
-            :param api_name: api name string
-            :param query: api query as dict
-            :param des_result: desired result as dict
-            :param additional_elems: dict other than query and des_result
-            :param is_iter: is iterator api
-            :param record_step: records at a time for iter api
-            :param tag: next tag for iter api
-        """
-        api_el = NaElement(api_name)
-        if query:
-            query_el = NaElement('query')
-            query_el.translate_struct(query)
-            api_el.add_child_elem(query_el)
-        if des_result:
-            res_el = NaElement('desired-attributes')
-            res_el.translate_struct(des_result)
-            api_el.add_child_elem(res_el)
-        if additional_elems:
-            api_el.translate_struct(additional_elems)
-        if is_iter:
-            api_el.add_new_child('max-records', str(record_step))
-        if tag:
-            api_el.add_new_child('tag', tag, True)
-        return api_el
+    """Creates a NetApp api request.
+
+        :param api_name: api name string
+        :param query: api query as dict
+        :param des_result: desired result as dict
+        :param additional_elems: dict other than query and des_result
+        :param is_iter: is iterator api
+        :param record_step: records at a time for iter api
+        :param tag: next tag for iter api
+    """
+    api_el = NaElement(api_name)
+    if query:
+        query_el = NaElement('query')
+        query_el.translate_struct(query)
+        api_el.add_child_elem(query_el)
+    if des_result:
+        res_el = NaElement('desired-attributes')
+        res_el.translate_struct(des_result)
+        api_el.add_child_elem(res_el)
+    if additional_elems:
+        api_el.translate_struct(additional_elems)
+    if is_iter:
+        api_el.add_new_child('max-records', str(record_step))
+    if tag:
+        api_el.add_new_child('tag', tag, True)
+    return api_el
 
 
 def to_bool(val):
@@ -229,7 +229,7 @@ def to_bool(val):
         if (strg == 'true' or strg == 'y'
             or strg == 'yes' or strg == 'enabled'
                 or strg == '1'):
-                    return True
+            return True
         else:
             return False
     else:
index 85edb41bc18b992194073d5978db5a008df13d82..928e8daefdb29aa2da7f423469e93300ce94d0a4 100644 (file)
@@ -412,16 +412,16 @@ class HP3PARCommon(object):
                 if (not _convert_to_base and
                     isinstance(ex, hpexceptions.HTTPForbidden) and
                         ex.get_code() == 150):
-                        # Error code 150 means 'invalid operation: Cannot grow
-                        # this type of volume'.
-                        # Suppress raising this exception because we can
-                        # resolve it by converting it into a base volume.
-                        # Afterwards, extending the volume should succeed, or
-                        # fail with a different exception/error code.
-                        ex_ctxt.reraise = False
-                        self._extend_volume(volume, volume_name,
-                                            growth_size_mib,
-                                            _convert_to_base=True)
+                    # Error code 150 means 'invalid operation: Cannot grow
+                    # this type of volume'.
+                    # Suppress raising this exception because we can
+                    # resolve it by converting it into a base volume.
+                    # Afterwards, extending the volume should succeed, or
+                    # fail with a different exception/error code.
+                    ex_ctxt.reraise = False
+                    self._extend_volume(volume, volume_name,
+                                        growth_size_mib,
+                                        _convert_to_base=True)
                 else:
                     LOG.error(_("Error extending volume: %(vol)s. "
                                 "Exception: %(ex)s") %