]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Fix incorrect reraising of exceptions
authorSean McGinnis <sean_mcginnis@dell.com>
Sat, 25 Apr 2015 00:15:37 +0000 (19:15 -0500)
committerSean McGinnis <sean_mcginnis@dell.com>
Wed, 13 May 2015 15:51:05 +0000 (10:51 -0500)
There are several places in the code where exception handlers re-raise
the caught exception variable ("raise ex") rather than using a bare
"raise". A bare "raise" preserves the original traceback, while
re-raising the variable resets the traceback to the point of the
re-raise, making the reported stack trace incorrect.

I had considered adding a hacking check for this, but that becomes a
little tricky: there are valid places where "raise ex" is used (for
example, raising an exception constructed or captured earlier), which
would prevent a simple automated check.

Change-Id: Ib2bd745c7ef600c514a94c3fd638d15d17a623a2

cinder/volume/drivers/emc/emc_vnx_cli.py
cinder/volume/drivers/netapp/dataontap/client/client_7mode.py
cinder/volume/drivers/netapp/dataontap/nfs_base.py
cinder/volume/drivers/nfs.py
cinder/volume/drivers/quobyte.py
cinder/volume/drivers/rbd.py
cinder/volume/drivers/remotefs.py
cinder/volume/drivers/zfssa/restclient.py
cinder/volume/drivers/zfssa/zfssanfs.py

index c38a1b508d5933d0d9fb08db619d3fd9b916c596..0b996d5dff791a9d8534866c0736ea3cf114e526 100644 (file)
@@ -452,7 +452,7 @@ class CommandLineHelper(object):
                         self.CLI_RESP_PATTERN_LUN_NOT_EXIST) >= 0:
                     return False
                 else:
-                    raise ex
+                    raise
             return _lun_state_validation(data)
 
         self._wait_for_a_condition(lun_is_ready,
@@ -2428,7 +2428,7 @@ class EMCVnxCliBase(object):
                 self._client.connect_host_to_storage_group(
                     hostname, storage_group)
             else:
-                raise ex
+                raise
         return hostname
 
     def get_lun_owner(self, volume):
@@ -2629,7 +2629,7 @@ class EMCVnxCliBase(object):
                                                     poll=False)
         except exception.EMCVnxCLICmdError as ex:
             if ex.kwargs["rc"] != 83:
-                raise ex
+                raise
             # Storage Group has not existed yet
             self.assure_storage_group(hostname)
             if self.itor_auto_reg:
index 4e3f4004897fd89b5696583479f0cf8f03f848bf..3fa659b29990b0e2106fe147e26f1c53386e4750 100644 (file)
@@ -332,7 +332,7 @@ class Client(client_base.Client):
             except netapp_api.NaApiError as e:
                 if e.code != 'UnknownCloneId':
                     self._clear_clone(clone_id)
-                raise e
+                raise
 
     def _wait_for_clone_finish(self, clone_op_id, vol_uuid):
         """Waits till a clone operation is complete or errored out."""
index e402aac24b392c52c633cc2831858996b5a27e48..acba41ef93c5302289e30a58212370736851b88d 100644 (file)
@@ -206,13 +206,13 @@ class NetAppNfsDriver(nfs.NfsDriver):
             if vol_size != src_vol_size:
                 try:
                     self.extend_volume(volume, vol_size)
-                except Exception as e:
+                except Exception:
                     LOG.error(
                         _LE("Resizing %s failed. Cleaning volume."),
                         volume.name)
                     self._execute('rm', path,
                                   run_as_root=self._execute_as_root)
-                    raise e
+                    raise
         else:
             raise exception.CinderException(
                 _("NFS file %s not discovered.") % volume['name'])
index 0f9d4bd0461da69bea9ffa16ec4a414003b51f6c..dbe77e2b246741b8a3c1ee6d9b97fae52e311359 100644 (file)
@@ -154,7 +154,7 @@ class NfsDriver(remotefs.RemoteFSDriver):
                 msg = _('%s is not installed') % package
                 raise exception.NfsException(msg)
             else:
-                raise exc
+                raise
 
         # Now that all configuration data has been loaded (shares),
         # we can "set" our final NAS file security options.
index 6ee1b87117b7f1404b2eeaca4eb3691d31fa330e..49453568cc603136b27eab0aaaa86cde08e1ac48 100644 (file)
@@ -117,7 +117,7 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
                 raise exception.VolumeDriverException(
                     'mount.quobyte is not installed')
             else:
-                raise exc
+                raise
 
     def set_nas_security_options(self, is_new_cinder_install):
         self.configuration.nas_secure_file_operations = 'true'
index 85f43f2be9346ddb976759b37cff3cf3831cf05c..b56d6749744d1b91796b20efc51a002128ff9638 100644 (file)
@@ -494,10 +494,10 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
                 LOG.debug("creating snapshot='%s'", clone_snap)
                 src_volume.create_snap(clone_snap)
                 src_volume.protect_snap(clone_snap)
-            except Exception as exc:
+            except Exception:
                 # Only close if exception since we still need it.
                 src_volume.close()
-                raise exc
+                raise
 
             # Now clone source volume snapshot
             try:
@@ -508,10 +508,10 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
                 self.RBDProxy().clone(client.ioctx, src_name, clone_snap,
                                       client.ioctx, dest_name,
                                       features=client.features)
-            except Exception as exc:
+            except Exception:
                 src_volume.unprotect_snap(clone_snap)
                 src_volume.remove_snap(clone_snap)
-                raise exc
+                raise
             finally:
                 src_volume.close()
 
index 88d4c7beb631dd98d78d2adf3a9e555c72805cf4..109e9ca9b3a2b3be1dd96d2c30408e60e7695137 100644 (file)
@@ -1259,8 +1259,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
                 snapshot['volume_id'],
                 connection_info)
             LOG.debug('nova call result: %s', result)
-        except Exception as e:
-            LOG.error(_LE('Call to Nova to create snapshot failed %s'), e)
+        except Exception:
+            LOG.exception(_LE('Call to Nova to create snapshot failed'))
             raise
 
         # Loop and wait for result
@@ -1344,8 +1344,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
                 context,
                 snapshot['id'],
                 delete_info)
-        except Exception as e:
-            LOG.error(_LE('Call to Nova delete snapshot failed %s'), e)
+        except Exception:
+            LOG.exception(_LE('Call to Nova delete snapshot failed'))
             raise
 
         # Loop and wait for result
index 64878ddcb0ebc8c09a414326e258a1cf370001a5..f8cb6ecd764e12c1dd8df7cf477b2d9a39380b98 100644 (file)
@@ -185,9 +185,9 @@ class RestClientURL(object):
                                       message="REST Not Available: \
                                       Please Upgrade")
 
-        except RestClientError as err:
+        except RestClientError:
             del self.headers['authorization']
-            raise err
+            raise
 
     def login(self, auth_str):
         """Login to an appliance using a user name and password.
index a2d23fd16c6f85f7fe851aee4218447f2860106a..9571735dcd04fe4703545f3da09b29aad08c470f 100644 (file)
@@ -97,7 +97,7 @@ class ZFSSANFSDriver(nfs.NfsDriver):
                 msg = _('%s is not installed') % package
                 raise exception.NfsException(msg)
             else:
-                raise exc
+                raise
 
         lcfg = self.configuration
         LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip)