review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Only use LOG.exception in exception handler
author    Sean McGinnis <sean_mcginnis@dell.com>
          Mon, 12 Oct 2015 21:07:55 +0000 (16:07 -0500)
committer Sean McGinnis <sean_mcginnis@dell.com>
          Wed, 14 Oct 2015 20:33:23 +0000 (15:33 -0500)
There were a few instances of code calling LOG.exception in places
where no exception was in context. This can cause errors; these calls
should simply be LOG.error, which is effectively what they end up
being anyway.

Some instances in vhdutils.py are left as-is because they are being
addressed in Ib3113f2c4752d37e890f97d259da5d51cbfcfb96 under a
different bug report.
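
The distinction can be sketched in plain Python along these lines (an
illustrative example only, not part of this change; the _do_delete()
helper and volume argument are hypothetical, and the stdlib logging
module stands in for oslo_log, whose API is the same here):

    import logging

    logging.basicConfig()
    LOG = logging.getLogger(__name__)


    def _do_delete(volume):
        # Hypothetical backend call that fails.
        raise RuntimeError("backend failure for %s" % volume)


    def detach_snapshot(remote=False):
        # No exception is active here, so LOG.exception would append a
        # meaningless "NoneType: None" traceback; LOG.error is correct.
        if remote:
            LOG.error("Detaching snapshot from a remote node "
                      "is not supported.")
            raise NotImplementedError("detach snapshot from remote node")


    def delete_volume(volume):
        try:
            _do_delete(volume)
        except Exception:
            # Inside an exception handler: LOG.exception logs at ERROR
            # level and appends the active exception's traceback.
            LOG.exception("Failed to delete volume %s", volume)
            raise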

Change-Id: I1898c4d63c16c4d41b3e255a7de21429d1b04ddb
Closes-bug: #1504735

cinder/volume/driver.py
cinder/volume/drivers/lvm.py
cinder/volume/flows/manager/create_volume.py
cinder/volume/manager.py

diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py
index 2eba2cca3014924a41c3630b57e51d4e5126a898..f409c5b45ac4a5105c634bf72c153d287df47496 100644
--- a/cinder/volume/driver.py
+++ b/cinder/volume/driver.py
@@ -427,8 +427,8 @@ class BaseVD(object):
         # flag in the interface is for anticipation that it will be enabled
         # in the future.
         if remote:
-            LOG.exception(_LE("Detaching snapshot from a remote node "
-                              "is not supported."))
+            LOG.error(_LE("Detaching snapshot from a remote node "
+                          "is not supported."))
             raise exception.NotSupportedOperation(
                 operation=_("detach snapshot from remote node"))
         else:
@@ -995,8 +995,8 @@ class BaseVD(object):
         # flag in the interface is for anticipation that it will be enabled
         # in the future.
         if remote:
-            LOG.exception(_LE("Attaching snapshot from a remote node "
-                              "is not supported."))
+            LOG.error(_LE("Attaching snapshot from a remote node "
+                          "is not supported."))
             raise exception.NotSupportedOperation(
                 operation=_("attach snapshot from remote node"))
         else:
diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py
index 363ec3c3b9ba3ca9ef51184103276f75552035e2..2c163788f64de919fba25b61aef2dc1ff79b88e6 100644
--- a/cinder/volume/drivers/lvm.py
+++ b/cinder/volume/drivers/lvm.py
@@ -697,7 +697,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
                          "check your configuration because source and "
                          "destination are the same Volume Group: %(name)s.") %
                        {'id': volume['id'], 'name': self.vg.vg_name})
-            LOG.exception(message)
+            LOG.error(message)
             raise exception.VolumeBackendAPIException(data=message)
 
     def get_pool(self, volume):
diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py
index 4fdc354285d5c7ea8fd4e7dc92b6d18bb3f3b676..e7e69ef29d4266c7111a60c649533e20fcd8e4ce 100644
--- a/cinder/volume/flows/manager/create_volume.py
+++ b/cinder/volume/flows/manager/create_volume.py
@@ -806,8 +806,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
         # we can't do anything if the driver didn't init
         if not self.driver.initialized:
             driver_name = self.driver.__class__.__name__
-            LOG.exception(_LE("Unable to create volume. "
-                              "Volume driver %s not initialized"), driver_name)
+            LOG.error(_LE("Unable to create volume. "
+                          "Volume driver %s not initialized"), driver_name)
             raise exception.DriverNotInitialized()
 
         create_type = volume_spec.pop('type', None)
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 660fef3f5f10f7b459eac80c3c6ee66793df6e19..75369c8f7136e41866f0fd4786436346a0dbb5af 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -2626,9 +2626,9 @@ class VolumeManager(manager.SchedulerDependentManager):
             if model_update:
                 if model_update['status'] in ['error_deleting', 'error']:
                     msg = (_('Delete consistency group failed.'))
-                    LOG.exception(msg,
-                                  resource={'type': 'consistency_group',
-                                            'id': group.id})
+                    LOG.error(msg,
+                              resource={'type': 'consistency_group',
+                                        'id': group.id})
                     raise exception.VolumeDriverException(message=msg)
                 else:
                     group.update(model_update)
@@ -2779,7 +2779,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                 if model_update['status'] in ['error']:
                     msg = (_('Error occurred when updating consistency group '
                              '%s.') % group.id)
-                    LOG.exception(msg)
+                    LOG.error(msg)
                     raise exception.VolumeDriverException(message=msg)
                 group.update(model_update)
                 group.save()