review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Improve logging for volume operations via manager
author     John Griffith <john.griffith@solidfire.com>
           Wed, 27 Feb 2013 20:49:53 +0000 (20:49 +0000)
committer  John Griffith <john.griffith@solidfire.com>
           Wed, 27 Feb 2013 22:00:48 +0000 (22:00 +0000)
A large percentage of the logging information was
being emitted at debug level.  This change promotes
some of those messages to info level and adds
consistency to the logging of the general
create/delete operations.

This includes moving logging for the basic operations
into the manager and omitting the repetitive messages
from the drivers.

Fixes bug: 1133131

Change-Id: I792efde67f30d637431ad2fa45714e50bf9c705d

cinder/volume/drivers/lvm.py
cinder/volume/iscsi.py
cinder/volume/manager.py
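
A rough sketch of the pattern described in the commit message (illustrative only, not taken from the patch; class and method names are hypothetical): the manager owns the info-level messages for the basic create/delete operations, while a driver keeps only driver-specific detail at debug level.

    import logging

    LOG = logging.getLogger(__name__)


    class ExampleVolumeManager(object):
        """Hypothetical manager showing the centralized logging pattern."""

        def __init__(self, driver):
            self.driver = driver

        def create_volume(self, volume):
            # The manager announces the operation once at info level ...
            LOG.info("volume %s: creating", volume['name'])
            try:
                self.driver.create_volume(volume)
            except Exception:
                LOG.error("volume %s: create failed", volume['name'])
                raise
            # ... and reports the outcome, so drivers need not repeat it.
            LOG.info("volume %s: created successfully", volume['name'])


    class ExampleLVMDriver(object):
        """Hypothetical driver: only driver-specific detail, at debug level."""

        def create_volume(self, volume):
            LOG.debug("allocating logical volume for %s", volume['name'])

Keeping the start/success/failure messages in one place gives a uniform create/delete log flow across backends, which is the consistency the commit message refers to.
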

diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py
index 9a8efe82d5cf3040ab1f959d5999fbb15d6db483..084789efadde5345c5c2cae909073632a6251764 100644
@@ -187,6 +187,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
         size_in_m = self.configuration.volume_clear_size
 
         if not size_in_g:
+            LOG.warning(_("Size for volume: %s not found, "
+                          "skipping secure delete.") % volume['name'])
             return
 
         if self.configuration.volume_clear == 'none':
@@ -196,7 +198,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
 
         if self.configuration.volume_clear == 'zero':
             if size_in_m == 0:
-                return self._copy_volume('/dev/zero', vol_path, size_in_g,
+                return self._copy_volume('/dev/zero',
+                                         vol_path, size_in_g,
                                          clearing=True)
             else:
                 clear_cmd = ['shred', '-n0', '-z', '-s%dMiB' % size_in_m]
@@ -225,6 +228,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
         """Deletes a snapshot."""
         if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
             # If the snapshot isn't present, then don't attempt to delete
+            LOG.warning(_("snapshot: %s not found, "
+                          "skipping delete operations") % snapshot['name'])
             return True
 
         # TODO(yamahata): zeroing out the whole snapshot triggers COW.
diff --git a/cinder/volume/iscsi.py b/cinder/volume/iscsi.py
index 2219b99f3bb0d04b79e73bbc9635d49ba2cedbce..50c7fe1ebdb40079e1f266d0506c018ebd872128 100644
@@ -141,7 +141,7 @@ class TgtAdm(TargetAdmin):
                 </target>
             """ % (name, path, chap_auth)
 
-        LOG.info(_('Creating volume: %s') % vol_id)
+        LOG.info(_('Creating iscsi_target for: %s') % vol_id)
         volumes_dir = FLAGS.volumes_dir
         volume_path = os.path.join(volumes_dir, vol_id)
 
@@ -181,7 +181,7 @@ class TgtAdm(TargetAdmin):
         return tid
 
     def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
-        LOG.info(_('Removing volume: %s') % vol_id)
+        LOG.info(_('Removing iscsi_target for: %s') % vol_id)
         vol_uuid_file = FLAGS.volume_name_template % vol_id
         volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file)
         if os.path.isfile(volume_path):
@@ -195,7 +195,7 @@ class TgtAdm(TargetAdmin):
                           iqn,
                           run_as_root=True)
         except exception.ProcessExecutionError, e:
-            LOG.error(_("Failed to delete iscsi target for volume "
+            LOG.error(_("Failed to remove iscsi target for volume "
                         "id:%(vol_id)s.") % locals())
             raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
 
@@ -246,7 +246,7 @@ class IetAdm(TargetAdmin):
         return tid
 
     def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
-        LOG.info(_('Removing volume: %s') % vol_id)
+        LOG.info(_('Removing iscsi_target for volume: %s') % vol_id)
         self._delete_logicalunit(tid, lun, **kwargs)
         self._delete_target(tid, **kwargs)
         vol_uuid_file = FLAGS.volume_name_template % vol_id
@@ -352,7 +352,7 @@ class LioAdm(TargetAdmin):
 
         vol_id = name.split(':')[1]
 
-        LOG.info(_('Creating volume: %s') % vol_id)
+        LOG.info(_('Creating iscsi_target for volume: %s') % vol_id)
 
         # cinder-rtstool requires chap_auth, but unit tests don't provide it
         chap_auth_userid = 'test_id'
@@ -392,7 +392,7 @@ class LioAdm(TargetAdmin):
         return tid
 
     def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
-        LOG.info(_('Removing volume: %s') % vol_id)
+        LOG.info(_('Removing iscsi_target: %s') % vol_id)
         vol_uuid_name = 'volume-%s' % vol_id
         iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_uuid_name)
 
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index a810a9331534b70efe3a6fa2bcb998022b8f5267..3efc9a60cccdf88a2fde6749ff483e4d49dd3a6e 100644
@@ -125,8 +125,8 @@ class VolumeManager(manager.SchedulerDependentManager):
                        "configuration to the new path."), volume_driver)
             volume_driver = MAPPING[volume_driver]
         self.driver = importutils.import_object(
-                                        volume_driver,
-                                        configuration=self.configuration)
+            volume_driver,
+            configuration=self.configuration)
         # update_service_capabilities needs service_name to be volume
         super(VolumeManager, self).__init__(service_name='volume',
                                             *args, **kwargs)
@@ -200,7 +200,6 @@ class VolumeManager(manager.SchedulerDependentManager):
             filter_properties = {}
         volume_ref = self.db.volume_get(context, volume_id)
         self._notify_about_volume_usage(context, volume_ref, "create.start")
-        LOG.info(_("volume %s: creating"), volume_ref['name'])
 
         # NOTE(vish): so we don't have to get volume from db again
         #             before passing it to the driver.
@@ -223,16 +222,24 @@ class VolumeManager(manager.SchedulerDependentManager):
             image_meta = None
 
             if snapshot_id is not None:
+                LOG.info(_("volume %s: creating from snapshot"),
+                         volume_ref['name'])
                 snapshot_ref = self.db.snapshot_get(context, snapshot_id)
             elif source_volid is not None:
+                LOG.info(_("volume %s: creating from existing volume"),
+                         volume_ref['name'])
                 sourcevol_ref = self.db.volume_get(context, source_volid)
             elif image_id is not None:
+                LOG.info(_("volume %s: creating from image"),
+                         volume_ref['name'])
                 # create the volume from an image
                 image_service, image_id = \
                     glance.get_remote_image_service(context,
                                                     image_id)
                 image_location = image_service.get_location(context, image_id)
                 image_meta = image_service.show(context, image_id)
+            else:
+                LOG.info(_("volume %s: creating"), volume_ref['name'])
 
             try:
                 model_update, cloned = self._create_volume(context,
@@ -272,6 +279,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             with excutils.save_and_reraise_exception():
                 self.db.volume_update(context,
                                       volume_ref['id'], {'status': 'error'})
+                LOG.error(_("volume %s: create failed"), volume_ref['name'])
 
         if snapshot_id:
             # Copy any Glance metadata from the original volume
@@ -301,7 +309,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         self.db.volume_update(context,
                               volume_ref['id'], {'status': status,
                                                  'launched_at': now})
-        LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
+        LOG.info(_("volume %s: created successfully"), volume_ref['name'])
         self._reset_stats()
 
         self._notify_about_volume_usage(context, volume_ref, "create.end")
@@ -384,12 +392,13 @@ class VolumeManager(manager.SchedulerDependentManager):
         """Deletes and unexports volume."""
         context = context.elevated()
         volume_ref = self.db.volume_get(context, volume_id)
+        LOG.info(_("volume %s: deleting"), volume_ref['name'])
         if volume_ref['attach_status'] == "attached":
             # Volume is still attached, need to detach first
             raise exception.VolumeAttached(volume_id=volume_id)
         if volume_ref['host'] != self.host:
             raise exception.InvalidVolume(
-                reason=_("Volume is not local to this node"))
+                reason=_("volume is not local to this node"))
 
         self._notify_about_volume_usage(context, volume_ref, "delete.start")
         self._reset_stats()
@@ -420,7 +429,7 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
         self.db.volume_destroy(context, volume_id)
-        LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
+        LOG.info(_("volume %s: deleted successfully"), volume_ref['name'])
         self._notify_about_volume_usage(context, volume_ref, "delete.end")
 
         # Commit the reservations
@@ -455,13 +464,14 @@ class VolumeManager(manager.SchedulerDependentManager):
         self.db.volume_glance_metadata_copy_to_snapshot(context,
                                                         snapshot_ref['id'],
                                                         volume_id)
-        LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
+        LOG.info(_("snapshot %s: created successfully"), snapshot_ref['name'])
         return snapshot_id
 
     def delete_snapshot(self, context, snapshot_id):
         """Deletes and unexports snapshot."""
         context = context.elevated()
         snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+        LOG.info(_("snapshot %s: deleting"), snapshot_ref['name'])
 
         try:
             LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
@@ -480,7 +490,7 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
         self.db.snapshot_destroy(context, snapshot_id)
-        LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
+        LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
         return True
 
     def attach_volume(self, context, volume_id, instance_uuid, mountpoint):