From 9d9a017913e362a30b14fc1b8d4c8642da5df09b Mon Sep 17 00:00:00 2001
From: John Griffith
Date: Tue, 27 Jan 2015 22:59:28 -0600
Subject: [PATCH] Add completion logging for snapshots and volumes

While trying to debug insufficient resource issues occasionally seen in
the gate, it was noted that there's no logging for resource deletion of
volumes and snapshots.

This patch adds some simple logging in cinder.volume.api to note when
and why a delete call might fail, as well as an info message indicating
when the request is successfully issued to the scheduler/rpc layer. In
addition, this patch adds the same sort of logging to the LVM backend
driver.

This additional logging will help troubleshoot the resource
issues/races we're currently encountering in the gate.

Change-Id: I1ac39a43d63650ab020b180eefb3a24a946328d2
---
 cinder/tests/test_volume.py  |  5 ++++-
 cinder/volume/api.py         | 29 ++++++++++++++++++++++++++++-
 cinder/volume/drivers/lvm.py |  2 ++
 3 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/cinder/tests/test_volume.py b/cinder/tests/test_volume.py
index a04817ffe..93f1f6a14 100644
--- a/cinder/tests/test_volume.py
+++ b/cinder/tests/test_volume.py
@@ -3755,7 +3755,10 @@ class LVMISCSIVolumeDriverTestCase(DriverTestCase):
         self.stubs.Set(self.volume.driver.vg, 'lv_has_snapshot',
                        lambda x: False)
         self.output = 'x'
-        self.volume.driver.delete_volume({'name': 'test1', 'size': 1024})
+        self.volume.driver.delete_volume(
+            {'name': 'test1',
+             'size': 1024,
+             'id': '478e14bc-a6a9-11e4-89d3-123b93f75cba'})
 
     def test_lvm_migrate_volume_no_loc_info(self):
         host = {'capabilities': {}}
diff --git a/cinder/volume/api.py b/cinder/volume/api.py
index dd7cd8ed1..0582afbea 100644
--- a/cinder/volume/api.py
+++ b/cinder/volume/api.py
@@ -33,7 +33,7 @@ from cinder import context
 from cinder.db import base
 from cinder import exception
 from cinder import flow_utils
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LI
 from cinder.image import glance
 from cinder import keymgr
 from cinder.openstack.common import log as logging
@@ -282,6 +282,8 @@ class API(base.Base):
             return
         if volume['attach_status'] == "attached":
             # Volume is still attached, need to detach first
+            LOG.info(_LI('Unable to delete volume: %s, '
+                         'volume is attached.'), volume['id'])
             raise exception.VolumeAttached(volume_id=volume_id)
 
         if not force and volume['status'] not in ["available", "error",
@@ -289,19 +291,31 @@ class API(base.Base):
                                                   "error_extending"]:
             msg = _("Volume status must be available or error, "
                     "but current status is: %s") % volume['status']
+            LOG.info(_LI('Unable to delete volume: %(vol_id)s, '
+                         'volume must be available or '
+                         'error, but is %(vol_status)s.'),
+                     {'vol_id': volume['id'],
+                      'vol_status': volume['status']})
             raise exception.InvalidVolume(reason=msg)
 
         if volume['migration_status'] is not None:
             # Volume is migrating, wait until done
+            LOG.info(_LI('Unable to delete volume: %s, '
+                         'volume is currently migrating.'), volume['id'])
             msg = _("Volume cannot be deleted while migrating")
             raise exception.InvalidVolume(reason=msg)
 
         if volume['consistencygroup_id'] is not None:
             msg = _("Volume cannot be deleted while in a consistency group.")
+            LOG.info(_LI('Unable to delete volume: %s, '
+                         'volume is currently part of a '
+                         'consistency group.'), volume['id'])
             raise exception.InvalidVolume(reason=msg)
 
         snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
         if len(snapshots):
+            LOG.info(_LI('Unable to delete volume: %s, '
+                         'volume currently has snapshots.'), volume['id'])
             msg = _("Volume still has %d dependent snapshots") % len(snapshots)
             raise exception.InvalidVolume(reason=msg)
 
@@ -317,6 +331,8 @@ class API(base.Base):
                                'terminated_at': now})
 
         self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
+        LOG.info(_LI('Successfully issued request to '
+                     'delete volume: %s'), volume['id'])
 
     @wrap_check_policy
     def update(self, context, volume, fields):
@@ -763,10 +779,19 @@ class API(base.Base):
     @wrap_check_policy
     def delete_snapshot(self, context, snapshot, force=False):
         if not force and snapshot['status'] not in ["available", "error"]:
+            LOG.error(_LE('Unable to delete snapshot: %(snap_id)s, '
+                          'due to invalid status. '
+                          'Status must be available or '
+                          'error, not %(snap_status)s.'),
+                      {'snap_id': snapshot['id'],
+                       'snap_status': snapshot['status']})
             msg = _("Volume Snapshot status must be available or error")
             raise exception.InvalidSnapshot(reason=msg)
         cgsnapshot_id = snapshot.get('cgsnapshot_id', None)
         if cgsnapshot_id:
+            LOG.error(_LE('Unable to delete snapshot: %s, '
+                          'because it is part of a consistency '
+                          'group.'), snapshot['id'])
             msg = _("Snapshot %s is part of a cgsnapshot and has to be "
                     "deleted together with the cgsnapshot.") % snapshot['id']
             LOG.error(msg)
@@ -775,6 +800,8 @@ class API(base.Base):
                                 {'status': 'deleting'})
         volume = self.db.volume_get(context, snapshot['volume_id'])
         self.volume_rpcapi.delete_snapshot(context, snapshot, volume['host'])
+        LOG.info(_LI('Successfully issued request to '
+                     'delete snapshot: %s'), snapshot['id'])
 
     @wrap_check_policy
     def update_snapshot(self, context, snapshot, fields):
diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py
index 5c10a866d..47259309f 100644
--- a/cinder/volume/drivers/lvm.py
+++ b/cinder/volume/drivers/lvm.py
@@ -307,6 +307,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
             raise exception.VolumeIsBusy(volume_name=volume['name'])
 
         self._delete_volume(volume)
+        LOG.info(_LI('Successfully deleted volume: %s'), volume['id'])
 
     def create_snapshot(self, snapshot):
         """Creates a snapshot."""
@@ -321,6 +322,7 @@
             # If the snapshot isn't present, then don't attempt to delete
             LOG.warning(_LW("snapshot: %s not found, "
                             "skipping delete operations") % snapshot['name'])
+            LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
             return True
 
         # TODO(yamahata): zeroing out the whole snapshot triggers COW.
-- 
2.45.2
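
Note for reviewers: every addition above follows the same log-before-raise
shape, i.e. record why the delete is being rejected (or that the request was
handed off) at the point the decision is made, so the reason is recoverable
from gate logs alone. A minimal standalone sketch of that pattern, using
stdlib logging in place of cinder's oslo-based LOG and the _LI/_LE
translation markers; the InvalidVolume class and delete() function here are
illustrative stand-ins, not cinder's actual API:

    import logging

    LOG = logging.getLogger(__name__)


    class InvalidVolume(Exception):
        """Stand-in for cinder.exception.InvalidVolume."""


    def delete(volume, force=False):
        # Log *why* the delete is rejected before raising, so the
        # failure reason shows up in the logs without a debugger.
        if not force and volume['status'] not in ('available', 'error'):
            LOG.info('Unable to delete volume: %(vol_id)s, volume must be '
                     'available or error, but is %(vol_status)s.',
                     {'vol_id': volume['id'],
                      'vol_status': volume['status']})
            raise InvalidVolume(volume['status'])
        # ... the request would be handed to the scheduler/rpc layer here ...
        LOG.info('Successfully issued request to delete volume: %s',
                 volume['id'])


    if __name__ == '__main__':
        logging.basicConfig(level=logging.INFO)
        delete({'id': '478e14bc-a6a9-11e4-89d3-123b93f75cba',
                'status': 'available'})

Passing a volume in a non-deletable state (say, 'creating') logs the
"Unable to delete volume" message and then raises, which is exactly the
breadcrumb the patch is after when a gate run hits a resource race.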