From: Zhang Lei (Sneeze)
Date: Mon, 22 Jul 2013 02:08:06 +0000 (+0800)
Subject: Handle 'detaching' state of Volume
X-Git-Tag: 2014.1~341^2
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=cd7acb5027e6e0c2e2bac80669f468ef95a1187b;p=openstack-build%2Fheat-build.git

Handle 'detaching' state of Volume

When a volume is detached, its state changes from 'in-use' to
'detaching', and then from 'detaching' to 'available'. The code used
to ignore the 'detaching' state, assuming the volume had been detached
as soon as its state was no longer 'in-use'. Now we handle the
'detaching' state explicitly and raise an error if detaching fails.

Fixes bug #1197747

Change-Id: I555eea19409142ccb306c0cceaf7c55e71385bc6
---

diff --git a/heat/engine/resources/volume.py b/heat/engine/resources/volume.py
index 9d461257..fc92fad1 100644
--- a/heat/engine/resources/volume.py
+++ b/heat/engine/resources/volume.py
@@ -211,7 +211,7 @@ class VolumeDetachTask(object):
 
         try:
             vol.get()
-            while vol.status == 'in-use':
+            while vol.status in ('in-use', 'detaching'):
                 logger.debug('%s - volume still in use' % str(self))
                 yield
 
@@ -223,6 +223,8 @@ class VolumeDetachTask(object):
 
             vol.get()
             logger.info('%s - status: %s' % (str(self), vol.status))
+            if vol.status != 'available':
+                raise exception.Error(vol.status)
 
         except clients.cinderclient.exceptions.NotFound:
             logger.warning('%s - volume not found' % str(self))
diff --git a/heat/tests/test_volume.py b/heat/tests/test_volume.py
index 59a50a72..0b5d6e98 100644
--- a/heat/tests/test_volume.py
+++ b/heat/tests/test_volume.py
@@ -103,6 +103,22 @@ class VolumeTest(HeatTestCase):
         self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
         return rsrc
 
+    def _mock_create_volume(self, fv, stack_name):
+        clients.OpenStackClients.cinder().MultipleTimes().AndReturn(
+            self.cinder_fc)
+        vol_name = utils.PhysName(stack_name, 'DataVolume')
+        self.cinder_fc.volumes.create(
+            size=u'1', availability_zone='nova',
+            display_description=vol_name,
+            display_name=vol_name).AndReturn(fv)
+
+    def _mock_create_server_volume_script(self, fva):
+        clients.OpenStackClients.nova().MultipleTimes().AndReturn(self.fc)
+        self.fc.volumes.create_server_volume(
+            device=u'/dev/vdc', server_id=u'WikiDatabase',
+            volume_id=u'vol-123').AndReturn(fva)
+        self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
+
     def test_volume(self):
         fv = FakeVolume('creating', 'available')
         stack_name = 'test_volume_stack'
@@ -358,6 +374,66 @@ class VolumeTest(HeatTestCase):
 
         self.m.VerifyAll()
 
+    def test_volume_detach_with_latency(self):
+        fv = FakeVolume('creating', 'available')
+        fva = FakeVolume('attaching', 'in-use')
+        stack_name = 'test_volume_attach_stack'
+
+        self._mock_create_volume(fv, stack_name)
+
+        self._mock_create_server_volume_script(fva)
+
+        # delete script
+        volume_detach_cycle = 'in-use', 'detaching', 'available'
+        fva = FakeLatencyVolume(life_cycle=volume_detach_cycle)
+        self.fc.volumes.delete_server_volume(
+            'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
+        self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
+
+        self.m.ReplayAll()
+
+        t = template_format.parse(volume_template)
+        t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
+        stack = parse_stack(t, stack_name=stack_name)
+
+        scheduler.TaskRunner(stack['DataVolume'].create)()
+        self.assertEqual(fv.status, 'available')
+        rsrc = self.create_attachment(t, stack, 'MountPoint')
+
+        self.assertEqual(rsrc.delete(), None)
+
+        self.m.VerifyAll()
+
+    def test_volume_detach_with_error(self):
+        fv = FakeVolume('creating', 'available')
+        fva = FakeVolume('attaching', 'in-use')
+        stack_name = 'test_volume_attach_stack'
+
+        self._mock_create_volume(fv, stack_name)
+
+        self._mock_create_server_volume_script(fva)
+
+        # delete script
+        fva = FakeVolume('in-use', 'error')
+        self.fc.volumes.delete_server_volume('WikiDatabase',
+                                             'vol-123').AndReturn(None)
+        self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
+
+        self.m.ReplayAll()
+
+        t = template_format.parse(volume_template)
+        t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
+        stack = parse_stack(t, stack_name=stack_name)
+
+        scheduler.TaskRunner(stack['DataVolume'].create)()
+        self.assertEqual(fv.status, 'available')
+        rsrc = self.create_attachment(t, stack, 'MountPoint')
+        detach_task = scheduler.TaskRunner(rsrc.delete)
+
+        self.assertRaises(exception.ResourceFailure, detach_task)
+
+        self.m.VerifyAll()
+
     @skipIf(volume_backups is None, 'unable to import volume_backups')
     def test_snapshot(self):
         stack_name = 'test_volume_stack'
@@ -721,6 +797,27 @@ class FakeVolume:
         pass
 
 
+class FakeLatencyVolume(object):
+    status = 'attaching'
+    id = 'vol-123'
+
+    def __init__(self, life_cycle=('creating', 'available'), **attrs):
+        if not isinstance(life_cycle, tuple):
+            raise exception.Error('life_cycle needs to be a tuple.')
+        if not len(life_cycle):
+            raise exception.Error('life_cycle should not be an empty tuple.')
+        self.life_cycle = iter(life_cycle)
+        self.status = next(self.life_cycle)
+        for key, value in attrs.iteritems():
+            setattr(self, key, value)
+
+    def get(self):
+        self.status = next(self.life_cycle)
+
+    def update(self, **kw):
+        pass
+
+
 class FakeBackup(FakeVolume):
     status = 'creating'
     id = 'backup-123'
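
For readers unfamiliar with the detach life cycle, here is a minimal,
self-contained sketch of the behaviour this patch gives VolumeDetachTask.
It is written synchronously for readability, whereas the real code is a
generator that yields between polls under the Heat scheduler and raises
heat.common.exception.Error; the names wait_for_detach, DetachFailed and
StubVolume below are hypothetical, invented for this illustration only.

# Standalone sketch of the detach-polling logic after this patch.
# wait_for_detach, DetachFailed and StubVolume are hypothetical names;
# the real implementation lives in
# heat.engine.resources.volume.VolumeDetachTask.


class DetachFailed(Exception):
    """Detach ended in a state other than 'available' (e.g. 'error')."""


def wait_for_detach(vol):
    # 'in-use' and 'detaching' are both treated as transient now.
    # Previously the loop stopped as soon as the status left 'in-use',
    # so a volume still in 'detaching' (or stuck in 'error') was
    # wrongly treated as detached.
    vol.get()
    while vol.status in ('in-use', 'detaching'):
        vol.get()  # the real task yields here and re-polls later
    if vol.status != 'available':
        raise DetachFailed(vol.status)


class StubVolume(object):
    """Steps through a fixed sequence of states, one per get()."""

    def __init__(self, *states):
        self._states = iter(states)
        self.status = next(self._states)

    def get(self):
        self.status = next(self._states)


# Slow but successful detach: 'detaching' no longer ends the wait early.
wait_for_detach(StubVolume('in-use', 'detaching', 'available'))

# Failed detach: an unexpected terminal state now raises instead of
# silently passing, mirroring test_volume_detach_with_error above.
try:
    wait_for_detach(StubVolume('in-use', 'error'))
except DetachFailed as exc:
    print('detach failed, volume status: %s' % exc)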