review.fuel-infra Code Review - openstack-build/heat-build.git/commitdiff
Handle 'detaching' state of Volume
author    Zhang Lei (Sneeze) <sneezezhang@cienet.com.cn>
Mon, 22 Jul 2013 02:08:06 +0000 (10:08 +0800)
committer Zhang Lei (Sneeze) <sneezezhang@cienet.com.cn>
Mon, 22 Jul 2013 02:10:56 +0000 (10:10 +0800)
When detaching a volume, its state changes from 'in-use'
to 'detaching', and then from 'detaching' to 'available'.
The code used to ignore the 'detaching' state, assuming the
volume had been detached once its state was no longer 'in-use'.
Now we handle the 'detaching' state and raise an error if
detaching fails.

Fixes bug #1197747

Change-Id: I555eea19409142ccb306c0cceaf7c55e71385bc6
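
For readers who want the new state handling in isolation, here is a
minimal standalone sketch (not Heat code) of the polling logic this
patch implements. FakeVol stands in for the cinderclient volume object,
which exposes get() to refresh itself and status to read the current
state; all names below are illustrative.

    class FakeVol(object):
        def __init__(self, states):
            self._states = iter(states)
            self.status = next(self._states)

        def get(self):
            # Refresh the volume, advancing to the next observed state.
            self.status = next(self._states)

    def wait_for_detach(vol):
        # Treat both 'in-use' and 'detaching' as "still detaching". The
        # old code only checked 'in-use', so a volume seen in the
        # transient 'detaching' state was wrongly assumed detached.
        while vol.status in ('in-use', 'detaching'):
            vol.get()
        # Once the volume settles, anything but 'available' means the
        # detach failed.
        if vol.status != 'available':
            raise RuntimeError('detach failed, status: %s' % vol.status)

    wait_for_detach(FakeVol(['in-use', 'detaching', 'available']))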

heat/engine/resources/volume.py
heat/tests/test_volume.py

index 9d46125799fd40a4ecf2a3bde3f47dbe9ad81e9a..fc92fad11b669a4e18a2db77b17da1a45adb82b0 100644 (file)
@@ -211,7 +211,7 @@ class VolumeDetachTask(object):
 
         try:
             vol.get()
-            while vol.status == 'in-use':
+            while vol.status in ('in-use', 'detaching'):
                 logger.debug('%s - volume still in use' % str(self))
                 yield
 
@@ -223,6 +223,8 @@ class VolumeDetachTask(object):
                 vol.get()
 
             logger.info('%s - status: %s' % (str(self), vol.status))
+            if vol.status != 'available':
+                raise exception.Error(vol.status)
 
         except clients.cinderclient.exceptions.NotFound:
             logger.warning('%s - volume not found' % str(self))
index 59a50a72e1831be78ad1d57616f67f199df05437..0b5d6e98e798cbbb8e81a9673a8855e2e477dfd1 100644 (file)
@@ -103,6 +103,22 @@ class VolumeTest(HeatTestCase):
         self.assertEqual(rsrc.state, (rsrc.CREATE, rsrc.COMPLETE))
         return rsrc
 
+    def _mock_create_volume(self, fv, stack_name):
+        clients.OpenStackClients.cinder().MultipleTimes().AndReturn(
+            self.cinder_fc)
+        vol_name = utils.PhysName(stack_name, 'DataVolume')
+        self.cinder_fc.volumes.create(
+            size=u'1', availability_zone='nova',
+            display_description=vol_name,
+            display_name=vol_name).AndReturn(fv)
+
+    def _mock_create_server_volume_script(self, fva):
+        clients.OpenStackClients.nova().MultipleTimes().AndReturn(self.fc)
+        self.fc.volumes.create_server_volume(
+            device=u'/dev/vdc', server_id=u'WikiDatabase',
+            volume_id=u'vol-123').AndReturn(fva)
+        self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
+
     def test_volume(self):
         fv = FakeVolume('creating', 'available')
         stack_name = 'test_volume_stack'
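
The two helpers above follow mox's record/replay/verify pattern: each
expected call is recorded with its arguments and a canned return value,
ReplayAll() switches the mocks into replay mode, and VerifyAll() fails
the test if any recorded expectation went unmet. A generic sketch of
the pattern (the Client class and its fetch() method are illustrative,
not part of the patch):

    import mox

    class Client(object):
        def fetch(self, key):
            raise NotImplementedError()

    m = mox.Mox()
    client = m.CreateMock(Client)
    client.fetch('vol-123').AndReturn('available')  # record expectation
    m.ReplayAll()                                   # enter replay mode
    assert client.fetch('vol-123') == 'available'   # exercise the mock
    m.VerifyAll()                                   # all expectations met?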
@@ -358,6 +374,66 @@ class VolumeTest(HeatTestCase):
 
         self.m.VerifyAll()
 
+    def test_volume_detach_with_latency(self):
+        fv = FakeVolume('creating', 'available')
+        fva = FakeVolume('attaching', 'in-use')
+        stack_name = 'test_volume_attach_stack'
+
+        self._mock_create_volume(fv, stack_name)
+
+        self._mock_create_server_volume_script(fva)
+
+        # delete script
+        volume_detach_cycle = 'in-use', 'detaching', 'available'
+        fva = FakeLatencyVolume(life_cycle=volume_detach_cycle)
+        self.fc.volumes.delete_server_volume(
+            'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
+        self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
+
+        self.m.ReplayAll()
+
+        t = template_format.parse(volume_template)
+        t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
+        stack = parse_stack(t, stack_name=stack_name)
+
+        scheduler.TaskRunner(stack['DataVolume'].create)()
+        self.assertEqual(fv.status, 'available')
+        rsrc = self.create_attachment(t, stack, 'MountPoint')
+
+        self.assertEqual(rsrc.delete(), None)
+
+        self.m.VerifyAll()
+
+    def test_volume_detach_with_error(self):
+        fv = FakeVolume('creating', 'available')
+        fva = FakeVolume('attaching', 'in-use')
+        stack_name = 'test_volume_attach_stack'
+
+        self._mock_create_volume(fv, stack_name)
+
+        self._mock_create_server_volume_script(fva)
+
+        # delete script
+        fva = FakeVolume('in-use', 'error')
+        self.fc.volumes.delete_server_volume('WikiDatabase',
+                                             'vol-123').AndReturn(None)
+        self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
+
+        self.m.ReplayAll()
+
+        t = template_format.parse(volume_template)
+        t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
+        stack = parse_stack(t, stack_name=stack_name)
+
+        scheduler.TaskRunner(stack['DataVolume'].create)()
+        self.assertEqual(fv.status, 'available')
+        rsrc = self.create_attachment(t, stack, 'MountPoint')
+        detach_task = scheduler.TaskRunner(rsrc.delete)
+
+        self.assertRaises(exception.ResourceFailure, detach_task)
+
+        self.m.VerifyAll()
+
     @skipIf(volume_backups is None, 'unable to import volume_backups')
     def test_snapshot(self):
         stack_name = 'test_volume_stack'
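
Note that test_volume_detach_with_error above asserts
exception.ResourceFailure rather than the exception.Error raised inside
VolumeDetachTask: Heat's resource machinery wraps exceptions raised
during an action into ResourceFailure before re-raising. A hedged
sketch of that wrapping (names and structure are illustrative, not Heat
internals):

    class Error(Exception):
        pass

    class ResourceFailure(Exception):
        pass

    def run_action(task):
        try:
            task()
        except Exception as exc:
            # Preserve the original exception type and message in the
            # failure text, roughly as the real wrapper does.
            raise ResourceFailure('%s: %s' % (type(exc).__name__, exc))

    def failing_detach():
        # The volume settled in 'error' instead of 'available'.
        raise Error('error')

    try:
        run_action(failing_detach)
    except ResourceFailure as failure:
        print(failure)  # Error: error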
@@ -721,6 +797,27 @@ class FakeVolume:
         pass
 
 
+class FakeLatencyVolume(object):
+    status = 'attaching'
+    id = 'vol-123'
+
+    def __init__(self, life_cycle=('creating', 'available'), **attrs):
+        if not isinstance(life_cycle, tuple):
+            raise exception.Error('life_cycle needs to be a tuple.')
+        if not len(life_cycle):
+            raise exception.Error('life_cycle should not be an empty tuple.')
+        self.life_cycle = iter(life_cycle)
+        self.status = next(self.life_cycle)
+        for key, value in attrs.iteritems():
+            setattr(self, key, value)
+
+    def get(self):
+        self.status = next(self.life_cycle)
+
+    def update(self, **kw):
+        pass
+
+
 class FakeBackup(FakeVolume):
     status = 'creating'
     id = 'backup-123'
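
For illustration, here is how the FakeLatencyVolume added above behaves
when driven the way VolumeDetachTask drives a real volume (this snippet
is not part of the patch; it assumes the class is importable):

    fva = FakeLatencyVolume(life_cycle=('in-use', 'detaching', 'available'))
    assert fva.status == 'in-use'
    fva.get()   # each get() advances one step through the life cycle
    assert fva.status == 'detaching'
    fva.get()
    assert fva.status == 'available'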