Adds synchronization to the volume attach operation.
author     Rohit Karajgi <rohit.karajgi@nttdata.com>
           Fri, 28 Dec 2012 12:58:46 +0000 (04:58 -0800)
committer  Rohit Karajgi <rohit.karajgi@nttdata.com>
           Wed, 2 Jan 2013 05:19:04 +0000 (21:58 -0800)
This patch makes the following changes:
1. Moves the db update for volume status during attach into volume.manager
2. Adds lock synchronization to the volume attach operation
3. Adds related unit tests

Fixes Bug 1087253
Change-Id: I15242766bf4cfa4da67789c485fdf6886983eb45
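
The core of the change is the per-volume lock added in cinder/volume/manager.py
below. As a minimal sketch of the pattern, assuming cinder's bundled lockutils
module is importable (everything except the lockutils.synchronized call, which
is taken verbatim from the patch, is illustrative):

    from cinder.openstack.common import lockutils

    def attach_volume(context, volume_id, instance_uuid, mountpoint):
        # The lock name is the volume id: concurrent attach requests for
        # the same volume serialize, while attaches to different volumes
        # proceed in parallel.  external=True backs the lock with a file
        # (prefixed 'cinder-'), so it synchronizes across processes, not
        # just greenthreads within one volume service.
        @lockutils.synchronized(volume_id, 'cinder-', external=True)
        def do_attach():
            pass  # check status, mark 'attaching', then call the driver

        return do_attach()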

cinder/tests/api/contrib/test_admin_actions.py
cinder/tests/test_volume.py
cinder/volume/api.py
cinder/volume/manager.py

cinder/tests/api/contrib/test_admin_actions.py
index c99ab65ea538bba9ceb908aa7c3241b0c15d0080..bdfeb08731b4fa1926439db1bca721cebf9e6673 100644 (file)
@@ -290,3 +290,44 @@ class AdminActionsTest(test.TestCase):
         self.assertEquals(volume['instance_uuid'], None)
         self.assertEquals(volume['mountpoint'], None)
         self.assertEquals(volume['attach_status'], 'detached')
+
+    def test_attach_in_use_volume(self):
+        """Test that attaching to an in-use volume fails."""
+        # admin context
+        ctx = context.RequestContext('admin', 'fake', True)
+        # current status is available
+        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+                                        'provider_location': ''})
+        # start service to handle rpc messages for attach requests
+        self.start_service('volume', host='test')
+        self.volume_api.reserve_volume(ctx, volume)
+        self.volume_api.initialize_connection(ctx, volume, {})
+        mountpoint = '/dev/vbd'
+        self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, mountpoint)
+        self.assertRaises(exception.InvalidVolume,
+                          self.volume_api.attach,
+                          ctx,
+                          volume,
+                          fakes.get_fake_uuid(),
+                          mountpoint)
+
+    def test_attach_attaching_volume_with_different_instance(self):
+        """Test that attaching volume reserved for another instance fails."""
+        # admin context
+        ctx = context.RequestContext('admin', 'fake', True)
+        # current status is available
+        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+                                        'provider_location': ''})
+        # start service to handle rpc messages for attach requests
+        self.start_service('volume', host='test')
+        self.volume_api.initialize_connection(ctx, volume, {})
+        values = {'status': 'attaching',
+                  'instance_uuid': fakes.get_fake_uuid()}
+        db.volume_update(ctx, volume['id'], values)
+        mountpoint = '/dev/vbd'
+        self.assertRaises(exception.InvalidVolume,
+                          self.volume_api.attach,
+                          ctx,
+                          volume,
+                          stubs.FAKE_UUID,
+                          mountpoint)
cinder/tests/test_volume.py
index 5c3cd94cced464ee6bffb87fd97a708df8161971..1b7c95bcd493dcbb41f0bf9edc8b49fda1d22e3b 100644 (file)
@@ -277,7 +277,6 @@ class VolumeTestCase(test.TestCase):
 
     def test_preattach_status_volume(self):
         """Ensure volume goes into pre-attaching state"""
-
         instance_uuid = '12345678-1234-5678-1234-567812345678'
         mountpoint = "/dev/sdf"
         volume = db.volume_create(self.context, {'size': 1,
@@ -288,9 +287,9 @@ class VolumeTestCase(test.TestCase):
         volume_api.attach(self.context, volume, instance_uuid, mountpoint)
 
         vol = db.volume_get(self.context, volume_id)
-        self.assertEqual(vol['status'], "attaching")
+        self.assertEqual(vol['status'], "available")
         self.assertEqual(vol['attach_status'], None)
-        self.assertEqual(vol['instance_uuid'], instance_uuid)
+        self.assertEqual(vol['instance_uuid'], None)
 
     def test_concurrent_volumes_get_different_targets(self):
         """Ensure multiple concurrent volumes get different targets."""
cinder/volume/api.py
index 5c95233689d4e5aef7749f7ec17c158278a4f7e9..5a9e366aaed54551ba9d7feef174233749d3ec99 100644 (file)
@@ -383,8 +383,6 @@ class API(base.Base):
 
     @wrap_check_policy
     def attach(self, context, volume, instance_uuid, mountpoint):
-        self.update(context, volume, {"instance_uuid": instance_uuid,
-                                      "status": "attaching"})
         return self.volume_rpcapi.attach_volume(context,
                                                 volume,
                                                 instance_uuid,
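
Removing the premature status update here means the API layer no longer flips
the volume to 'attaching' before casting the RPC; that transition now happens
inside the manager, under the lock. A hedged sketch of the resulting caller
flow, mirroring the new admin-actions test above (names follow that test; the
comments are interpretation, not code from the patch):

    volume_api.reserve_volume(ctx, volume)             # caller signals intent
    volume_api.initialize_connection(ctx, volume, {})
    volume_api.attach(ctx, volume, instance_uuid, '/dev/vbd')
    # The 'attaching' status write and the final volume_attached() db
    # update both occur in VolumeManager.attach_volume, serialized by
    # the per-volume lock shown in the next diff.
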
cinder/volume/manager.py
index 1244f6f4be9de6086186f2791b6300c48fbac911..e24a91a3d37b2b407f65fe190d4c7c6864b45989 100644 (file)
@@ -45,6 +45,7 @@ from cinder import manager
 from cinder.openstack.common import cfg
 from cinder.openstack.common import excutils
 from cinder.openstack.common import importutils
+from cinder.openstack.common import lockutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 from cinder.openstack.common import uuidutils
@@ -337,26 +338,44 @@ class VolumeManager(manager.SchedulerDependentManager):
 
     def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
         """Updates db to show volume is attached"""
-        # TODO(vish): refactor this into a more general "reserve"
-        # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
-        if not uuidutils.is_uuid_like(instance_uuid):
-            raise exception.InvalidUUID(instance_uuid)
-
-        try:
-            self.driver.attach_volume(context,
-                                      volume_id,
-                                      instance_uuid,
-                                      mountpoint)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                self.db.volume_update(context,
-                                      volume_id,
-                                      {'status': 'error_attaching'})
 
-        self.db.volume_attached(context.elevated(),
-                                volume_id,
-                                instance_uuid,
-                                mountpoint)
+        @lockutils.synchronized(volume_id, 'cinder-', external=True)
+        def do_attach():
+            # check the volume status before attaching
+            volume = self.db.volume_get(context, volume_id)
+            if volume['status'] == 'attaching':
+                if (volume['instance_uuid'] and volume['instance_uuid'] !=
+                        instance_uuid):
+                    msg = _("being attached by another instance")
+                    raise exception.InvalidVolume(reason=msg)
+            elif volume['status'] != "available":
+                msg = _("status must be available")
+                raise exception.InvalidVolume(reason=msg)
+            self.db.volume_update(context, volume_id,
+                                  {"instance_uuid": instance_uuid,
+                                   "status": "attaching"})
+
+            # TODO(vish): refactor this into a more general "reserve"
+            # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
+            if not uuidutils.is_uuid_like(instance_uuid):
+                raise exception.InvalidUUID(instance_uuid)
+
+            try:
+                self.driver.attach_volume(context,
+                                          volume_id,
+                                          instance_uuid,
+                                          mountpoint)
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    self.db.volume_update(context,
+                                          volume_id,
+                                          {'status': 'error_attaching'})
+
+            self.db.volume_attached(context.elevated(),
+                                    volume_id,
+                                    instance_uuid,
+                                    mountpoint)
+        return do_attach()
 
     def detach_volume(self, context, volume_id):
         """Updates db to show volume is detached"""