From 2331d3336a6adf4fc13a3b187e91a5d1b1f7c723 Mon Sep 17 00:00:00 2001
From: Rohit Karajgi
Date: Fri, 28 Dec 2012 04:58:46 -0800
Subject: [PATCH] Adds synchronization to attach volume.

This patch makes the following changes:
1. Move the db update for volume status during attach into volume.manager
2. Add lock synchronization to the volume attach operation
3. Add related unit tests

Fixes Bug 1087253

Change-Id: I15242766bf4cfa4da67789c485fdf6886983eb45
---
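Notes (below the cut line, not part of the commit message):

The bug is a read-check-write race: two concurrent attach requests could
both read status 'available' and both proceed, because the status check
and the status update were not done under any mutual exclusion. Moving
the update into the manager and running it under a per-volume lock makes
the check and the update atomic, as in do_attach() in the hunk below.
The following is an illustration only, not code from this patch: it
stands in for lockutils' external lock with threading.Lock, and the
names db, attach and _volume_lock are hypothetical.

    import threading

    _locks = {}
    _locks_guard = threading.Lock()

    def _volume_lock(volume_id):
        # one lock per volume id, mirroring
        # @lockutils.synchronized(volume_id, 'cinder-', external=True)
        with _locks_guard:
            return _locks.setdefault(volume_id, threading.Lock())

    def attach(db, volume_id, instance_uuid):
        with _volume_lock(volume_id):
            volume = db[volume_id]  # read
            if volume['status'] == 'attaching':
                if (volume['instance_uuid'] and
                        volume['instance_uuid'] != instance_uuid):
                    raise ValueError('being attached by another instance')
            elif volume['status'] != 'available':
                raise ValueError('status must be available')
            # the check and the write happen under the same lock, so a
            # concurrent attach of this volume observes 'attaching',
            # never a stale 'available'
            db[volume_id] = dict(volume, status='attaching',
                                 instance_uuid=instance_uuid)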
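external=True makes the lock a file lock rather than a thread lock, so
it also serializes attach requests that land in separate processes, not
just separate threads of one cinder-volume service. lockutils derives
the lock file from the prefix and name. A simplified, hypothetical
sketch of that interprocess part (acquire_external_lock is not a real
lockutils API; POSIX-only):

    import fcntl
    import os

    def acquire_external_lock(name, prefix='cinder-', lock_dir='/tmp'):
        # callers passing the same name block here, across processes
        path = os.path.join(lock_dir, prefix + name)
        fd = os.open(path, os.O_CREAT | os.O_RDWR, 0o600)
        fcntl.flock(fd, fcntl.LOCK_EX)
        return fd  # os.close(fd) releases the lock

While the lock is held, the volume_get/volume_update pair inside
do_attach() cannot interleave with another attach of the same volume id,
whichever process handles it.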
 .../tests/api/contrib/test_admin_actions.py    | 41 ++++++++++++++
 cinder/tests/test_volume.py                    |  4 ++--
 cinder/volume/api.py                           |  2 --
 cinder/volume/manager.py                       | 58 +++++++++++++-------
 4 files changed, 81 insertions(+), 24 deletions(-)

diff --git a/cinder/tests/api/contrib/test_admin_actions.py b/cinder/tests/api/contrib/test_admin_actions.py
index c99ab65ea..bdfeb0873 100644
--- a/cinder/tests/api/contrib/test_admin_actions.py
+++ b/cinder/tests/api/contrib/test_admin_actions.py
@@ -290,3 +290,44 @@ class AdminActionsTest(test.TestCase):
         self.assertEquals(volume['instance_uuid'], None)
         self.assertEquals(volume['mountpoint'], None)
         self.assertEquals(volume['attach_status'], 'detached')
+
+    def test_attach_in_use_volume(self):
+        """Test that attaching to an in-use volume fails."""
+        # admin context
+        ctx = context.RequestContext('admin', 'fake', True)
+        # current status is available
+        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+                                        'provider_location': ''})
+        # start service to handle rpc messages for attach requests
+        self.start_service('volume', host='test')
+        self.volume_api.reserve_volume(ctx, volume)
+        self.volume_api.initialize_connection(ctx, volume, {})
+        mountpoint = '/dev/vbd'
+        self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, mountpoint)
+        self.assertRaises(exception.InvalidVolume,
+                          self.volume_api.attach,
+                          ctx,
+                          volume,
+                          fakes.get_fake_uuid(),
+                          mountpoint)
+
+    def test_attach_attaching_volume_with_different_instance(self):
+        """Test that attaching a volume reserved for another instance fails."""
+        # admin context
+        ctx = context.RequestContext('admin', 'fake', True)
+        # current status is available
+        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+                                        'provider_location': ''})
+        # start service to handle rpc messages for attach requests
+        self.start_service('volume', host='test')
+        self.volume_api.initialize_connection(ctx, volume, {})
+        values = {'status': 'attaching',
+                  'instance_uuid': fakes.get_fake_uuid()}
+        db.volume_update(ctx, volume['id'], values)
+        mountpoint = '/dev/vbd'
+        self.assertRaises(exception.InvalidVolume,
+                          self.volume_api.attach,
+                          ctx,
+                          volume,
+                          stubs.FAKE_UUID,
+                          mountpoint)
diff --git a/cinder/tests/test_volume.py b/cinder/tests/test_volume.py
index 5c3cd94cc..1b7c95bcd 100644
--- a/cinder/tests/test_volume.py
+++ b/cinder/tests/test_volume.py
@@ -288,9 +288,9 @@ def test_preattach_status_volume(self):
         volume_api.attach(self.context, volume, instance_uuid, mountpoint)
 
         vol = db.volume_get(self.context, volume_id)
-        self.assertEqual(vol['status'], "attaching")
+        self.assertEqual(vol['status'], "available")
         self.assertEqual(vol['attach_status'], None)
-        self.assertEqual(vol['instance_uuid'], instance_uuid)
+        self.assertEqual(vol['instance_uuid'], None)
 
     def test_concurrent_volumes_get_different_targets(self):
         """Ensure multiple concurrent volumes get different targets."""
diff --git a/cinder/volume/api.py b/cinder/volume/api.py
index 5c9523368..5a9e366aa 100644
--- a/cinder/volume/api.py
+++ b/cinder/volume/api.py
@@ -383,8 +383,6 @@ class API(base.Base):
 
     @wrap_check_policy
     def attach(self, context, volume, instance_uuid, mountpoint):
-        self.update(context, volume, {"instance_uuid": instance_uuid,
-                                      "status": "attaching"})
         return self.volume_rpcapi.attach_volume(context,
                                                 volume,
                                                 instance_uuid,
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 1244f6f4b..e24a91a3d 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -45,6 +45,7 @@ from cinder import manager
 from cinder.openstack.common import cfg
 from cinder.openstack.common import excutils
 from cinder.openstack.common import importutils
+from cinder.openstack.common import lockutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 from cinder.openstack.common import uuidutils
@@ -337,26 +338,44 @@ class VolumeManager(manager.SchedulerDependentManager):
 
     def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
         """Updates db to show volume is attached"""
-        # TODO(vish): refactor this into a more general "reserve"
-        # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
-        if not uuidutils.is_uuid_like(instance_uuid):
-            raise exception.InvalidUUID(instance_uuid)
-
-        try:
-            self.driver.attach_volume(context,
-                                      volume_id,
-                                      instance_uuid,
-                                      mountpoint)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                self.db.volume_update(context,
-                                      volume_id,
-                                      {'status': 'error_attaching'})
-
-        self.db.volume_attached(context.elevated(),
-                                volume_id,
-                                instance_uuid,
-                                mountpoint)
+
+        @lockutils.synchronized(volume_id, 'cinder-', external=True)
+        def do_attach():
+            # check the volume status before attaching
+            volume = self.db.volume_get(context, volume_id)
+            if volume['status'] == 'attaching':
+                if (volume['instance_uuid'] and volume['instance_uuid'] !=
+                        instance_uuid):
+                    msg = _("being attached by another instance")
+                    raise exception.InvalidVolume(reason=msg)
+            elif volume['status'] != "available":
+                msg = _("status must be available")
+                raise exception.InvalidVolume(reason=msg)
+            self.db.volume_update(context, volume_id,
+                                  {"instance_uuid": instance_uuid,
+                                   "status": "attaching"})
+
+            # TODO(vish): refactor this into a more general "reserve"
+            # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
+            if not uuidutils.is_uuid_like(instance_uuid):
+                raise exception.InvalidUUID(instance_uuid)
+
+            try:
+                self.driver.attach_volume(context,
+                                          volume_id,
+                                          instance_uuid,
+                                          mountpoint)
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    self.db.volume_update(context,
+                                          volume_id,
+                                          {'status': 'error_attaching'})
+
+            self.db.volume_attached(context.elevated(),
+                                    volume_id,
+                                    instance_uuid,
+                                    mountpoint)
+        return do_attach()
 
     def detach_volume(self, context, volume_id):
         """Updates db to show volume is detached"""
-- 
2.45.2