From: Shyama Venugopal
Date: Thu, 30 Apr 2015 10:22:27 +0000 (+0200)
Subject: Revert state if attachment already exists
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=709949d904c7f1d0e13b41bf6723d71bcf6b5652;p=openstack-build%2Fcinder-build.git

Revert state if attachment already exists

When attach is called twice for the same volume and instance,
attach_volume sees that the attachment already exists and returns
early. This leaves the volume stuck in the 'attaching' state. The
volume status should be reset to 'in-use' when the attachment
already exists.

Change-Id: I2a2d28de59af234300082ab4655b0bc10699d54c
Closes-Bug: #1449980
---
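The actual change is the two added lines in cinder/volume/manager.py below. For
illustration only, the fixed early-return path boils down to the following
minimal, self-contained sketch; FakeDB and this standalone attach_volume are
hypothetical stand-ins, not Cinder's real VolumeManager or DB API:

# Hypothetical sketch only: FakeDB and this standalone attach_volume are
# illustrative stand-ins for the real VolumeManager / DB API.
class FakeDB(object):
    def __init__(self):
        # cinder-api has already moved the volume to 'attaching' by the
        # time the manager's attach_volume runs.
        self.volumes = {'vol-1': {'status': 'attaching'}}
        self.attachments = {('vol-1', 'inst-1'): {'attach_status': 'attached'}}

    def volume_attachment_get(self, volume_id, instance_uuid):
        return self.attachments.get((volume_id, instance_uuid))

    def volume_update(self, volume_id, values):
        self.volumes[volume_id].update(values)


def attach_volume(db, volume_id, instance_uuid):
    attachment = db.volume_attachment_get(volume_id, instance_uuid)
    if attachment is not None:
        # Attach was called twice: before the fix this early return left
        # the volume stuck in 'attaching'; the fix resets it to 'in-use'.
        db.volume_update(volume_id, {'status': 'in-use'})
        return attachment
    # ... the normal attach flow (new attachment record, etc.) continues here


db = FakeDB()
attach_volume(db, 'vol-1', 'inst-1')  # second attach of an attached volume
assert db.volumes['vol-1']['status'] == 'in-use'
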
diff --git a/cinder/tests/unit/test_volume.py b/cinder/tests/unit/test_volume.py
index ffe2663f1..8debdf55a 100644
--- a/cinder/tests/unit/test_volume.py
+++ b/cinder/tests/unit/test_volume.py
@@ -2046,6 +2046,53 @@ class VolumeTestCase(BaseVolumeTestCase):
                           self.context,
                           volume_id)
 
+    def test_run_attach_twice_multiattach_volume_for_instances(self):
+        """Make sure volume can be attached to multiple instances."""
+        mountpoint = "/dev/sdf"
+        # attach volume to the instance then to detach
+        instance_uuid = '12345678-1234-5678-1234-567812345699'
+        volume = tests_utils.create_volume(self.context,
+                                           admin_metadata={'readonly': 'True'},
+                                           multiattach=True,
+                                           **self.volume_params)
+        volume_id = volume['id']
+        self.volume.create_volume(self.context, volume_id)
+        attachment = self.volume.attach_volume(self.context, volume_id,
+                                               instance_uuid, None,
+                                               mountpoint, 'ro')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual('in-use', vol['status'])
+        self.assertTrue(vol['multiattach'])
+        self.assertEqual('attached', attachment['attach_status'])
+        self.assertEqual(mountpoint, attachment['mountpoint'])
+        self.assertEqual(instance_uuid, attachment['instance_uuid'])
+        self.assertIsNone(attachment['attached_host'])
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEqual(2, len(admin_metadata))
+        expected = dict(readonly='True', attached_mode='ro')
+        ret = {}
+        for item in admin_metadata:
+            ret.update({item['key']: item['value']})
+        self.assertDictMatch(expected, ret)
+        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+        conn_info = self.volume.initialize_connection(self.context,
+                                                      volume_id, connector)
+        self.assertEqual('ro', conn_info['data']['access_mode'])
+
+        mountpoint2 = "/dev/sdx"
+        attachment2 = self.volume.attach_volume(self.context, volume_id,
+                                                instance_uuid, None,
+                                                mountpoint2, 'ro')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual('in-use', vol['status'])
+        self.assertEqual(True, vol['multiattach'])
+        self.assertIsNone(attachment2)
+
+        self.assertRaises(exception.VolumeAttached,
+                          self.volume.delete_volume,
+                          self.context,
+                          volume_id)
+
     def test_attach_detach_not_multiattach_volume_for_instances(self):
         """Make sure volume can't be attached to more than one instance."""
         mountpoint = "/dev/sdf"
@@ -2210,6 +2257,51 @@ class VolumeTestCase(BaseVolumeTestCase):
                           self.context,
                           volume_id)
 
+    def test_run_attach_twice_multiattach_volume_for_hosts(self):
+        """Make sure volume can be attached and detached from hosts."""
+        mountpoint = "/dev/sdf"
+        volume = tests_utils.create_volume(
+            self.context,
+            admin_metadata={'readonly': 'False'},
+            multiattach=True,
+            **self.volume_params)
+        volume_id = volume['id']
+        self.volume.create_volume(self.context, volume_id)
+        attachment = self.volume.attach_volume(self.context, volume_id, None,
+                                               'fake_host', mountpoint, 'rw')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual('in-use', vol['status'])
+        self.assertTrue(vol['multiattach'])
+        self.assertEqual('attached', attachment['attach_status'])
+        self.assertEqual(mountpoint, attachment['mountpoint'])
+        self.assertIsNone(attachment['instance_uuid'])
+        # sanitized, conforms to RFC-952 and RFC-1123 specs.
+        self.assertEqual('fake-host', attachment['attached_host'])
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEqual(2, len(admin_metadata))
+        expected = dict(readonly='False', attached_mode='rw')
+        ret = {}
+        for item in admin_metadata:
+            ret.update({item['key']: item['value']})
+        self.assertDictMatch(expected, ret)
+        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+        conn_info = self.volume.initialize_connection(self.context,
+                                                      volume_id, connector)
+        self.assertEqual('rw', conn_info['data']['access_mode'])
+
+        mountpoint2 = "/dev/sdx"
+        attachment2 = self.volume.attach_volume(self.context, volume_id, None,
+                                                'fake_host', mountpoint2,
+                                                'rw')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual('in-use', vol['status'])
+        self.assertIsNone(attachment2)
+
+        self.assertRaises(exception.VolumeAttached,
+                          self.volume.delete_volume,
+                          self.context,
+                          volume_id)
+
     def test_run_attach_detach_not_multiattach_volume_for_hosts(self):
         """Make sure volume can't be attached to more than one host."""
         mountpoint = "/dev/sdf"
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 3881c6c20..ee3be401e 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -779,6 +779,8 @@ class VolumeManager(manager.SchedulerDependentManager):
                     self.db.volume_attachment_get_by_host(context, volume_id,
                                                           host_name_sanitized)
             if attachment is not None:
+                self.db.volume_update(context, volume_id,
+                                      {'status': 'in-use'})
                 return
 
             self._notify_about_volume_usage(context, volume,