From 474f70e9edaccc87ecefafd5618d542497055212 Mon Sep 17 00:00:00 2001
From: Vincent Hou
Date: Thu, 7 May 2015 13:53:25 +0800
Subject: [PATCH] Implement update_migrated_volume for the drivers

This patch implements update_migrated_volume for the LVM and Storwize
drivers, and updates the Dell, StorPool and Infortrend drivers
accordingly. It makes sure that after a successful volume migration the
Cinder volume name (id) is the same as the back-end volume name (id).
Other back-end drivers can take this patch as a reference when
implementing update_migrated_volume.

Note: this is not applicable to multi-attached volumes; it has to wait
until multi-attach support lands in Nova.

This patch also adds a unit test for the StorPool driver's
update_migrated_volume() implementation.

Co-Authored-By: Peter Penchev
Change-Id: I69707340ddf2b55286ff0d84319529b2f502cefa
Partial-Bug: #1450649
---
 cinder/db/sqlalchemy/api.py | 7 +-
 cinder/tests/unit/db/test_finish_migration.py | 7 +-
 cinder/tests/unit/test_dellsc.py | 24 +++---
 cinder/tests/unit/test_infortrend_cli.py | 1 +
 cinder/tests/unit/test_infortrend_common.py | 16 ++--
 cinder/tests/unit/test_storpool.py | 59 +++++++++++++-
 cinder/tests/unit/test_storwize_svc.py | 29 +++++++
 cinder/tests/unit/test_volume.py | 77 +++++++++++++++++++
 cinder/volume/driver.py | 13 +++-
 .../drivers/dell/dell_storagecenter_common.py | 6 +-
 .../drivers/ibm/storwize_svc/__init__.py | 31 ++++++++
 .../infortrend/eonstor_ds_cli/common_cli.py | 13 +++-
 .../drivers/infortrend/infortrend_fc_cli.py | 7 +-
 .../infortrend/infortrend_iscsi_cli.py | 7 +-
 cinder/volume/drivers/lvm.py | 35 +++++++++
 cinder/volume/drivers/storpool.py | 24 ++++--
 cinder/volume/manager.py | 31 +++++---
 cinder/volume/rpcapi.py | 6 +-
 18 files changed, 334 insertions(+), 59 deletions(-)

diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index 7dc908013..6fb4ea416 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -1151,12 +1151,13 @@ def finish_volume_migration(context, src_vol_id, dest_vol_id):
             return attr in inst.__class__.__table__.columns
 
         for key, value in dest_volume_ref.iteritems():
-            if key == 'id' or not is_column(dest_volume_ref, key):
+            # The implementation of update_migrated_volume will decide the
+            # values for _name_id and provider_location.
+ if (key in ('id', '_name_id', 'provider_location') + or not is_column(dest_volume_ref, key)): continue elif key == 'migration_status': value = None - elif key == '_name_id': - value = dest_volume_ref['_name_id'] or dest_volume_ref['id'] setattr(src_volume_ref, key, value) diff --git a/cinder/tests/unit/db/test_finish_migration.py b/cinder/tests/unit/db/test_finish_migration.py index 38b6e9b42..3993c226a 100644 --- a/cinder/tests/unit/db/test_finish_migration.py +++ b/cinder/tests/unit/db/test_finish_migration.py @@ -38,9 +38,6 @@ class FinishVolumeMigrationTestCase(test.TestCase): dest_volume['id']) src_volume = db.volume_get(ctxt, src_volume['id']) - expected_name = 'volume-%s' % dest_volume['id'] - self.assertEqual(src_volume['_name_id'], dest_volume['id']) - self.assertEqual(src_volume['name'], expected_name) - self.assertEqual(src_volume['host'], 'dest') - self.assertEqual(src_volume['status'], 'available') + self.assertEqual('dest', src_volume['host']) + self.assertEqual('available', src_volume['status']) self.assertIsNone(src_volume['migration_status']) diff --git a/cinder/tests/unit/test_dellsc.py b/cinder/tests/unit/test_dellsc.py index e8bb59a06..95bc8c674 100644 --- a/cinder/tests/unit/test_dellsc.py +++ b/cinder/tests/unit/test_dellsc.py @@ -1058,7 +1058,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): volume = {'id': 111} backend_volume = {'id': 112} model_update = {'_name_id': None} - rt = self.driver.update_migrated_volume(None, volume, backend_volume) + rt = self.driver.update_migrated_volume(None, volume, backend_volume, + 'available') mock_rename_volume.assert_called_once_with(self.VOLUME, volume['id']) self.assertEqual(model_update, rt) @@ -1080,20 +1081,22 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): mock_open_connection, mock_init): volume = {'id': 111} - backend_volume = {'id': 112} - rt = self.driver.update_migrated_volume(None, volume, backend_volume) + backend_volume = {'id': 112, '_name_id': 113} + rt = self.driver.update_migrated_volume(None, volume, backend_volume, + 'available') mock_rename_volume.assert_called_once_with(self.VOLUME, volume['id']) - self.assertEqual(None, rt) + self.assertEqual({'_name_id': 113}, rt) def test_update_migrated_volume_no_volume_id(self, mock_close_connection, mock_open_connection, mock_init): volume = {'id': None} - backend_volume = {'id': 112} - rt = self.driver.update_migrated_volume(None, volume, backend_volume) - self.assertEqual(None, rt) + backend_volume = {'id': 112, '_name_id': 113} + rt = self.driver.update_migrated_volume(None, volume, backend_volume, + 'available') + self.assertEqual({'_name_id': 113}, rt) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc', @@ -1108,8 +1111,9 @@ class DellSCSanISCSIDriverTestCase(test.TestCase): mock_open_connection, mock_init): volume = {'id': 111} - backend_volume = {'id': None} - rt = self.driver.update_migrated_volume(None, volume, backend_volume) + backend_volume = {'id': None, '_name_id': None} + rt = self.driver.update_migrated_volume(None, volume, backend_volume, + 'available') mock_find_sc.assert_called_once_with() mock_find_volume.assert_called_once_with(None) - self.assertEqual(None, rt) + self.assertEqual({'_name_id': None}, rt) diff --git a/cinder/tests/unit/test_infortrend_cli.py b/cinder/tests/unit/test_infortrend_cli.py index 9573c6385..9bac35985 100644 --- a/cinder/tests/unit/test_infortrend_cli.py +++ b/cinder/tests/unit/test_infortrend_cli.py @@ -90,6 +90,7 @@ class InfortrendCLITestData(object): 'provider_auth': None, 'project_id': 
'project', 'display_name': None, + '_name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666', 'display_description': 'Part-1-Copy', 'volume_type_id': None, 'provider_location': '', diff --git a/cinder/tests/unit/test_infortrend_common.py b/cinder/tests/unit/test_infortrend_common.py index 65600cb23..aefc139e9 100644 --- a/cinder/tests/unit/test_infortrend_common.py +++ b/cinder/tests/unit/test_infortrend_common.py @@ -1983,6 +1983,7 @@ class InfortrendiSCSICommonTestCase(InfortrendTestCass): dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % ( int(self.cli_data.fake_system_id[0], 16), test_dst_part_id) test_model_update = { + '_name_id': None, 'provider_location': dst_volume['provider_location'], } @@ -1992,19 +1993,20 @@ class InfortrendiSCSICommonTestCase(InfortrendTestCass): self._driver_setup(mock_commands) model_update = self.driver.update_migrated_volume( - None, src_volume, dst_volume) + None, src_volume, dst_volume, 'available') expect_cli_cmd = [ mock.call('SetPartition', test_dst_part_id, 'name=%s' % src_volume['id'].replace('-', '')), ] self._assert_cli_has_calls(expect_cli_cmd) - self.assertDictMatch(model_update, test_model_update) + self.assertDictMatch(test_model_update, model_update) @mock.patch.object(common_cli.LOG, 'debug', mock.Mock()) def test_update_migrated_volume_rename_fail(self): src_volume = self.cli_data.test_volume dst_volume = self.cli_data.test_dst_volume + dst_volume['_name_id'] = 'fake_name_id' test_dst_part_id = self.cli_data.fake_partition_id[1] dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % ( int(self.cli_data.fake_system_id[0], 16), test_dst_part_id) @@ -2013,10 +2015,6 @@ class InfortrendiSCSICommonTestCase(InfortrendTestCass): 'SetPartition': FAKE_ERROR_RETURN } self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.update_migrated_volume, - None, - src_volume, - dst_volume) + model_update = self.driver.update_migrated_volume( + None, src_volume, dst_volume, 'available') + self.assertEqual({'_name_id': 'fake_name_id'}, model_update) diff --git a/cinder/tests/unit/test_storpool.py b/cinder/tests/unit/test_storpool.py index 816b7c255..780e06271 100644 --- a/cinder/tests/unit/test_storpool.py +++ b/cinder/tests/unit/test_storpool.py @@ -77,6 +77,11 @@ class MockDisk(object): self.agAllocated = 1 +class MockVolume(object): + def __init__(self, v): + self.name = v['name'] + + class MockTemplate(object): def __init__(self, name): self.name = name @@ -114,14 +119,22 @@ class MockAPI(object): def volumeDelete(self, name): del volumes[name] + def volumesList(self): + return [MockVolume(v[1]) for v in volumes.items()] + def volumeTemplatesList(self): return self._templates def volumesReassign(self, json): pass - def volumeUpdate(self, name, size): - volumes[name]['size'] = size['size'] + def volumeUpdate(self, name, data): + if 'size' in data: + volumes[name]['size'] = data['size'] + + if 'rename' in data and data['rename'] != name: + volumes[data['rename']] = volumes[name] + del volumes[name] class MockAttachDB(object): @@ -303,6 +316,48 @@ class StorPoolTestCase(test.TestCase): self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) + @mock_volume_types + def test_update_migrated_volume(self): + self.assertVolumeNames([]) + self.assertDictEqual({}, volumes) + self.assertDictEqual({}, snapshots) + + # Create two volumes + self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 1, + 'volume_type': None}) + self.driver.create_volume({'id': '2', 'name': 'v2', 
'size': 1, + 'volume_type': None}) + self.assertListEqual([volumeName('1'), volumeName('2')], + volumes.keys()) + self.assertVolumeNames(('1', '2',)) + + # Failure: the "migrated" volume does not even exist + res = self.driver.update_migrated_volume(None, {'id': '1'}, + {'id': '3', '_name_id': '1'}, + 'available') + self.assertDictEqual({'_name_id': '1'}, res) + + # Failure: a volume with the original volume's name already exists + res = self.driver.update_migrated_volume(None, {'id': '1'}, + {'id': '2', '_name_id': '1'}, + 'available') + self.assertDictEqual({'_name_id': '1'}, res) + + # Success: rename the migrated volume to match the original + res = self.driver.update_migrated_volume(None, {'id': '3'}, + {'id': '2', '_name_id': '3'}, + 'available') + self.assertDictEqual({'_name_id': None}, res) + self.assertListEqual([volumeName('1'), volumeName('3')], + volumes.keys()) + self.assertVolumeNames(('1', '3',)) + + for vid in ('1', '3'): + self.driver.delete_volume({'id': vid}) + self.assertVolumeNames([]) + self.assertDictEqual({}, volumes) + self.assertDictEqual({}, snapshots) + def test_clone_extend_volume(self): self.assertVolumeNames([]) self.assertDictEqual({}, volumes) diff --git a/cinder/tests/unit/test_storwize_svc.py b/cinder/tests/unit/test_storwize_svc.py index 7653dc549..5ccceab97 100644 --- a/cinder/tests/unit/test_storwize_svc.py +++ b/cinder/tests/unit/test_storwize_svc.py @@ -2998,6 +2998,35 @@ class StorwizeSVCDriverTestCase(test.TestCase): self.assertEqual((7, 2, 0, 0), res['code_level'], 'Get code level error') + @mock.patch.object(helpers.StorwizeHelpers, 'rename_vdisk') + def test_storwize_update_migrated_volume(self, rename_vdisk): + ctxt = testutils.get_test_admin_context() + current_volume_id = 'fake_volume_id' + original_volume_id = 'fake_original_volume_id' + current_name = 'volume-' + current_volume_id + original_name = 'volume-' + original_volume_id + backend_volume = self._create_volume(id=current_volume_id) + volume = self._create_volume(id=original_volume_id) + model_update = self.driver.update_migrated_volume(ctxt, volume, + backend_volume, + 'available') + rename_vdisk.assert_called_once_with(current_name, original_name) + self.assertEqual({'_name_id': None}, model_update) + + rename_vdisk.reset_mock() + rename_vdisk.side_effect = exception.VolumeBackendAPIException + model_update = self.driver.update_migrated_volume(ctxt, volume, + backend_volume, + 'available') + self.assertEqual({'_name_id': current_volume_id}, model_update) + + rename_vdisk.reset_mock() + rename_vdisk.side_effect = exception.VolumeBackendAPIException + model_update = self.driver.update_migrated_volume(ctxt, volume, + backend_volume, + 'attached') + self.assertEqual({'_name_id': current_volume_id}, model_update) + def test_storwize_vdisk_copy_ops(self): ctxt = testutils.get_test_admin_context() volume = self._create_volume() diff --git a/cinder/tests/unit/test_volume.py b/cinder/tests/unit/test_volume.py index e61331fca..26bd8fe4a 100644 --- a/cinder/tests/unit/test_volume.py +++ b/cinder/tests/unit/test_volume.py @@ -3917,6 +3917,46 @@ class VolumeTestCase(BaseVolumeTestCase): self.volume.delete_volume(self.context, volume_dst['id']) self.volume.delete_volume(self.context, volume_src['id']) + @mock.patch('cinder.db.volume_update') + def test_update_migrated_volume(self, volume_update): + fake_host = 'fake_host' + fake_new_host = 'fake_new_host' + fake_update = {'_name_id': None, 'provider_location': None} + fake_elevated = 'fake_elevated' + volume = 
tests_utils.create_volume(self.context, size=1, + status='available', + host=fake_host) + new_volume = tests_utils.create_volume(self.context, size=1, + status='available', + host=fake_new_host) + new_volume['_name_id'] = 'fake_name_id' + new_volume['provider_location'] = 'fake_provider_location' + fake_update_error = {'_name_id': new_volume['_name_id'], + 'provider_location': + new_volume['provider_location']} + with mock.patch.object(self.volume.driver, + 'update_migrated_volume') as \ + migrate_update,\ + mock.patch.object(self.context, 'elevated') as elevated: + migrate_update.return_value = fake_update + elevated.return_value = fake_elevated + self.volume.update_migrated_volume(self.context, volume, + new_volume, 'available') + volume_update.assert_called_once_with(fake_elevated, + volume['id'], + fake_update) + + # Test the case for update_migrated_volume not implemented + # for the driver. + migrate_update.reset_mock() + volume_update.reset_mock() + migrate_update.side_effect = NotImplementedError + self.volume.update_migrated_volume(self.context, volume, + new_volume, 'available') + volume_update.assert_called_once_with(fake_elevated, + volume['id'], + fake_update_error) + def test_list_availability_zones_enabled_service(self): services = [ {'availability_zone': 'ping', 'disabled': 0}, @@ -5868,6 +5908,43 @@ class LVMVolumeDriverTestCase(DriverTestCase): mock_volume_get.assert_called_with(self.context, vol['id']) + def test_update_migrated_volume(self): + fake_volume_id = 'vol1' + fake_new_volume_id = 'vol2' + fake_provider = 'fake_provider' + original_volume_name = CONF.volume_name_template % fake_volume_id + current_name = CONF.volume_name_template % fake_new_volume_id + fake_volume = tests_utils.create_volume(self.context) + fake_volume['id'] = fake_volume_id + fake_new_volume = tests_utils.create_volume(self.context) + fake_new_volume['id'] = fake_new_volume_id + fake_new_volume['provider_location'] = fake_provider + fake_vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, + None, 'default') + with mock.patch.object(self.volume.driver, 'vg') as vg: + vg.return_value = fake_vg + vg.rename_volume.return_value = None + update = self.volume.driver.update_migrated_volume(self.context, + fake_volume, + fake_new_volume, + 'available') + vg.rename_volume.assert_called_once_with(current_name, + original_volume_name) + self.assertEqual({'_name_id': None, + 'provider_location': None}, update) + + vg.rename_volume.reset_mock() + vg.rename_volume.side_effect = processutils.ProcessExecutionError + update = self.volume.driver.update_migrated_volume(self.context, + fake_volume, + fake_new_volume, + 'available') + vg.rename_volume.assert_called_once_with(current_name, + original_volume_name) + self.assertEqual({'_name_id': fake_new_volume_id, + 'provider_location': fake_provider}, + update) + class ISCSITestCase(DriverTestCase): """Test Case for ISCSIDriver""" diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py index 61142ffa0..ba5c52a2b 100644 --- a/cinder/volume/driver.py +++ b/cinder/volume/driver.py @@ -1358,15 +1358,24 @@ class VolumeDriver(ConsistencyGroupVD, TransferVD, ManageableVD, ExtendVD, """ return None - def update_migrated_volume(self, ctxt, volume, new_volume): + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): """Return model update for migrated volume. + Each driver implementing this method needs to be responsible for the + values of _name_id and provider_location. 
If None is returned or either + key is not set, it means the volume table does not need to change the + value(s) for the key(s). + The return format is {"_name_id": value, "provider_location": value}. + :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process + :param original_volume_status: The status of the original volume :return model_update to update DB with any needed changes """ - return None + msg = _("The method update_migrated_volume not implemented.") + raise NotImplementedError(msg) def migrate_volume(self, context, volume, host): return (False, None) diff --git a/cinder/volume/drivers/dell/dell_storagecenter_common.py b/cinder/volume/drivers/dell/dell_storagecenter_common.py index b259adba8..adec896c5 100644 --- a/cinder/volume/drivers/dell/dell_storagecenter_common.py +++ b/cinder/volume/drivers/dell/dell_storagecenter_common.py @@ -343,12 +343,14 @@ class DellCommonDriver(san.SanDriver): {'total': data['total_capacity_gb'], 'free': data['free_capacity_gb']}) - def update_migrated_volume(self, ctxt, volume, new_volume): + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): '''Return model update for migrated volume. :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process + :param original_volume_status: The status of the original volume :return model_update to update DB with any needed changes ''' # We use id as our volume name so we need to rename the backend @@ -369,4 +371,4 @@ class DellCommonDriver(san.SanDriver): # The world was horrible to us so we should error and leave. LOG.error(_LE('Unable to rename the logical volume for volume: %s'), original_volume_name) - return None + return {'_name_id': new_volume['_name_id'] or new_volume['id']} diff --git a/cinder/volume/drivers/ibm/storwize_svc/__init__.py b/cinder/volume/drivers/ibm/storwize_svc/__init__.py index 741fe598a..39165bbe5 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/__init__.py +++ b/cinder/volume/drivers/ibm/storwize_svc/__init__.py @@ -945,6 +945,37 @@ class StorwizeSVCDriver(san.SanDriver): 'host': host['host']}) return True, model_update + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): + """Return model update from Storwize for migrated volume. + + This method should rename the back-end volume name(id) on the + destination host back to its original name(id) on the source host. 
+ + :param ctxt: The context used to run the method update_migrated_volume + :param volume: The original volume that was migrated to this backend + :param new_volume: The migration volume object that was created on + this backend as part of the migration process + :param original_volume_status: The status of the original volume + :return model_update to update DB with any needed changes + """ + current_name = CONF.volume_name_template % new_volume['id'] + original_volume_name = CONF.volume_name_template % volume['id'] + try: + self._helpers.rename_vdisk(current_name, original_volume_name) + except exception.VolumeBackendAPIException: + LOG.error(_LE('Unable to rename the logical volume ' + 'for volume: %s'), volume['id']) + return {'_name_id': new_volume['_name_id'] or new_volume['id']} + # If the back-end name(id) for the volume has been renamed, + # it is OK for the volume to keep the original name(id) and there is + # no need to use the column "_name_id" to establish the mapping + # relationship between the volume id and the back-end volume + # name(id). + # Set the key "_name_id" to None for a successful rename. + model_update = {'_name_id': None} + return model_update + def manage_existing(self, volume, ref): """Manages an existing vdisk. diff --git a/cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py b/cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py index 59502826a..1bc27ee02 100644 --- a/cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py +++ b/cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py @@ -1904,7 +1904,8 @@ class InfortrendCommon(object): 'volume_id': volume['id']}) return True - def update_migrated_volume(self, ctxt, volume, new_volume): + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): """Return model update for migrated volume.""" src_volume_id = volume['id'].replace('-', '') @@ -1919,13 +1920,19 @@ class InfortrendCommon(object): 'Rename partition %(part_id)s ' 'into new volume %(new_volume)s.', { 'part_id': part_id, 'new_volume': dst_volume_id}) - - self._execute('SetPartition', part_id, 'name=%s' % src_volume_id) + try: + self._execute('SetPartition', part_id, 'name=%s' % src_volume_id) + except exception.InfortrendCliException: + LOG.exception(_LE('Failed to rename %(new_volume)s into ' + '%(volume)s.'), {'new_volume': new_volume['id'], + 'volume': volume['id']}) + return {'_name_id': new_volume['_name_id'] or new_volume['id']} LOG.info(_LI('Update migrated volume %(new_volume)s completed.'), { 'new_volume': new_volume['id']}) model_update = { + '_name_id': None, 'provider_location': new_volume['provider_location'], } return model_update diff --git a/cinder/volume/drivers/infortrend/infortrend_fc_cli.py b/cinder/volume/drivers/infortrend/infortrend_fc_cli.py index 42514a6db..33f4c691d 100644 --- a/cinder/volume/drivers/infortrend/infortrend_fc_cli.py +++ b/cinder/volume/drivers/infortrend/infortrend_fc_cli.py @@ -262,16 +262,19 @@ class InfortrendCLIFCDriver(driver.FibreChannelDriver): 'volume_id': volume['id'], 'type_id': new_type['id']}) return self.common.retype(ctxt, volume, new_type, diff, host) - def update_migrated_volume(self, ctxt, volume, new_volume): + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): """Return model update for migrated volume. 
:param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process + :param original_volume_status: The status of the original volume :return model_update to update DB with any needed changes """ LOG.debug( 'update migrated volume original volume id= %(volume_id)s ' 'new volume id=%(new_volume_id)s', { 'volume_id': volume['id'], 'new_volume_id': new_volume['id']}) - return self.common.update_migrated_volume(ctxt, volume, new_volume) + return self.common.update_migrated_volume(ctxt, volume, new_volume, + original_volume_status) diff --git a/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py b/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py index 6c8622871..293c19178 100644 --- a/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py +++ b/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py @@ -234,16 +234,19 @@ class InfortrendCLIISCSIDriver(driver.ISCSIDriver): 'volume_id': volume['id'], 'type_id': new_type['id']}) return self.common.retype(ctxt, volume, new_type, diff, host) - def update_migrated_volume(self, ctxt, volume, new_volume): + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): """Return model update for migrated volume. :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process + :param original_volume_status: The status of the original volume :return model_update to update DB with any needed changes """ LOG.debug( 'update migrated volume original volume id= %(volume_id)s ' 'new volume id=%(new_volume_id)s', { 'volume_id': volume['id'], 'new_volume_id': new_volume['id']}) - return self.common.update_migrated_volume(ctxt, volume, new_volume) + return self.common.update_migrated_volume(ctxt, volume, new_volume, + original_volume_status) diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py index b01c9bbb6..3c57b632b 100644 --- a/cinder/volume/drivers/lvm.py +++ b/cinder/volume/drivers/lvm.py @@ -306,6 +306,41 @@ class LVMVolumeDriver(driver.VolumeDriver): self.configuration.lvm_type, mirror_count) + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): + """Return model update from LVM for migrated volume. + + This method should rename the back-end volume name(id) on the + destination host back to its original name(id) on the source host. + + :param ctxt: The context used to run the method update_migrated_volume + :param volume: The original volume that was migrated to this backend + :param new_volume: The migration volume object that was created on + this backend as part of the migration process + :param original_volume_status: The status of the original volume + :return model_update to update DB with any needed changes + """ + name_id = None + provider_location = None + if original_volume_status == 'available': + current_name = CONF.volume_name_template % new_volume['id'] + original_volume_name = CONF.volume_name_template % volume['id'] + try: + self.vg.rename_volume(current_name, original_volume_name) + except processutils.ProcessExecutionError: + LOG.error(_LE('Unable to rename the logical volume ' + 'for volume: %s'), volume['name']) + # If the rename fails, _name_id should be set to the new + # volume id and provider_location should be set to the + # one from the new volume as well. 
+ name_id = new_volume['_name_id'] or new_volume['id'] + provider_location = new_volume['provider_location'] + else: + # The back-end will not be renamed. + name_id = new_volume['_name_id'] or new_volume['id'] + provider_location = new_volume['provider_location'] + return {'_name_id': name_id, 'provider_location': provider_location} + def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" self._create_volume(volume['name'], diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py index 78a4e351c..d69feaf88 100644 --- a/cinder/volume/drivers/storpool.py +++ b/cinder/volume/drivers/storpool.py @@ -434,7 +434,8 @@ class StorPoolDriver(driver.TransferVD, driver.ExtendVD, driver.CloneableVD, return True - def update_migrated_volume(self, context, volume, new_volume): + def update_migrated_volume(self, context, volume, new_volume, + original_volume_status): orig_id = volume['id'] orig_name = self._attach.volumeName(orig_id) temp_id = new_volume['id'] @@ -442,14 +443,25 @@ class StorPoolDriver(driver.TransferVD, driver.ExtendVD, driver.CloneableVD, vols = {v.name: True for v in self._attach.api().volumesList()} if temp_name not in vols: LOG.error(_LE('StorPool update_migrated_volume(): it seems ' - 'that the StorPool volume "%(tid)" was not ' + 'that the StorPool volume "%(tid)s" was not ' 'created as part of the migration from ' - '"%(oid)"'), {'tid': temp_id, 'oid': orig_id}) + '"%(oid)s"'), {'tid': temp_id, 'oid': orig_id}) + return {'_name_id': new_volume['_name_id'] or new_volume['id']} elif orig_name in vols: LOG.error(_LE('StorPool update_migrated_volume(): both ' - 'the original volume "%(oid)" and the migrated ' - 'StorPool volume "%(tid)" seem to exist on ' + 'the original volume "%(oid)s" and the migrated ' + 'StorPool volume "%(tid)s" seem to exist on ' 'the StorPool cluster'), {'oid': orig_id, 'tid': temp_id}) + return {'_name_id': new_volume['_name_id'] or new_volume['id']} else: - self._attach.api().volumeUpdate(temp_name, {'rename': orig_name}) + try: + self._attach.api().volumeUpdate(temp_name, + {'rename': orig_name}) + return {'_name_id': None} + except spapi.ApiError as e: + LOG.error(_LE('StorPool update_migrated_volume(): ' + 'could not rename %(tname)s to %(oname)s: ' + '%(err)s'), + {'tname': temp_name, 'oname': orig_name, 'err': e}) + return {'_name_id': new_volume['_name_id'] or new_volume['id']} diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index e77ecca0e..d3b345334 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -1374,11 +1374,11 @@ class VolumeManager(manager.SchedulerDependentManager): {'err': ex}, resource=volume) # Give driver (new_volume) a chance to update things as needed + # after a successful migration. # Note this needs to go through rpc to the host of the new volume - # the current host and driver object is for the "existing" volume - rpcapi.update_migrated_volume(ctxt, - volume, - new_volume) + # the current host and driver object is for the "existing" volume. 
+ rpcapi.update_migrated_volume(ctxt, volume, new_volume, + orig_volume_status) self.db.finish_volume_migration(ctxt, volume_id, new_volume_id) self.db.volume_destroy(ctxt, new_volume_id) if orig_volume_status == 'in-use': @@ -2580,14 +2580,23 @@ class VolumeManager(manager.SchedulerDependentManager): return True - def update_migrated_volume(self, ctxt, volume, new_volume): + def update_migrated_volume(self, ctxt, volume, new_volume, + volume_status): """Finalize migration process on backend device.""" - model_update = None - model_update = self.driver.update_migrated_volume(ctxt, - volume, - new_volume) + try: + model_update = self.driver.update_migrated_volume(ctxt, + volume, + new_volume, + volume_status) + except NotImplementedError: + # If update_migrated_volume is not implemented for the driver, + # _name_id and provider_location will be set with the values + # from new_volume. + model_update = {'_name_id': new_volume['_name_id'] or + new_volume['id'], + 'provider_location': + new_volume['provider_location']} if model_update: - self.db.volume_update(ctxt.elevated(), - volume['id'], + self.db.volume_update(ctxt.elevated(), volume['id'], model_update) diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py index 68944ca26..a8212ecea 100644 --- a/cinder/volume/rpcapi.py +++ b/cinder/volume/rpcapi.py @@ -255,10 +255,12 @@ class VolumeAPI(object): cctxt = self.client.prepare(server=new_host, version='1.17') cctxt.cast(ctxt, 'reenable_replication', volume_id=volume['id']) - def update_migrated_volume(self, ctxt, volume, new_volume): + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status): host = utils.extract_host(new_volume['host']) cctxt = self.client.prepare(server=host, version='1.19') cctxt.call(ctxt, 'update_migrated_volume', volume=volume, - new_volume=new_volume) + new_volume=new_volume, + volume_status=original_volume_status) -- 2.45.2
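
For reference, here is a minimal sketch (not part of the patch) of how another
back-end driver might follow the update_migrated_volume() contract this patch
introduces: rename the back-end volume created during migration back to the
name derived from the original volume's id, return {'_name_id': None} on
success, and fall back to the new volume's _name_id (or id) and
provider_location on failure or while the original volume is attached. The
MyBackendDriver class, its self._client.rename() call and the broad exception
handling are hypothetical placeholders, and the sketch assumes it runs inside
Cinder, where CONF.volume_name_template is registered.

# Illustrative sketch only, not part of this patch. The back-end client and
# its rename() call are hypothetical placeholders.
from oslo_config import cfg
from oslo_log import log as logging

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class MyBackendDriver(object):
    """Hypothetical driver showing the update_migrated_volume contract."""

    def __init__(self, client):
        self._client = client

    def update_migrated_volume(self, ctxt, volume, new_volume,
                               original_volume_status):
        # Only rename when the original volume is not attached; the LVM
        # driver in this patch makes the same check.
        if original_volume_status != 'available':
            return {'_name_id': new_volume['_name_id'] or new_volume['id'],
                    'provider_location': new_volume['provider_location']}

        current_name = CONF.volume_name_template % new_volume['id']
        original_name = CONF.volume_name_template % volume['id']
        try:
            # Hypothetical back-end call: rename the volume created during
            # migration back to the name derived from the original id.
            self._client.rename(current_name, original_name)
        except Exception:
            LOG.exception('Unable to rename %s to %s',
                          current_name, original_name)
            # Rename failed: keep the original volume row pointing at the
            # back-end volume that was created during the migration.
            return {'_name_id': new_volume['_name_id'] or new_volume['id'],
                    'provider_location': new_volume['provider_location']}
        # Rename succeeded: the name derived from the original id is valid
        # again, so clear _name_id (and provider_location, as LVM does).
        return {'_name_id': None, 'provider_location': None}

A driver that cannot rename back-end volumes can simply leave
update_migrated_volume unimplemented: with this patch the volume manager
catches NotImplementedError and records new_volume's _name_id (or id) and
provider_location in the database instead.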