return attr in inst.__class__.__table__.columns
for key, value in dest_volume_ref.iteritems():
- if key == 'id' or not is_column(dest_volume_ref, key):
+ # The implementation of update_migrated_volume will decide the
+ # values for _name_id and provider_location.
+ if (key in ('id', '_name_id', 'provider_location')
+ or not is_column(dest_volume_ref, key)):
continue
elif key == 'migration_status':
value = None
- elif key == '_name_id':
- value = dest_volume_ref['_name_id'] or dest_volume_ref['id']
setattr(src_volume_ref, key, value)
dest_volume['id'])
src_volume = db.volume_get(ctxt, src_volume['id'])
- expected_name = 'volume-%s' % dest_volume['id']
- self.assertEqual(src_volume['_name_id'], dest_volume['id'])
- self.assertEqual(src_volume['name'], expected_name)
- self.assertEqual(src_volume['host'], 'dest')
- self.assertEqual(src_volume['status'], 'available')
+ self.assertEqual('dest', src_volume['host'])
+ self.assertEqual('available', src_volume['status'])
self.assertIsNone(src_volume['migration_status'])
volume = {'id': 111}
backend_volume = {'id': 112}
model_update = {'_name_id': None}
- rt = self.driver.update_migrated_volume(None, volume, backend_volume)
+ rt = self.driver.update_migrated_volume(None, volume, backend_volume,
+ 'available')
mock_rename_volume.assert_called_once_with(self.VOLUME,
volume['id'])
self.assertEqual(model_update, rt)
mock_open_connection,
mock_init):
volume = {'id': 111}
- backend_volume = {'id': 112}
- rt = self.driver.update_migrated_volume(None, volume, backend_volume)
+ backend_volume = {'id': 112, '_name_id': 113}
+ rt = self.driver.update_migrated_volume(None, volume, backend_volume,
+ 'available')
mock_rename_volume.assert_called_once_with(self.VOLUME,
volume['id'])
- self.assertEqual(None, rt)
+ self.assertEqual({'_name_id': 113}, rt)
def test_update_migrated_volume_no_volume_id(self,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': None}
- backend_volume = {'id': 112}
- rt = self.driver.update_migrated_volume(None, volume, backend_volume)
- self.assertEqual(None, rt)
+ backend_volume = {'id': 112, '_name_id': 113}
+ rt = self.driver.update_migrated_volume(None, volume, backend_volume,
+ 'available')
+ self.assertEqual({'_name_id': 113}, rt)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
mock_open_connection,
mock_init):
volume = {'id': 111}
- backend_volume = {'id': None}
- rt = self.driver.update_migrated_volume(None, volume, backend_volume)
+ backend_volume = {'id': None, '_name_id': None}
+ rt = self.driver.update_migrated_volume(None, volume, backend_volume,
+ 'available')
mock_find_sc.assert_called_once_with()
mock_find_volume.assert_called_once_with(None)
- self.assertEqual(None, rt)
+ self.assertEqual({'_name_id': None}, rt)
'provider_auth': None,
'project_id': 'project',
'display_name': None,
+ '_name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666',
'display_description': 'Part-1-Copy',
'volume_type_id': None,
'provider_location': '',
dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16), test_dst_part_id)
test_model_update = {
+ '_name_id': None,
'provider_location': dst_volume['provider_location'],
}
self._driver_setup(mock_commands)
model_update = self.driver.update_migrated_volume(
- None, src_volume, dst_volume)
+ None, src_volume, dst_volume, 'available')
expect_cli_cmd = [
mock.call('SetPartition', test_dst_part_id,
'name=%s' % src_volume['id'].replace('-', '')),
]
self._assert_cli_has_calls(expect_cli_cmd)
- self.assertDictMatch(model_update, test_model_update)
+ self.assertDictMatch(test_model_update, model_update)
@mock.patch.object(common_cli.LOG, 'debug', mock.Mock())
def test_update_migrated_volume_rename_fail(self):
src_volume = self.cli_data.test_volume
dst_volume = self.cli_data.test_dst_volume
+ dst_volume['_name_id'] = 'fake_name_id'
test_dst_part_id = self.cli_data.fake_partition_id[1]
dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16), test_dst_part_id)
'SetPartition': FAKE_ERROR_RETURN
}
self._driver_setup(mock_commands)
-
- self.assertRaises(
- exception.InfortrendCliException,
- self.driver.update_migrated_volume,
- None,
- src_volume,
- dst_volume)
+ model_update = self.driver.update_migrated_volume(
+ None, src_volume, dst_volume, 'available')
+ self.assertEqual({'_name_id': 'fake_name_id'}, model_update)
self.agAllocated = 1
+class MockVolume(object):
+    # Minimal stand-in for a StorPool API volume object: it exposes only
+    # the .name attribute, which is all that volumesList() consumers read.
+    def __init__(self, v):
+        # v is a volume record (dict-like); only its 'name' key is used.
+        self.name = v['name']
+
+
class MockTemplate(object):
def __init__(self, name):
self.name = name
def volumeDelete(self, name):
del volumes[name]
+    def volumesList(self):
+        # Mimic the StorPool API volumesList() call: wrap each entry of the
+        # module-level `volumes` dict (keyed by volume name) in a MockVolume
+        # so callers can read the .name attribute of each element.
+        return [MockVolume(v[1]) for v in volumes.items()]
+
def volumeTemplatesList(self):
return self._templates
def volumesReassign(self, json):
pass
- def volumeUpdate(self, name, size):
- volumes[name]['size'] = size['size']
+    def volumeUpdate(self, name, data):
+        # Mimic the StorPool API volumeUpdate() call for the two operations
+        # the driver performs: resizing and renaming a volume.
+        if 'size' in data:
+            volumes[name]['size'] = data['size']
+
+        # A rename re-keys the entry in the module-level `volumes` dict;
+        # a no-op rename (same name) deliberately leaves the dict alone.
+        if 'rename' in data and data['rename'] != name:
+            volumes[data['rename']] = volumes[name]
+            del volumes[name]
class MockAttachDB(object):
self.assertDictEqual({}, volumes)
self.assertDictEqual({}, snapshots)
+    @mock_volume_types
+    def test_update_migrated_volume(self):
+        """Exercise the StorPool driver's update_migrated_volume().
+
+        Covers both failure paths (the migrated volume does not exist on
+        the mock cluster; a volume with the original name already exists)
+        and the successful rename path.
+        """
+        # Start from a clean mock cluster.
+        self.assertVolumeNames([])
+        self.assertDictEqual({}, volumes)
+        self.assertDictEqual({}, snapshots)
+
+        # Create two volumes
+        self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 1,
+                                   'volume_type': None})
+        self.driver.create_volume({'id': '2', 'name': 'v2', 'size': 1,
+                                   'volume_type': None})
+        self.assertListEqual([volumeName('1'), volumeName('2')],
+                             volumes.keys())
+        self.assertVolumeNames(('1', '2',))
+
+        # Failure: the "migrated" volume does not even exist
+        # (_name_id must then keep pointing at the back-end volume).
+        res = self.driver.update_migrated_volume(None, {'id': '1'},
+                                                 {'id': '3', '_name_id': '1'},
+                                                 'available')
+        self.assertDictEqual({'_name_id': '1'}, res)
+
+        # Failure: a volume with the original volume's name already exists
+        res = self.driver.update_migrated_volume(None, {'id': '1'},
+                                                 {'id': '2', '_name_id': '1'},
+                                                 'available')
+        self.assertDictEqual({'_name_id': '1'}, res)
+
+        # Success: rename the migrated volume to match the original
+        # (a successful rename clears _name_id).
+        res = self.driver.update_migrated_volume(None, {'id': '3'},
+                                                 {'id': '2', '_name_id': '3'},
+                                                 'available')
+        self.assertDictEqual({'_name_id': None}, res)
+        self.assertListEqual([volumeName('1'), volumeName('3')],
+                             volumes.keys())
+        self.assertVolumeNames(('1', '3',))
+
+        # Clean up and verify the mock cluster is empty again.
+        for vid in ('1', '3'):
+            self.driver.delete_volume({'id': vid})
+        self.assertVolumeNames([])
+        self.assertDictEqual({}, volumes)
+        self.assertDictEqual({}, snapshots)
+
def test_clone_extend_volume(self):
self.assertVolumeNames([])
self.assertDictEqual({}, volumes)
self.assertEqual((7, 2, 0, 0), res['code_level'],
'Get code level error')
+    @mock.patch.object(helpers.StorwizeHelpers, 'rename_vdisk')
+    def test_storwize_update_migrated_volume(self, rename_vdisk):
+        """update_migrated_volume() should rename the vdisk back.
+
+        On a rename failure the driver must instead map the volume to the
+        back-end volume via _name_id, regardless of the original volume
+        status ('available' or 'attached').
+        """
+        ctxt = testutils.get_test_admin_context()
+        current_volume_id = 'fake_volume_id'
+        original_volume_id = 'fake_original_volume_id'
+        current_name = 'volume-' + current_volume_id
+        original_name = 'volume-' + original_volume_id
+        backend_volume = self._create_volume(id=current_volume_id)
+        volume = self._create_volume(id=original_volume_id)
+        # Successful rename: no _name_id mapping is needed.
+        model_update = self.driver.update_migrated_volume(ctxt, volume,
+                                                          backend_volume,
+                                                          'available')
+        rename_vdisk.assert_called_once_with(current_name, original_name)
+        self.assertEqual({'_name_id': None}, model_update)
+
+        # Rename failure with an 'available' original volume.
+        rename_vdisk.reset_mock()
+        rename_vdisk.side_effect = exception.VolumeBackendAPIException
+        model_update = self.driver.update_migrated_volume(ctxt, volume,
+                                                          backend_volume,
+                                                          'available')
+        self.assertEqual({'_name_id': current_volume_id}, model_update)
+
+        # Rename failure with an 'attached' original volume.
+        rename_vdisk.reset_mock()
+        rename_vdisk.side_effect = exception.VolumeBackendAPIException
+        model_update = self.driver.update_migrated_volume(ctxt, volume,
+                                                          backend_volume,
+                                                          'attached')
+        self.assertEqual({'_name_id': current_volume_id}, model_update)
+
def test_storwize_vdisk_copy_ops(self):
ctxt = testutils.get_test_admin_context()
volume = self._create_volume()
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_volume(self.context, volume_src['id'])
+    @mock.patch('cinder.db.volume_update')
+    def test_update_migrated_volume(self, volume_update):
+        """The manager persists the driver's migrated-volume update.
+
+        The model update returned by the driver must be written to the DB;
+        when the driver raises NotImplementedError, the manager must fall
+        back to the new volume's _name_id and provider_location.
+        """
+        fake_host = 'fake_host'
+        fake_new_host = 'fake_new_host'
+        fake_update = {'_name_id': None, 'provider_location': None}
+        fake_elevated = 'fake_elevated'
+        volume = tests_utils.create_volume(self.context, size=1,
+                                           status='available',
+                                           host=fake_host)
+        new_volume = tests_utils.create_volume(self.context, size=1,
+                                               status='available',
+                                               host=fake_new_host)
+        new_volume['_name_id'] = 'fake_name_id'
+        new_volume['provider_location'] = 'fake_provider_location'
+        # Expected fallback update built from the new volume's fields.
+        fake_update_error = {'_name_id': new_volume['_name_id'],
+                             'provider_location':
+                             new_volume['provider_location']}
+        with mock.patch.object(self.volume.driver,
+                               'update_migrated_volume') as \
+                migrate_update,\
+                mock.patch.object(self.context, 'elevated') as elevated:
+            migrate_update.return_value = fake_update
+            elevated.return_value = fake_elevated
+            self.volume.update_migrated_volume(self.context, volume,
+                                               new_volume, 'available')
+            volume_update.assert_called_once_with(fake_elevated,
+                                                  volume['id'],
+                                                  fake_update)
+
+            # Test the case for update_migrated_volume not implemented
+            # for the driver.
+            migrate_update.reset_mock()
+            volume_update.reset_mock()
+            migrate_update.side_effect = NotImplementedError
+            self.volume.update_migrated_volume(self.context, volume,
+                                               new_volume, 'available')
+            volume_update.assert_called_once_with(fake_elevated,
+                                                  volume['id'],
+                                                  fake_update_error)
+
def test_list_availability_zones_enabled_service(self):
services = [
{'availability_zone': 'ping', 'disabled': 0},
mock_volume_get.assert_called_with(self.context, vol['id'])
+    def test_update_migrated_volume(self):
+        """LVM update_migrated_volume() renames the LV back on success.
+
+        A successful rename clears _name_id and provider_location; a
+        failed rename keeps the new volume's id and provider_location so
+        the DB still points at the actual back-end volume.
+        """
+        fake_volume_id = 'vol1'
+        fake_new_volume_id = 'vol2'
+        fake_provider = 'fake_provider'
+        original_volume_name = CONF.volume_name_template % fake_volume_id
+        current_name = CONF.volume_name_template % fake_new_volume_id
+        fake_volume = tests_utils.create_volume(self.context)
+        fake_volume['id'] = fake_volume_id
+        fake_new_volume = tests_utils.create_volume(self.context)
+        fake_new_volume['id'] = fake_new_volume_id
+        fake_new_volume['provider_location'] = fake_provider
+        fake_vg = fake_lvm.FakeBrickLVM('cinder-volumes', False,
+                                        None, 'default')
+        with mock.patch.object(self.volume.driver, 'vg') as vg:
+            vg.return_value = fake_vg
+            # Successful rename path.
+            vg.rename_volume.return_value = None
+            update = self.volume.driver.update_migrated_volume(self.context,
+                                                               fake_volume,
+                                                               fake_new_volume,
+                                                               'available')
+            vg.rename_volume.assert_called_once_with(current_name,
+                                                     original_volume_name)
+            self.assertEqual({'_name_id': None,
+                              'provider_location': None}, update)
+
+            # Failed rename path.
+            vg.rename_volume.reset_mock()
+            vg.rename_volume.side_effect = processutils.ProcessExecutionError
+            update = self.volume.driver.update_migrated_volume(self.context,
+                                                               fake_volume,
+                                                               fake_new_volume,
+                                                               'available')
+            vg.rename_volume.assert_called_once_with(current_name,
+                                                     original_volume_name)
+            self.assertEqual({'_name_id': fake_new_volume_id,
+                              'provider_location': fake_provider},
+                             update)
+
class ISCSITestCase(DriverTestCase):
"""Test Case for ISCSIDriver"""
"""
return None
- def update_migrated_volume(self, ctxt, volume, new_volume):
+ def update_migrated_volume(self, ctxt, volume, new_volume,
+ original_volume_status):
"""Return model update for migrated volume.
+ Each driver implementing this method needs to be responsible for the
+ values of _name_id and provider_location. If None is returned or either
+ key is not set, it means the volume table does not need to change the
+ value(s) for the key(s).
+ The return format is {"_name_id": value, "provider_location": value}.
+
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
+ :param original_volume_status: The status of the original volume
:return model_update to update DB with any needed changes
"""
- return None
+ msg = _("The method update_migrated_volume not implemented.")
+ raise NotImplementedError(msg)
def migrate_volume(self, context, volume, host):
return (False, None)
{'total': data['total_capacity_gb'],
'free': data['free_capacity_gb']})
- def update_migrated_volume(self, ctxt, volume, new_volume):
+ def update_migrated_volume(self, ctxt, volume, new_volume,
+ original_volume_status):
'''Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
+ :param original_volume_status: The status of the original volume
:return model_update to update DB with any needed changes
'''
# We use id as our volume name so we need to rename the backend
# The world was horrible to us so we should error and leave.
LOG.error(_LE('Unable to rename the logical volume for volume: %s'),
original_volume_name)
- return None
+ return {'_name_id': new_volume['_name_id'] or new_volume['id']}
'host': host['host']})
return True, model_update
+    def update_migrated_volume(self, ctxt, volume, new_volume,
+                               original_volume_status):
+        """Return model update from Storwize for migrated volume.
+
+        This method should rename the back-end volume name(id) on the
+        destination host back to its original name(id) on the source host.
+
+        :param ctxt: The context used to run the method update_migrated_volume
+        :param volume: The original volume that was migrated to this backend
+        :param new_volume: The migration volume object that was created on
+                           this backend as part of the migration process
+        :param original_volume_status: The status of the original volume
+        :return model_update to update DB with any needed changes
+        """
+        # Both names follow the configured volume_name_template, built from
+        # the respective volume ids.
+        current_name = CONF.volume_name_template % new_volume['id']
+        original_volume_name = CONF.volume_name_template % volume['id']
+        try:
+            self._helpers.rename_vdisk(current_name, original_volume_name)
+        except exception.VolumeBackendAPIException:
+            LOG.error(_LE('Unable to rename the logical volume '
+                          'for volume: %s'), volume['id'])
+            # Rename failed: keep the DB pointing at the actual back-end
+            # volume through _name_id instead.
+            return {'_name_id': new_volume['_name_id'] or new_volume['id']}
+        # If the back-end name(id) for the volume has been renamed,
+        # it is OK for the volume to keep the original name(id) and there is
+        # no need to use the column "_name_id" to establish the mapping
+        # relationship between the volume id and the back-end volume
+        # name(id).
+        # Set the key "_name_id" to None for a successful rename.
+        model_update = {'_name_id': None}
+        return model_update
+
def manage_existing(self, volume, ref):
"""Manages an existing vdisk.
'volume_id': volume['id']})
return True
- def update_migrated_volume(self, ctxt, volume, new_volume):
+ def update_migrated_volume(self, ctxt, volume, new_volume,
+ original_volume_status):
"""Return model update for migrated volume."""
src_volume_id = volume['id'].replace('-', '')
'Rename partition %(part_id)s '
'into new volume %(new_volume)s.', {
'part_id': part_id, 'new_volume': dst_volume_id})
-
- self._execute('SetPartition', part_id, 'name=%s' % src_volume_id)
+ try:
+ self._execute('SetPartition', part_id, 'name=%s' % src_volume_id)
+ except exception.InfortrendCliException:
+ LOG.exception(_LE('Failed to rename %(new_volume)s into '
+ '%(volume)s.'), {'new_volume': new_volume['id'],
+ 'volume': volume['id']})
+ return {'_name_id': new_volume['_name_id'] or new_volume['id']}
LOG.info(_LI('Update migrated volume %(new_volume)s completed.'), {
'new_volume': new_volume['id']})
model_update = {
+ '_name_id': None,
'provider_location': new_volume['provider_location'],
}
return model_update
'volume_id': volume['id'], 'type_id': new_type['id']})
return self.common.retype(ctxt, volume, new_type, diff, host)
- def update_migrated_volume(self, ctxt, volume, new_volume):
+ def update_migrated_volume(self, ctxt, volume, new_volume,
+ original_volume_status):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
+ :param original_volume_status: The status of the original volume
:return model_update to update DB with any needed changes
"""
LOG.debug(
'update migrated volume original volume id= %(volume_id)s '
'new volume id=%(new_volume_id)s', {
'volume_id': volume['id'], 'new_volume_id': new_volume['id']})
- return self.common.update_migrated_volume(ctxt, volume, new_volume)
+ return self.common.update_migrated_volume(ctxt, volume, new_volume,
+ original_volume_status)
'volume_id': volume['id'], 'type_id': new_type['id']})
return self.common.retype(ctxt, volume, new_type, diff, host)
- def update_migrated_volume(self, ctxt, volume, new_volume):
+ def update_migrated_volume(self, ctxt, volume, new_volume,
+ original_volume_status):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
+ :param original_volume_status: The status of the original volume
:return model_update to update DB with any needed changes
"""
LOG.debug(
'update migrated volume original volume id= %(volume_id)s '
'new volume id=%(new_volume_id)s', {
'volume_id': volume['id'], 'new_volume_id': new_volume['id']})
- return self.common.update_migrated_volume(ctxt, volume, new_volume)
+ return self.common.update_migrated_volume(ctxt, volume, new_volume,
+ original_volume_status)
self.configuration.lvm_type,
mirror_count)
+    def update_migrated_volume(self, ctxt, volume, new_volume,
+                               original_volume_status):
+        """Return model update from LVM for migrated volume.
+
+        This method should rename the back-end volume name(id) on the
+        destination host back to its original name(id) on the source host.
+
+        :param ctxt: The context used to run the method update_migrated_volume
+        :param volume: The original volume that was migrated to this backend
+        :param new_volume: The migration volume object that was created on
+                           this backend as part of the migration process
+        :param original_volume_status: The status of the original volume
+        :return model_update to update DB with any needed changes
+        """
+        name_id = None
+        provider_location = None
+        # Only attempt the rename when the original volume is 'available';
+        # otherwise fall through to the _name_id mapping below.
+        if original_volume_status == 'available':
+            current_name = CONF.volume_name_template % new_volume['id']
+            original_volume_name = CONF.volume_name_template % volume['id']
+            try:
+                self.vg.rename_volume(current_name, original_volume_name)
+            except processutils.ProcessExecutionError:
+                LOG.error(_LE('Unable to rename the logical volume '
+                              'for volume: %s'), volume['name'])
+                # If the rename fails, _name_id should be set to the new
+                # volume id and provider_location should be set to the
+                # one from the new volume as well.
+                name_id = new_volume['_name_id'] or new_volume['id']
+                provider_location = new_volume['provider_location']
+        else:
+            # The back-end will not be renamed.
+            name_id = new_volume['_name_id'] or new_volume['id']
+            provider_location = new_volume['provider_location']
+        return {'_name_id': name_id, 'provider_location': provider_location}
+
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self._create_volume(volume['name'],
return True
- def update_migrated_volume(self, context, volume, new_volume):
+ def update_migrated_volume(self, context, volume, new_volume,
+ original_volume_status):
orig_id = volume['id']
orig_name = self._attach.volumeName(orig_id)
temp_id = new_volume['id']
vols = {v.name: True for v in self._attach.api().volumesList()}
if temp_name not in vols:
LOG.error(_LE('StorPool update_migrated_volume(): it seems '
- 'that the StorPool volume "%(tid)" was not '
+ 'that the StorPool volume "%(tid)s" was not '
'created as part of the migration from '
- '"%(oid)"'), {'tid': temp_id, 'oid': orig_id})
+ '"%(oid)s"'), {'tid': temp_id, 'oid': orig_id})
+ return {'_name_id': new_volume['_name_id'] or new_volume['id']}
elif orig_name in vols:
LOG.error(_LE('StorPool update_migrated_volume(): both '
- 'the original volume "%(oid)" and the migrated '
- 'StorPool volume "%(tid)" seem to exist on '
+ 'the original volume "%(oid)s" and the migrated '
+ 'StorPool volume "%(tid)s" seem to exist on '
'the StorPool cluster'),
{'oid': orig_id, 'tid': temp_id})
+ return {'_name_id': new_volume['_name_id'] or new_volume['id']}
else:
- self._attach.api().volumeUpdate(temp_name, {'rename': orig_name})
+ try:
+ self._attach.api().volumeUpdate(temp_name,
+ {'rename': orig_name})
+ return {'_name_id': None}
+ except spapi.ApiError as e:
+ LOG.error(_LE('StorPool update_migrated_volume(): '
+ 'could not rename %(tname)s to %(oname)s: '
+ '%(err)s'),
+ {'tname': temp_name, 'oname': orig_name, 'err': e})
+ return {'_name_id': new_volume['_name_id'] or new_volume['id']}
{'err': ex}, resource=volume)
# Give driver (new_volume) a chance to update things as needed
+ # after a successful migration.
# Note this needs to go through rpc to the host of the new volume
- # the current host and driver object is for the "existing" volume
- rpcapi.update_migrated_volume(ctxt,
- volume,
- new_volume)
+ # the current host and driver object is for the "existing" volume.
+ rpcapi.update_migrated_volume(ctxt, volume, new_volume,
+ orig_volume_status)
self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
self.db.volume_destroy(ctxt, new_volume_id)
if orig_volume_status == 'in-use':
return True
- def update_migrated_volume(self, ctxt, volume, new_volume):
+ def update_migrated_volume(self, ctxt, volume, new_volume,
+ volume_status):
"""Finalize migration process on backend device."""
-
model_update = None
- model_update = self.driver.update_migrated_volume(ctxt,
- volume,
- new_volume)
+ try:
+ model_update = self.driver.update_migrated_volume(ctxt,
+ volume,
+ new_volume,
+ volume_status)
+ except NotImplementedError:
+ # If update_migrated_volume is not implemented for the driver,
+ # _name_id and provider_location will be set with the values
+ # from new_volume.
+ model_update = {'_name_id': new_volume['_name_id'] or
+ new_volume['id'],
+ 'provider_location':
+ new_volume['provider_location']}
if model_update:
- self.db.volume_update(ctxt.elevated(),
- volume['id'],
+ self.db.volume_update(ctxt.elevated(), volume['id'],
model_update)
cctxt = self.client.prepare(server=new_host, version='1.17')
cctxt.cast(ctxt, 'reenable_replication', volume_id=volume['id'])
- def update_migrated_volume(self, ctxt, volume, new_volume):
+ def update_migrated_volume(self, ctxt, volume, new_volume,
+ original_volume_status):
host = utils.extract_host(new_volume['host'])
cctxt = self.client.prepare(server=host, version='1.19')
cctxt.call(ctxt,
'update_migrated_volume',
volume=volume,
- new_volume=new_volume)
+ new_volume=new_volume,
+ volume_status=original_volume_status)