return IMPL.volume_data_get_for_project(context, project_id)
def finish_volume_migration(context, src_vol_id, dest_vol_id):
    """Apply the DB updates that finalize a volume migration.

    Delegates to the configured backend implementation (IMPL).
    """
    result = IMPL.finish_volume_migration(context, src_vol_id, dest_vol_id)
    return result
def volume_destroy(context, volume_id):
    """Delete the given volume, raising if no such volume exists.

    Dispatches to the active backend implementation (IMPL).
    """
    destroyed = IMPL.volume_destroy(context, volume_id)
    return destroyed
return _volume_data_get_for_project(context, project_id, volume_type_id)
@require_admin_context
def finish_volume_migration(context, src_vol_id, dest_vol_id):
    """Swap almost all columns between dest and source.

    We swap fields between source and destination at the end of migration
    because we want to keep the original volume id in the DB but now pointing
    to the migrated volume.

    Original volume will be deleted, after this method original volume will be
    pointed by dest_vol_id, so we set its status and migration_status to
    'deleting'. We change status here to keep it in sync with migration_status
    which must be changed here.

    :param src_vol_id: ID of the migration original volume
    :param dest_vol_id: ID of the migration destination volume
    :returns: Tuple with new source and destination ORM objects. Source will
              be the migrated volume and destination will be original volume
              that will be deleted.
    """
    session = get_session()
    # Do both reads and the field swap inside one transaction so a concurrent
    # reader never sees a half-swapped pair of rows.
    with session.begin():
        src_volume_ref = _volume_get(context, src_vol_id, session=session,
                                     joined_load=False)
        # Snapshot the source's column values before they are overwritten in
        # the loop below.
        # NOTE(review): iteritems() is a Python 2 era model API -- confirm
        # this module is not expected to run under Python 3.
        src_original_data = dict(src_volume_ref.iteritems())
        dest_volume_ref = _volume_get(context, dest_vol_id, session=session,
                                      joined_load=False)

        # NOTE(rpodolyaka): we should copy only column values, while model
        #                   instances also have relationships attributes,
        #                   which should be ignored
        def is_column(inst, attr):
            return attr in inst.__class__.__table__.columns

        for key, value in dest_volume_ref.iteritems():
            value_to_dst = src_original_data.get(key)
            # The implementation of update_migrated_volume will decide the
            # values for _name_id and provider_location.
            if (key in ('id', 'provider_location')
                    or not is_column(dest_volume_ref, key)):
                continue

            # Destination must have a _name_id since the id no longer matches
            # the volume. If it doesn't have a _name_id we set one.
            elif key == '_name_id':
                if not dest_volume_ref._name_id:
                    setattr(dest_volume_ref, key, src_volume_ref.id)
                continue
            elif key == 'migration_status':
                value = None
                value_to_dst = 'deleting'
            elif key == 'display_description':
                value_to_dst = 'migration src for ' + src_volume_ref.id
            elif key == 'status':
                value_to_dst = 'deleting'

            setattr(src_volume_ref, key, value)
            setattr(dest_volume_ref, key, value_to_dst)
        return src_volume_ref, dest_volume_ref
@require_admin_context
@_retry_on_deadlock
def volume_destroy(context, volume_id):
# Version 1.1: Added metadata, admin_metadata, volume_attachment, and
# volume_type
# Version 1.2: Added glance_metadata, consistencygroup and snapshots
- VERSION = '1.2'
+ # Version 1.3: Added finish_volume_migration()
+ VERSION = '1.3'
OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
'volume_type', 'volume_attachment', 'consistencygroup',
if not md_was_changed:
self.obj_reset_changes(['metadata'])
    def finish_volume_migration(self, dest_volume):
        """Swap field values between this (source) volume and dest_volume.

        We swap fields between source (i.e. self) and destination at the
        end of migration because we want to keep the original volume id
        in the DB but now pointing to the migrated volume.

        'id', 'provider_location', 'glance_metadata' and the object's extra
        fields are never swapped. Afterwards self carries the destination's
        data with migration_status cleared to None, while dest_volume is
        marked with status/migration_status 'deleting'. dest_volume is saved
        here; persisting self is left to the caller.

        :param dest_volume: Volume object of the migration destination.
        :returns: The updated dest_volume object.
        """
        skip = ({'id', 'provider_location', 'glance_metadata'} |
                set(self.obj_extra_fields))
        for key in set(dest_volume.fields.keys()) - skip:
            # Only swap attributes that are already set. We do not want to
            # unexpectedly trigger a lazy-load.
            if not dest_volume.obj_attr_is_set(key):
                continue

            value = getattr(dest_volume, key)
            value_to_dst = getattr(self, key)

            # Destination must have a _name_id since the id no longer matches
            # the volume. If it doesn't have a _name_id we set one.
            if key == '_name_id':
                if not dest_volume._name_id:
                    setattr(dest_volume, key, self.id)
                continue
            elif key == 'migration_status':
                # Source keeps the original id but is done migrating.
                value = None
                value_to_dst = 'deleting'
            elif key == 'display_description':
                value_to_dst = 'migration src for ' + self.id
            elif key == 'status':
                value_to_dst = 'deleting'

            setattr(self, key, value)
            setattr(dest_volume, key, value_to_dst)

        dest_volume.save()
        return dest_volume
@base.CinderObjectRegistry.register
class VolumeList(base.ObjectListBase, base.CinderObject):
+++ /dev/null
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for finish_volume_migration."""
-
-
-from cinder import context
-from cinder import db
-from cinder import objects
-from cinder import test
-from cinder.tests.unit import utils as testutils
-
-
class FinishVolumeMigrationTestCase(test.TestCase):
    """Test cases for finish_volume_migration."""

    def test_finish_volume_migration_no_volume_type(self):
        self._test_finish_volume_migration()

    def test_finish_volume_migration_with_volume_type(self):
        source_type = {'name': 'old', 'extra_specs': {}}
        dest_type = {'name': 'new', 'extra_specs': {}}
        self._test_finish_volume_migration(source_type=source_type,
                                           dest_type=dest_type)

    def test_finish_volume_migration_none_to_volume_type(self):
        dest_type = {'name': 'new', 'extra_specs': {}}
        self._test_finish_volume_migration(dest_type=dest_type)

    def _test_finish_volume_migration(self, source_type=None, dest_type=None):
        # finish_volume_migration is admin-only, so build an admin context.
        ctxt = context.RequestContext(user_id='user_id',
                                      project_id='project_id',
                                      is_admin=True)
        source_type_id = None
        dest_type_id = None
        if source_type:
            source_type_id = db.volume_type_create(ctxt, source_type).id
        if dest_type:
            dest_type_id = db.volume_type_create(ctxt, dest_type).id

        # Two volumes on different hosts, mid-migration.
        src_volume = testutils.create_volume(ctxt, host='src',
                                             migration_status='migrating',
                                             status='available',
                                             volume_type_id=source_type_id)
        dest_volume = testutils.create_volume(ctxt, host='dest',
                                              migration_status='target:fake',
                                              status='available',
                                              volume_type_id=dest_type_id)
        db.finish_volume_migration(ctxt, src_volume.id, dest_volume.id)

        # Check that we have copied destination volume DB data into source DB
        # entry so we can keep the id
        src_volume = objects.Volume.get_by_id(ctxt, src_volume.id)
        self.assertEqual('dest', src_volume.host)
        self.assertEqual('available', src_volume.status)
        self.assertIsNone(src_volume.migration_status)
        if dest_type:
            self.assertEqual(dest_type_id, src_volume.volume_type_id)
        else:
            self.assertIsNone(src_volume.volume_type_id)

        # Check that we have copied source volume DB data into destination DB
        # entry and we are setting it to deleting
        dest_volume = objects.Volume.get_by_id(ctxt, dest_volume.id)
        self.assertEqual('src', dest_volume.host)
        self.assertEqual('deleting', dest_volume.status)
        self.assertEqual('deleting', dest_volume.migration_status)
        if source_type:
            self.assertEqual(source_type_id, dest_volume.volume_type_id)
        else:
            self.assertIsNone(dest_volume.volume_type_id)
'ServiceList': '1.0-d242d3384b68e5a5a534e090ff1d5161',
'Snapshot': '1.0-a6c33eefeadefb324d79f72f66c54e9a',
'SnapshotList': '1.0-71661e7180ef6cc51501704a9bea4bf1',
- 'Volume': '1.2-97c3977846dae6588381e7bd3e6e6558',
+ 'Volume': '1.3-97c3977846dae6588381e7bd3e6e6558',
'VolumeAttachment': '1.0-f14a7c03ffc5b93701d496251a5263aa',
'VolumeAttachmentList': '1.0-307d2b6c8dd55ef854f6386898e9e98e',
'VolumeList': '1.1-03ba6cb8c546683e64e15c50042cb1a3',
volume.volume_glance_metadata = [{'key': 'prs', 'value': 'tuw'}]
self.assertEqual({'prs': 'tuw'}, volume.glance_metadata)
    @mock.patch('cinder.db.volume_metadata_update', return_value={})
    @mock.patch('cinder.db.volume_update')
    def test_finish_volume_migration(self, volume_update, metadata_update):
        # Build two Volume objects from fake DB rows; DB writes are mocked
        # out so only the in-memory field swap is exercised.
        src_volume_db = fake_volume.fake_db_volume(**{'id': '1'})
        dest_volume_db = fake_volume.fake_db_volume(**{'id': '2'})
        src_volume = objects.Volume._from_db_object(
            self.context, objects.Volume(), src_volume_db,
            expected_attrs=['metadata', 'glance_metadata'])
        dest_volume = objects.Volume._from_db_object(
            self.context, objects.Volume(), dest_volume_db,
            expected_attrs=['metadata', 'glance_metadata'])
        updated_dest_volume = src_volume.finish_volume_migration(
            dest_volume)
        # The destination must now look like the migration leftover: marked
        # deleting, described as the migration source, carrying src's id as
        # its _name_id, and persisted via the (mocked) volume_update.
        self.assertEqual('deleting', updated_dest_volume.migration_status)
        self.assertEqual('migration src for ' + src_volume.id,
                         updated_dest_volume.display_description)
        self.assertEqual(src_volume.id, updated_dest_volume._name_id)
        self.assertTrue(volume_update.called)

        # Ignore these attributes, since they were updated by
        # finish_volume_migration
        ignore_keys = ('id', 'provider_location', '_name_id',
                       'migration_status', 'display_description', 'status')
        # NOTE(review): iteritems() relies on the versioned object's
        # dict-compat mixin (Python 2 style) -- confirm before a py3 move.
        dest_vol_filtered = {k: v for k, v in updated_dest_volume.iteritems()
                             if k not in ignore_keys}
        src_vol_filtered = {k: v for k, v in src_volume.iteritems()
                            if k not in ignore_keys}
        self.assertEqual(src_vol_filtered, dest_vol_filtered)
class TestVolumeList(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.volume_get_all')
# the current host and driver object is for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
+ volume.refresh()
+ new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
- __, updated_new = self.db.finish_volume_migration(
- ctxt, volume.id, new_volume.id)
+ updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
- volume = objects.Volume.get_by_id(context, volume_id)
+ volume = objects.Volume.get_by_id(ctxt, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled