From: John Griffith
Date: Tue, 21 Oct 2014 23:19:22 +0000 (+0000)
Subject: Add ability to update migration info on backend
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=f3df1c8d57587bf112c464feedd523080550b7a6;p=openstack-build%2Fcinder-build.git

Add ability to update migration info on backend

The current migration process creates a new volume, transfers its
contents, then deletes the original and modifies the new volume to
take over the previous ID. All in all this is somewhat troublesome,
but the bigger problem is that the generic implementation doesn't
provide any way to tell the backend devices that their device
names/IDs have changed.

This patch provides a method to inform backends that a migration
operation has been completed on their target volume. It shouldn't be
necessary to do anything with the originating (source) volume because
it's deleted as part of the process.
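
As a rough illustration, a driver would override the new hook along
these lines (a sketch only: ExampleDriver and its _luns bookkeeping
are invented here to show the shape of the interface and are not part
of this patch):

    class ExampleDriver(object):
        """Toy driver showing the shape of the new hook."""

        def __init__(self):
            # Fake backend state: cinder volume name -> backend LUN id.
            self._luns = {}

        def update_migrated_volume(self, ctxt, volume, new_volume):
            # The manager is about to swap IDs in the DB, so move the
            # backend record from the migration volume's name to the
            # original volume's name, and return any fields the manager
            # should persist for us via volume_update().
            lun = self._luns.pop(new_volume['name'], None)
            if lun is None:
                return None
            self._luns[volume['name']] = lun
            return {'provider_location': lun}

Drivers that rename things in place and have nothing to persist can
simply return None, which the manager treats as a no-op.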
- """ + """Perform any required initialization.""" ctxt = context.get_admin_context() LOG.info(_("Starting volume driver %(driver_name)s (%(version)s)") % @@ -1058,7 +1056,8 @@ class VolumeManager(manager.SchedulerDependentManager): remote='dest') # The above call is synchronous so we complete the migration self.migrate_volume_completion(ctxt, volume['id'], - new_volume['id'], error=False) + new_volume['id'], + error=False) else: nova_api = compute.API() # This is an async call to Nova, which will call the completion @@ -1132,6 +1131,12 @@ class VolumeManager(manager.SchedulerDependentManager): msg = _("Failed to delete migration source vol %(vol)s: %(err)s") LOG.error(msg % {'vol': volume_id, 'err': ex}) + # Give driver (new_volume) a chance to update things as needed + # Note this needs to go through rpc to the host of the new volume + # the current host and driver object is for the "existing" volume + rpcapi.update_migrated_volume(ctxt, + volume, + new_volume) self.db.finish_volume_migration(ctxt, volume_id, new_volume_id) self.db.volume_destroy(ctxt, new_volume_id) if status_update.get('status') == 'in-use': @@ -1142,6 +1147,8 @@ class VolumeManager(manager.SchedulerDependentManager): self.db.volume_update(ctxt, volume_id, updates) if 'in-use' in (status_update.get('status'), volume['status']): + # NOTE(jdg): if we're passing the ref here, why are we + # also passing in the various fields from that ref? rpcapi.attach_volume(ctxt, volume, volume['instance_uuid'], @@ -1974,3 +1981,15 @@ class VolumeManager(manager.SchedulerDependentManager): context, cgsnapshot_ref, "delete.end") return True + + def update_migrated_volume(self, ctxt, volume, new_volume): + """Finalize migration process on backend device.""" + + model_update = None + model_update = self.driver.update_migrated_volume(ctxt, + volume, + new_volume) + if model_update: + self.db.volume_update(ctxt.elevated(), + volume['id'], + model_update) diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py index 6c7638c35..a48c5f075 100644 --- a/cinder/volume/rpcapi.py +++ b/cinder/volume/rpcapi.py @@ -57,6 +57,7 @@ class VolumeAPI(object): 1.18 - Adds create_consistencygroup, delete_consistencygroup, create_cgsnapshot, and delete_cgsnapshot. Also adds the consistencygroup_id parameter in create_volume. + 1.19 - Adds update_migrated_volume ''' BASE_RPC_API_VERSION = '1.0' @@ -65,7 +66,7 @@ class VolumeAPI(object): super(VolumeAPI, self).__init__() target = messaging.Target(topic=CONF.volume_topic, version=self.BASE_RPC_API_VERSION) - self.client = rpc.get_client(target, '1.18') + self.client = rpc.get_client(target, '1.19') def create_consistencygroup(self, ctxt, group, host): new_host = utils.extract_host(host) @@ -226,3 +227,11 @@ class VolumeAPI(object): new_host = utils.extract_host(volume['host']) cctxt = self.client.prepare(server=new_host, version='1.17') cctxt.cast(ctxt, 'reenable_replication', volume_id=volume['id']) + + def update_migrated_volume(self, ctxt, volume, new_volume): + host = utils.extract_host(new_volume['host']) + cctxt = self.client.prepare(server=host, version='1.19') + cctxt.call(ctxt, + 'update_migrated_volume', + volume=volume, + new_volume=new_volume)