Add ability to update migration info on backend
author    John Griffith <john.griffith@solidfire.com>
          Tue, 21 Oct 2014 23:19:22 +0000 (23:19 +0000)
committer John Griffith <john.griffith8@gmail.com>
          Fri, 24 Oct 2014 20:20:32 +0000 (14:20 -0600)
The current migration process creates a new volume,
transfers its contents, then deletes the original and
modifies the new volume to have the previous ID.

All in all this is somewhat troublesome, but regardless,
the bigger problem is that the generic implementation
doesn't provide any method to tell the backend devices
that their device names/IDs have changed.

This patch provides a method to inform backends
that a migration operation has been completed on
their target volume.

It shouldn't be necessary to do anything with the originating
(source) volume, because it is deleted as part of the process.
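
As a hedged illustration (not part of this patch), a backend driver
could override the new hook roughly like so; the driver class name and
the rename helper below are hypothetical:

    class ExampleDriver(VolumeDriver):
        def update_migrated_volume(self, ctxt, volume, new_volume):
            # Hypothetical backend call: rename the device that was
            # created for new_volume so it matches the name/ID the
            # volume keeps after migration completes.
            self._rename_backend_volume(src=new_volume['name'],
                                        dst=volume['name'])
            # No DB changes needed in this sketch.
            return None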

Change-Id: Ib5e6a47fe9eedab3e60e77a6c2d987355c0bf167
Closes-Bug: #1383499

cinder/tests/test_volume.py
cinder/volume/driver.py
cinder/volume/manager.py
cinder/volume/rpcapi.py

index ec4a81c1237e3b18e51e4eae64413f63f47adaa8..70fa10169b05c5e8ba5d973d4b09744ef92b974d 100644 (file)
@@ -2798,6 +2798,12 @@ class VolumeTestCase(BaseVolumeTestCase):
                        lambda x, y, z, remote='dest': True)
         self.stubs.Set(volume_rpcapi.VolumeAPI, 'delete_volume',
                        fake_delete_volume_rpc)
+        self.stubs.Set(volume_rpcapi.VolumeAPI,
+                       'update_migrated_volume',
+                       lambda *args: self.volume.update_migrated_volume(
+                           self.context,
+                           args[1],
+                           args[2]))
         error_logs = []
         LOG = logging.getLogger('cinder.volume.manager')
         self.stubs.Set(LOG, 'error', lambda x: error_logs.append(x))
@@ -2835,6 +2841,12 @@ class VolumeTestCase(BaseVolumeTestCase):
                        lambda *args: self.volume.attach_volume(args[1],
                                                                args[2]['id'],
                                                                *args[3:]))
+
+        self.stubs.Set(volume_rpcapi.VolumeAPI, 'update_migrated_volume',
+                       lambda *args: self.volume.update_migrated_volume(
+                           elevated,
+                           args[1],
+                           args[2]))
         self.stubs.Set(self.volume.driver, 'attach_volume',
                        lambda *args, **kwargs: None)
 
index efa70baf79604c5369925a82d5fa7a9e48f9e1be..750e45baf12aa77cb276966e0763dd66edd6b2e1 100644 (file)
@@ -853,6 +853,16 @@ class VolumeDriver(object):
         """
         return False
 
+    def update_migrated_volume(self, ctxt, volume, new_volume):
+        """Return model update for migrated volume.
+
+        :param volume: The original volume that was migrated to this backend
+        :param new_volume: The migration volume object that was created on
+                           this backend as part of the migration process
+        :return model_update to update DB with any needed changes
+        """
+        return None
+
 
 class ISCSIDriver(VolumeDriver):
     """Executes commands relating to ISCSI volumes.
index 2e5b774b407b96597fe3ecc4a9bb36755280b7b0..c869c04b419a2cc64b569344c119e945caa18078 100644 (file)
@@ -154,7 +154,7 @@ def locked_snapshot_operation(f):
 class VolumeManager(manager.SchedulerDependentManager):
     """Manages attachable block storage devices."""
 
-    RPC_API_VERSION = '1.18'
+    RPC_API_VERSION = '1.19'
 
     target = messaging.Target(version=RPC_API_VERSION)
 
@@ -263,9 +263,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             return False
 
     def init_host(self):
-        """Do any initialization that needs to be run if this is a
-           standalone service.
-        """
+        """Perform any required initialization."""
 
         ctxt = context.get_admin_context()
         LOG.info(_("Starting volume driver %(driver_name)s (%(version)s)") %
@@ -1058,7 +1056,8 @@ class VolumeManager(manager.SchedulerDependentManager):
                                              remote='dest')
                 # The above call is synchronous so we complete the migration
                 self.migrate_volume_completion(ctxt, volume['id'],
-                                               new_volume['id'], error=False)
+                                               new_volume['id'],
+                                               error=False)
             else:
                 nova_api = compute.API()
                 # This is an async call to Nova, which will call the completion
@@ -1132,6 +1131,12 @@ class VolumeManager(manager.SchedulerDependentManager):
             msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
             LOG.error(msg % {'vol': volume_id, 'err': ex})
 
+        # Give driver (new_volume) a chance to update things as needed
+        # Note this needs to go through rpc to the host of the new volume
+        # the current host and driver object is for the "existing" volume
+        rpcapi.update_migrated_volume(ctxt,
+                                      volume,
+                                      new_volume)
         self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
         self.db.volume_destroy(ctxt, new_volume_id)
         if status_update.get('status') == 'in-use':
@@ -1142,6 +1147,8 @@ class VolumeManager(manager.SchedulerDependentManager):
         self.db.volume_update(ctxt, volume_id, updates)
 
         if 'in-use' in (status_update.get('status'), volume['status']):
+            # NOTE(jdg): if we're passing the ref here, why are we
+            # also passing in the various fields from that ref?
             rpcapi.attach_volume(ctxt,
                                  volume,
                                  volume['instance_uuid'],
@@ -1974,3 +1981,15 @@ class VolumeManager(manager.SchedulerDependentManager):
             context, cgsnapshot_ref, "delete.end")
 
         return True
+
+    def update_migrated_volume(self, ctxt, volume, new_volume):
+        """Finalize migration process on backend device."""
+
+        model_update = None
+        model_update = self.driver.update_migrated_volume(ctxt,
+                                                          volume,
+                                                          new_volume)
+        if model_update:
+            self.db.volume_update(ctxt.elevated(),
+                                  volume['id'],
+                                  model_update)
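
A hedged sketch of how the manager-side behavior above could be
exercised in a test; the test class placement and mock usage are
illustrative, not part of this patch:

    import mock

    class VolumeMigrationUpdateTestCase(BaseVolumeTestCase):
        def test_update_migrated_volume_persists_model_update(self):
            volume = tests_utils.create_volume(self.context)
            new_volume = tests_utils.create_volume(self.context)
            update = {'provider_location': 'new-loc'}
            with mock.patch.object(self.volume.driver,
                                   'update_migrated_volume',
                                   return_value=update), \
                    mock.patch.object(self.volume.db,
                                      'volume_update') as vol_update:
                self.volume.update_migrated_volume(self.context,
                                                   volume, new_volume)
                # The driver's model_update should land in the DB under
                # the original volume's ID (via an elevated context).
                vol_update.assert_called_once_with(mock.ANY,
                                                   volume['id'],
                                                   update)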
index 6c7638c35dbabf03b1196c389839c45b9da5b170..a48c5f07525227e8a9c6d9ee65dae29381a36711 100644 (file)
@@ -57,6 +57,7 @@ class VolumeAPI(object):
         1.18 - Adds create_consistencygroup, delete_consistencygroup,
                create_cgsnapshot, and delete_cgsnapshot. Also adds
                the consistencygroup_id parameter in create_volume.
+        1.19 - Adds update_migrated_volume
     '''
 
     BASE_RPC_API_VERSION = '1.0'
@@ -65,7 +66,7 @@ class VolumeAPI(object):
         super(VolumeAPI, self).__init__()
         target = messaging.Target(topic=CONF.volume_topic,
                                   version=self.BASE_RPC_API_VERSION)
-        self.client = rpc.get_client(target, '1.18')
+        self.client = rpc.get_client(target, '1.19')
 
     def create_consistencygroup(self, ctxt, group, host):
         new_host = utils.extract_host(host)
@@ -226,3 +227,11 @@ class VolumeAPI(object):
         new_host = utils.extract_host(volume['host'])
         cctxt = self.client.prepare(server=new_host, version='1.17')
         cctxt.cast(ctxt, 'reenable_replication', volume_id=volume['id'])
+
+    def update_migrated_volume(self, ctxt, volume, new_volume):
+        host = utils.extract_host(new_volume['host'])
+        cctxt = self.client.prepare(server=host, version='1.19')
+        cctxt.call(ctxt,
+                   'update_migrated_volume',
+                   volume=volume,
+                   new_volume=new_volume)
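
Because prepare(version='1.19') pins the call, it will fail against an
older volume service during a rolling upgrade. One hedged sketch of
guarding for that with oslo.messaging's RPCClient.can_send_version (the
helper function name is hypothetical; whether to guard at all is a
design choice, not part of this patch):

    from cinder.volume import rpcapi as volume_rpcapi

    def notify_backend_if_supported(ctxt, volume, new_volume):
        # Only issue the new 1.19 call when the destination service is
        # new enough to handle it; otherwise skip the notification.
        rpcapi = volume_rpcapi.VolumeAPI()
        if rpcapi.client.can_send_version('1.19'):
            rpcapi.update_migrated_volume(ctxt, volume, new_volume)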