Fix retyping attached volumes requiring migration
author    git-harry <git-harry@live.co.uk>
          Mon, 23 Jun 2014 14:47:34 +0000 (15:47 +0100)
committer git-harry <git-harry@live.co.uk>
          Mon, 23 Jun 2014 14:47:34 +0000 (15:47 +0100)
Modifies the retype method so that the volume db entry is only updated
if the volume isn't migrated.

The migrate_volume_completion method is made responsible for detaching
the old volume and attaching the new one. This takes the requirement
away from nova, because the calls need to be made in a specific order
that isn't guaranteed when they are performed by nova.

attach_volume now clears migration_status to mark the migration as
complete, because attaching is the final action performed by cinder.

Change-Id: Ia15636893be4f0077a4f75e746a8ab1a8798c44b
Partial-Bug: 1316079

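For orientation, a minimal sketch of the completion flow the manager.py
hunks below produce (not the patch itself: the manager's collaborators
self.db, self.detach_volume and rpcapi are assumed to exist as in the
real method, status_update is derived earlier in the real code, and
error handling, quotas and notifications are elided):

    def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
                                  status_update=None):
        volume = self.db.volume_get(ctxt, volume_id)

        # Detach before deleting the source volume; this is the ordering
        # that nova previously had to guarantee itself.
        if status_update and status_update['status'] == 'in-use':
            self.detach_volume(ctxt, volume_id)
        self.delete_volume(ctxt, volume_id)

        # Swap the db records so volume_id now refers to the new volume.
        self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
        self.db.volume_destroy(ctxt, new_volume_id)

        if status_update:
            # Attached volume: leave migration_status at 'completing' and
            # re-attach; attach_volume clears it as cinder's final action.
            updates = {'migration_status': 'completing'}
            updates.update(status_update)
            self.db.volume_update(ctxt, volume_id, updates)
            rpcapi.attach_volume(ctxt, volume, volume['instance_uuid'],
                                 volume['attached_host'],
                                 volume['mountpoint'], 'rw')
        else:
            self.db.volume_update(ctxt, volume_id,
                                  {'migration_status': None})
        return volume['id']
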
cinder/tests/test_volume.py
cinder/volume/manager.py

index 6b53c5d6004197f67ad3f46e133d0210885b9b18..260fa43a35617008fb81ea3974faa1708d345b9f 100644 (file)
@@ -2566,7 +2566,10 @@ class VolumeTestCase(BaseVolumeTestCase):
                                            volume_type_id=old_vol_type['id'])
         if snap:
             self._create_snapshot(volume['id'], size=volume['size'])
-        host_obj = {'host': 'newhost', 'capabilities': {}}
+        if driver or diff_equal:
+            host_obj = {'host': CONF.host, 'capabilities': {}}
+        else:
+            host_obj = {'host': 'newhost', 'capabilities': {}}
 
         reserve_opts = {'volumes': 1, 'gigabytes': volume['size']}
         QUOTAS.add_volume_type_opts(self.context,
@@ -2607,10 +2610,15 @@ class VolumeTestCase(BaseVolumeTestCase):
             volumes_in_use = 0
 
         # check properties
-        if not exc:
+        if driver or diff_equal:
             self.assertEqual(volume['volume_type_id'], vol_type['id'])
             self.assertEqual(volume['status'], 'available')
-            self.assertEqual(volume['host'], 'newhost')
+            self.assertEqual(volume['host'], CONF.host)
+            self.assertEqual(volumes_in_use, 1)
+        elif not exc:
+            self.assertEqual(volume['volume_type_id'], old_vol_type['id'])
+            self.assertEqual(volume['status'], 'retyping')
+            self.assertEqual(volume['host'], CONF.host)
             self.assertEqual(volumes_in_use, 1)
         else:
             self.assertEqual(volume['volume_type_id'], old_vol_type['id'])
index 30617a376de51071690b3e5fd13860dd5af33b10..f661f081510cf4de06d849c4034482cfb2e24aba 100644 (file)
@@ -578,7 +578,8 @@ class VolumeManager(manager.SchedulerDependentManager):
                         volume_metadata.get('attached_mode') != mode):
                     msg = _("being attached by different mode")
                     raise exception.InvalidVolume(reason=msg)
-            elif volume['status'] != "available":
+            elif (not volume['migration_status'] and
+                  volume['status'] != "available"):
                 msg = _("status must be available or attaching")
                 raise exception.InvalidVolume(reason=msg)
 
@@ -633,6 +634,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                                              instance_uuid,
                                              host_name_sanitized,
                                              mountpoint)
+            if volume['migration_status']:
+                self.db.volume_update(context, volume_id,
+                                      {'migration_status': None})
             self._notify_about_volume_usage(context, volume, "attach.end")
         return do_attach()
 
@@ -1011,6 +1015,8 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         # Delete the source volume (if it fails, don't fail the migration)
         try:
+            if status_update['status'] == 'in-use':
+                self.detach_volume(ctxt, volume_id)
             self.delete_volume(ctxt, volume_id)
         except Exception as ex:
             msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
@@ -1018,10 +1024,20 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
         self.db.volume_destroy(ctxt, new_volume_id)
-        updates = {'migration_status': None}
         if status_update:
+            updates = {'migration_status': 'completing'}
             updates.update(status_update)
+        else:
+            updates = {'migration_status': None}
         self.db.volume_update(ctxt, volume_id, updates)
+
+        if status_update:
+            rpcapi.attach_volume(ctxt,
+                                 volume,
+                                 volume['instance_uuid'],
+                                 volume['attached_host'],
+                                 volume['mountpoint'],
+                                 'rw')
         return volume['id']
 
     def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
@@ -1279,11 +1295,11 @@ class VolumeManager(manager.SchedulerDependentManager):
                 with excutils.save_and_reraise_exception():
                     _retype_error(context, volume_id, old_reservations,
                                   new_reservations, status_update)
-
-        self.db.volume_update(context, volume_id,
-                              {'volume_type_id': new_type_id,
-                               'host': host['host'],
-                               'status': status_update['status']})
+        else:
+            self.db.volume_update(context, volume_id,
+                                  {'volume_type_id': new_type_id,
+                                   'host': host['host'],
+                                   'status': status_update['status']})
 
         if old_reservations:
             QUOTAS.commit(context, old_reservations, project_id=project_id)
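
In the last hunk, the dedented db update becomes the else branch of the
enclosing guard (assuming it pairs with the method's if-not-retyped
check, which the hunk context doesn't show): the volume row is only
written here when the driver retyped the volume in place, while on the
migration path migrate_volume_completion rewrites the record instead.
A minimal sketch of that shape:

    if not retyped:
        # Migration path: no direct db update here; the volume record is
        # rewritten by finish_volume_migration on completion.
        try:
            self.migrate_volume(context, volume_id, host)  # kwargs elided
        except Exception:
            with excutils.save_and_reraise_exception():
                _retype_error(context, volume_id, old_reservations,
                              new_reservations, status_update)
    else:
        # In-place retype: safe to update the volume row directly.
        self.db.volume_update(context, volume_id,
                              {'volume_type_id': new_type_id,
                               'host': host['host'],
                               'status': status_update['status']})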