]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Volume status management during migration
authorVincent Hou <sbhou@cn.ibm.com>
Thu, 28 May 2015 03:18:09 +0000 (11:18 +0800)
committerVincent Hou <sbhou@cn.ibm.com>
Fri, 28 Aug 2015 01:54:28 +0000 (09:54 +0800)
This patch proposes a new implementation for the status and
the migration_status for volumes.

* The initial migration_status is None, meaning no migration has been
done; a migration_status of 'error' means the previous migration failed,
and 'success' means the previous migration succeeded.

* If the key 'lock_volume' is set to True in the request, the volume
status will be set to 'maintenance' during migration and will revert
to its original status after migration. Otherwise, if the
key 'lock_volume' is set to False, the volume status will remain the
same as its original status. The default value of lock_volume is
False, and it applies to volumes in the 'available' status.

* From the REST API's perspective, the create, update and delete
actions are not allowed while the volume is in 'maintenance', because
that status means the volume is out of service. If the volume is not
in maintenance mode, the migration can be interrupted by other
requests, e.g. attach. The termination of an in-progress migration
will be addressed in a separate patch.

DocImpact
APIImpact The key 'lock_volume' has been added to the API,
indicating whether the volume status should change to 'maintenance'
during migration. The migration_status has been added to the results
returned by the volume list command when the request comes from an admin.

Change-Id: Ia86421f2d6fce61dcfeb073f8e7b9c9dde517373
Partial-implements: blueprint migration-improvement

13 files changed:
cinder/api/contrib/admin_actions.py
cinder/api/v2/views/volumes.py
cinder/api/v2/volume_metadata.py
cinder/db/sqlalchemy/api.py
cinder/scheduler/manager.py
cinder/tests/unit/api/v1/test_volume_metadata.py
cinder/tests/unit/api/v2/test_volume_metadata.py
cinder/tests/unit/api/v2/test_volumes.py
cinder/tests/unit/scheduler/test_scheduler.py
cinder/tests/unit/test_volume.py
cinder/utils.py
cinder/volume/api.py
cinder/volume/manager.py

index cb665d2875a56a620234db84173f65b096cfc95c..1e8a89f7d8a6bc06a484fa834b794e829578e0f1 100644 (file)
@@ -14,7 +14,6 @@
 
 from oslo_log import log as logging
 import oslo_messaging as messaging
-from oslo_utils import strutils
 import webob
 from webob import exc
 
@@ -26,6 +25,7 @@ from cinder import exception
 from cinder.i18n import _
 from cinder import objects
 from cinder import rpc
+from cinder import utils
 from cinder import volume
 
 
@@ -127,12 +127,12 @@ class VolumeAdminController(AdminController):
     # Perhaps we don't even want any definitions in the abstract
     # parent class?
     valid_status = AdminController.valid_status.union(
-        set(['attaching', 'in-use', 'detaching']))
+        ('attaching', 'in-use', 'detaching', 'maintenance'))
 
-    valid_attach_status = set(['detached', 'attached', ])
-    valid_migration_status = set(['migrating', 'error',
-                                  'completing', 'none',
-                                  'starting', ])
+    valid_attach_status = ('detached', 'attached',)
+    valid_migration_status = ('migrating', 'error',
+                              'success', 'completing',
+                              'none', 'starting',)
 
     def _update(self, *args, **kwargs):
         db.volume_update(*args, **kwargs)
@@ -220,15 +220,11 @@ class VolumeAdminController(AdminController):
         try:
             host = params['host']
         except KeyError:
-            raise exc.HTTPBadRequest(explanation=_("Must specify 'host'"))
-        force_host_copy = params.get('force_host_copy', 'False')
-        try:
-            force_host_copy = strutils.bool_from_string(force_host_copy,
-                                                        strict=True)
-        except ValueError as e:
-            msg = (_("Invalid value for force_host_copy: '%s'") % e.message)
-            raise exc.HTTPBadRequest(explanation=msg)
-        self.volume_api.migrate_volume(context, volume, host, force_host_copy)
+            raise exc.HTTPBadRequest(explanation=_("Must specify 'host'."))
+        force_host_copy = utils.get_bool_param('force_host_copy', params)
+        lock_volume = utils.get_bool_param('lock_volume', params)
+        self.volume_api.migrate_volume(context, volume, host, force_host_copy,
+                                       lock_volume)
         return webob.Response(status_int=202)
 
     @wsgi.action('os-migrate_volume_completion')
index d428827d7decf48a63401f4eb0aa913a6e74a901..c72aec76028c8c0aa832f51c8883b194df303883 100644 (file)
@@ -39,7 +39,7 @@ class ViewBuilder(common.ViewBuilder):
         """Detailed view of a list of volumes."""
         return self._list_view(self.detail, request, volumes,
                                volume_count,
-                               coll_name=self._collection_name + '/detail')
+                               self._collection_name + '/detail')
 
     def summary(self, request, volume):
         """Generic, non-detailed view of an volume."""
@@ -54,7 +54,7 @@ class ViewBuilder(common.ViewBuilder):
 
     def detail(self, request, volume):
         """Detailed view of a single volume."""
-        return {
+        volume_ref = {
             'volume': {
                 'id': volume.get('id'),
                 'status': volume.get('status'),
@@ -74,9 +74,13 @@ class ViewBuilder(common.ViewBuilder):
                 'encrypted': self._is_volume_encrypted(volume),
                 'replication_status': volume.get('replication_status'),
                 'consistencygroup_id': volume.get('consistencygroup_id'),
-                'multiattach': volume.get('multiattach')
+                'multiattach': volume.get('multiattach'),
             }
         }
+        if request.environ['cinder.context'].is_admin:
+            volume_ref['volume']['migration_status'] = (
+                volume.get('migration_status'))
+        return volume_ref
 
     def _is_volume_encrypted(self, volume):
         """Determine if volume is encrypted."""
index 279a1f1c9d2739f5744c4106b04a564396bd06fc..f5f9649a30f9c34e17c80f4a221946d1b91dfc7e 100644 (file)
@@ -30,12 +30,17 @@ class Controller(wsgi.Controller):
         super(Controller, self).__init__()
 
     def _get_metadata(self, context, volume_id):
+        # The metadata is at the second position of the tuple returned
+        # from _get_volume_and_metadata
+        return self._get_volume_and_metadata(context, volume_id)[1]
+
+    def _get_volume_and_metadata(self, context, volume_id):
         try:
             volume = self.volume_api.get(context, volume_id)
             meta = self.volume_api.get_volume_metadata(context, volume)
         except exception.VolumeNotFound as error:
             raise webob.exc.HTTPNotFound(explanation=error.msg)
-        return meta
+        return (volume, meta)
 
     @wsgi.serializers(xml=common.MetadataTemplate)
     def index(self, req, volume_id):
@@ -133,14 +138,13 @@ class Controller(wsgi.Controller):
         """Deletes an existing metadata."""
         context = req.environ['cinder.context']
 
-        metadata = self._get_metadata(context, volume_id)
+        volume, metadata = self._get_volume_and_metadata(context, volume_id)
 
         if id not in metadata:
             msg = _("Metadata item was not found")
             raise webob.exc.HTTPNotFound(explanation=msg)
 
         try:
-            volume = self.volume_api.get(context, volume_id)
             self.volume_api.delete_volume_metadata(
                 context,
                 volume,
index 5906fbd426306f570005874b22a0ae782f5dd055..b3d0877b9a1c50869ea118d2e73bcf52cbeebdbf 100644 (file)
@@ -1285,8 +1285,9 @@ def volume_detached(context, volume_id, attachment_id):
         if not remain_attachment:
             # Hide status update from user if we're performing volume migration
             # or uploading it to image
-            if (not volume_ref['migration_status'] and
-                    not (volume_ref['status'] == 'uploading')):
+            if ((not volume_ref['migration_status'] and
+                    not (volume_ref['status'] == 'uploading')) or
+                    volume_ref['migration_status'] in ('success', 'error')):
                 volume_ref['status'] = 'available'
 
             volume_ref['attach_status'] = 'detached'
index 352db2376aee8044894a910c751aecc267f936f2..542c04c0c1b3fed94d64c5626b0bfbf8d15da4e1 100644 (file)
@@ -146,7 +146,14 @@ class SchedulerManager(manager.Manager):
         self._wait_for_scheduler()
 
         def _migrate_volume_set_error(self, context, ex, request_spec):
-            volume_state = {'volume_state': {'migration_status': None}}
+            volume = db.volume_get(context, request_spec['volume_id'])
+            if volume.status == 'maintenance':
+                previous_status = (
+                    volume.previous_status or 'maintenance')
+                volume_state = {'volume_state': {'migration_status': 'error',
+                                                 'status': previous_status}}
+            else:
+                volume_state = {'volume_state': {'migration_status': 'error'}}
             self._set_volume_state_and_notify('migrate_volume_to_host',
                                               volume_state,
                                               context, ex, request_spec)
@@ -183,12 +190,9 @@ class SchedulerManager(manager.Manager):
                                      volume_ref, msg, reservations):
             if reservations:
                 QUOTAS.rollback(context, reservations)
-            if (volume_ref['volume_attachment'] is None or
-               len(volume_ref['volume_attachment']) == 0):
-                orig_status = 'available'
-            else:
-                orig_status = 'in-use'
-            volume_state = {'volume_state': {'status': orig_status}}
+            previous_status = (
+                volume_ref.previous_status or volume_ref.status)
+            volume_state = {'volume_state': {'status': previous_status}}
             self._set_volume_state_and_notify('retype', volume_state,
                                               context, ex, request_spec, msg)
 
index b8cc95aee920c6ad5266d27957e6678d35ab7b63..9c6a0d4836182b70643990a10760215160c01c57 100644 (file)
@@ -15,6 +15,7 @@
 
 import uuid
 
+import mock
 from oslo_config import cfg
 from oslo_serialization import jsonutils
 import webob
@@ -200,26 +201,42 @@ class volumeMetaDataTest(test.TestCase):
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.show, req, self.req_id, 'key6')
 
-    def test_delete(self):
-        self.stubs.Set(cinder.db, 'volume_metadata_get',
-                       return_volume_metadata)
-        self.stubs.Set(cinder.db, 'volume_metadata_delete',
-                       delete_volume_metadata)
+    @mock.patch.object(cinder.db, 'volume_metadata_delete')
+    @mock.patch.object(cinder.db, 'volume_metadata_get')
+    def test_delete(self, metadata_get, metadata_delete):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_get.side_effect = return_volume_metadata
+        metadata_delete.side_effect = delete_volume_metadata
         req = fakes.HTTPRequest.blank(self.url + '/key2')
         req.method = 'DELETE'
-        res = self.controller.delete(req, self.req_id, 'key2')
-
-        self.assertEqual(200, res.status_int)
-
-    def test_delete_nonexistent_volume(self):
-        self.stubs.Set(cinder.db, 'volume_metadata_get',
-                       return_volume_metadata)
-        self.stubs.Set(cinder.db, 'volume_metadata_delete',
-                       return_volume_nonexistent)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res = self.controller.delete(req, self.req_id, 'key2')
+            self.assertEqual(200, res.status_int)
+            get_volume.assert_called_with(fake_context, self.req_id)
+
+    @mock.patch.object(cinder.db, 'volume_metadata_delete')
+    @mock.patch.object(cinder.db, 'volume_metadata_get')
+    def test_delete_nonexistent_volume(self, metadata_get, metadata_delete):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_get.side_effect = return_volume_metadata
+        metadata_delete.side_effect = return_volume_nonexistent
         req = fakes.HTTPRequest.blank(self.url + '/key1')
         req.method = 'DELETE'
-        self.assertRaises(webob.exc.HTTPNotFound,
-                          self.controller.delete, req, self.req_id, 'key1')
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPNotFound,
+                              self.controller.delete, req,
+                              self.req_id, 'key1')
+            get_volume.assert_called_with(fake_context, self.req_id)
 
     def test_delete_meta_not_found(self):
         self.stubs.Set(cinder.db, 'volume_metadata_get',
@@ -229,31 +246,40 @@ class volumeMetaDataTest(test.TestCase):
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.delete, req, self.req_id, 'key6')
 
-    def test_create(self):
-        self.stubs.Set(cinder.db, 'volume_metadata_get',
-                       return_empty_volume_metadata)
-        self.stubs.Set(cinder.db, 'volume_metadata_update',
-                       return_create_volume_metadata)
-
-        req = fakes.HTTPRequest.blank('/v1/volume_metadata')
+    @mock.patch.object(cinder.db, 'volume_metadata_update')
+    @mock.patch.object(cinder.db, 'volume_metadata_get')
+    def test_create(self, metadata_get, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_get.side_effect = return_empty_volume_metadata
+        metadata_update.side_effect = return_create_volume_metadata
+        req = fakes.HTTPRequest.blank('/v2/volume_metadata')
         req.method = 'POST'
         req.content_type = "application/json"
         body = {"metadata": {"key1": "value1",
                              "key2": "value2",
                              "key3": "value3", }}
         req.body = jsonutils.dumps(body)
-        res_dict = self.controller.create(req, self.req_id, body)
-        self.assertEqual(body, res_dict)
-
-    def test_create_with_keys_in_uppercase_and_lowercase(self):
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res_dict = self.controller.create(req, self.req_id, body)
+            self.assertEqual(body, res_dict)
+
+    @mock.patch.object(cinder.db, 'volume_metadata_update')
+    @mock.patch.object(cinder.db, 'volume_metadata_get')
+    def test_create_with_keys_in_uppercase_and_lowercase(self, metadata_get,
+                                                         metadata_update):
         # if the keys in uppercase_and_lowercase, should return the one
         # which server added
-        self.stubs.Set(cinder.db, 'volume_metadata_get',
-                       return_empty_volume_metadata)
-        self.stubs.Set(cinder.db, 'volume_metadata_update',
-                       return_create_volume_metadata_insensitive)
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_get.side_effect = return_empty_volume_metadata
+        metadata_update.side_effect = return_create_volume_metadata_insensitive
 
-        req = fakes.HTTPRequest.blank('/v1/volume_metadata')
+        req = fakes.HTTPRequest.blank('/v2/volume_metadata')
         req.method = 'POST'
         req.content_type = "application/json"
         body = {"metadata": {"key1": "value1",
@@ -267,8 +293,13 @@ class volumeMetaDataTest(test.TestCase):
                                  "key3": "value3",
                                  "KEY4": "value4"}}
         req.body = jsonutils.dumps(body)
-        res_dict = self.controller.create(req, self.req_id, body)
-        self.assertEqual(expected, res_dict)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res_dict = self.controller.create(req, self.req_id, body)
+            self.assertEqual(expected, res_dict)
 
     def test_create_empty_body(self):
         self.stubs.Set(cinder.db, 'volume_metadata_update',
@@ -321,9 +352,11 @@ class volumeMetaDataTest(test.TestCase):
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.create, req, self.req_id, body)
 
-    def test_update_all(self):
-        self.stubs.Set(cinder.db, 'volume_metadata_update',
-                       return_new_volume_metadata)
+    @mock.patch.object(cinder.db, 'volume_metadata_update')
+    def test_update_all(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_new_volume_metadata
         req = fakes.HTTPRequest.blank(self.url)
         req.method = 'PUT'
         req.content_type = "application/json"
@@ -335,15 +368,24 @@ class volumeMetaDataTest(test.TestCase):
             },
         }
         req.body = jsonutils.dumps(expected)
-        res_dict = self.controller.update_all(req, self.req_id, expected)
-
-        self.assertEqual(expected, res_dict)
-
-    def test_update_all_with_keys_in_uppercase_and_lowercase(self):
-        self.stubs.Set(cinder.db, 'volume_metadata_get',
-                       return_create_volume_metadata)
-        self.stubs.Set(cinder.db, 'volume_metadata_update',
-                       return_new_volume_metadata)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res_dict = self.controller.update_all(req, self.req_id, expected)
+            self.assertEqual(expected, res_dict)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
+
+    @mock.patch.object(cinder.db, 'volume_metadata_update')
+    @mock.patch.object(cinder.db, 'volume_metadata_get')
+    def test_update_all_with_keys_in_uppercase_and_lowercase(self,
+                                                             metadata_get,
+                                                             metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_get.side_effect = return_create_volume_metadata
+        metadata_update.side_effect = return_new_volume_metadata
         req = fakes.HTTPRequest.blank(self.url)
         req.method = 'PUT'
         req.content_type = "application/json"
@@ -363,21 +405,54 @@ class volumeMetaDataTest(test.TestCase):
             },
         }
         req.body = jsonutils.dumps(expected)
-        res_dict = self.controller.update_all(req, self.req_id, body)
-
-        self.assertEqual(expected, res_dict)
-
-    def test_update_all_empty_container(self):
-        self.stubs.Set(cinder.db, 'volume_metadata_update',
-                       return_empty_container_metadata)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res_dict = self.controller.update_all(req, self.req_id, body)
+            self.assertEqual(expected, res_dict)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
+
+    @mock.patch.object(cinder.db, 'volume_metadata_update')
+    def test_update_all_empty_container(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_empty_container_metadata
         req = fakes.HTTPRequest.blank(self.url)
         req.method = 'PUT'
         req.content_type = "application/json"
         expected = {'metadata': {}}
         req.body = jsonutils.dumps(expected)
-        res_dict = self.controller.update_all(req, self.req_id, expected)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res_dict = self.controller.update_all(req, self.req_id, expected)
+            self.assertEqual(expected, res_dict)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
+
+    @mock.patch.object(cinder.db, 'volume_metadata_update')
+    def test_update_item_value_too_long(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_create_volume_metadata
+        req = fakes.HTTPRequest.blank(self.url + '/key1')
+        req.method = 'PUT'
+        body = {"meta": {"key1": ("a" * 260)}}
+        req.body = jsonutils.dumps(body)
+        req.headers["content-type"] = "application/json"
+        req.environ['cinder.context'] = fake_context
 
-        self.assertEqual(expected, res_dict)
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+                              self.controller.update,
+                              req, self.req_id, "key1", body)
+            self.assertFalse(metadata_update.called)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
 
     def test_update_all_malformed_container(self):
         self.stubs.Set(cinder.db, 'volume_metadata_update',
@@ -392,18 +467,24 @@ class volumeMetaDataTest(test.TestCase):
                           self.controller.update_all, req, self.req_id,
                           expected)
 
-    def test_update_all_malformed_data(self):
-        self.stubs.Set(cinder.db, 'volume_metadata_update',
-                       return_create_volume_metadata)
+    @mock.patch.object(cinder.db, 'volume_metadata_update')
+    def test_update_all_malformed_data(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_create_volume_metadata
         req = fakes.HTTPRequest.blank(self.url)
         req.method = 'PUT'
         req.content_type = "application/json"
         expected = {'metadata': ['asdf']}
         req.body = jsonutils.dumps(expected)
+        req.environ['cinder.context'] = fake_context
 
-        self.assertRaises(webob.exc.HTTPBadRequest,
-                          self.controller.update_all, req, self.req_id,
-                          expected)
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPBadRequest,
+                              self.controller.update_all, req, self.req_id,
+                              expected)
 
     def test_update_all_nonexistent_volume(self):
         self.stubs.Set(cinder.db, 'volume_get', return_volume_nonexistent)
@@ -416,17 +497,25 @@ class volumeMetaDataTest(test.TestCase):
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.update_all, req, '100', body)
 
-    def test_update_item(self):
-        self.stubs.Set(cinder.db, 'volume_metadata_update',
-                       return_create_volume_metadata)
+    @mock.patch.object(cinder.db, 'volume_metadata_update')
+    def test_update_item(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_create_volume_metadata
         req = fakes.HTTPRequest.blank(self.url + '/key1')
         req.method = 'PUT'
         body = {"meta": {"key1": "value1"}}
         req.body = jsonutils.dumps(body)
         req.headers["content-type"] = "application/json"
-        res_dict = self.controller.update(req, self.req_id, 'key1', body)
-        expected = {'meta': {'key1': 'value1'}}
-        self.assertEqual(expected, res_dict)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res_dict = self.controller.update(req, self.req_id, 'key1', body)
+            expected = {'meta': {'key1': 'value1'}}
+            self.assertEqual(expected, res_dict)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
 
     def test_update_item_nonexistent_volume(self):
         self.stubs.Set(cinder.db, 'volume_get',
@@ -452,43 +541,47 @@ class volumeMetaDataTest(test.TestCase):
                           self.controller.update, req, self.req_id, 'key1',
                           None)
 
-    def test_update_item_empty_key(self):
-        self.stubs.Set(cinder.db, 'volume_metadata_update',
-                       return_create_volume_metadata)
+    @mock.patch.object(cinder.db, 'volume_metadata_update')
+    def test_update_item_empty_key(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_create_volume_metadata
         req = fakes.HTTPRequest.blank(self.url + '/key1')
         req.method = 'PUT'
         body = {"meta": {"": "value1"}}
         req.body = jsonutils.dumps(body)
         req.headers["content-type"] = "application/json"
-
-        self.assertRaises(webob.exc.HTTPBadRequest,
-                          self.controller.update, req, self.req_id, '', body)
-
-    def test_update_item_key_too_long(self):
-        self.stubs.Set(cinder.db, 'volume_metadata_update',
-                       return_create_volume_metadata)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPBadRequest,
+                              self.controller.update, req, self.req_id,
+                              '', body)
+            self.assertFalse(metadata_update.called)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
+
+    @mock.patch.object(cinder.db, 'volume_metadata_update')
+    def test_update_item_key_too_long(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_create_volume_metadata
         req = fakes.HTTPRequest.blank(self.url + '/key1')
         req.method = 'PUT'
         body = {"meta": {("a" * 260): "value1"}}
         req.body = jsonutils.dumps(body)
         req.headers["content-type"] = "application/json"
+        req.environ['cinder.context'] = fake_context
 
-        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
-                          self.controller.update,
-                          req, self.req_id, ("a" * 260), body)
-
-    def test_update_item_value_too_long(self):
-        self.stubs.Set(cinder.db, 'volume_metadata_update',
-                       return_create_volume_metadata)
-        req = fakes.HTTPRequest.blank(self.url + '/key1')
-        req.method = 'PUT'
-        body = {"meta": {"key1": ("a" * 260)}}
-        req.body = jsonutils.dumps(body)
-        req.headers["content-type"] = "application/json"
-
-        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
-                          self.controller.update,
-                          req, self.req_id, "key1", body)
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+                              self.controller.update,
+                              req, self.req_id, ("a" * 260), body)
+            self.assertFalse(metadata_update.called)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
 
     def test_update_item_too_many_keys(self):
         self.stubs.Set(cinder.db, 'volume_metadata_update',
@@ -516,9 +609,11 @@ class volumeMetaDataTest(test.TestCase):
                           self.controller.update, req, self.req_id, 'bad',
                           body)
 
-    def test_invalid_metadata_items_on_create(self):
-        self.stubs.Set(cinder.db, 'volume_metadata_update',
-                       return_create_volume_metadata)
+    @mock.patch.object(cinder.db, 'volume_metadata_update')
+    def test_invalid_metadata_items_on_create(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_create_volume_metadata
         req = fakes.HTTPRequest.blank(self.url)
         req.method = 'POST'
         req.headers["content-type"] = "application/json"
@@ -526,17 +621,32 @@ class volumeMetaDataTest(test.TestCase):
         # test for long key
         data = {"metadata": {"a" * 260: "value1"}}
         req.body = jsonutils.dumps(data)
-        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
-                          self.controller.create, req, self.req_id, data)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+                              self.controller.create, req, self.req_id, data)
 
         # test for long value
         data = {"metadata": {"key": "v" * 260}}
         req.body = jsonutils.dumps(data)
-        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
-                          self.controller.create, req, self.req_id, data)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+                              self.controller.create, req, self.req_id, data)
 
         # test for empty key.
         data = {"metadata": {"": "value1"}}
         req.body = jsonutils.dumps(data)
-        self.assertRaises(webob.exc.HTTPBadRequest,
-                          self.controller.create, req, self.req_id, data)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPBadRequest,
+                              self.controller.create, req, self.req_id, data)
index 85868b44f3cf2b8c6e48d74f53b7e93408e3b800..de6fa15c6ad787d56bb27618554e41f661d9062a 100644 (file)
@@ -15,6 +15,7 @@
 
 import uuid
 
+import mock
 from oslo_config import cfg
 from oslo_serialization import jsonutils
 import webob
@@ -201,26 +202,61 @@ class volumeMetaDataTest(test.TestCase):
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.show, req, self.req_id, 'key6')
 
-    def test_delete(self):
-        self.stubs.Set(db, 'volume_metadata_get',
-                       return_volume_metadata)
-        self.stubs.Set(db, 'volume_metadata_delete',
-                       delete_volume_metadata)
+    @mock.patch.object(db, 'volume_metadata_delete')
+    @mock.patch.object(db, 'volume_metadata_get')
+    def test_delete(self, metadata_get, metadata_delete):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_get.side_effect = return_volume_metadata
+        metadata_delete.side_effect = delete_volume_metadata
         req = fakes.HTTPRequest.blank(self.url + '/key2')
         req.method = 'DELETE'
-        res = self.controller.delete(req, self.req_id, 'key2')
-
-        self.assertEqual(200, res.status_int)
-
-    def test_delete_nonexistent_volume(self):
-        self.stubs.Set(db, 'volume_metadata_get',
-                       return_volume_metadata)
-        self.stubs.Set(db, 'volume_metadata_delete',
-                       return_volume_nonexistent)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res = self.controller.delete(req, self.req_id, 'key2')
+            self.assertEqual(200, res.status_int)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
+
+    @mock.patch.object(db, 'volume_metadata_delete')
+    @mock.patch.object(db, 'volume_metadata_get')
+    def test_delete_volume_maintenance(self, metadata_get, metadata_delete):
+        fake_volume = {'id': self.req_id, 'status': 'maintenance'}
+        fake_context = mock.Mock()
+        metadata_get.side_effect = return_volume_metadata
+        metadata_delete.side_effect = delete_volume_metadata
+        req = fakes.HTTPRequest.blank(self.url + '/key2')
+        req.method = 'DELETE'
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(exception.InvalidVolume,
+                              self.controller.delete, req,
+                              self.req_id, 'key2')
+            get_volume.assert_called_once_with(fake_context, self.req_id)
+
+    @mock.patch.object(db, 'volume_metadata_delete')
+    @mock.patch.object(db, 'volume_metadata_get')
+    def test_delete_nonexistent_volume(self, metadata_get, metadata_delete):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_get.side_effect = return_volume_metadata
+        metadata_delete.side_effect = return_volume_nonexistent
         req = fakes.HTTPRequest.blank(self.url + '/key1')
         req.method = 'DELETE'
-        self.assertRaises(webob.exc.HTTPNotFound,
-                          self.controller.delete, req, self.req_id, 'key1')
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPNotFound,
+                              self.controller.delete, req,
+                              self.req_id, 'key1')
+            get_volume.assert_called_once_with(fake_context, self.req_id)
 
     def test_delete_meta_not_found(self):
         self.stubs.Set(db, 'volume_metadata_get',
@@ -230,12 +266,13 @@ class volumeMetaDataTest(test.TestCase):
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.delete, req, self.req_id, 'key6')
 
-    def test_create(self):
-        self.stubs.Set(db, 'volume_metadata_get',
-                       return_empty_volume_metadata)
-        self.stubs.Set(db, 'volume_metadata_update',
-                       return_create_volume_metadata)
-
+    @mock.patch.object(db, 'volume_metadata_update')
+    @mock.patch.object(db, 'volume_metadata_get')
+    def test_create(self, metadata_get, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_get.side_effect = return_empty_volume_metadata
+        metadata_update.side_effect = return_create_volume_metadata
         req = fakes.HTTPRequest.blank('/v2/volume_metadata')
         req.method = 'POST'
         req.content_type = "application/json"
@@ -243,16 +280,47 @@ class volumeMetaDataTest(test.TestCase):
                              "key2": "value2",
                              "key3": "value3", }}
         req.body = jsonutils.dumps(body)
-        res_dict = self.controller.create(req, self.req_id, body)
-        self.assertEqual(body, res_dict)
-
-    def test_create_with_keys_in_uppercase_and_lowercase(self):
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res_dict = self.controller.create(req, self.req_id, body)
+            self.assertEqual(body, res_dict)
+
+    @mock.patch.object(db, 'volume_metadata_update')
+    @mock.patch.object(db, 'volume_metadata_get')
+    def test_create_volume_maintenance(self, metadata_get, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'maintenance'}
+        fake_context = mock.Mock()
+        metadata_get.side_effect = return_empty_volume_metadata
+        metadata_update.side_effect = return_create_volume_metadata
+        req = fakes.HTTPRequest.blank('/v2/volume_metadata')
+        req.method = 'POST'
+        req.content_type = "application/json"
+        body = {"metadata": {"key1": "value1",
+                             "key2": "value2",
+                             "key3": "value3", }}
+        req.body = jsonutils.dumps(body)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(exception.InvalidVolume,
+                              self.controller.create,
+                              req, self.req_id, body)
+
+    @mock.patch.object(db, 'volume_metadata_update')
+    @mock.patch.object(db, 'volume_metadata_get')
+    def test_create_with_keys_in_uppercase_and_lowercase(self, metadata_get,
+                                                         metadata_update):
         # if the keys in uppercase_and_lowercase, should return the one
         # which server added
-        self.stubs.Set(db, 'volume_metadata_get',
-                       return_empty_volume_metadata)
-        self.stubs.Set(db, 'volume_metadata_update',
-                       return_create_volume_metadata_insensitive)
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_get.side_effect = return_empty_volume_metadata
+        metadata_update.side_effect = return_create_volume_metadata_insensitive
 
         req = fakes.HTTPRequest.blank('/v2/volume_metadata')
         req.method = 'POST'
@@ -268,8 +336,13 @@ class volumeMetaDataTest(test.TestCase):
                                  "key3": "value3",
                                  "KEY4": "value4"}}
         req.body = jsonutils.dumps(body)
-        res_dict = self.controller.create(req, self.req_id, body)
-        self.assertEqual(expected, res_dict)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res_dict = self.controller.create(req, self.req_id, body)
+            self.assertEqual(expected, res_dict)
 
     def test_create_empty_body(self):
         self.stubs.Set(db, 'volume_metadata_update',
@@ -322,9 +395,11 @@ class volumeMetaDataTest(test.TestCase):
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.create, req, self.req_id, body)
 
-    def test_update_all(self):
-        self.stubs.Set(db, 'volume_metadata_update',
-                       return_new_volume_metadata)
+    @mock.patch.object(db, 'volume_metadata_update')
+    def test_update_all(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_new_volume_metadata
         req = fakes.HTTPRequest.blank(self.url)
         req.method = 'PUT'
         req.content_type = "application/json"
@@ -336,15 +411,51 @@ class volumeMetaDataTest(test.TestCase):
             },
         }
         req.body = jsonutils.dumps(expected)
-        res_dict = self.controller.update_all(req, self.req_id, expected)
-
-        self.assertEqual(expected, res_dict)
-
-    def test_update_all_with_keys_in_uppercase_and_lowercase(self):
-        self.stubs.Set(db, 'volume_metadata_get',
-                       return_create_volume_metadata)
-        self.stubs.Set(db, 'volume_metadata_update',
-                       return_new_volume_metadata)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res_dict = self.controller.update_all(req, self.req_id, expected)
+            self.assertEqual(expected, res_dict)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
+
+    @mock.patch.object(db, 'volume_metadata_update')
+    def test_update_all_volume_maintenance(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'maintenance'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_new_volume_metadata
+        req = fakes.HTTPRequest.blank(self.url)
+        req.method = 'PUT'
+        req.content_type = "application/json"
+        expected = {
+            'metadata': {
+                'key10': 'value10',
+                'key99': 'value99',
+                'KEY20': 'value20',
+            },
+        }
+        req.body = jsonutils.dumps(expected)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(exception.InvalidVolume,
+                              self.controller.update_all, req,
+                              self.req_id, expected)
+            self.assertFalse(metadata_update.called)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
+
+    @mock.patch.object(db, 'volume_metadata_update')
+    @mock.patch.object(db, 'volume_metadata_get')
+    def test_update_all_with_keys_in_uppercase_and_lowercase(self,
+                                                             metadata_get,
+                                                             metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_get.side_effect = return_create_volume_metadata
+        metadata_update.side_effect = return_new_volume_metadata
         req = fakes.HTTPRequest.blank(self.url)
         req.method = 'PUT'
         req.content_type = "application/json"
@@ -364,21 +475,33 @@ class volumeMetaDataTest(test.TestCase):
             },
         }
         req.body = jsonutils.dumps(expected)
-        res_dict = self.controller.update_all(req, self.req_id, body)
-
-        self.assertEqual(expected, res_dict)
-
-    def test_update_all_empty_container(self):
-        self.stubs.Set(db, 'volume_metadata_update',
-                       return_empty_container_metadata)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res_dict = self.controller.update_all(req, self.req_id, body)
+            self.assertEqual(expected, res_dict)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
+
+    @mock.patch.object(db, 'volume_metadata_update')
+    def test_update_all_empty_container(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_empty_container_metadata
         req = fakes.HTTPRequest.blank(self.url)
         req.method = 'PUT'
         req.content_type = "application/json"
         expected = {'metadata': {}}
         req.body = jsonutils.dumps(expected)
-        res_dict = self.controller.update_all(req, self.req_id, expected)
+        req.environ['cinder.context'] = fake_context
 
-        self.assertEqual(expected, res_dict)
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res_dict = self.controller.update_all(req, self.req_id, expected)
+            self.assertEqual(expected, res_dict)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
 
     def test_update_all_malformed_container(self):
         self.stubs.Set(db, 'volume_metadata_update',
@@ -417,17 +540,46 @@ class volumeMetaDataTest(test.TestCase):
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.update_all, req, '100', body)
 
-    def test_update_item(self):
-        self.stubs.Set(db, 'volume_metadata_update',
-                       return_create_volume_metadata)
+    @mock.patch.object(db, 'volume_metadata_update')
+    def test_update_item(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_create_volume_metadata
         req = fakes.HTTPRequest.blank(self.url + '/key1')
         req.method = 'PUT'
         body = {"meta": {"key1": "value1"}}
         req.body = jsonutils.dumps(body)
         req.headers["content-type"] = "application/json"
-        res_dict = self.controller.update(req, self.req_id, 'key1', body)
-        expected = {'meta': {'key1': 'value1'}}
-        self.assertEqual(expected, res_dict)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            res_dict = self.controller.update(req, self.req_id, 'key1', body)
+            expected = {'meta': {'key1': 'value1'}}
+            self.assertEqual(expected, res_dict)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
+
+    @mock.patch.object(db, 'volume_metadata_update')
+    def test_update_item_volume_maintenance(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'maintenance'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_create_volume_metadata
+        req = fakes.HTTPRequest.blank(self.url + '/key1')
+        req.method = 'PUT'
+        body = {"meta": {"key1": "value1"}}
+        req.body = jsonutils.dumps(body)
+        req.headers["content-type"] = "application/json"
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(exception.InvalidVolume,
+                              self.controller.update, req,
+                              self.req_id, 'key1', body)
+            self.assertFalse(metadata_update.called)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
 
     def test_update_item_nonexistent_volume(self):
         self.stubs.Set(db, 'volume_get',
@@ -453,43 +605,68 @@ class volumeMetaDataTest(test.TestCase):
                           self.controller.update, req, self.req_id, 'key1',
                           None)
 
-    def test_update_item_empty_key(self):
-        self.stubs.Set(db, 'volume_metadata_update',
-                       return_create_volume_metadata)
+    @mock.patch.object(db, 'volume_metadata_update')
+    def test_update_item_empty_key(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_create_volume_metadata
         req = fakes.HTTPRequest.blank(self.url + '/key1')
         req.method = 'PUT'
         body = {"meta": {"": "value1"}}
         req.body = jsonutils.dumps(body)
         req.headers["content-type"] = "application/json"
-
-        self.assertRaises(webob.exc.HTTPBadRequest,
-                          self.controller.update, req, self.req_id, '', body)
-
-    def test_update_item_key_too_long(self):
-        self.stubs.Set(db, 'volume_metadata_update',
-                       return_create_volume_metadata)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPBadRequest,
+                              self.controller.update, req, self.req_id,
+                              '', body)
+            self.assertFalse(metadata_update.called)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
+
+    @mock.patch.object(db, 'volume_metadata_update')
+    def test_update_item_key_too_long(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_create_volume_metadata
         req = fakes.HTTPRequest.blank(self.url + '/key1')
         req.method = 'PUT'
         body = {"meta": {("a" * 260): "value1"}}
         req.body = jsonutils.dumps(body)
         req.headers["content-type"] = "application/json"
-
-        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
-                          self.controller.update,
-                          req, self.req_id, ("a" * 260), body)
-
-    def test_update_item_value_too_long(self):
-        self.stubs.Set(db, 'volume_metadata_update',
-                       return_create_volume_metadata)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+                              self.controller.update,
+                              req, self.req_id, ("a" * 260), body)
+            self.assertFalse(metadata_update.called)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
+
+    @mock.patch.object(db, 'volume_metadata_update')
+    def test_update_item_value_too_long(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_create_volume_metadata
         req = fakes.HTTPRequest.blank(self.url + '/key1')
         req.method = 'PUT'
         body = {"meta": {"key1": ("a" * 260)}}
         req.body = jsonutils.dumps(body)
         req.headers["content-type"] = "application/json"
+        req.environ['cinder.context'] = fake_context
 
-        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
-                          self.controller.update,
-                          req, self.req_id, "key1", body)
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+                              self.controller.update,
+                              req, self.req_id, "key1", body)
+            self.assertFalse(metadata_update.called)
+            get_volume.assert_called_once_with(fake_context, self.req_id)
 
     def test_update_item_too_many_keys(self):
         self.stubs.Set(db, 'volume_metadata_update',
@@ -517,9 +694,11 @@ class volumeMetaDataTest(test.TestCase):
                           self.controller.update, req, self.req_id, 'bad',
                           body)
 
-    def test_invalid_metadata_items_on_create(self):
-        self.stubs.Set(db, 'volume_metadata_update',
-                       return_create_volume_metadata)
+    @mock.patch.object(db, 'volume_metadata_update')
+    def test_invalid_metadata_items_on_create(self, metadata_update):
+        fake_volume = {'id': self.req_id, 'status': 'available'}
+        fake_context = mock.Mock()
+        metadata_update.side_effect = return_create_volume_metadata
         req = fakes.HTTPRequest.blank(self.url)
         req.method = 'POST'
         req.headers["content-type"] = "application/json"
@@ -527,17 +706,32 @@ class volumeMetaDataTest(test.TestCase):
         # test for long key
         data = {"metadata": {"a" * 260: "value1"}}
         req.body = jsonutils.dumps(data)
-        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
-                          self.controller.create, req, self.req_id, data)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+                              self.controller.create, req, self.req_id, data)
 
         # test for long value
         data = {"metadata": {"key": "v" * 260}}
         req.body = jsonutils.dumps(data)
-        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
-                          self.controller.create, req, self.req_id, data)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+                              self.controller.create, req, self.req_id, data)
 
         # test for empty key.
         data = {"metadata": {"": "value1"}}
         req.body = jsonutils.dumps(data)
-        self.assertRaises(webob.exc.HTTPBadRequest,
-                          self.controller.create, req, self.req_id, data)
+        req.environ['cinder.context'] = fake_context
+
+        with mock.patch.object(self.controller.volume_api,
+                               'get') as get_volume:
+            get_volume.return_value = fake_volume
+            self.assertRaises(webob.exc.HTTPBadRequest,
+                              self.controller.create, req, self.req_id, data)
index 724a3c4d37eaf2fd960b020ca2d590e22be3855c..73697464c53b2b6f8ea0548f89357f4230bfd1f3 100644 (file)
@@ -161,33 +161,39 @@ class VolumeApiTest(test.TestCase):
             metadata=None,
             attachments=None,
             volume_type=stubs.DEFAULT_VOL_TYPE,
-            status=stubs.DEFAULT_VOL_STATUS):
+            status=stubs.DEFAULT_VOL_STATUS,
+            with_migration_status=False):
         metadata = metadata or {}
         attachments = attachments or []
-        return {'volume':
-                {'attachments': attachments,
-                 'availability_zone': availability_zone,
-                 'bootable': 'false',
-                 'consistencygroup_id': consistencygroup_id,
-                 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
-                 'description': description,
-                 'id': stubs.DEFAULT_VOL_ID,
-                 'links':
-                 [{'href': 'http://localhost/v2/fakeproject/volumes/1',
-                   'rel': 'self'},
-                  {'href': 'http://localhost/fakeproject/volumes/1',
-                   'rel': 'bookmark'}],
-                 'metadata': metadata,
-                 'name': name,
-                 'replication_status': 'disabled',
-                 'multiattach': False,
-                 'size': size,
-                 'snapshot_id': snapshot_id,
-                 'source_volid': source_volid,
-                 'status': status,
-                 'user_id': 'fakeuser',
-                 'volume_type': volume_type,
-                 'encrypted': False}}
+        volume = {'volume':
+                  {'attachments': attachments,
+                   'availability_zone': availability_zone,
+                   'bootable': 'false',
+                   'consistencygroup_id': consistencygroup_id,
+                   'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
+                   'description': description,
+                   'id': stubs.DEFAULT_VOL_ID,
+                   'links':
+                   [{'href': 'http://localhost/v2/fakeproject/volumes/1',
+                     'rel': 'self'},
+                    {'href': 'http://localhost/fakeproject/volumes/1',
+                     'rel': 'bookmark'}],
+                   'metadata': metadata,
+                   'name': name,
+                   'replication_status': 'disabled',
+                   'multiattach': False,
+                   'size': size,
+                   'snapshot_id': snapshot_id,
+                   'source_volid': source_volid,
+                   'status': status,
+                   'user_id': 'fakeuser',
+                   'volume_type': volume_type,
+                   'encrypted': False}}
+
+        if with_migration_status:
+            volume['volume']['migration_status'] = None
+
+        return volume
 
     def _expected_volume_api_create_kwargs(self, snapshot=None,
                                            availability_zone=DEFAULT_AZ,
@@ -652,7 +658,8 @@ class VolumeApiTest(test.TestCase):
                 'host_name': None,
                 'device': '/',
             }],
-            metadata={'key': 'value', 'readonly': 'True'})
+            metadata={'key': 'value', 'readonly': 'True'},
+            with_migration_status=True)
         self.assertEqual(expected, res_dict)
         self.assertEqual(2, len(self.notifier.notifications))
         self.assertTrue(mock_validate.called)
@@ -758,7 +765,8 @@ class VolumeApiTest(test.TestCase):
                           'host_name': None,
                           'id': '1',
                           'volume_id': stubs.DEFAULT_VOL_ID}],
-            metadata={'key': 'value', 'readonly': 'True'})
+            metadata={'key': 'value', 'readonly': 'True'},
+            with_migration_status=True)
         expected = {'volumes': [exp_vol['volume']]}
         self.assertEqual(expected, res_dict)
 
@@ -1174,7 +1182,8 @@ class VolumeApiTest(test.TestCase):
                 'server_id': stubs.FAKE_UUID,
                 'host_name': None,
                 'device': '/'}],
-            metadata={'key': 'value', 'readonly': 'True'})
+            metadata={'key': 'value', 'readonly': 'True'},
+            with_migration_status=True)
         self.assertEqual(expected, res_dict)
 
     def test_volume_show_with_encrypted_volume(self):
index 563f434a3128e56cd73f966023fec73b4ee8c9e0..8e1e8cca7a35d769af52b03db5837e22e9f10d64 100644 (file)
@@ -28,6 +28,7 @@ from cinder.scheduler import filter_scheduler
 from cinder.scheduler import manager
 from cinder import test
 from cinder.tests.unit import fake_consistencygroup
+from cinder.tests.unit import utils as tests_utils
 
 CONF = cfg.CONF
 
@@ -47,7 +48,7 @@ class SchedulerManagerTestCase(test.TestCase):
         self.flags(scheduler_driver=self.driver_cls_name)
         self.manager = self.manager_cls()
         self.manager._startup_delay = False
-        self.context = context.RequestContext('fake_user', 'fake_project')
+        self.context = context.get_admin_context()
         self.topic = 'fake_topic'
         self.fake_args = (1, 2, 3)
         self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
@@ -171,16 +172,42 @@ class SchedulerManagerTestCase(test.TestCase):
                                                    {})
         self.assertFalse(_mock_sleep.called)
 
+    @mock.patch('cinder.db.volume_get')
     @mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters')
     @mock.patch('cinder.db.volume_update')
     def test_migrate_volume_exception_returns_volume_state(
-            self, _mock_volume_update, _mock_host_passes):
+            self, _mock_volume_update, _mock_host_passes,
+            _mock_volume_get):
         # Test NoValidHost exception behavior for migrate_volume_to_host.
         # Puts the volume in 'error_migrating' state and eats the exception.
-        _mock_host_passes.side_effect = exception.NoValidHost(reason="")
-        fake_volume_id = 1
+        fake_updates = {'migration_status': 'error'}
+        self._test_migrate_volume_exception_returns_volume_state(
+            _mock_volume_update, _mock_host_passes, _mock_volume_get,
+            'available', fake_updates)
+
+    @mock.patch('cinder.db.volume_get')
+    @mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters')
+    @mock.patch('cinder.db.volume_update')
+    def test_migrate_volume_exception_returns_volume_state_maintenance(
+            self, _mock_volume_update, _mock_host_passes,
+            _mock_volume_get):
+        fake_updates = {'status': 'available',
+                        'migration_status': 'error'}
+        self._test_migrate_volume_exception_returns_volume_state(
+            _mock_volume_update, _mock_host_passes, _mock_volume_get,
+            'maintenance', fake_updates)
+
+    def _test_migrate_volume_exception_returns_volume_state(
+            self, _mock_volume_update, _mock_host_passes,
+            _mock_volume_get, status, fake_updates):
+        volume = tests_utils.create_volume(self.context,
+                                           status=status,
+                                           previous_status='available')
+        fake_volume_id = volume.id
         topic = 'fake_topic'
         request_spec = {'volume_id': fake_volume_id}
+        _mock_host_passes.side_effect = exception.NoValidHost(reason="")
+        _mock_volume_get.return_value = volume
 
         self.manager.migrate_volume_to_host(self.context, topic,
                                             fake_volume_id, 'host', True,
@@ -188,7 +215,7 @@ class SchedulerManagerTestCase(test.TestCase):
                                             filter_properties={})
         _mock_volume_update.assert_called_once_with(self.context,
                                                     fake_volume_id,
-                                                    {'migration_status': None})
+                                                    fake_updates)
         _mock_host_passes.assert_called_once_with(self.context, 'host',
                                                   request_spec, {})
 
@@ -198,24 +225,24 @@ class SchedulerManagerTestCase(test.TestCase):
                                                           _mock_vol_update):
         # Test NoValidHost exception behavior for retype.
         # Puts the volume in original state and eats the exception.
-        fake_volume_id = 1
+        volume = tests_utils.create_volume(self.context,
+                                           status='retyping',
+                                           previous_status='in-use')
+        instance_uuid = '12345678-1234-5678-1234-567812345678'
+        volume = tests_utils.attach_volume(self.context, volume['id'],
+                                           instance_uuid, None, '/dev/fake')
+        fake_volume_id = volume.id
         topic = 'fake_topic'
-        volume_id = fake_volume_id
         request_spec = {'volume_id': fake_volume_id, 'volume_type': {'id': 3},
                         'migration_policy': 'on-demand'}
-        vol_info = {'id': fake_volume_id, 'status': 'in-use',
-                    'volume_attachment': [{'id': 'fake_id',
-                                           'instance_uuid': 'foo',
-                                           'attached_host': None}]}
-
-        _mock_vol_get.return_value = vol_info
+        _mock_vol_get.return_value = volume
         _mock_vol_update.return_value = {'status': 'in-use'}
         _mock_find_retype_host = mock.Mock(
             side_effect=exception.NoValidHost(reason=""))
         orig_retype = self.manager.driver.find_retype_host
         self.manager.driver.find_retype_host = _mock_find_retype_host
 
-        self.manager.retype(self.context, topic, volume_id,
+        self.manager.retype(self.context, topic, fake_volume_id,
                             request_spec=request_spec,
                             filter_properties={})
 
index de71c9a964c5de5310810e81275ac9d4f50833a1..4940b4f3c886314183410a1b327be85523b7d3d0 100644 (file)
@@ -233,6 +233,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         self._clear_patch = mock.patch('cinder.volume.utils.clear_volume',
                                        autospec=True)
         self._clear_patch.start()
+        self.expected_status = 'available'
 
     def tearDown(self):
         super(VolumeTestCase, self).tearDown()
@@ -639,6 +640,22 @@ class VolumeTestCase(BaseVolumeTestCase):
                           False,
                           FAKE_METADATA_TYPE.fake_type)
 
+    def test_update_volume_metadata_maintenance(self):
+        """Test that updating volume metadata fails in maintenance mode."""
+        test_meta1 = {'fake_key1': 'fake_value1'}
+        FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
+        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
+                                           **self.volume_params)
+        # Mark the volume as out of service; the API must reject the update.
+        volume['status'] = 'maintenance'
+        volume_api = cinder.volume.api.API()
+        self.assertRaises(exception.InvalidVolume,
+                          volume_api.update_volume_metadata,
+                          self.context,
+                          volume,
+                          test_meta1,
+                          False,
+                          FAKE_METADATA_TYPE.fake_type)
+
     def test_delete_volume_metadata_with_metatype(self):
         """Test delete volume metadata with different metadata type."""
         test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
@@ -693,6 +710,85 @@ class VolumeTestCase(BaseVolumeTestCase):
                           'fake_key1',
                           FAKE_METADATA_TYPE.fake_type)
 
+    def test_delete_volume_metadata_maintenance(self):
+        """Test that deleting volume metadata fails in maintenance mode."""
+        FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
+        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
+        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
+                                           **self.volume_params)
+        # Mark the volume as out of service; metadata deletion must be
+        # rejected with InvalidVolume.
+        volume['status'] = 'maintenance'
+        volume_api = cinder.volume.api.API()
+        self.assertRaises(exception.InvalidVolume,
+                          volume_api.delete_volume_metadata,
+                          self.context,
+                          volume,
+                          'fake_key1',
+                          FAKE_METADATA_TYPE.fake_type)
+
+    def test_volume_attach_in_maintenance(self):
+        """Test that attaching a volume fails in maintenance mode."""
+        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
+        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
+                                           **self.volume_params)
+        # A volume in maintenance is out of service: attach must raise.
+        volume['status'] = 'maintenance'
+        volume_api = cinder.volume.api.API()
+        self.assertRaises(exception.InvalidVolume,
+                          volume_api.attach,
+                          self.context,
+                          volume, None, None, None, None)
+
+    def test_volume_detach_in_maintenance(self):
+        """Test that detaching a volume fails in maintenance mode."""
+        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
+        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
+                                           **self.volume_params)
+        # A volume in maintenance is out of service: detach must raise.
+        volume['status'] = 'maintenance'
+        volume_api = cinder.volume.api.API()
+        self.assertRaises(exception.InvalidVolume,
+                          volume_api.detach,
+                          self.context,
+                          volume, None)
+
+    def test_initialize_connection_maintenance(self):
+        """Test that initialize_connection fails in maintenance mode."""
+        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
+        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
+                                           **self.volume_params)
+        # Connections cannot be initialized while the volume is out of
+        # service in maintenance mode.
+        volume['status'] = 'maintenance'
+        volume_api = cinder.volume.api.API()
+        self.assertRaises(exception.InvalidVolume,
+                          volume_api.initialize_connection,
+                          self.context,
+                          volume,
+                          None)
+
+    def test_accept_transfer_maintenance(self):
+        """Test that accepting a transfer fails in maintenance mode."""
+        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
+        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
+                                           **self.volume_params)
+        # Ownership transfer must be rejected while in maintenance.
+        volume['status'] = 'maintenance'
+        volume_api = cinder.volume.api.API()
+        self.assertRaises(exception.InvalidVolume,
+                          volume_api.accept_transfer,
+                          self.context,
+                          volume,
+                          None, None)
+
+    def test_copy_volume_to_image_maintenance(self):
+        """Test that copy volume to image fails in maintenance mode."""
+        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
+        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
+                                           **self.volume_params)
+        # Uploading to an image must be rejected while in maintenance,
+        # even with force=True.
+        volume['status'] = 'maintenance'
+        volume_api = cinder.volume.api.API()
+        self.assertRaises(exception.InvalidVolume,
+                          volume_api.copy_volume_to_image,
+                          self.context,
+                          volume,
+                          test_meta1,
+                          force=True)
+
     @mock.patch.object(cinder.volume.api.API, 'list_availability_zones')
     def test_create_volume_uses_default_availability_zone(self, mock_list_az):
         """Test setting availability_zone correctly during volume create."""
@@ -2815,10 +2911,16 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertTrue(volume_get.called)
         self.assertTrue(volume_update.called)
 
-    def test_reserve_volume_bad_status(self):
+    def test_reserve_volume_in_attaching(self):
+        # 'attaching' is an invalid status for reserving a volume; the
+        # shared helper asserts the reservation is rejected.
+        self._test_reserve_volume_bad_status('attaching')
+
+    def test_reserve_volume_in_maintenance(self):
+        # 'maintenance' (volume out of service) is likewise an invalid
+        # status for reserving a volume.
+        self._test_reserve_volume_bad_status('maintenance')
+
+    def _test_reserve_volume_bad_status(self, status):
         fake_volume = {
             'id': self.FAKE_UUID,
-            'status': 'attaching'
+            'status': status
         }
 
         with mock.patch.object(db, 'volume_get') as mock_volume_get:
@@ -3017,6 +3119,21 @@ class VolumeTestCase(BaseVolumeTestCase):
                           'fake_name',
                           'fake_description')
 
+    def test_create_snapshot_failed_maintenance(self):
+        """Test exception handling when creating a snapshot in maintenance."""
+        test_volume = tests_utils.create_volume(
+            self.context,
+            **self.volume_params)
+        self.volume.create_volume(self.context, test_volume['id'])
+        # Flip the status to maintenance: snapshotting an out-of-service
+        # volume must raise InvalidVolume.
+        test_volume['status'] = 'maintenance'
+        volume_api = cinder.volume.api.API()
+        self.assertRaises(exception.InvalidVolume,
+                          volume_api.create_snapshot,
+                          self.context,
+                          test_volume,
+                          'fake_name',
+                          'fake_description')
+
     @mock.patch.object(QUOTAS, 'commit',
                        side_effect=exception.QuotaError(
                            'Snapshot quota commit failed!'))
@@ -3037,11 +3154,19 @@ class VolumeTestCase(BaseVolumeTestCase):
                           'fake_description')
 
     def test_cannot_delete_volume_in_use(self):
+        """Test volume can't be deleted in in-use status."""
+        self._test_cannot_delete_volume('in-use')
+
+    def test_cannot_delete_volume_maintenance(self):
+        """Test volume can't be deleted in maintenance status."""
+        self._test_cannot_delete_volume('maintenance')
+
+    def _test_cannot_delete_volume(self, status):
         """Test volume can't be deleted in invalid stats."""
         # create a volume and assign to host
         volume = tests_utils.create_volume(self.context, **self.volume_params)
         self.volume.create_volume(self.context, volume['id'])
-        volume['status'] = 'in-use'
+        volume['status'] = status
         volume['host'] = 'fakehost'
 
         volume_api = cinder.volume.api.API()
@@ -3673,6 +3798,12 @@ class VolumeTestCase(BaseVolumeTestCase):
         volume_api.begin_detaching(self.context, volume)
         volume_get.assert_called_once_with(self.context, volume['id'])
 
+        volume_get.reset_mock()
+        volume['status'] = "maintenance"
+        self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching,
+                          self.context, volume)
+        volume_get.assert_called_once_with(self.context, volume['id'])
+
     def test_begin_roll_detaching_volume(self):
         """Test begin_detaching and roll_detaching functions."""
 
@@ -3702,6 +3833,16 @@ class VolumeTestCase(BaseVolumeTestCase):
         vol = db.volume_get(context.get_admin_context(), volume['id'])
         self.assertEqual('test update name', vol['display_name'])
 
+    def test_volume_api_update_maintenance(self):
+        """Test that volume.api.update() is rejected in maintenance mode."""
+        # create a raw vol
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
+        volume['status'] = 'maintenance'
+        # use volume.api to update name; API.update raises InvalidVolume
+        # for volumes in maintenance
+        volume_api = cinder.volume.api.API()
+        update_dict = {'display_name': 'test update name'}
+        self.assertRaises(exception.InvalidVolume, volume_api.update,
+                          self.context, volume, update_dict)
+
     def test_volume_api_update_snapshot(self):
         # create raw snapshot
         volume = tests_utils.create_volume(self.context, **self.volume_params)
@@ -4073,14 +4214,14 @@ class VolumeTestCase(BaseVolumeTestCase):
         # check volume properties
         volume = db.volume_get(context.get_admin_context(), volume['id'])
         self.assertEqual('newhost', volume['host'])
-        self.assertIsNone(volume['migration_status'])
+        self.assertEqual('success', volume['migration_status'])
 
-    def test_migrate_volume_error(self):
-        def fake_create_volume(ctxt, volume, host, req_spec, filters,
-                               allow_reschedule=True):
-            db.volume_update(ctxt, volume['id'],
-                             {'status': 'available'})
+    def _fake_create_volume(self, ctxt, volume, host, req_spec, filters,
+                            allow_reschedule=True):
+        # Shared mock side_effect standing in for the RPC create_volume
+        # call during migration tests: instead of creating anything it
+        # just sets the volume status to self.expected_status
+        # ('available' by default from setUp; some error-path tests
+        # override it to 'error' or 'creating').
+        return db.volume_update(ctxt, volume['id'],
+                                {'status': self.expected_status})
 
+    def test_migrate_volume_error(self):
         with mock.patch.object(self.volume.driver, 'migrate_volume') as \
                 mock_migrate,\
                 mock.patch.object(self.volume.driver, 'create_export') as \
@@ -4099,7 +4240,7 @@ class VolumeTestCase(BaseVolumeTestCase):
                               host_obj,
                               False)
             volume = db.volume_get(context.get_admin_context(), volume['id'])
-            self.assertIsNone(volume['migration_status'])
+            self.assertEqual('error', volume['migration_status'])
             self.assertEqual('available', volume['status'])
 
     @mock.patch.object(nova.API, 'update_server_volume')
@@ -4168,35 +4309,25 @@ class VolumeTestCase(BaseVolumeTestCase):
         fake_volume = tests_utils.create_volume(self.context, size=1,
                                                 host=CONF.host)
 
-        def fake_create_volume(ctxt, volume, host, req_spec, filters,
-                               allow_reschedule=True):
-            db.volume_update(ctxt, volume['id'],
-                             {'status': 'available'})
-
         host_obj = {'host': 'newhost', 'capabilities': {}}
         with mock.patch.object(self.volume.driver, 'migrate_volume') as \
                 mock_migrate_volume,\
                 mock.patch.object(self.volume.driver, 'copy_volume_data'), \
                 mock.patch.object(self.volume.driver, 'delete_volume') as \
                 delete_volume:
-            create_volume.side_effect = fake_create_volume
+            create_volume.side_effect = self._fake_create_volume
             self.volume.migrate_volume(self.context, fake_volume['id'],
                                        host_obj, True)
             volume = db.volume_get(context.get_admin_context(),
                                    fake_volume['id'])
             self.assertEqual('newhost', volume['host'])
-            self.assertIsNone(volume['migration_status'])
+            self.assertEqual('success', volume['migration_status'])
             self.assertFalse(mock_migrate_volume.called)
             self.assertFalse(delete_volume.called)
             self.assertTrue(rpc_delete_volume.called)
             self.assertTrue(update_migrated_volume.called)
 
     def test_migrate_volume_generic_copy_error(self):
-        def fake_create_volume(ctxt, volume, host, req_spec, filters,
-                               allow_reschedule=True):
-            db.volume_update(ctxt, volume['id'],
-                             {'status': 'available'})
-
         with mock.patch.object(self.volume.driver, 'migrate_volume'),\
                 mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\
                 as mock_create_volume,\
@@ -4208,7 +4339,7 @@ class VolumeTestCase(BaseVolumeTestCase):
 
             # Exception case at migrate_volume_generic
             # source_volume['migration_status'] is 'migrating'
-            mock_create_volume.side_effect = fake_create_volume
+            mock_create_volume.side_effect = self._fake_create_volume
             mock_copy_volume.side_effect = processutils.ProcessExecutionError
             volume = tests_utils.create_volume(self.context, size=0,
                                                host=CONF.host)
@@ -4220,7 +4351,7 @@ class VolumeTestCase(BaseVolumeTestCase):
                               host_obj,
                               True)
             volume = db.volume_get(context.get_admin_context(), volume['id'])
-            self.assertIsNone(volume['migration_status'])
+            self.assertEqual('error', volume['migration_status'])
             self.assertEqual('available', volume['status'])
 
     def test_clean_temporary_volume(self):
@@ -4268,11 +4399,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertIsNone(volume['migration_status'])
 
     def test_migrate_volume_generic_create_volume_error(self):
-        def fake_create_volume(ctxt, volume, host, req_spec, filters,
-                               allow_reschedule=True):
-            db.volume_update(ctxt, volume['id'],
-                             {'status': 'error'})
-
+        self.expected_status = 'error'
         with mock.patch.object(self.volume.driver, 'migrate_volume'), \
                 mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') as \
                 mock_create_volume, \
@@ -4280,7 +4407,7 @@ class VolumeTestCase(BaseVolumeTestCase):
                 clean_temporary_volume:
 
             # Exception case at the creation of the new temporary volume
-            mock_create_volume.side_effect = fake_create_volume
+            mock_create_volume.side_effect = self._fake_create_volume
             volume = tests_utils.create_volume(self.context, size=0,
                                                host=CONF.host)
             host_obj = {'host': 'newhost', 'capabilities': {}}
@@ -4291,18 +4418,14 @@ class VolumeTestCase(BaseVolumeTestCase):
                               host_obj,
                               True)
             volume = db.volume_get(context.get_admin_context(), volume['id'])
-            self.assertIsNone(volume['migration_status'])
+            self.assertEqual('error', volume['migration_status'])
             self.assertEqual('available', volume['status'])
             self.assertTrue(clean_temporary_volume.called)
+        self.expected_status = 'available'
 
     def test_migrate_volume_generic_timeout_error(self):
         CONF.set_override("migration_create_volume_timeout_secs", 2)
 
-        def fake_create_volume(ctxt, volume, host, req_spec, filters,
-                               allow_reschedule=True):
-            db.volume_update(ctxt, volume['id'],
-                             {'status': 'creating'})
-
         with mock.patch.object(self.volume.driver, 'migrate_volume'), \
                 mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') as \
                 mock_create_volume, \
@@ -4311,7 +4434,8 @@ class VolumeTestCase(BaseVolumeTestCase):
                 mock.patch.object(time, 'sleep'):
 
             # Exception case at the timeout of the volume creation
-            mock_create_volume.side_effect = fake_create_volume
+            self.expected_status = 'creating'
+            mock_create_volume.side_effect = self._fake_create_volume
             volume = tests_utils.create_volume(self.context, size=0,
                                                host=CONF.host)
             host_obj = {'host': 'newhost', 'capabilities': {}}
@@ -4322,16 +4446,12 @@ class VolumeTestCase(BaseVolumeTestCase):
                               host_obj,
                               True)
             volume = db.volume_get(context.get_admin_context(), volume['id'])
-            self.assertIsNone(volume['migration_status'])
+            self.assertEqual('error', volume['migration_status'])
             self.assertEqual('available', volume['status'])
             self.assertTrue(clean_temporary_volume.called)
+        self.expected_status = 'available'
 
     def test_migrate_volume_generic_create_export_error(self):
-        def fake_create_volume(ctxt, volume, host, req_spec, filters,
-                               allow_reschedule=True):
-            db.volume_update(ctxt, volume['id'],
-                             {'status': 'available'})
-
         with mock.patch.object(self.volume.driver, 'migrate_volume'),\
                 mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\
                 as mock_create_volume,\
@@ -4343,7 +4463,7 @@ class VolumeTestCase(BaseVolumeTestCase):
                 mock_create_export:
 
             # Exception case at create_export
-            mock_create_volume.side_effect = fake_create_volume
+            mock_create_volume.side_effect = self._fake_create_volume
             mock_copy_volume.side_effect = processutils.ProcessExecutionError
             mock_create_export.side_effect = processutils.ProcessExecutionError
             volume = tests_utils.create_volume(self.context, size=0,
@@ -4356,15 +4476,10 @@ class VolumeTestCase(BaseVolumeTestCase):
                               host_obj,
                               True)
             volume = db.volume_get(context.get_admin_context(), volume['id'])
-            self.assertIsNone(volume['migration_status'])
+            self.assertEqual('error', volume['migration_status'])
             self.assertEqual('available', volume['status'])
 
     def test_migrate_volume_generic_migrate_volume_completion_error(self):
-        def fake_create_volume(ctxt, volume, host, req_spec, filters,
-                               allow_reschedule=True):
-            db.volume_update(ctxt, volume['id'],
-                             {'status': 'available'})
-
         def fake_migrate_volume_completion(ctxt, volume_id, new_volume_id,
                                            error=False):
             db.volume_update(ctxt, volume['id'],
@@ -4382,7 +4497,7 @@ class VolumeTestCase(BaseVolumeTestCase):
 
             # Exception case at delete_volume
             # source_volume['migration_status'] is 'completing'
-            mock_create_volume.side_effect = fake_create_volume
+            mock_create_volume.side_effect = self._fake_create_volume
             mock_migrate_compl.side_effect = fake_migrate_volume_completion
             volume = tests_utils.create_volume(self.context, size=0,
                                                host=CONF.host)
@@ -4394,12 +4509,13 @@ class VolumeTestCase(BaseVolumeTestCase):
                               host_obj,
                               True)
             volume = db.volume_get(context.get_admin_context(), volume['id'])
-            self.assertIsNone(volume['migration_status'])
+            self.assertEqual('error', volume['migration_status'])
             self.assertEqual('available', volume['status'])
 
     def _test_migrate_volume_completion(self, status='available',
                                         instance_uuid=None, attached_host=None,
-                                        retyping=False):
+                                        retyping=False,
+                                        previous_status='available'):
         def fake_attach_volume(ctxt, volume, instance_uuid, host_name,
                                mountpoint, mode):
             tests_utils.attach_volume(ctxt, volume['id'],
@@ -4410,7 +4526,8 @@ class VolumeTestCase(BaseVolumeTestCase):
         old_volume = tests_utils.create_volume(self.context, size=0,
                                                host=CONF.host,
                                                status=initial_status,
-                                               migration_status='migrating')
+                                               migration_status='migrating',
+                                               previous_status=previous_status)
         attachment_id = None
         if status == 'in-use':
             vol = tests_utils.attach_volume(self.context, old_volume['id'],
@@ -4460,7 +4577,8 @@ class VolumeTestCase(BaseVolumeTestCase):
             'in-use',
             '83c969d5-065e-4c9c-907d-5394bc2e98e2',
             'some-host',
-            retyping=True)
+            retyping=True,
+            previous_status='in-use')
 
     def test_migrate_volume_completion_migrate_available(self):
         self._test_migrate_volume_completion()
@@ -4469,7 +4587,9 @@ class VolumeTestCase(BaseVolumeTestCase):
         self._test_migrate_volume_completion(
             'in-use',
             '83c969d5-065e-4c9c-907d-5394bc2e98e2',
-            'some-host')
+            'some-host',
+            retyping=False,
+            previous_status='in-use')
 
     def test_retype_setup_fail_volume_is_available(self):
         """Verify volume is still available if retype prepare failed."""
@@ -4513,6 +4633,7 @@ class VolumeTestCase(BaseVolumeTestCase):
                                            host=CONF.host, status='retyping',
                                            volume_type_id=old_vol_type['id'],
                                            replication_status=rep_status)
+        volume['previous_status'] = 'available'
         if snap:
             self._create_snapshot(volume['id'], size=volume['size'])
         if driver or diff_equal:
@@ -4528,27 +4649,30 @@ class VolumeTestCase(BaseVolumeTestCase):
                                       project_id=project_id,
                                       **reserve_opts)
 
-        with mock.patch.object(self.volume.driver, 'retype') as _retype:
-            with mock.patch.object(volume_types, 'volume_types_diff') as _diff:
-                with mock.patch.object(self.volume, 'migrate_volume') as _mig:
-                    _retype.return_value = driver
-                    _diff.return_value = ({}, diff_equal)
-                    if migrate_exc:
-                        _mig.side_effect = KeyError
-                    else:
-                        _mig.return_value = True
-
-                    if not exc:
-                        self.volume.retype(self.context, volume['id'],
-                                           vol_type['id'], host_obj,
-                                           migration_policy=policy,
-                                           reservations=reservations)
-                    else:
-                        self.assertRaises(exc, self.volume.retype,
-                                          self.context, volume['id'],
-                                          vol_type['id'], host_obj,
-                                          migration_policy=policy,
-                                          reservations=reservations)
+        with mock.patch.object(self.volume.driver, 'retype') as _retype,\
+                mock.patch.object(volume_types, 'volume_types_diff') as _diff,\
+                mock.patch.object(self.volume, 'migrate_volume') as _mig,\
+                mock.patch.object(db, 'volume_get') as get_volume:
+            get_volume.return_value = volume
+            _retype.return_value = driver
+            _diff.return_value = ({}, diff_equal)
+            if migrate_exc:
+                _mig.side_effect = KeyError
+            else:
+                _mig.return_value = True
+
+            if not exc:
+                self.volume.retype(self.context, volume['id'],
+                                   vol_type['id'], host_obj,
+                                   migration_policy=policy,
+                                   reservations=reservations)
+            else:
+                self.assertRaises(exc, self.volume.retype,
+                                  self.context, volume['id'],
+                                  vol_type['id'], host_obj,
+                                  migration_policy=policy,
+                                  reservations=reservations)
+            get_volume.assert_called_once_with(self.context, volume['id'])
 
         # get volume/quota properties
         volume = db.volume_get(elevated, volume['id'])
index bde22e8357d9416d809d730b45f52e8956b89ff2..a105963fff6246a3e5ef5d4b806c9b604df86568 100644 (file)
@@ -49,6 +49,7 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import encodeutils
 from oslo_utils import importutils
+from oslo_utils import strutils
 from oslo_utils import timeutils
 import retrying
 import six
@@ -662,6 +663,16 @@ def _get_disk_of_partition(devpath, st=None):
     return (devpath, st)
 
 
+def get_bool_param(param_string, params):
+    """Return the boolean value of ``params[param_string]``.
+
+    A missing key defaults to False.  If the present value is not a
+    recognized boolean representation, InvalidParameterValue is raised;
+    otherwise it is parsed strictly via strutils.bool_from_string.
+
+    :param param_string: key to look up in params
+    :param params: mapping of request parameters
+    :returns: the parsed boolean value
+    :raises: exception.InvalidParameterValue
+    """
+    param = params.get(param_string, False)
+    # NOTE(review): is_valid_boolstr is assumed to be defined elsewhere in
+    # this module and to accept both bools and strings -- confirm.
+    if not is_valid_boolstr(param):
+        msg = _('Value %(param)s for %(param_string)s is not a '
+                'boolean.') % {'param': param, 'param_string': param_string}
+        raise exception.InvalidParameterValue(err=msg)
+
+    return strutils.bool_from_string(param, strict=True)
+
+
 def get_blkdev_major_minor(path, lookup_for_file=True):
     """Get 'major:minor' number of block device.
 
index d247a991d54b54247d1549e4b12fc368ac0b6d5f..8c8382b725f4167ff681fc6a68b59ed9a90d9d0c 100644 (file)
@@ -185,6 +185,17 @@ class API(base.Base):
                 safe = True
         return safe
 
+    def _is_volume_migrating(self, volume):
+        """Return True if the volume is currently in a migration process."""
+        # The migration status 'none' means no migration has ever been done
+        # before. The migration status 'error' means the previous migration
+        # failed. The migration status 'success' means the previous migration
+        # succeeded. The migration status 'deleting' means the source volume
+        # fails to delete after a migration.
+        # All of the statuses above means the volume is not in the process
+        # of a migration.
+        return volume['migration_status'] not in (None, 'deleting',
+                                                  'error', 'success')
+
     def create(self, context, size, name, description, snapshot=None,
                image_id=None, volume_type=None, metadata=None,
                availability_zone=None, source_volume=None,
@@ -355,7 +366,7 @@ class API(base.Base):
                       'vol_status': volume['status']})
             raise exception.InvalidVolume(reason=msg)
 
-        if volume['migration_status'] not in (None, 'deleting'):
+        if self._is_volume_migrating(volume):
             # Volume is migrating, wait until done
             LOG.info(_LI('Unable to delete volume: %s, '
                          'volume is currently migrating.'), volume['id'])
@@ -397,6 +408,12 @@ class API(base.Base):
 
     @wrap_check_policy
     def update(self, context, volume, fields):
+        if volume['status'] == 'maintenance':
+            LOG.info(_LI("Unable to update volume, "
+                         "because it is in maintenance."), resource=volume)
+            msg = _("The volume cannot be updated during maintenance.")
+            raise exception.InvalidVolume(reason=msg)
+
         vref = self.db.volume_update(context, volume['id'], fields)
         LOG.info(_LI("Volume updated successfully."), resource=vref)
 
@@ -572,7 +589,7 @@ class API(base.Base):
         # If we are in the middle of a volume migration, we don't want the user
         # to see that the volume is 'detaching'. Having 'migration_status' set
         # will have the same effect internally.
-        if volume['migration_status']:
+        if self._is_volume_migrating(volume):
             return
 
         if (volume['status'] != 'in-use' or
@@ -599,6 +616,11 @@ class API(base.Base):
     @wrap_check_policy
     def attach(self, context, volume, instance_uuid, host_name,
                mountpoint, mode):
+        if volume['status'] == 'maintenance':
+            LOG.info(_LI('Unable to attach volume, '
+                         'because it is in maintenance.'), resource=volume)
+            msg = _("The volume cannot be attached in maintenance mode.")
+            raise exception.InvalidVolume(reason=msg)
         volume_metadata = self.get_volume_admin_metadata(context.elevated(),
                                                          volume)
         if 'readonly' not in volume_metadata:
@@ -623,6 +645,11 @@ class API(base.Base):
 
     @wrap_check_policy
     def detach(self, context, volume, attachment_id):
+        if volume['status'] == 'maintenance':
+            LOG.info(_LI('Unable to detach volume, '
+                         'because it is in maintenance.'), resource=volume)
+            msg = _("The volume cannot be detached in maintenance mode.")
+            raise exception.InvalidVolume(reason=msg)
         detach_results = self.volume_rpcapi.detach_volume(context, volume,
                                                           attachment_id)
         LOG.info(_LI("Detach volume completed successfully."),
@@ -631,6 +658,13 @@ class API(base.Base):
 
     @wrap_check_policy
     def initialize_connection(self, context, volume, connector):
+        if volume['status'] == 'maintenance':
+            LOG.info(_LI('Unable to initialize the connection for '
+                         'volume, because it is in '
+                         'maintenance.'), resource=volume)
+            msg = _("The volume connection cannot be initialized in "
+                    "maintenance mode.")
+            raise exception.InvalidVolume(reason=msg)
         init_results = self.volume_rpcapi.initialize_connection(context,
                                                                 volume,
                                                                 connector)
@@ -651,6 +685,11 @@ class API(base.Base):
 
     @wrap_check_policy
     def accept_transfer(self, context, volume, new_user, new_project):
+        if volume['status'] == 'maintenance':
+            LOG.info(_LI('Unable to accept transfer for volume, '
+                         'because it is in maintenance.'), resource=volume)
+            msg = _("The volume cannot accept transfer in maintenance mode.")
+            raise exception.InvalidVolume(reason=msg)
         results = self.volume_rpcapi.accept_transfer(context,
                                                      volume,
                                                      new_user,
@@ -676,7 +715,13 @@ class API(base.Base):
                               cgsnapshot_id):
         check_policy(context, 'create_snapshot', volume)
 
-        if volume['migration_status'] is not None:
+        if volume['status'] == 'maintenance':
+            LOG.info(_LI('Unable to create the snapshot for volume, '
+                         'because it is in maintenance.'), resource=volume)
+            msg = _("The snapshot cannot be created when the volume is in "
+                    "maintenance mode.")
+            raise exception.InvalidVolume(reason=msg)
+        if self._is_volume_migrating(volume):
             # Volume is migrating, wait until done
             msg = _("Snapshot cannot be created while volume is migrating.")
             raise exception.InvalidVolume(reason=msg)
@@ -802,7 +847,13 @@ class API(base.Base):
     def _create_snapshot_in_db_validate(self, context, volume, force):
         check_policy(context, 'create_snapshot', volume)
 
-        if volume['migration_status'] is not None:
+        if volume['status'] == 'maintenance':
+            LOG.info(_LI('Unable to create the snapshot for volume, '
+                         'because it is in maintenance.'), resource=volume)
+            msg = _("The snapshot cannot be created when the volume is in "
+                    "maintenance mode.")
+            raise exception.InvalidVolume(reason=msg)
+        if self._is_volume_migrating(volume):
             # Volume is migrating, wait until done
             msg = _("Snapshot cannot be created while volume is migrating.")
             raise exception.InvalidVolume(reason=msg)
@@ -927,6 +978,12 @@ class API(base.Base):
     def delete_volume_metadata(self, context, volume,
                                key, meta_type=common.METADATA_TYPES.user):
         """Delete the given metadata item from a volume."""
+        if volume['status'] == 'maintenance':
+            LOG.info(_LI('Unable to delete the volume metadata, '
+                         'because it is in maintenance.'), resource=volume)
+            msg = _("The volume metadata cannot be deleted when the volume "
+                    "is in maintenance mode.")
+            raise exception.InvalidVolume(reason=msg)
         self.db.volume_metadata_delete(context, volume['id'], key, meta_type)
         LOG.info(_LI("Delete volume metadata completed successfully."),
                  resource=volume)
@@ -959,6 +1016,12 @@ class API(base.Base):
         `metadata` argument will be deleted.
 
         """
+        if volume['status'] == 'maintenance':
+            LOG.info(_LI('Unable to update the metadata for volume, '
+                         'because it is in maintenance.'), resource=volume)
+            msg = _("The volume metadata cannot be updated when the volume "
+                    "is in maintenance mode.")
+            raise exception.InvalidVolume(reason=msg)
         if delete:
             _metadata = metadata
         else:
@@ -1216,10 +1279,10 @@ class API(base.Base):
                  resource=volume)
 
     @wrap_check_policy
-    def migrate_volume(self, context, volume, host, force_host_copy):
+    def migrate_volume(self, context, volume, host, force_host_copy,
+                       lock_volume):
         """Migrate the volume to the specified host."""
 
-        # We only handle "available" volumes for now
         if volume['status'] not in ['available', 'in-use']:
             msg = _('Volume %(vol_id)s status must be available or in-use, '
                     'but current status is: '
@@ -1228,8 +1291,8 @@ class API(base.Base):
             LOG.error(msg)
             raise exception.InvalidVolume(reason=msg)
 
-        # Make sure volume is not part of a migration
-        if volume['migration_status'] is not None:
+        # Make sure volume is not part of a migration.
+        if self._is_volume_migrating(volume):
             msg = _("Volume %s is already part of an active "
                     "migration.") % volume['id']
             LOG.error(msg)
@@ -1279,7 +1342,17 @@ class API(base.Base):
             LOG.error(msg)
             raise exception.InvalidHost(reason=msg)
 
-        self.update(context, volume, {'migration_status': 'starting'})
+        # When the migration of an available volume starts, the volume's
+        # migration status is always updated; its status is changed only if
+        # the admin sets the lock_volume flag to True. In that case the
+        # status becomes 'maintenance', telling users that the volume is in
+        # maintenance mode and that no action (attach, detach, retype,
+        # migrate, ...) is allowed on it until the migration finishes.
+        updates = {'migration_status': 'starting',
+                   'previous_status': volume['status']}
+        if lock_volume and volume['status'] == 'available':
+            updates['status'] = 'maintenance'
+        self.update(context, volume, updates)
 
         # Call the scheduler to ensure that the host exists and that it can
         # accept the volume
@@ -1352,7 +1425,7 @@ class API(base.Base):
             LOG.error(msg)
             raise exception.InvalidVolume(reason=msg)
 
-        if volume['migration_status'] is not None:
+        if self._is_volume_migrating(volume):
             msg = (_("Volume %s is already part of an active migration.")
                    % volume['id'])
             LOG.error(msg)
@@ -1428,7 +1501,8 @@ class API(base.Base):
         reservations = quota_utils.get_volume_type_reservation(context, volume,
                                                                vol_type_id)
 
-        self.update(context, volume, {'status': 'retyping'})
+        self.update(context, volume, {'status': 'retyping',
+                                      'previous_status': volume['status']})
 
         request_spec = {'volume_properties': volume,
                         'volume_id': volume['id'],
index 3f571c5fcc629460616b559be5ea1747405ddda0..f2849ec60520a5bbad1d22e0a686f2146be297dd 100644 (file)
@@ -566,7 +566,11 @@ class VolumeManager(manager.SchedulerDependentManager):
             raise exception.InvalidVolume(
                 reason=_("volume is not local to this node"))
 
-        is_migrating = volume_ref['migration_status'] is not None
+        # 'deleting' is deliberately excluded from the non-migrating values
+        # below, because it marks a source volume pending deletion after a
+        # migration, for which no quota handling is needed.
+        is_migrating = volume_ref['migration_status'] not in (None, 'error',
+                                                              'success')
         is_migrating_dest = (is_migrating and
                              volume_ref['migration_status'].startswith(
                                  'target:'))
@@ -873,9 +877,6 @@ class VolumeManager(manager.SchedulerDependentManager):
                                              host_name_sanitized,
                                              mountpoint,
                                              mode)
-            if volume['migration_status']:
-                self.db.volume_update(context, volume_id,
-                                      {'migration_status': None})
             self._notify_about_volume_usage(context, volume, "attach.end")
             LOG.info(_LI("Attach volume completed successfully."),
                      resource=volume)
@@ -1439,13 +1440,6 @@ class VolumeManager(manager.SchedulerDependentManager):
                 self._clean_temporary_volume(ctxt, volume['id'],
                                              new_volume['id'])
 
-    def _get_original_status(self, volume):
-        attachments = volume['volume_attachment']
-        if not attachments:
-            return 'available'
-        else:
-            return 'in-use'
-
     def _clean_temporary_volume(self, ctxt, volume_id, new_volume_id,
                                 clean_db_only=False):
         volume = self.db.volume_get(ctxt, volume_id)
@@ -1506,14 +1500,15 @@ class VolumeManager(manager.SchedulerDependentManager):
         new_volume = self.db.volume_get(ctxt, new_volume_id)
         rpcapi = volume_rpcapi.VolumeAPI()
 
-        orig_volume_status = self._get_original_status(volume)
+        orig_volume_status = volume['previous_status']
 
         if error:
             LOG.info(_LI("migrate_volume_completion is cleaning up an error "
                          "for volume %(vol1)s (temporary volume %(vol2)s"),
                      {'vol1': volume['id'], 'vol2': new_volume['id']})
             rpcapi.delete_volume(ctxt, new_volume)
-            updates = {'migration_status': None, 'status': orig_volume_status}
+            updates = {'migration_status': 'error',
+                       'status': orig_volume_status}
             self.db.volume_update(ctxt, volume_id, updates)
             return volume_id
 
@@ -1541,12 +1536,9 @@ class VolumeManager(manager.SchedulerDependentManager):
         # asynchronously delete the destination id
         __, updated_new = self.db.finish_volume_migration(
             ctxt, volume_id, new_volume_id)
-        if orig_volume_status == 'in-use':
-            updates = {'migration_status': 'completing',
-                       'status': orig_volume_status}
-        else:
-            updates = {'migration_status': None}
-        self.db.volume_update(ctxt, volume_id, updates)
+        updates = {'status': orig_volume_status,
+                   'previous_status': volume['status'],
+                   'migration_status': 'success'}
 
         if orig_volume_status == 'in-use':
             attachments = volume['volume_attachment']
@@ -1556,6 +1548,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                                      attachment['attached_host'],
                                      attachment['mountpoint'],
                                      'rw')
+        self.db.volume_update(ctxt, volume_id, updates)
 
         # Asynchronous deletion of the source volume in the back-end (now
         # pointed by the target volume id)
@@ -1565,6 +1558,9 @@ class VolumeManager(manager.SchedulerDependentManager):
             LOG.error(_LE('Failed to request async delete of migration source '
                           'vol %(vol)s: %(err)s'),
                       {'vol': volume_id, 'err': ex})
+        updates = {'migration_status': 'success',
+                   'status': orig_volume_status,
+                   'previous_status': volume['status']}
 
         LOG.info(_LI("Complete-Migrate volume completed successfully."),
                  resource=volume)
@@ -1588,8 +1584,8 @@ class VolumeManager(manager.SchedulerDependentManager):
         moved = False
 
         status_update = None
-        if volume_ref['status'] == 'retyping':
-            status_update = {'status': self._get_original_status(volume_ref)}
+        if volume_ref['status'] in ('retyping', 'maintenance'):
+            status_update = {'status': volume_ref['previous_status']}
 
         self.db.volume_update(ctxt, volume_ref['id'],
                               {'migration_status': 'migrating'})
@@ -1601,7 +1597,8 @@ class VolumeManager(manager.SchedulerDependentManager):
                                                                  host)
                 if moved:
                     updates = {'host': host['host'],
-                               'migration_status': None}
+                               'migration_status': 'success',
+                               'previous_status': volume_ref['status']}
                     if status_update:
                         updates.update(status_update)
                     if model_update:
@@ -1611,7 +1608,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                                                        updates)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    updates = {'migration_status': None}
+                    updates = {'migration_status': 'error'}
                     if status_update:
                         updates.update(status_update)
                     self.db.volume_update(ctxt, volume_ref['id'], updates)
@@ -1621,7 +1618,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                                              new_type_id)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    updates = {'migration_status': None}
+                    updates = {'migration_status': 'error'}
                     if status_update:
                         updates.update(status_update)
                     self.db.volume_update(ctxt, volume_ref['id'], updates)
@@ -1831,7 +1828,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         context = ctxt.elevated()
 
         volume_ref = self.db.volume_get(ctxt, volume_id)
-        status_update = {'status': self._get_original_status(volume_ref)}
+        status_update = {'status': volume_ref['previous_status']}
         if context.project_id != volume_ref['project_id']:
             project_id = volume_ref['project_id']
         else: