review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Move driver initialization check into the method
author Flavio Percoco <flaper87@gmail.com>
Tue, 10 Dec 2013 11:31:50 +0000 (12:31 +0100)
committer Flavio Percoco <flaper87@gmail.com>
Tue, 14 Jan 2014 19:23:00 +0000 (20:23 +0100)
The volume and backup managers' methods are decorated with
`require_driver_initialized`, which checks whether the driver has been
initialized and fails with a `DriverNotInitialized` exception if it has
not.

This early failure leaves volumes and backups in a wrong status, which is
not only confusing for the user but also makes it difficult to do
anything with the resources once they have been left in a 'bogus' status.

For example, when a volume creation is requested, the volume is first
created in the database and its status is set to 'creating'. Then the
scheduler will pick an available volume node and send the task to it. If
the driver has not been initialized, the volume status will be left as
'creating' instead of 'error'.

This patch fixes that issue by moving the driver initialization check
into the various managers' methods. In some cases the check is done at
the very beginning of the method; in others it is done later, either to
avoid code duplication or because the preceding lines made sense to
execute first (see the sketch below).

Change-Id: I2610be6ba1aa7df417f1a1f7bb27af30273e4814
Closes-bug: #1242942
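
To illustrate the pattern this change adopts, here is a minimal, hypothetical
manager method. The method name, db helper and status value are made up for
the sketch; `utils`, `exception` and `excutils` stand for `cinder.utils`,
`cinder.exception` and the oslo `excutils` module used throughout the diffs
below:

    # Previously: the decorator raised before the method body ran, so a
    # status set earlier in the workflow (e.g. 'creating') was never
    # corrected:
    #
    #     @utils.require_driver_initialized
    #     def create_thing(self, context, thing_id):
    #         ...
    #
    # With this patch: the check runs inside the method, where the failure
    # can be caught, the resource moved to an error status, and the
    # exception re-raised for the caller.
    def create_thing(self, context, thing_id):
        try:
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                self.db.thing_update(context, thing_id, {'status': 'error'})
        # ... normal driver work continues here ...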

cinder/backup/manager.py
cinder/exception.py
cinder/tests/test_volume.py
cinder/utils.py
cinder/volume/flows/create_volume/__init__.py
cinder/volume/manager.py

diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py
index 2aadfa42ee1846ab3fa61175da5106bfc310121a..1255026e21688cf367fbec37eca10f4663579a23 100644 (file)
@@ -217,7 +217,6 @@ class BackupManager(manager.SchedulerDependentManager):
                 LOG.info(_('Resuming delete on backup: %s.') % backup['id'])
                 self.delete_backup(ctxt, backup['id'])
 
-    @utils.require_driver_initialized
     def create_backup(self, context, backup_id):
         """Create volume backups using configured backup service."""
         backup = self.db.backup_get(context, backup_id)
@@ -258,6 +257,12 @@ class BackupManager(manager.SchedulerDependentManager):
             raise exception.InvalidBackup(reason=err)
 
         try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught,
+            # the volume status will be set back to available and
+            # the backup status to 'error'
+            utils.require_driver_initialized(self.driver)
+
             backup_service = self.service.get_backup_driver(context)
             self._get_driver(backend).backup_volume(context, backup,
                                                     backup_service)
@@ -276,7 +281,6 @@ class BackupManager(manager.SchedulerDependentManager):
                                                    self.az})
         LOG.info(_('Create backup finished. backup: %s.'), backup_id)
 
-    @utils.require_driver_initialized
     def restore_backup(self, context, backup_id, volume_id):
         """Restore volume backups from configured backup service."""
         LOG.info(_('Restore backup started, backup: %(backup_id)s '
@@ -334,6 +338,12 @@ class BackupManager(manager.SchedulerDependentManager):
             raise exception.InvalidBackup(reason=err)
 
         try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught,
+            # the volume status will be set back to available and
+            # the backup status to 'error'
+            utils.require_driver_initialized(self.driver)
+
             backup_service = self.service.get_backup_driver(context)
             self._get_driver(backend).restore_backup(context, backup,
                                                      volume,
@@ -351,9 +361,21 @@ class BackupManager(manager.SchedulerDependentManager):
                    ' to volume %(volume_id)s.') %
                  {'backup_id': backup_id, 'volume_id': volume_id})
 
-    @utils.require_driver_initialized
     def delete_backup(self, context, backup_id):
         """Delete volume backup from configured backup service."""
+        try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the backup status updated. Fail early since there
+            # is no other status to change but the backup's.
+            utils.require_driver_initialized(self.driver)
+        except exception.DriverNotInitialized as err:
+            with excutils.save_and_reraise_exception():
+                self.db.backup_update(context, backup_id,
+                                      {'status': 'error',
+                                       'fail_reason':
+                                       unicode(err)})
+
         LOG.info(_('Delete backup started, backup: %s.'), backup_id)
         backup = self.db.backup_get(context, backup_id)
         self.db.backup_update(context, backup_id, {'host': self.host})
diff --git a/cinder/exception.py b/cinder/exception.py
index 033ab4bb9bb117dd342122d708fe3da248569a53..68d5649576981dfbf003fe3f0b97ccc728c19b0e 100644 (file)
@@ -138,7 +138,7 @@ class ImageNotAuthorized(CinderException):
 
 
 class DriverNotInitialized(CinderException):
-    message = _("Volume driver '%(driver)s' not initialized.")
+    message = _("Volume driver not ready.")
 
 
 class Invalid(CinderException):
diff --git a/cinder/tests/test_volume.py b/cinder/tests/test_volume.py
index 4c50acb5b004c13fff480ed522ab02c9bd2351b1..1a43feacc63223643810f094b7e79396011cf00b 100644 (file)
@@ -146,6 +146,73 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertEqual(volume['status'], "error")
         self.volume.delete_volume(self.context, volume_id)
 
+    @mock.patch.object(QUOTAS, 'reserve')
+    @mock.patch.object(QUOTAS, 'commit')
+    @mock.patch.object(QUOTAS, 'rollback')
+    def test_create_driver_not_initialized(self, reserve, commit, rollback):
+        self.volume.driver._initialized = False
+
+        def fake_reserve(context, expire=None, project_id=None, **deltas):
+            return ["RESERVATION"]
+
+        def fake_commit_and_rollback(context, reservations, project_id=None):
+            pass
+
+        reserve.return_value = fake_reserve
+        commit.return_value = fake_commit_and_rollback
+        rollback.return_value = fake_commit_and_rollback
+
+        volume = tests_utils.create_volume(
+            self.context,
+            availability_zone=CONF.storage_availability_zone,
+            **self.volume_params)
+
+        volume_id = volume['id']
+        self.assertIsNone(volume['encryption_key_id'])
+        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertRaises(exception.DriverNotInitialized,
+                          self.volume.create_volume,
+                          self.context, volume_id)
+
+        # NOTE(flaper87): The volume status should be error.
+        volume = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(volume.status, "error")
+        db.volume_destroy(context.get_admin_context(), volume_id)
+
+    @mock.patch.object(QUOTAS, 'reserve')
+    @mock.patch.object(QUOTAS, 'commit')
+    @mock.patch.object(QUOTAS, 'rollback')
+    def test_delete_driver_not_initialized(self, reserve, commit, rollback):
+        # NOTE(flaper87): Set initialized to False
+        self.volume.driver._initialized = False
+
+        def fake_reserve(context, expire=None, project_id=None, **deltas):
+            return ["RESERVATION"]
+
+        def fake_commit_and_rollback(context, reservations, project_id=None):
+            pass
+
+        reserve.return_value = fake_reserve
+        commit.return_value = fake_commit_and_rollback
+        rollback.return_value = fake_commit_and_rollback
+
+        volume = tests_utils.create_volume(
+            self.context,
+            availability_zone=CONF.storage_availability_zone,
+            **self.volume_params)
+
+        volume_id = volume['id']
+        self.assertIsNone(volume['encryption_key_id'])
+        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertRaises(exception.DriverNotInitialized,
+                          self.volume.delete_volume,
+                          self.context, volume_id)
+
+        # NOTE(flaper87): The volume status should be error_deleting.
+        volume = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(volume.status, "error_deleting")
+        db.volume_destroy(context.get_admin_context(), volume_id)
+
     def test_create_delete_volume(self):
         """Test volume can be created and deleted."""
         # Need to stub out reserve, commit, and rollback
@@ -448,6 +515,30 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.volume.delete_snapshot(self.context, snapshot_id)
         self.volume.delete_volume(self.context, volume_src['id'])
 
+    def test_create_snapshot_driver_not_initialized(self):
+        volume_src = tests_utils.create_volume(self.context,
+                                               **self.volume_params)
+        self.volume.create_volume(self.context, volume_src['id'])
+        snapshot_id = self._create_snapshot(volume_src['id'])['id']
+
+        # NOTE(flaper87): Set initialized to False
+        self.volume.driver._initialized = False
+
+        self.assertRaises(exception.DriverNotInitialized,
+                          self.volume.create_snapshot,
+                          self.context, volume_src['id'],
+                          snapshot_id)
+
+        # NOTE(flaper87): The snapshot status should be error.
+        snapshot = db.snapshot_get(context.get_admin_context(), snapshot_id)
+        self.assertEqual(snapshot.status, "error")
+
+        # NOTE(flaper87): Set initialized to True,
+        # let's clean up the mess
+        self.volume.driver._initialized = True
+        self.volume.delete_snapshot(self.context, snapshot_id)
+        self.volume.delete_volume(self.context, volume_src['id'])
+
     def _mock_synchronized(self, name, *s_args, **s_kwargs):
         def inner_sync1(f):
             def inner_sync2(*args, **kwargs):
@@ -1798,6 +1889,29 @@ class VolumeTestCase(BaseVolumeTestCase):
         # clean up
         self.volume.delete_volume(self.context, volume['id'])
 
+    def test_extend_volume_driver_not_initialized(self):
+        """Test extend_volume when the driver is not initialized."""
+        # create a volume and assign to host
+        volume = tests_utils.create_volume(self.context, size=2,
+                                           status='available',
+                                           host=CONF.host)
+        self.volume.create_volume(self.context, volume['id'])
+
+        # NOTE(flaper87): Set initialized to False
+        self.volume.driver._initialized = False
+
+        self.assertRaises(exception.DriverNotInitialized,
+                          self.volume.extend_volume,
+                          self.context, volume['id'], 3)
+
+        volume = db.volume_get(context.get_admin_context(), volume['id'])
+        self.assertEqual(volume.status, 'error_extending')
+
+        # NOTE(flaper87): Set initialized to True,
+        # let's clean up the mess.
+        self.volume.driver._initialized = True
+        self.volume.delete_volume(self.context, volume['id'])
+
     def test_extend_volume_manager(self):
         """Test volume can be extended at the manager level."""
         def fake_reserve(context, expire=None, project_id=None, **deltas):
@@ -2133,6 +2247,25 @@ class VolumeTestCase(BaseVolumeTestCase):
     def test_retype_volume_migration_equal_types(self):
         self._retype_volume_exec(False, diff_equal=True)
 
+    def test_migrate_driver_not_initialized(self):
+        volume = tests_utils.create_volume(self.context, size=0,
+                                           host=CONF.host)
+        host_obj = {'host': 'newhost', 'capabilities': {}}
+
+        self.volume.driver._initialized = False
+        self.assertRaises(exception.DriverNotInitialized,
+                          self.volume.migrate_volume,
+                          self.context, volume['id'],
+                          host_obj, True)
+
+        volume = db.volume_get(context.get_admin_context(), volume['id'])
+        self.assertEqual(volume.migration_status, 'error')
+
+        # NOTE(flaper87): Set initialized to True,
+        # let's clean up the mess.
+        self.volume.driver._initialized = True
+        self.volume.delete_volume(self.context, volume['id'])
+
     def test_update_volume_readonly_flag(self):
         """Test volume readonly flag can be updated at API level."""
         # create a volume and assign to host
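
For context on the tests above: they flip the driver's private `_initialized`
flag directly. This relies on the base volume driver exposing that flag
through an `initialized` property, which is what `utils.require_driver_initialized`
reads. A simplified sketch of that assumed layout (not the exact Cinder base
class):

    class BaseVolumeDriverSketch(object):
        """Sketch of the relevant part of a Cinder volume driver."""

        def __init__(self, *args, **kwargs):
            self._initialized = False

        def set_initialized(self):
            self._initialized = True

        @property
        def initialized(self):
            return self._initialized

Setting `driver._initialized = False` in a test therefore makes the helper
raise `DriverNotInitialized` on the next manager call.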
diff --git a/cinder/utils.py b/cinder/utils.py
index bcda10469ecf3349d577e58d1ce513dd99bdd596..90c62c6b50d53b009bc34a0ca35fd3c22316cf95 100644 (file)
@@ -20,7 +20,6 @@
 
 import contextlib
 import datetime
-import functools
 import hashlib
 import inspect
 import os
@@ -808,15 +807,19 @@ def brick_get_connector(protocol, driver=None,
                                                 *args, **kwargs)
 
 
-def require_driver_initialized(func):
-    @functools.wraps(func)
-    def wrapper(self, *args, **kwargs):
-        # we can't do anything if the driver didn't init
-        if not self.driver.initialized:
-            driver_name = self.driver.__class__.__name__
-            raise exception.DriverNotInitialized(driver=driver_name)
-        return func(self, *args, **kwargs)
-    return wrapper
+def require_driver_initialized(driver):
+    """Verifies that `driver` is initialized.
+
+    If the driver is not initialized, an exception will be raised.
+
+    :param driver: The driver instance.
+    :raises: `exception.DriverNotInitialized`
+    """
+    # we can't do anything if the driver didn't init
+    if not driver.initialized:
+        driver_name = driver.__class__.__name__
+        LOG.error(_("Volume driver %s not initialized") % driver_name)
+        raise exception.DriverNotInitialized()
 
 
 def get_file_mode(path):
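
As a quick sanity check of the reworked helper, the snippet below shows it
raising when a driver reports itself uninitialized. The fake driver class is
hypothetical, and the snippet assumes a Cinder tree with this patch applied:

    from cinder import exception
    from cinder import utils

    class FakeDriver(object):
        # Mimics a driver whose do_setup()/check_for_setup_error() never ran.
        initialized = False

    try:
        utils.require_driver_initialized(FakeDriver())
    except exception.DriverNotInitialized:
        print("driver not ready, as expected")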
diff --git a/cinder/volume/flows/create_volume/__init__.py b/cinder/volume/flows/create_volume/__init__.py
index bb38577630b8866a63a3c2d6409d16b1aa51f6e9..9faf158334a8767f28bb24c84af8c444e8f849ba 100644 (file)
@@ -1394,19 +1394,25 @@ class CreateVolumeFromSpecTask(base.CinderTask):
         return self.driver.create_volume(volume_ref)
 
     def execute(self, context, volume_ref, volume_spec):
+        volume_spec = dict(volume_spec)
+        volume_id = volume_spec.pop('volume_id', None)
+
         # we can't do anything if the driver didn't init
         if not self.driver.initialized:
-            LOG.error(_("Unable to create volume, driver not initialized"))
             driver_name = self.driver.__class__.__name__
-            raise exception.DriverNotInitialized(driver=driver_name)
+            LOG.error(_("Unable to create volume. "
+                        "Volume driver %s not initialized") % driver_name)
+
+            # NOTE(flaper87): Set the error status before
+            # raising any exception.
+            self.db.volume_update(context, volume_id, dict(status='error'))
+            raise exception.DriverNotInitialized()
 
         create_type = volume_spec.pop('type', None)
         create_functor = self._create_func_mapping.get(create_type)
         if not create_functor:
             raise exception.VolumeTypeNotFound(volume_type_id=create_type)
 
-        volume_spec = dict(volume_spec)
-        volume_id = volume_spec.pop('volume_id', None)
         if not volume_id:
             volume_id = volume_ref['id']
         LOG.info(_("Volume %(volume_id)s: being created using %(functor)s "
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index fdb3f7e8bab0bd79fa758afadf00e428b3f6141e..3026576be9c4638c91bb90847de245c123a8a94d 100644 (file)
@@ -287,7 +287,6 @@ class VolumeManager(manager.SchedulerDependentManager):
         # collect and publish service capabilities
         self.publish_service_capabilities(ctxt)
 
-    @utils.require_driver_initialized
     def create_volume(self, context, volume_id, request_spec=None,
                       filter_properties=None, allow_reschedule=True,
                       snapshot_id=None, image_id=None, source_volid=None):
@@ -299,6 +298,8 @@ class VolumeManager(manager.SchedulerDependentManager):
             filter_properties = {}
 
         try:
+            # NOTE(flaper87): Driver initialization is
+            # verified by the task itself.
             flow_engine = create_volume.get_manager_flow(
                 context,
                 self.db,
@@ -349,7 +350,6 @@ class VolumeManager(manager.SchedulerDependentManager):
         self.stats['allocated_capacity_gb'] += volume_ref['size']
         return volume_ref['id']
 
-    @utils.require_driver_initialized
     @locked_volume_operation
     def delete_volume(self, context, volume_id):
         """Deletes and unexports volume."""
@@ -371,6 +371,11 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         self._notify_about_volume_usage(context, volume_ref, "delete.start")
         try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the volume status updated.
+            utils.require_driver_initialized(self.driver)
+
             LOG.debug(_("volume %s: removing export"), volume_ref['id'])
             self.driver.remove_export(context, volume_ref)
             LOG.debug(_("volume %s: deleting"), volume_ref['id'])
@@ -428,7 +433,6 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         return True
 
-    @utils.require_driver_initialized
     def create_snapshot(self, context, volume_id, snapshot_id):
         """Creates and exports the snapshot."""
         caller_context = context
@@ -440,6 +444,11 @@ class VolumeManager(manager.SchedulerDependentManager):
             context, snapshot_ref, "create.start")
 
         try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the snapshot status updated.
+            utils.require_driver_initialized(self.driver)
+
             LOG.debug(_("snapshot %(snap_id)s: creating"),
                       {'snap_id': snapshot_ref['id']})
 
@@ -478,7 +487,6 @@ class VolumeManager(manager.SchedulerDependentManager):
         self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
         return snapshot_id
 
-    @utils.require_driver_initialized
     @locked_snapshot_operation
     def delete_snapshot(self, context, snapshot_id):
         """Deletes and unexports snapshot."""
@@ -492,6 +500,11 @@ class VolumeManager(manager.SchedulerDependentManager):
             context, snapshot_ref, "delete.start")
 
         try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the snapshot status updated.
+            utils.require_driver_initialized(self.driver)
+
             LOG.debug(_("snapshot %s: deleting"), snapshot_ref['id'])
 
             # Pass context so that drivers that want to use it, can,
@@ -541,7 +554,6 @@ class VolumeManager(manager.SchedulerDependentManager):
             QUOTAS.commit(context, reservations, project_id=project_id)
         return True
 
-    @utils.require_driver_initialized
     def attach_volume(self, context, volume_id, instance_uuid, host_name,
                       mountpoint, mode):
         """Updates db to show volume is attached."""
@@ -599,6 +611,11 @@ class VolumeManager(manager.SchedulerDependentManager):
                 raise exception.InvalidVolumeAttachMode(mode=mode,
                                                         volume_id=volume_id)
             try:
+                # NOTE(flaper87): Verify the driver is enabled
+                # before going forward. The exception will be caught
+                # and the volume status updated.
+                utils.require_driver_initialized(self.driver)
+
                 self.driver.attach_volume(context,
                                           volume,
                                           instance_uuid,
@@ -617,7 +634,6 @@ class VolumeManager(manager.SchedulerDependentManager):
             self._notify_about_volume_usage(context, volume, "attach.end")
         return do_attach()
 
-    @utils.require_driver_initialized
     def detach_volume(self, context, volume_id):
         """Updates db to show volume is detached."""
         # TODO(vish): refactor this into a more general "unreserve"
@@ -626,6 +642,11 @@ class VolumeManager(manager.SchedulerDependentManager):
         volume = self.db.volume_get(context, volume_id)
         self._notify_about_volume_usage(context, volume, "detach.start")
         try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the volume status updated.
+            utils.require_driver_initialized(self.driver)
+
             self.driver.detach_volume(context, volume)
         except Exception:
             with excutils.save_and_reraise_exception():
@@ -644,7 +665,6 @@ class VolumeManager(manager.SchedulerDependentManager):
             self.driver.ensure_export(context, volume)
         self._notify_about_volume_usage(context, volume, "detach.end")
 
-    @utils.require_driver_initialized
     def copy_volume_to_image(self, context, volume_id, image_meta):
         """Uploads the specified volume to Glance.
 
@@ -654,6 +674,11 @@ class VolumeManager(manager.SchedulerDependentManager):
         """
         payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
         try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the volume status updated.
+            utils.require_driver_initialized(self.driver)
+
             volume = self.db.volume_get(context, volume_id)
             self.driver.ensure_export(context.elevated(), volume)
             image_service, image_id = \
@@ -675,7 +700,6 @@ class VolumeManager(manager.SchedulerDependentManager):
                 self.db.volume_update(context, volume_id,
                                       {'status': 'in-use'})
 
-    @utils.require_driver_initialized
     def initialize_connection(self, context, volume_id, connector):
         """Prepare volume for connection from host represented by connector.
 
@@ -713,6 +737,11 @@ class VolumeManager(manager.SchedulerDependentManager):
               json in various places, so it should not contain any non-json
               data types.
         """
+        # NOTE(flaper87): Verify the driver is enabled
+        # before going forward. The exception will be caught
+        # and the volume status updated.
+        utils.require_driver_initialized(self.driver)
+
         volume = self.db.volume_get(context, volume_id)
         self.driver.validate_connector(connector)
         try:
@@ -750,12 +779,16 @@ class VolumeManager(manager.SchedulerDependentManager):
             conn_info['data']['access_mode'] = access_mode
         return conn_info
 
-    @utils.require_driver_initialized
     def terminate_connection(self, context, volume_id, connector, force=False):
         """Cleanup connection from host represented by connector.
 
         The format of connector is the same as for initialize_connection.
         """
+        # NOTE(flaper87): Verify the driver is enabled
+        # before going forward. The exception will be caught
+        # and the volume status updated.
+        utils.require_driver_initialized(self.driver)
+
         volume_ref = self.db.volume_get(context, volume_id)
         try:
             self.driver.terminate_connection(volume_ref,
@@ -766,8 +799,12 @@ class VolumeManager(manager.SchedulerDependentManager):
             LOG.error(err_msg)
             raise exception.VolumeBackendAPIException(data=err_msg)
 
-    @utils.require_driver_initialized
     def accept_transfer(self, context, volume_id, new_user, new_project):
+        # NOTE(flaper87): Verify the driver is enabled
+        # before going forward. The exception will be caught
+        # and the volume status updated.
+        utils.require_driver_initialized(self.driver)
+
         # NOTE(jdg): need elevated context as we haven't "given" the vol
         # yet
         volume_ref = self.db.volume_get(context.elevated(), volume_id)
@@ -847,9 +884,18 @@ class VolumeManager(manager.SchedulerDependentManager):
         else:
             return 'in-use'
 
-    @utils.require_driver_initialized
     def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
                                   error=False):
+        try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the migration status updated.
+            utils.require_driver_initialized(self.driver)
+        except exception.DriverNotInitialized:
+            with excutils.save_and_reraise_exception():
+                self.db.volume_update(ctxt, volume_id,
+                                      {'migration_status': 'error'})
+
         msg = _("migrate_volume_completion: completing migration for "
                 "volume %(vol1)s (temporary volume %(vol2)s")
         LOG.debug(msg % {'vol1': volume_id, 'vol2': new_volume_id})
@@ -892,10 +938,19 @@ class VolumeManager(manager.SchedulerDependentManager):
         self.db.volume_update(ctxt, volume_id, updates)
         return volume['id']
 
-    @utils.require_driver_initialized
     def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
                        new_type_id=None):
         """Migrate the volume to the specified host (called on source host)."""
+        try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the migration status updated.
+            utils.require_driver_initialized(self.driver)
+        except exception.DriverNotInitialized:
+            with excutils.save_and_reraise_exception():
+                self.db.volume_update(ctxt, volume_id,
+                                      {'migration_status': 'error'})
+
         volume_ref = self.db.volume_get(ctxt, volume_id)
         model_update = None
         moved = False
@@ -996,8 +1051,17 @@ class VolumeManager(manager.SchedulerDependentManager):
             context, snapshot, event_suffix,
             extra_usage_info=extra_usage_info, host=self.host)
 
-    @utils.require_driver_initialized
     def extend_volume(self, context, volume_id, new_size):
+        try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the volume status updated.
+            utils.require_driver_initialized(self.driver)
+        except exception.DriverNotInitialized:
+            with excutils.save_and_reraise_exception():
+                self.db.volume_update(context, volume_id,
+                                      {'status': 'error_extending'})
+
         volume = self.db.volume_get(context, volume_id)
         size_increase = (int(new_size)) - volume['size']
 
@@ -1047,9 +1111,9 @@ class VolumeManager(manager.SchedulerDependentManager):
             context, volume, "resize.end",
             extra_usage_info={'size': int(new_size)})
 
-    @utils.require_driver_initialized
     def retype(self, ctxt, volume_id, new_type_id, host,
                migration_policy='never', reservations=None):
+
         def _retype_error(context, volume_id, old_reservations,
                           new_reservations, status_update):
             try:
@@ -1067,6 +1131,19 @@ class VolumeManager(manager.SchedulerDependentManager):
         else:
             project_id = context.project_id
 
+        try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the volume status updated.
+            utils.require_driver_initialized(self.driver)
+        except exception.DriverNotInitialized:
+            with excutils.save_and_reraise_exception():
+                # NOTE(flaper87): Other exceptions in this method don't
+                # set the volume status to error. Should that be done
+                # here? Setting the volume back to its original status
+                # for now.
+                self.db.volume_update(context, volume_id, status_update)
+
         # Get old reservations
         try:
             reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}