Fix create_export/remove_export in driver.py
author Avishay Traeger <avishay@il.ibm.com>
Wed, 26 Feb 2014 09:27:06 +0000 (11:27 +0200)
committer John Griffith <john.griffith@solidfire.com>
Sat, 15 Mar 2014 14:12:47 +0000 (14:12 +0000)
1. There was a call to rpcapi.create_export that has no matching volume
manager function, causing volume migration to crash. The call was also
unnecessary: there is already an initialize_connection RPC call, which
invokes the driver's create_export function. Removed the create_export
RPC call and function, and added better error handling to the
corresponding code in _attach_volume in driver.py.
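
For context, the attach path now reduces to roughly the sketch below.
This is a simplified illustration of _attach_volume in driver.py, not
the verbatim code; the full error handling is in the diff further down:

    # Simplified sketch of the new attach flow (error handling elided).
    def _attach_volume(self, context, volume, properties, remote=False):
        if remote:
            # The manager's initialize_connection RPC already runs the
            # driver's create_export, so no separate create_export RPC
            # call is needed.
            rpcapi = volume_rpcapi.VolumeAPI()
            conn = rpcapi.initialize_connection(context, volume,
                                                properties)
        else:
            # Local path: create the export, then initialize the
            # connection, removing the export again on failure.
            self.create_export(context, volume)
            try:
                conn = self.initialize_connection(volume, properties)
            except Exception:
                self.remove_export(context, volume)
                raise
        # Brick's connector code then attaches the volume using 'conn'
        # (unchanged; see the diff below).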

2. The manager called remove_export from detach_volume, but the driver's
attach/detach helpers never go through detach_volume, so the export was
not cleaned up on those paths. I believe it makes more sense to call it
from terminate_connection. Moved it there, and fixed up the
corresponding code in _detach_volume in driver.py.
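
The detach side mirrors this. Again a simplified sketch of the new
_detach_volume, not the verbatim code:

    # Simplified sketch of the new detach flow (error handling elided).
    def _detach_volume(self, context, attach_info, volume, properties,
                       force=False, remote=False):
        # Brick's connector code detaches the volume from the host first.
        attach_info['connector'].disconnect_volume(
            attach_info['conn']['data'], attach_info['device'])
        if remote:
            # The manager's terminate_connection RPC now also removes
            # the export on the remote host.
            rpcapi = volume_rpcapi.VolumeAPI()
            rpcapi.terminate_connection(context, volume, properties,
                                        force=force)
        else:
            self.terminate_connection(volume, properties, force=force)
            self.remove_export(context, volume)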

3. Removed leftover export code from the create_volume manager flow.

Change-Id: I2b192630ebed54368f151db47b49cbc72601a8d7
Closes-Bug: #1285060
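
Since this removes an RPC method, RPC_API_VERSION is bumped from 1.15
to 1.16 (see the manager.py and rpcapi.py hunks below). Methods in
rpcapi.py pin their client to the version that introduced them, as in
the unchanged manage_existing:

    cctxt = self.client.prepare(server=volume['host'], version='1.15')
    cctxt.cast(ctxt, 'manage_existing', volume_id=volume['id'], ref=ref)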

cinder/tests/test_coraid.py
cinder/tests/test_volume.py
cinder/volume/driver.py
cinder/volume/flows/manager/create_volume.py
cinder/volume/manager.py
cinder/volume/rpcapi.py

index 52e2564a927dac7d7aa6e4e1520ccdb223a9a050..25d5a769f09c5b546dcc936c7c68239636f02336 100644 (file)
@@ -66,7 +66,8 @@ fake_lun_addr = {"shelf": fake_shelf, "lun": fake_lun}
 
 fake_volume_type = {'id': 1}
 
-fake_volume = {"name": fake_volume_name,
+fake_volume = {"id": fake_volume_name,
+               "name": fake_volume_name,
                "size": fake_volume_size,
                "volume_type": fake_volume_type}
 
@@ -771,8 +772,8 @@ class CoraidDriverImageTestCases(CoraidDriverTestCase):
             .AndReturn(self.fake_connection)
 
         self.mox.StubOutWithMock(self.driver, 'terminate_connection')
-        self.driver.terminate_connection(fake_volume, mox.IgnoreArg())\
-            .AndReturn(None)
+        self.driver.terminate_connection(fake_volume, mox.IgnoreArg(),
+                                         force=False).AndReturn(None)
 
         root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf'
 
index 631e8e966b8c098918b4cf5124d0fd40ea60195f..99752f6596daa3bfd4cfeb48c42a977d1f2ccea7 100644 (file)
@@ -2702,8 +2702,8 @@ class GenericVolumeDriverTestCase(DriverTestCase):
         f = fileutils.file_open('/dev/null').AndReturn(file('/dev/null'))
         backup_service.backup(backup, f)
         utils.execute('chown', 0, '/dev/null', run_as_root=True)
-        self.volume.driver._detach_volume(attach_info)
-        self.volume.driver.terminate_connection(vol, properties)
+        self.volume.driver._detach_volume(self.context, attach_info, vol,
+                                          properties)
         self.mox.ReplayAll()
         self.volume.driver.backup_volume(self.context, backup, backup_service)
         self.mox.UnsetStubs()
@@ -2735,8 +2735,8 @@ class GenericVolumeDriverTestCase(DriverTestCase):
         f = fileutils.file_open('/dev/null', 'wb').AndReturn(file('/dev/null'))
         backup_service.restore(backup, vol['id'], f)
         utils.execute('chown', 0, '/dev/null', run_as_root=True)
-        self.volume.driver._detach_volume(attach_info)
-        self.volume.driver.terminate_connection(vol, properties)
+        self.volume.driver._detach_volume(self.context, attach_info, vol,
+                                          properties)
         self.mox.ReplayAll()
         self.volume.driver.restore_backup(self.context, backup, vol,
                                           backup_service)
index b3ac35af5141e48cf9c1a4786fc27db7f6ae8fdd..58d4195b161ba1227481cb27aa8bf19716234dba 100644 (file)
@@ -273,16 +273,6 @@ class VolumeDriver(object):
         """Fail if connector doesn't contain all the data needed by driver."""
         pass
 
-    def _copy_volume_data_cleanup(self, context, volume, properties,
-                                  attach_info, remote, force=False):
-        self._detach_volume(attach_info)
-        if remote:
-            rpcapi = volume_rpcapi.VolumeAPI()
-            rpcapi.terminate_connection(context, volume, properties,
-                                        force=force)
-        else:
-            self.terminate_connection(volume, properties, force=False)
-
     def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
         """Copy data from src_vol to dest_vol."""
         LOG.debug(_('copy_data_between_volumes %(src)s -> %(dest)s.')
@@ -316,9 +306,8 @@ class VolumeDriver(object):
                 LOG.error(msg % {'vol': src_vol['id']})
                 self.db.volume_update(context, src_vol['id'],
                                       {'status': src_orig_status})
-                self._copy_volume_data_cleanup(context, dest_vol, properties,
-                                               dest_attach_info, dest_remote,
-                                               force=True)
+                self._detach_volume(context, dest_attach_info, dest_vol,
+                                    properties, force=True, remote=dest_remote)
 
         try:
             size_in_mb = int(src_vol['size']) * 1024    # vol size is in GB
@@ -334,12 +323,12 @@ class VolumeDriver(object):
                 LOG.error(msg % {'src': src_vol['id'], 'dest': dest_vol['id']})
                 copy_error = True
         finally:
-            self._copy_volume_data_cleanup(context, dest_vol, properties,
-                                           dest_attach_info, dest_remote,
-                                           force=copy_error)
-            self._copy_volume_data_cleanup(context, src_vol, properties,
-                                           src_attach_info, src_remote,
-                                           force=copy_error)
+            self._detach_volume(context, dest_attach_info, dest_vol,
+                                properties, force=copy_error,
+                                remote=dest_remote)
+            self._detach_volume(context, src_attach_info, src_vol,
+                                properties, force=copy_error,
+                                remote=src_remote)
 
     def copy_image_to_volume(self, context, volume, image_service, image_id):
         """Fetch the image from image_service and write it to the volume."""
@@ -356,8 +345,7 @@ class VolumeDriver(object):
                                      self.configuration.volume_dd_blocksize,
                                      size=volume['size'])
         finally:
-            self._detach_volume(attach_info)
-            self.terminate_connection(volume, properties)
+            self._detach_volume(context, attach_info, volume, properties)
 
     def copy_volume_to_image(self, context, volume, image_service, image_meta):
         """Copy the volume to the specified image."""
@@ -372,18 +360,50 @@ class VolumeDriver(object):
                                       image_meta,
                                       attach_info['device']['path'])
         finally:
-            self._detach_volume(attach_info)
-            self.terminate_connection(volume, properties)
+            self._detach_volume(context, attach_info, volume, properties)
 
     def _attach_volume(self, context, volume, properties, remote=False):
         """Attach the volume."""
         if remote:
+            # Call remote manager's initialize_connection which includes
+            # driver's create_export and initialize_connection.
             rpcapi = volume_rpcapi.VolumeAPI()
-            rpcapi.create_export(context, volume)
             conn = rpcapi.initialize_connection(context, volume, properties)
         else:
-            self.create_export(context, volume)
-            conn = self.initialize_connection(volume, properties)
+            # Call local driver's create_export and initialize_connection.
+            # NOTE(avishay) This is copied from the manager's code - need to
+            # clean this up in the future.
+            model_update = None
+            try:
+                LOG.debug(_("Volume %s: creating export"), volume['id'])
+                model_update = self.create_export(context, volume)
+                if model_update:
+                    volume = self.db.volume_update(context, volume['id'],
+                                                   model_update)
+            except exception.CinderException as ex:
+                if model_update:
+                    LOG.exception(_("Failed updating model of volume "
+                                    "%(volume_id)s with driver provided model "
+                                    "%(model)s") %
+                                  {'volume_id': volume['id'],
+                                   'model': model_update})
+                raise exception.ExportFailure(reason=ex)
+
+            try:
+                conn = self.initialize_connection(volume, properties)
+            except Exception as err:
+                try:
+                    err_msg = (_('Unable to fetch connection information from '
+                                 'backend: %(err)s') % {'err': err})
+                    LOG.error(err_msg)
+                    LOG.debug("Cleaning up failed connect initialization.")
+                    self.remove_export(context, volume)
+                except Exception as ex:
+                    ex_msg = (_('Error encountered during cleanup '
+                                'of a failed attach: %(ex)s') % {'ex': ex})
+                    LOG.error(ex_msg)
+                    raise exception.VolumeBackendAPIException(data=ex_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
 
         # Use Brick's code to do attach/detach
         use_multipath = self.configuration.use_multipath_for_image_xfer
@@ -406,13 +426,42 @@ class VolumeDriver(object):
                                                       {'path': host_device}))
         return {'conn': conn, 'device': device, 'connector': connector}
 
-    def _detach_volume(self, attach_info):
+    def _detach_volume(self, context, attach_info, volume, properties,
+                       force=False, remote=False):
         """Disconnect the volume from the host."""
         # Use Brick's code to do attach/detach
         connector = attach_info['connector']
         connector.disconnect_volume(attach_info['conn']['data'],
                                     attach_info['device'])
 
+        if remote:
+            # Call remote manager's terminate_connection which includes
+            # driver's terminate_connection and remove_export.
+            rpcapi = volume_rpcapi.VolumeAPI()
+            rpcapi.terminate_connection(context, volume, properties,
+                                        force=force)
+        else:
+            # Call local driver's terminate_connection and remove_export.
+            # NOTE(avishay) This is copied from the manager's code - need to
+            # clean this up in the future.
+            try:
+                self.terminate_connection(volume, properties, force=force)
+            except Exception as err:
+                err_msg = (_('Unable to terminate volume connection: %(err)s')
+                           % {'err': err})
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+
+            try:
+                LOG.debug(_("volume %s: removing export"), volume['id'])
+                self.remove_export(context, volume)
+            except Exception as ex:
+                LOG.exception(_("Error detaching volume %(volume)s, "
+                                "due to remove export failure."),
+                              {"volume": volume['id']})
+                raise exception.RemoveExportException(volume=volume['id'],
+                                                      reason=ex)
+
     def clone_image(self, volume, image_location, image_id, image_meta):
         """Create a volume efficiently from an existing image.
 
@@ -451,8 +500,7 @@ class VolumeDriver(object):
                     backup_service.backup(backup, volume_file)
 
         finally:
-            self._detach_volume(attach_info)
-            self.terminate_connection(volume, properties)
+            self._detach_volume(context, attach_info, volume, properties)
 
     def restore_backup(self, context, backup, volume, backup_service):
         """Restore an existing backup to a new or existing volume."""
@@ -471,8 +519,7 @@ class VolumeDriver(object):
                     backup_service.restore(backup, volume['id'], volume_file)
 
         finally:
-            self._detach_volume(attach_info)
-            self.terminate_connection(volume, properties)
+            self._detach_volume(context, attach_info, volume, properties)
 
     def clear_download(self, context, volume):
         """Clean up after an interrupted image copy."""
index 4735ebd151b31bc07404dbfb90f77549ac7790fe..f573ccbab6d2a9ecc9fcfa8cfe7c99fc9e0f6908 100644 (file)
@@ -63,10 +63,6 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
         # These exception types will trigger the volume to be set into error
         # status rather than being rescheduled.
         self.no_reschedule_types = [
-            # The volume has already finished being created when the exports
-            # occur, rescheduling would be bad if it happened due to exports
-            # not succeeding.
-            exception.ExportFailure,
             # Image copying happens after volume creation so rescheduling due
             # to copy failure will mean the same volume will be created at
             # another place when it still exists locally.
@@ -602,15 +598,14 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
             if model_update:
                 volume_ref = self.db.volume_update(context, volume_ref['id'],
                                                    model_update)
-        except exception.CinderException as ex:
+        except exception.CinderException:
             # If somehow the update failed we want to ensure that the
             # failure is logged (but not try rescheduling since the volume at
             # this point has been created).
-            if model_update:
-                LOG.exception(_("Failed updating model of volume %(volume_id)s"
-                                " with creation provided model %(model)s") %
-                              {'volume_id': volume_id, 'model': model_update})
-                raise exception.ExportFailure(reason=ex)
+            LOG.exception(_("Failed updating model of volume %(volume_id)s"
+                            " with creation provided model %(model)s") %
+                          {'volume_id': volume_id, 'model': model_update})
+            raise
 
         return volume_ref
 
index 2b806d9882685991f0d0ebcf29a2640dd4dcfbc4..5f29163ea70d45d059be25474310d23e19bf72ef 100644 (file)
@@ -170,7 +170,7 @@ def locked_snapshot_operation(f):
 class VolumeManager(manager.SchedulerDependentManager):
     """Manages attachable block storage devices."""
 
-    RPC_API_VERSION = '1.15'
+    RPC_API_VERSION = '1.16'
 
     target = messaging.Target(version=RPC_API_VERSION)
 
@@ -311,7 +311,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                       filter_properties=None, allow_reschedule=True,
                       snapshot_id=None, image_id=None, source_volid=None):
 
-        """Creates and exports the volume."""
+        """Creates the volume."""
         context_saved = context.deepcopy()
         context = context.elevated()
         if filter_properties is None:
@@ -684,18 +684,11 @@ class VolumeManager(manager.SchedulerDependentManager):
         volume = self.db.volume_get(context, volume_id)
         try:
             utils.require_driver_initialized(self.driver)
-            LOG.debug(_("volume %s: removing export"), volume_id)
-            self.driver.remove_export(context, volume)
         except exception.DriverNotInitialized as ex:
             with excutils.save_and_reraise_exception():
                 LOG.exception(_("Error detaching volume %(volume)s, "
                                 "due to uninitialized driver."),
                               {"volume": volume_id})
-        except Exception as ex:
-            LOG.exception(_("Error detaching volume %(volume)s, "
-                            "due to remove export failure."),
-                          {"volume": volume_id})
-            raise exception.RemoveExportException(volume=volume_id, reason=ex)
 
         self._notify_about_volume_usage(context, volume, "detach.end")
 
@@ -870,6 +863,15 @@ class VolumeManager(manager.SchedulerDependentManager):
             LOG.error(err_msg)
             raise exception.VolumeBackendAPIException(data=err_msg)
 
+        try:
+            LOG.debug(_("volume %s: removing export"), volume_id)
+            self.driver.remove_export(context, volume_ref)
+        except Exception as ex:
+            LOG.exception(_("Error detaching volume %(volume)s, "
+                            "due to remove export failure."),
+                          {"volume": volume_id})
+            raise exception.RemoveExportException(volume=volume_id, reason=ex)
+
     def accept_transfer(self, context, volume_id, new_user, new_project):
         # NOTE(flaper87): Verify the driver is enabled
         # before going forward. The exception will be caught
index ee3a710aa049bb6d2d5840b26f91344f83d3db52..5d251c9fd03d3cbd7c6384d088982cfa93c90e4b 100644 (file)
@@ -50,6 +50,7 @@ class VolumeAPI(object):
         1.13 - Adds create_export.
         1.14 - Adds reservation parameter to extend_volume().
         1.15 - Adds manage_existing and unmanage_only flag to delete_volume.
+        1.16 - Removes create_export.
     '''
 
     BASE_RPC_API_VERSION = '1.0'
@@ -161,10 +162,6 @@ class VolumeAPI(object):
                    migration_policy=migration_policy,
                    reservations=reservations)
 
-    def create_export(self, ctxt, volume):
-        cctxt = self.client.prepare(server=volume['host'], version='1.13')
-        return cctxt.call(ctxt, 'create_export', volume_id=volume['id'])
-
     def manage_existing(self, ctxt, volume, ref):
         cctxt = self.client.prepare(server=volume['host'], version='1.15')
         cctxt.cast(ctxt, 'manage_existing', volume_id=volume['id'], ref=ref)