]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Remove create_export from volume create
authorJohn Griffith <john.griffith@solidfire.com>
Fri, 31 Jan 2014 03:53:37 +0000 (03:53 +0000)
committerjohn-griffith <john.griffith@solidfire.com>
Tue, 4 Feb 2014 13:39:44 +0000 (06:39 -0700)
Currently the LVM driver creates iscsi targets on
volume creation, even though it's only used at
attach time.  This isn't necessary and in fact
causes issues if a volume is extended because the
target information isn't currently updated after the
extend.  In addition a number of other drivers including
the LIO iscsi driver require the target be created with
initiator info, so the tgt that's created initially
again isn't valid.

This change removes the create_export call from the
volume create process and makes it part of the
manager's initialize_connection routine which is
more appropriate.

This change also removes the target on detach since
it's not used any longer and again as the volume may
be modified or extended.

Change-Id: I0b7dfdaf7e49a069da22f22f459861b0b49430a4
Closes-Bug: 1248947

cinder/brick/iscsi/iscsi.py
cinder/exception.py
cinder/tests/test_volume.py
cinder/volume/driver.py
cinder/volume/drivers/lvm.py
cinder/volume/flows/manager/create_volume.py
cinder/volume/manager.py
cinder/volume/rpcapi.py

index cb042ba832ae90e0069a9c9da6553ba60ed737c6..236e4d0a8fd4450fc8bb95efe5237cf0141808b0 100644 (file)
@@ -53,10 +53,6 @@ class TargetAdmin(executor.Executor):
         """Create a iSCSI target and logical unit."""
         raise NotImplementedError()
 
-    def update_iscsi_target(self, name):
-        """Update an iSCSI target."""
-        raise NotImplementedError()
-
     def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
         """Remove a iSCSI target and logical unit."""
         raise NotImplementedError()
@@ -368,9 +364,6 @@ class IetAdm(TargetAdmin):
                 raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
         return tid
 
-    def update_iscsi_target(self, name):
-        pass
-
     def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
         LOG.info(_('Removing iscsi_target for volume: %s') % vol_id)
         self._delete_logicalunit(tid, lun, **kwargs)
@@ -525,9 +518,6 @@ class LioAdm(TargetAdmin):
 
         return tid
 
-    def update_iscsi_target(self, name):
-        pass
-
     def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
         LOG.info(_('Removing iscsi_target: %s') % vol_id)
         vol_uuid_name = vol_name
index dfbb05246981b9dfe87cdf6e16b1ce1335480d9b..6e7c4f10afd4db6cccb4c91bf3f0fad0a14bcc13 100644 (file)
@@ -660,3 +660,7 @@ class GlusterfsNoSharesMounted(VolumeDriverException):
 
 class GlusterfsNoSuitableShareFound(VolumeDriverException):
     message = _("There is no share which can host %(volume_size)sG")
+
+
+class RemoveExportException(VolumeDriverException):
+    message = _("Failed to remove export for volume %(volume)s: %(reason)s")
index 476a9f3ff629ffa6e321e8540f45bb2b1395acfd..9f7aa760ac38a5ae3ba0bf9cd30312bb62f8ed38 100644 (file)
@@ -931,15 +931,25 @@ class VolumeTestCase(BaseVolumeTestCase):
                           image_id='fake_id',
                           source_volume='fake_id')
 
-    @mock.patch.object(db, 'volume_get')
     @mock.patch.object(db, 'volume_admin_metadata_get')
+    @mock.patch.object(db, 'volume_get')
+    @mock.patch.object(db, 'volume_update')
     def test_initialize_connection_fetchqos(self,
-                                            _mock_volume_admin_metadata_get,
-                                            _mock_volume_get):
+                                            _mock_volume_update,
+                                            _mock_volume_get,
+                                            _mock_volume_admin_metadata_get):
         """Make sure initialize_connection returns correct information."""
-        _mock_volume_get.return_value = {'volume_type_id': 'fake_type_id',
-                                         'volume_admin_metadata': {}}
-        _mock_volume_admin_metadata_get.return_value = {}
+        _fake_admin_meta = {'fake-key': 'fake-value'}
+        _fake_volume = {'volume_type_id': 'fake_type_id',
+                        'name': 'fake_name',
+                        'host': 'fake_host',
+                        'id': 'fake_volume_id',
+                        'volume_admin_metadata': _fake_admin_meta}
+
+        _mock_volume_get.return_value = _fake_volume
+        _mock_volume_update.return_value = _fake_volume
+        _mock_volume_admin_metadata_get.return_value = _fake_admin_meta
+
         connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
         qos_values = {'consumer': 'front-end',
                       'specs': {
index 50987551228f73e129afb13e173c19d9f29c22c8..3bcc59b5829e258297af98ac1aac7a6c09621ba3 100644 (file)
@@ -373,8 +373,10 @@ class VolumeDriver(object):
         """Attach the volume."""
         if remote:
             rpcapi = volume_rpcapi.VolumeAPI()
+            rpcapi.create_export(context, volume)
             conn = rpcapi.initialize_connection(context, volume, properties)
         else:
+            self.create_export(context, volume)
             conn = self.initialize_connection(volume, properties)
 
         # Use Brick's code to do attach/detach
index c1c2b4edeced8a24e31d4a4b2e1d923cd85148ad..fb13a588db50580f39f956bbee3a7f9ec7443d14 100644 (file)
@@ -669,6 +669,7 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
                 LOG.info(_("Skipping remove_export. No iscsi_target "
                            "provisioned for volume: %s"), volume['id'])
                 return
+
         else:
             iscsi_target = 0
 
index 080c16fbb374822df4e252c39e757cd3b749583b..30b7f2e1029bc42b4b3899bf98396880e31675ff 100644 (file)
@@ -613,28 +613,6 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                               {'volume_id': volume_id, 'model': model_update})
                 raise exception.ExportFailure(reason=ex)
 
-        # Persist any driver exported model information.
-        model_update = None
-        try:
-            LOG.debug(_("Volume %s: creating export"), volume_ref['id'])
-            model_update = self.driver.create_export(context, volume_ref)
-            if model_update:
-                self.db.volume_update(context, volume_ref['id'], model_update)
-        except exception.CinderException as ex:
-            # If somehow the read *or* create export failed we want to ensure
-            # that the failure is logged (but not try rescheduling since
-            # the volume at this point has been created).
-            #
-            # NOTE(harlowja): Notice that since the model_update is initially
-            # empty, the only way it will still be empty is if there is no
-            # model_update (which we don't care about) or there was an
-            # model_update and updating failed.
-            if model_update:
-                LOG.exception(_("Failed updating model of volume %(volume_id)s"
-                              " with driver provided model %(model)s") %
-                              {'volume_id': volume_id, 'model': model_update})
-                raise exception.ExportFailure(reason=ex)
-
         return volume_ref
 
 
index 100cc422c920a3c1fabf312a608c19a02c827984..0b4e7a9d37bb7eca34561c8068e5d513aa34eb22 100644 (file)
@@ -182,7 +182,7 @@ def locked_snapshot_operation(f):
 class VolumeManager(manager.SchedulerDependentManager):
     """Manages attachable block storage devices."""
 
-    RPC_API_VERSION = '1.12'
+    RPC_API_VERSION = '1.13'
 
     def __init__(self, volume_driver=None, service_name=None,
                  *args, **kwargs):
@@ -247,7 +247,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             sum = 0
             self.stats.update({'allocated_capacity_gb': sum})
             for volume in volumes:
-                if volume['status'] in ['available', 'in-use']:
+                if volume['status'] in ['in-use']:
                     # calculate allocated capacity for driver
                     sum += volume['size']
                     self.stats['allocated_capacity_gb'] = sum
@@ -393,7 +393,6 @@ class VolumeManager(manager.SchedulerDependentManager):
         except exception.VolumeIsBusy:
             LOG.error(_("Cannot delete volume %s: volume is busy"),
                       volume_ref['id'])
-            self.driver.ensure_export(context, volume_ref)
             self.db.volume_update(context, volume_ref['id'],
                                   {'status': 'available'})
             return True
@@ -641,7 +640,6 @@ class VolumeManager(manager.SchedulerDependentManager):
     def detach_volume(self, context, volume_id):
         """Updates db to show volume is detached."""
         # TODO(vish): refactor this into a more general "unreserve"
-        # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
 
         volume = self.db.volume_get(context, volume_id)
         self._notify_about_volume_usage(context, volume, "detach.start")
@@ -662,11 +660,29 @@ class VolumeManager(manager.SchedulerDependentManager):
         self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
                                              'attached_mode')
 
-        # Check for https://bugs.launchpad.net/cinder/+bug/1065702
+        # NOTE(jdg): We used to do an ensure export here to
+        # catch upgrades while volumes were attached (E->F)
+        # this was necessary to convert in-use volumes from
+        # int ID's to UUID's.  Don't need this any longer
+
+        # We're going to remove the export here
+        # (delete the iscsi target)
         volume = self.db.volume_get(context, volume_id)
-        if (volume['provider_location'] and
-                volume['name'] not in volume['provider_location']):
-            self.driver.ensure_export(context, volume)
+        try:
+            utils.require_driver_initialized(self.driver)
+            LOG.debug(_("volume %s: removing export"), volume_id)
+            self.driver.remove_export(context, volume)
+        except exception.DriverNotInitialized as ex:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_("Error detaching volume %(volume)s, "
+                                "due to uninitialized driver."),
+                              {"volume": volume_id})
+        except Exception as ex:
+            LOG.exception(_("Error detaching volume %(volume)s, "
+                            "due to remove export failure."),
+                          {"volume": volume_id})
+            raise exception.RemoveExportException(volume=volume_id, reason=ex)
+
         self._notify_about_volume_usage(context, volume, "detach.end")
 
     def copy_volume_to_image(self, context, volume_id, image_meta):
@@ -684,7 +700,6 @@ class VolumeManager(manager.SchedulerDependentManager):
             utils.require_driver_initialized(self.driver)
 
             volume = self.db.volume_get(context, volume_id)
-            self.driver.ensure_export(context.elevated(), volume)
             image_service, image_id = \
                 glance.get_remote_image_service(context, image_meta['id'])
             self.driver.copy_volume_to_image(context, volume, image_service,
@@ -745,12 +760,34 @@ class VolumeManager(manager.SchedulerDependentManager):
         # before going forward. The exception will be caught
         # and the volume status updated.
         utils.require_driver_initialized(self.driver)
+        try:
+            self.driver.validate_connector(connector)
+        except Exception as err:
+            err_msg = (_('Unable to fetch connection information from '
+                         'backend: %(err)s') % {'err': str(err)})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
 
         volume = self.db.volume_get(context, volume_id)
-        self.driver.validate_connector(connector)
+        model_update = None
+        try:
+            LOG.debug(_("Volume %s: creating export"), volume_id)
+            model_update = self.driver.create_export(context, volume)
+            if model_update:
+                volume = self.db.volume_update(context,
+                                               volume_id,
+                                               model_update)
+        except exception.CinderException as ex:
+            if model_update:
+                LOG.exception(_("Failed updating model of volume %(volume_id)s"
+                              " with driver provided model %(model)s") %
+                              {'volume_id': volume_id, 'model': model_update})
+                raise exception.ExportFailure(reason=ex)
+
         try:
             conn_info = self.driver.initialize_connection(volume, connector)
         except Exception as err:
+            self.driver.remove_export(context, volume)
             err_msg = (_('Unable to fetch connection information from '
                          'backend: %(err)s') % {'err': str(err)})
             LOG.error(err_msg)
index aaca9a1fbf00affcb01be661e8b6295f5e958325..01011527d69a0f2aae47ff45e48c840a070d9292 100644 (file)
@@ -47,6 +47,7 @@ class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
         1.11 - Adds mode parameter to attach_volume()
                to support volume read-only attaching.
         1.12 - Adds retype.
+        1.13 - Adds create_export.
     '''
 
     BASE_RPC_API_VERSION = '1.0'
@@ -195,3 +196,11 @@ class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
                                 reservations=reservations),
                   topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
                   version='1.12')
+
+    def create_export(self, ctxt, volume):
+        return self.call(ctxt, self.make_msg('create_export',
+                                             volume_id=volume['id']),
+                         topic=rpc.queue_get_for(ctxt,
+                                                 self.topic,
+                                                 volume['host']),
+                         version='1.13')