review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
XtremIO: fix generic glance cache with XtremIO
author    Shay Halsband <shay.halsband@emc.com>
Thu, 19 Nov 2015 13:51:31 +0000 (15:51 +0200)
committer Shay Halsband <shay.halsband@emc.com>
Thu, 17 Dec 2015 14:35:50 +0000 (16:35 +0200)
* Identify the appropriate error for the snapshot limit from the array
  and raise the right error to invalidate the cache.
* Add an optional limit on the number of snapshots taken from the
  volume representing a cached image (see the example configuration
  below).
* Add a unit test for the snapshot limit.
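
For reference, a minimal illustrative cinder.conf backend section using the
new option might look like the following (the backend section name is an
assumption; the other options are existing Cinder/XtremIO settings):

    [xtremio-iscsi]
    volume_driver = cinder.volume.drivers.emc.xtremio.XtremIOISCSIDriver
    # enable the generic glance image cache for this backend
    image_volume_cache_enabled = True
    # cap on clones served from a single cached image volume (default 100)
    xtremio_volumes_per_glance_cache = 100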

DocImpact

Change-Id: Ic15257c9409373a4883870681d1412161cd3437c
Closes-Bug: #1519843

cinder/exception.py
cinder/tests/unit/test_emc_xtremio.py
cinder/volume/drivers/emc/xtremio.py

index b534453759102b64feaf1fa5e43f9cca8e1e2c98..ba7bd9265a3314ad3221f5342ac40447be9a8c67 100644 (file)
@@ -963,6 +963,10 @@ class XtremIOArrayBusy(CinderException):
     message = _("System is busy, retry operation.")
 
 
+class XtremIOSnapshotsLimitExceeded(CinderException):
+    message = _("Exceeded the limit of snapshots per volume")
+
+
 # Infortrend EonStor DS Driver
 class InfortrendCliException(CinderException):
     message = _("Infortrend CLI exception: %(err)s Param: %(param)s "
index 1dac8aeb3a90276e42c69b4746b7ce3919e07f4f..fb2a032c4bc4ed144e76d24723b7d79f0f105d9f 100644 (file)
@@ -296,9 +296,11 @@ class EMCXIODriverISCSITestCase(test.TestCase):
         config.san_ip = ''
         config.xtremio_cluster_name = 'brick1'
         config.xtremio_provisioning_factor = 20.0
+        config.max_over_subscription_ratio = 20.0
+        config.xtremio_volumes_per_glance_cache = 100
 
         def safe_get(key):
-            getattr(config, key)
+            return getattr(config, key)
 
         config.safe_get = safe_get
         self.driver = xtremio.XtremIOISCSIDriver(configuration=config)
@@ -351,12 +353,49 @@ class EMCXIODriverISCSITestCase(test.TestCase):
 
     def test_clone_volume(self, req):
         req.side_effect = xms_request
+        self.driver.db = mock.Mock()
+        (self.driver.db.
+         image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock()
         self.driver.create_volume(self.data.test_volume)
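+        # pretend the source volume backs a cached glance image and already
+        # has more dest snapshots than the configured limit of 100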
+        vol = xms_data['volumes'][1]
+        vol['num-of-dest-snaps'] = 200
+        self.assertRaises(exception.CinderException,
+                          self.driver.create_cloned_volume,
+                          self.data.test_clone,
+                          self.data.test_volume)
+
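+        # below the limit the clone succeeds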
+        vol['num-of-dest-snaps'] = 50
         self.driver.create_cloned_volume(self.data.test_clone,
                                          self.data.test_volume)
         self.driver.delete_volume(self.data.test_clone)
         self.driver.delete_volume(self.data.test_volume)
 
+        with mock.patch.object(
+                self.driver.client, 'create_snapshot',
+                side_effect=exception.XtremIOSnapshotsLimitExceeded()):
+            self.assertRaises(exception.CinderException,
+                              self.driver.create_cloned_volume,
+                              self.data.test_clone,
+                              self.data.test_volume)
+
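+        # handle_errors should map both XMS error messages to
+        # XtremIOSnapshotsLimitExceeded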
+        response = mock.MagicMock()
+        response.status_code = 400
+        response.json.return_value = {
+            "message": "too_many_snapshots_per_vol",
+            "error_code": 400
+        }
+        self.assertRaises(exception.XtremIOSnapshotsLimitExceeded,
+                          self.driver.client.handle_errors,
+                          response, '', '')
+        response.json.return_value = {
+            "message": "too_many_objs",
+            "error_code": 400
+        }
+        self.assertRaises(exception.XtremIOSnapshotsLimitExceeded,
+                          self.driver.client.handle_errors,
+                          response, '', '')
+
     def test_duplicate_volume(self, req):
         req.side_effect = xms_request
         self.driver.create_volume(self.data.test_volume)
index 80cc5fdbf1bcc878184838cd2101d8533306dc08..8c1d2df25916c4b43c9bc2f24c33cadbf060bdc1 100644 (file)
@@ -24,6 +24,7 @@ supported XtremIO version 2.4 and up
 1.0.5 - add support for XtremIO 4.0
 1.0.6 - add support for iSCSI multipath, CA validation, consistency groups,
         R/O snapshots, CHAP discovery authentication
+1.0.7 - cache glance images on the array
 """
 
 import json
@@ -37,6 +38,7 @@ from oslo_log import log as logging
 from oslo_utils import units
 import six
 
+from cinder import context
 from cinder import exception
 from cinder.i18n import _, _LE, _LI, _LW
 from cinder import objects
@@ -59,7 +61,10 @@ XTREMIO_OPTS = [
                help='Number of retries in case array is busy'),
     cfg.IntOpt('xtremio_array_busy_retry_interval',
                default=5,
-               help='Interval between retries in case array is busy')]
+               help='Interval between retries in case array is busy'),
+    cfg.IntOpt('xtremio_volumes_per_glance_cache',
+               default=100,
+               help='Number of volumes created from each cached glance image')]
 
 CONF.register_opts(XTREMIO_OPTS)
 
@@ -69,6 +74,9 @@ VOL_NOT_UNIQUE_ERR = 'vol_obj_name_not_unique'
 VOL_OBJ_NOT_FOUND_ERR = 'vol_obj_not_found'
 ALREADY_MAPPED_ERR = 'already_mapped'
 SYSTEM_BUSY = 'system_is_busy'
+TOO_MANY_OBJECTS = 'too_many_objs'
+TOO_MANY_SNAPSHOTS_PER_VOL = 'too_many_snapshots_per_vol'
+
 
 XTREMIO_OID_NAME = 1
 XTREMIO_OID_INDEX = 2
@@ -157,6 +165,8 @@ class XtremIOClient(object):
                 raise exception.XtremIOAlreadyMappedError()
             elif err_msg == SYSTEM_BUSY:
                 raise exception.XtremIOArrayBusy()
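+            # both messages mean the per-volume snapshot limit was hit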
+            elif err_msg in (TOO_MANY_OBJECTS, TOO_MANY_SNAPSHOTS_PER_VOL):
+                raise exception.XtremIOSnapshotsLimitExceeded()
         msg = _('Bad response from XMS, %s') % response.text
         LOG.error(msg)
         raise exception.VolumeBackendAPIException(message=msg)
@@ -336,7 +346,7 @@ class XtremIOClient4(XtremIOClient):
 class XtremIOVolumeDriver(san.SanDriver):
     """Executes commands relating to Volumes."""
 
-    VERSION = '1.0.6'
+    VERSION = '1.0.7'
     driver_name = 'XtremIO'
     MIN_XMS_VERSION = [3, 0, 0]
 
@@ -410,7 +420,18 @@ class XtremIOVolumeDriver(san.SanDriver):
 
     def create_cloned_volume(self, volume, src_vref):
         """Creates a clone of the specified volume."""
-        self.client.create_snapshot(src_vref['id'], volume['id'])
+        vol = self.client.req('volumes', name=src_vref['id'])['content']
+        ctxt = context.get_admin_context()
+        cache = self.db.image_volume_cache_get_by_volume_id(ctxt,
+                                                            src_vref['id'])
+        limit = self.configuration.safe_get('xtremio_volumes_per_glance_cache')
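+        # do not serve more clones from a cached image volume than the
+        # configured limit; raising here invalidates the image cache entry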
+        if cache and limit and limit > 0 and limit <= vol['num-of-dest-snaps']:
+            raise exception.CinderException('Exceeded the configured limit of '
+                                            '%d snapshots per volume' % limit)
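+        # the array also enforces a hard snapshot-per-volume cap; translate
+        # it so the generic image cache invalidates the entry as well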
+        try:
+            self.client.create_snapshot(src_vref['id'], volume['id'])
+        except exception.XtremIOSnapshotsLimitExceeded as e:
+            raise exception.CinderException(e.message)
 
         if volume.get('consistencygroup_id') and self.client is XtremIOClient4:
             self.client.add_vol_to_cg(volume['id'],