]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Support for consistency groups in ScaleIO driver
authorMatan Sabag <matan.sabag@emc.com>
Mon, 18 Jan 2016 14:35:25 +0000 (06:35 -0800)
committerMatan Sabag <matan.sabag@emc.com>
Sun, 14 Feb 2016 09:36:57 +0000 (01:36 -0800)
Add support for all consistency group functionality
in the ScaleIO driver.
Also fix a small mistake in a test regarding deleting a snapshot.

DocImpact
Implements: blueprint scaleio-consistency-groups
Change-Id: Id8b52aeb546f9f5fa68b98a4e59bd3f12e78bbef

cinder/tests/unit/volume/drivers/emc/scaleio/mocks.py
cinder/tests/unit/volume/drivers/emc/scaleio/test_consistencygroups.py [new file with mode: 0644]
cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_snapshot.py
cinder/volume/drivers/emc/scaleio.py
releasenotes/notes/scaleio-consistency-groups-707f9b4ffcb3c14c.yaml [new file with mode: 0644]

index 1e2c9e9b5dae9f65f6befffb58297ff922394f16..95fee182a1b254a9b90d64844d9d4cb7531be6a3 100644 (file)
@@ -49,10 +49,6 @@ class ScaleIODriver(scaleio.ScaleIODriver):
                                             *args,
                                             **kwargs)
 
-    def update_consistencygroup(self, context, group, add_volumes=None,
-                                remove_volumes=None):
-        pass
-
     def local_path(self, volume):
         pass
 
@@ -62,28 +58,12 @@ class ScaleIODriver(scaleio.ScaleIODriver):
     def promote_replica(self, context, volume):
         pass
 
-    def delete_consistencygroup(self, context, group, volumes):
-        pass
-
-    def create_consistencygroup_from_src(self, context, group, volumes,
-                                         cgsnapshot=None, snapshots=None):
-        pass
-
     def create_replica_test_volume(self, volume, src_vref):
         pass
 
-    def create_consistencygroup(self, context, group):
-        pass
-
     def unmanage(self, volume):
         pass
 
-    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
-        pass
-
-    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
-        pass
-
 
 class MockHTTPSResponse(requests.Response):
     """Mock HTTP Response
diff --git a/cinder/tests/unit/volume/drivers/emc/scaleio/test_consistencygroups.py b/cinder/tests/unit/volume/drivers/emc/scaleio/test_consistencygroups.py
new file mode 100644 (file)
index 0000000..45678b5
--- /dev/null
@@ -0,0 +1,209 @@
+# Copyright (c) 2013 - 2016 EMC Corporation.\r
+# All Rights Reserved.\r
+#\r
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may\r
+#    not use this file except in compliance with the License. You may obtain\r
+#    a copy of the License at\r
+#\r
+#         http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+#    Unless required by applicable law or agreed to in writing, software\r
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT\r
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\r
+#    License for the specific language governing permissions and limitations\r
+#    under the License.\r
+\r
+import json\r
+\r
+import mock\r
+\r
+from cinder import context\r
+from cinder.tests.unit import fake_consistencygroup\r
+from cinder.tests.unit import fake_snapshot\r
+from cinder.tests.unit import fake_volume\r
+from cinder.tests.unit.volume.drivers.emc import scaleio\r
+from cinder.tests.unit.volume.drivers.emc.scaleio import mocks\r
+\r
+\r
+class TestConsistencyGroups(scaleio.TestScaleIODriver):\r
+    """Test cases for ``ScaleIODriver consistency groups support``"""\r
+\r
+    def setUp(self):\r
+        """Setup a test case environment.\r
+\r
+        Creates a fake volume object and sets up the required API responses.\r
+        """\r
+        super(TestConsistencyGroups, self).setUp()\r
+        self.ctx = context.RequestContext('fake', 'fake', auth_token=True)\r
+        self.consistency_group = (\r
+            fake_consistencygroup.fake_consistencyobject_obj(self.ctx,\r
+                                                             **{'id': 'cgid'}))\r
+        fake_volume1 = fake_volume.fake_volume_obj(\r
+            self.ctx,\r
+            **{'id': 'volid1', 'provider_id': 'pid_1'})\r
+        fake_volume2 = fake_volume.fake_volume_obj(\r
+            self.ctx,\r
+            **{'id': 'volid2', 'provider_id': 'pid_2'})\r
+        fake_volume3 = fake_volume.fake_volume_obj(\r
+            self.ctx,\r
+            **{'id': 'volid3', 'provider_id': 'pid_3'})\r
+        fake_volume4 = fake_volume.fake_volume_obj(\r
+            self.ctx,\r
+            **{'id': 'volid4', 'provider_id': 'pid_4'})\r
+        self.volumes = [fake_volume1, fake_volume2]\r
+        self.volumes2 = [fake_volume3, fake_volume4]\r
+        fake_snapshot1 = fake_snapshot.fake_snapshot_obj(\r
+            self.ctx,\r
+            **{'id': 'snapid1', 'volume_id': 'volid1',\r
+               'volume': fake_volume1})\r
+        fake_snapshot2 = fake_snapshot.fake_snapshot_obj(\r
+            self.ctx,\r
+            **{'id': 'snapid2', 'volume_id': 'volid2', 'volume':\r
+                fake_volume2})\r
+        self.snapshots = [fake_snapshot1, fake_snapshot2]\r
+        self.snapshot_reply = json.dumps({\r
+            'volumeIdList': ['sid1', 'sid2'],\r
+            'snapshotGroupId': 'sgid1'})\r
+        self.HTTPS_MOCK_RESPONSES = {\r
+            self.RESPONSE_MODE.Valid: {\r
+                'instances/Volume::{}/action/removeVolume'.format(\r
+                    fake_volume1['provider_id']\r
+                ): fake_volume1['provider_id'],\r
+                'instances/Volume::{}/action/removeVolume'.format(\r
+                    fake_volume2['provider_id']\r
+                ): fake_volume2['provider_id'],\r
+                'instances/Volume::{}/action/removeMappedSdc'.format(\r
+                    fake_volume1['provider_id']\r
+                ): fake_volume1['provider_id'],\r
+                'instances/Volume::{}/action/removeMappedSdc'.format(\r
+                    fake_volume2['provider_id']\r
+                ): fake_volume2['provider_id'],\r
+                'instances/System/action/snapshotVolumes':\r
+                    self.snapshot_reply,\r
+            },\r
+            self.RESPONSE_MODE.BadStatus: {\r
+                'instances/Volume::{}/action/removeVolume'.format(\r
+                    fake_volume1['provider_id']\r
+                ): mocks.MockHTTPSResponse(\r
+                    {\r
+                        'errorCode': 401,\r
+                        'message': 'BadStatus Volume Test',\r
+                    }, 401\r
+                ),\r
+                'instances/Volume::{}/action/removeVolume'.format(\r
+                    fake_volume2['provider_id']\r
+                ): mocks.MockHTTPSResponse(\r
+                    {\r
+                        'errorCode': 401,\r
+                        'message': 'BadStatus Volume Test',\r
+                    }, 401\r
+                ),\r
+                'instances/System/action/snapshotVolumes':\r
+                    self.BAD_STATUS_RESPONSE\r
+            },\r
+        }\r
+\r
+    def _fake_cgsnapshot(self):\r
+        cgsnap = {'id': 'cgsid', 'name': 'testsnap',\r
+                  'consistencygroup_id': 'cgid', 'status': 'available'}\r
+        return cgsnap\r
+\r
+    def test_create_consistencygroup(self):\r
+        result = self.driver.create_consistencygroup(self.ctx,\r
+                                                     self.consistency_group)\r
+        self.assertEqual('available', result['status'])\r
+\r
+    def test_delete_consistencygroup_valid(self):\r
+        self.set_https_response_mode(self.RESPONSE_MODE.Valid)\r
+        self.driver.configuration.set_override(\r
+            'sio_unmap_volume_before_deletion',\r
+            override=True)\r
+        result_model_update, result_volumes_update = (\r
+            self.driver.delete_consistencygroup(self.ctx,\r
+                                                self.consistency_group,\r
+                                                self.volumes))\r
+        self.assertTrue(all(volume['status'] == 'deleted' for volume in\r
+                            result_volumes_update))\r
+        self.assertEqual('deleted', result_model_update['status'])\r
+\r
+    def test_delete_consistency_group_fail(self):\r
+        self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)\r
+        result_model_update, result_volumes_update = (\r
+            self.driver.delete_consistencygroup(self.ctx,\r
+                                                self.consistency_group,\r
+                                                self.volumes))\r
+        self.assertTrue(any(volume['status'] == 'error_deleting' for volume in\r
+                            result_volumes_update))\r
+        self.assertTrue(result_model_update['status'] in ['error_deleting',\r
+                                                          'error'])\r
+\r
+    def test_create_consistencygroup_from_cg(self):\r
+        self.set_https_response_mode(self.RESPONSE_MODE.Valid)\r
+        result_model_update, result_volumes_model_update = (\r
+            self.driver.create_consistencygroup_from_src(\r
+                self.ctx, self.consistency_group, self.volumes2,\r
+                source_cg=self.consistency_group, source_vols=self.volumes))\r
+        self.assertEqual('available', result_model_update['status'])\r
+        get_pid = lambda snapshot: snapshot['provider_id']\r
+        volume_provider_list = list(map(get_pid, result_volumes_model_update))\r
+        self.assertListEqual(volume_provider_list, ['sid1', 'sid2'])\r
+\r
+    def test_create_consistencygroup_from_cgs(self):\r
+        self.snapshots[0]['provider_id'] = 'pid_1'\r
+        self.snapshots[1]['provider_id'] = 'pid_2'\r
+        self.set_https_response_mode(self.RESPONSE_MODE.Valid)\r
+        result_model_update, result_volumes_model_update = (\r
+            self.driver.create_consistencygroup_from_src(\r
+                self.ctx, self.consistency_group, self.volumes2,\r
+                cgsnapshot=self._fake_cgsnapshot(),\r
+                snapshots=self.snapshots))\r
+        self.assertEqual('available', result_model_update['status'])\r
+        get_pid = lambda snapshot: snapshot['provider_id']\r
+        volume_provider_list = list(map(get_pid, result_volumes_model_update))\r
+        self.assertListEqual(['sid1', 'sid2'], volume_provider_list)\r
+\r
+    @mock.patch('cinder.objects.snapshot')\r
+    @mock.patch('cinder.objects.snapshot')\r
+    def test_create_cgsnapshots(self, snapshot1, snapshot2):\r
+        type(snapshot1).volume = mock.PropertyMock(\r
+            return_value=self.volumes[0])\r
+        type(snapshot2).volume = mock.PropertyMock(\r
+            return_value=self.volumes[1])\r
+        snapshots = [snapshot1, snapshot2]\r
+        self.set_https_response_mode(self.RESPONSE_MODE.Valid)\r
+        result_model_update, result_snapshot_model_update = (\r
+            self.driver.create_cgsnapshot(\r
+                self.ctx,\r
+                self._fake_cgsnapshot(),\r
+                snapshots\r
+            ))\r
+        self.assertEqual('available', result_model_update['status'])\r
+        self.assertTrue(all(snapshot['status'] == 'available' for snapshot in\r
+                            result_snapshot_model_update))\r
+        get_pid = lambda snapshot: snapshot['provider_id']\r
+        snapshot_provider_list = list(map(get_pid,\r
+                                          result_snapshot_model_update))\r
+        self.assertListEqual(['sid1', 'sid2'], snapshot_provider_list)\r
+\r
+    @mock.patch('cinder.objects.snapshot')\r
+    @mock.patch('cinder.objects.snapshot')\r
+    def test_delete_cgsnapshots(self, snapshot1, snapshot2):\r
+        type(snapshot1).volume = mock.PropertyMock(\r
+            return_value=self.volumes[0])\r
+        type(snapshot2).volume = mock.PropertyMock(\r
+            return_value=self.volumes[1])\r
+        type(snapshot1).provider_id = mock.PropertyMock(\r
+            return_value='pid_1')\r
+        type(snapshot2).provider_id = mock.PropertyMock(\r
+            return_value='pid_2')\r
+        snapshots = [snapshot1, snapshot2]\r
+        self.set_https_response_mode(self.RESPONSE_MODE.Valid)\r
+        result_model_update, result_snapshot_model_update = (\r
+            self.driver.delete_cgsnapshot(\r
+                self.ctx,\r
+                self._fake_cgsnapshot(),\r
+                snapshots\r
+            ))\r
+        self.assertEqual('deleted', result_model_update['status'])\r
+        self.assertTrue(all(snapshot['status'] == 'deleted' for snapshot in\r
+                            result_snapshot_model_update))\r
index 106bd54a06c1b78758bd7e0272caa2d0c2c28218..5c8c72c5e12b3f77f2a034c49b61291532645bee 100644 (file)
@@ -88,9 +88,8 @@ class TestDeleteSnapShot(scaleio.TestScaleIODriver):
         self.driver.delete_snapshot(self.snapshot)
 
     def test_delete_invalid_snapshot(self):
-        self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.delete_snapshot, self.snapshot)
+        self.set_https_response_mode(self.RESPONSE_MODE.Valid)
+        self.driver.delete_snapshot(self.snapshot)
 
     def test_delete_snapshot(self):
         """Setting the unmap volume before delete flag for tests """
index 76bf97b00eb41dda586413e5ebb48288b281222d..1772ab614440306f3394180eaa2d71015c152403 100644 (file)
@@ -30,7 +30,7 @@ from six.moves import urllib
 
 from cinder import context
 from cinder import exception
-from cinder.i18n import _, _LI, _LW
+from cinder.i18n import _, _LI, _LW, _LE
 from cinder.image import image_utils
 from cinder import utils
 from cinder.volume import driver
@@ -239,11 +239,11 @@ class ScaleIODriver(driver.VolumeDriver):
         extraspecs_limit = storage_type.get(extraspecs_key)
         if extraspecs_limit is not None:
             if qos_limit is not None:
-                LOG.warning(_LW("QoS specs are overriding extraspecs"))
+                LOG.warning(_LW("QoS specs are overriding extra_specs."))
             else:
-                LOG.info(_LI("Using extraspecs for defining QoS specs "
-                             "will be deprecated in the next "
-                             "version of OpenStack, please use QoS specs"))
+                LOG.info(_LI("Using extra_specs for defining QoS specs "
+                             "will be deprecated in the N release "
+                             "of OpenStack. Please use QoS specs."))
         return qos_limit if qos_limit is not None else extraspecs_limit
 
     def _id_to_base64(self, id):
@@ -447,16 +447,7 @@ class ScaleIODriver(driver.VolumeDriver):
                     'server_port': self.server_port}
         request = ("https://%(server_ip)s:%(server_port)s"
                    "/api/instances/System/action/snapshotVolumes") % req_vars
-        r = requests.post(
-            request,
-            data=json.dumps(params),
-            headers=self._get_headers(),
-            auth=(
-                self.server_username,
-                self.server_token),
-            verify=self._get_verify_cert())
-        r = self._check_response(r, request, False, params)
-        response = r.json()
+        r, response = self._execute_scaleio_post_request(params, request)
         LOG.info(_LI("Snapshot volume response: %s."), response)
         if r.status_code != OK_STATUS_CODE and "errorCode" in response:
             msg = (_("Failed creating snapshot for volume %(volname)s: "
@@ -468,6 +459,19 @@ class ScaleIODriver(driver.VolumeDriver):
 
         return {'provider_id': response['volumeIdList'][0]}
 
+    def _execute_scaleio_post_request(self, params, request):
+        r = requests.post(
+            request,
+            data=json.dumps(params),
+            headers=self._get_headers(),
+            auth=(
+                self.server_username,
+                self.server_token),
+            verify=self._get_verify_cert())
+        r = self._check_response(r, request, False, params)
+        response = r.json()
+        return r, response
+
     def _check_response(self, response, request, is_get_request=True,
                         params=None):
         if response.status_code == 401 or response.status_code == 403:
@@ -713,6 +717,7 @@ class ScaleIODriver(driver.VolumeDriver):
         stats['free_capacity_gb'] = 'unknown'
         stats['reserved_percentage'] = 0
         stats['QoS_support'] = True
+        stats['consistencygroup_support'] = True
 
         pools = []
 
@@ -829,6 +834,7 @@ class ScaleIODriver(driver.VolumeDriver):
                     'total_capacity_gb': total_capacity_gb,
                     'free_capacity_gb': free_capacity_gb,
                     'QoS_support': True,
+                    'consistencygroup_support': True,
                     'reserved_percentage': 0
                     }
 
@@ -1024,9 +1030,9 @@ class ScaleIODriver(driver.VolumeDriver):
         LOG.info(_LI("Get Volume response: %s"), response)
         self._manage_existing_check_legal_response(r, existing_ref)
         if response['mappedSdcInfo'] is not None:
-            reason = ("manage_existing cannot manage a volume "
-                      "connected to hosts. Please disconnect this volume "
-                      "from existing hosts before importing")
+            reason = _("manage_existing cannot manage a volume "
+                       "connected to hosts. Please disconnect this volume "
+                       "from existing hosts before importing")
             raise exception.ManageExistingInvalidReference(
                 existing_ref=existing_ref,
                 reason=reason
@@ -1087,6 +1093,138 @@ class ScaleIODriver(driver.VolumeDriver):
                 reason=reason
             )
 
+    def create_consistencygroup(self, context, group):
+        """Creates a consistency group.
+
+        ScaleIO won't create CG until cg-snapshot creation,
+        db will maintain the volumes and CG relationship.
+        """
+        LOG.info(_LI("Creating Consistency Group"))
+        model_update = {'status': 'available'}
+        return model_update
+
+    def delete_consistencygroup(self, context, group, volumes):
+        """Deletes a consistency group.
+
+        ScaleIO will delete the volumes of the CG.
+        """
+        LOG.info(_LI("Deleting Consistency Group"))
+        model_update = {'status': 'deleted'}
+        error_statuses = ['error', 'error_deleting']
+        volumes_model_update = []
+        for volume in volumes:
+            try:
+                self._delete_volume(volume['provider_id'])
+                update_item = {'id': volume['id'],
+                               'status': 'deleted'}
+                volumes_model_update.append(update_item)
+            except exception.VolumeBackendAPIException as err:
+                update_item = {'id': volume['id'],
+                               'status': 'error_deleting'}
+                volumes_model_update.append(update_item)
+                if model_update['status'] not in error_statuses:
+                    model_update['status'] = 'error_deleting'
+                LOG.error(_LE("Failed to delete the volume %(vol)s of CG. "
+                              "Exception: %(exception)s."),
+                          {'vol': volume['name'], 'exception': err})
+        return model_update, volumes_model_update
+
+    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
+        """Creates a cgsnapshot."""
+        get_scaleio_snapshot_params = lambda snapshot: {
+            'volumeId': snapshot.volume['provider_id'],
+            'snapshotName': self._id_to_base64(snapshot['id'])}
+        snapshotDefs = list(map(get_scaleio_snapshot_params, snapshots))
+        r, response = self._snapshot_volume_group(snapshotDefs)
+        LOG.info(_LI("Snapshot volume response: %s."), response)
+        if r.status_code != OK_STATUS_CODE and "errorCode" in response:
+            msg = (_("Failed creating snapshot for group: "
+                     "%(response)s.") %
+                   {'response': response['message']})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        snapshot_model_update = []
+        for snapshot, scaleio_id in zip(snapshots, response['volumeIdList']):
+            update_item = {'id': snapshot['id'],
+                           'status': 'available',
+                           'provider_id': scaleio_id}
+            snapshot_model_update.append(update_item)
+        model_update = {'status': 'available'}
+        return model_update, snapshot_model_update
+
+    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
+        """Deletes a cgsnapshot."""
+        error_statuses = ['error', 'error_deleting']
+        model_update = {'status': cgsnapshot['status']}
+        snapshot_model_update = []
+        for snapshot in snapshots:
+            try:
+                self._delete_volume(snapshot.provider_id)
+                update_item = {'id': snapshot['id'],
+                               'status': 'deleted'}
+                snapshot_model_update.append(update_item)
+            except exception.VolumeBackendAPIException as err:
+                update_item = {'id': snapshot['id'],
+                               'status': 'error_deleting'}
+                snapshot_model_update.append(update_item)
+                if model_update['status'] not in error_statuses:
+                    model_update['status'] = 'error_deleting'
+                LOG.error(_LE("Failed to delete the snapshot %(snap)s "
+                              "of cgsnapshot: %(cgsnapshot_id)s. "
+                              "Exception: %(exception)s."),
+                          {'snap': snapshot['name'],
+                           'exception': err,
+                           'cgsnapshot_id': cgsnapshot.id})
+        model_update['status'] = 'deleted'
+        return model_update, snapshot_model_update
+
+    def create_consistencygroup_from_src(self, context, group, volumes,
+                                         cgsnapshot=None, snapshots=None,
+                                         source_cg=None, source_vols=None):
+        """Creates a consistency group from a source."""
+        get_scaleio_snapshot_params = lambda src_volume, trg_volume: {
+            'volumeId': src_volume['provider_id'],
+            'snapshotName': self._id_to_base64(trg_volume['id'])}
+        if cgsnapshot and snapshots:
+            snapshotDefs = map(get_scaleio_snapshot_params, snapshots, volumes)
+        else:
+            snapshotDefs = map(get_scaleio_snapshot_params, source_vols,
+                               volumes)
+        r, response = self._snapshot_volume_group(list(snapshotDefs))
+        LOG.info(_LI("Snapshot volume response: %s."), response)
+        if r.status_code != OK_STATUS_CODE and "errorCode" in response:
+            msg = (_("Failed creating snapshot for group: "
+                     "%(response)s.") %
+                   {'response': response['message']})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        volumes_model_update = []
+        for volume, scaleio_id in zip(volumes, response['volumeIdList']):
+            update_item = {'id': volume['id'],
+                           'status': 'available',
+                           'provider_id': scaleio_id}
+            volumes_model_update.append(update_item)
+        model_update = {'status': 'available'}
+        return model_update, volumes_model_update
+
+    def update_consistencygroup(self, context, group,
+                                add_volumes=None, remove_volumes=None):
+        """Update a consistency group.
+
+        ScaleIO does not handle volume grouping.
+        Cinder maintains volumes and CG relationship.
+        """
+        return None, None, None
+
+    def _snapshot_volume_group(self, snapshotDefs):
+        LOG.info(_LI("ScaleIO snapshot group of volumes"))
+        params = {'snapshotDefs': snapshotDefs}
+        req_vars = {'server_ip': self.server_ip,
+                    'server_port': self.server_port}
+        request = ("https://%(server_ip)s:%(server_port)s"
+                   "/api/instances/System/action/snapshotVolumes") % req_vars
+        return self._execute_scaleio_post_request(params, request)
+
     def ensure_export(self, context, volume):
         """Driver entry point to get the export info for an existing volume."""
         pass
diff --git a/releasenotes/notes/scaleio-consistency-groups-707f9b4ffcb3c14c.yaml b/releasenotes/notes/scaleio-consistency-groups-707f9b4ffcb3c14c.yaml
new file mode 100644 (file)
index 0000000..e6bf08d
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Add Consistency Group support in ScaleIO driver.