review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Add patch for consistency group update in ProphetStor driver
author    rick.chen <rick.chen@prophetstor.com>
Wed, 1 Apr 2015 07:46:58 +0000 (15:46 +0800)
committer rick.chen <rick.chen@prophetstor.com>
Tue, 12 May 2015 14:00:40 +0000 (22:00 +0800)
The blueprint consistency-groups-kilo-update introduced consistency
group (CG) modification support. This patch adds support for the new
API in the ProphetStor driver.
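
For reference, the new driver hook is exercised roughly as sketched
below (an illustrative sketch only; "driver", "ctxt", "group" and the
volume placeholders are made-up names, not part of this patch):

    # Caller-side sketch of the CG modification hook added here.
    # Cinder's volume manager supplies the request context, the group,
    # and the lists of volumes to add/remove; the driver returns a
    # model update plus optional per-volume update lists (this driver
    # returns None for both).
    model_update, add_upd, remove_upd = driver.update_consistencygroup(
        ctxt,
        group,
        add_volumes=[volume_to_add],
        remove_volumes=[volume_to_remove])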

This patch also includes style changes that replace backslash (\)
line continuations with parentheses ().
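
For example (illustrative snippet only; the variables are made up and
not taken from this patch):

    metadata = {'display_description': 'example'}
    params = {}

    # Before: explicit backslash line continuation.
    params['display_description'] = \
        metadata['display_description']

    # After: implicit continuation inside parentheses.
    params['display_description'] = (
        metadata['display_description'])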

Change-Id: I256f320d2563b247a2493301638a7699b66a83b6
Implements: blueprint cg-modified-prophetstor

cinder/tests/unit/test_prophetstor_dpl.py
cinder/volume/drivers/prophetstor/dplcommon.py

index e98336ea14640d06e38ffa1cfd3344ed95ca456d..42aa199f08e1c3bab919e13678478ad43eb4767b 100644 (file)
@@ -114,6 +114,15 @@ DATA_IN_VOLUME_VG = {'id': 'abc123',
                      'status': 'available',
                      'host': "hostname@backend#%s" % POOLUUID}
 
+DATA_IN_REMOVE_VOLUME_VG = {
+    'id': 'fe2dbc515810451dab2f8c8a48d15bee',
+    'display_name': 'fe2dbc515810451dab2f8c8a48d15bee',
+    'display_description': '',
+    'size': 1,
+    'consistencygroup_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
+    'status': 'available',
+    'host': "hostname@backend#%s" % POOLUUID}
+
 DATA_IN_VOLUME1 = {'id': 'abc456',
                    'display_name': 'abc456',
                    'display_description': '',
@@ -139,6 +148,42 @@ DATA_OUT_SNAPSHOT_CG = {
     'display_description': '',
     'cgsnapshot_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee'}
 
+DATA_OUT_CG = {
+    "objectType": "application/cdmi-container",
+    "objectID": "fe2dbc515810451dab2f8c8a48d15bee",
+    "objectName": "<new_volume_group_uuid>",
+    "parentURI": "/dpl_volgroup",
+    "parentID": "fe2dbc515810451dab2f8c8a48d15bee",
+    "domainURI": "",
+    "capabilitiesURI": "",
+    "completionStatus": "Complete",
+    "percentComplete": 100,
+    "metadata":
+    {
+        "type": "volume|snapshot|replica",
+        "volume_group_uuid": "<volume_group_uuid>",
+        "origin_uuid": "<origin_uuid>",
+        "snapshot_uuid": "<snapshot_uuid>",
+        "display_name": "<display name>",
+        "display_description": "<display description>",
+        "ctime": 12345678,
+        "total_capacity": 1024,
+        "snapshot_used_capacity": 0,
+        "maximum_snapshot": 1024,
+        "snapshot_quota": 0,
+        "state": "<state>",
+        "properties":
+        {
+            "snapshot_rotation": True,
+        }
+    },
+    "childrenrange": "<range>",
+    "children":
+    [
+        "fe2dbc515810451dab2f8c8a48d15bee",
+    ],
+}
+
 
 class TestProphetStorDPLVolume(test.TestCase):
 
@@ -290,8 +335,8 @@ class TestProphetStorDPLVolume(test.TestCase):
         metadata = {}
         params = {}
         metadata['display_name'] = DATA_IN_SNAPSHOT['display_name']
-        metadata['display_description'] = \
-            DATA_IN_SNAPSHOT['display_description']
+        metadata['display_description'] = (
+            DATA_IN_SNAPSHOT['display_description'])
         params['metadata'] = metadata
         params['snapshot'] = DATA_IN_SNAPSHOT['id']
 
@@ -640,8 +685,8 @@ class TestProphetStorDPLDriver(test.TestCase):
         self.assertDictMatch({'status': 'available'}, model_update)
 
     def test_delete_consistency_group(self):
-        self.DB_MOCK.volume_get_all_by_group.return_value = \
-            [DATA_IN_VOLUME_VG]
+        self.DB_MOCK.volume_get_all_by_group.return_value = (
+            [DATA_IN_VOLUME_VG])
         self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT
         self.DPL_MOCK.delete_cg.return_value = DATA_OUTPUT
         model_update, volumes = self.dpldriver.delete_consistencygroup(
@@ -652,17 +697,59 @@ class TestProphetStorDPLDriver(test.TestCase):
             self._conver_uuid2hex((DATA_IN_VOLUME_VG['id'])))
         self.assertDictMatch({'status': 'deleted'}, model_update, )
 
+    def test_update_consistencygroup(self):
+        self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG)
+        self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT
+        self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT
+        add_vol = DATA_IN_VOLUME_VG
+        remove_vol = DATA_IN_REMOVE_VOLUME_VG
+        (model_update, add_vols, remove_vols) = (
+            self.dpldriver.update_consistencygroup(self.context,
+                                                   DATA_IN_GROUP,
+                                                   [add_vol],
+                                                   [remove_vol]))
+        self.DPL_MOCK.join_vg.assert_called_once_with(
+            self._conver_uuid2hex(add_vol['id']),
+            self._conver_uuid2hex(DATA_IN_GROUP['id']))
+        self.DPL_MOCK.leave_vg.assert_called_once_with(
+            self._conver_uuid2hex(remove_vol['id']),
+            self._conver_uuid2hex(DATA_IN_GROUP['id']))
+        self.assertDictMatch({'status': 'available'}, model_update)
+
+    def test_update_consistencygroup_exception_join(self):
+        self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG)
+        self.DPL_MOCK.join_vg.return_value = -1, None
+        self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT
+        add_vol = DATA_IN_VOLUME_VG
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.dpldriver.update_consistencygroup,
+                          context=None,
+                          group=DATA_IN_GROUP,
+                          add_volumes=[add_vol],
+                          remove_volumes=None)
+
+    def test_update_consistencygroup_exception_leave(self):
+        self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG)
+        self.DPL_MOCK.leave_vg.return_value = -1, None
+        remove_vol = DATA_IN_REMOVE_VOLUME_VG
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.dpldriver.update_consistencygroup,
+                          context=None,
+                          group=DATA_IN_GROUP,
+                          add_volumes=None,
+                          remove_volumes=[remove_vol])
+
     def test_create_consistency_group_snapshot(self):
-        self.DB_MOCK.snapshot_get_all_for_cgsnapshot.return_value = \
-            [DATA_OUT_SNAPSHOT_CG]
+        self.DB_MOCK.snapshot_get_all_for_cgsnapshot.return_value = (
+            [DATA_OUT_SNAPSHOT_CG])
         self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT
         model_update, snapshots = self.dpldriver.create_cgsnapshot(
             self.context, DATA_IN_CG_SNAPSHOT)
         self.assertDictMatch({'status': 'available'}, model_update)
 
     def test_delete_consistency_group_snapshot(self):
-        self.DB_MOCK.snapshot_get_all_for_cgsnapshot.return_value = \
-            [DATA_OUT_SNAPSHOT_CG]
+        self.DB_MOCK.snapshot_get_all_for_cgsnapshot.return_value = (
+            [DATA_OUT_SNAPSHOT_CG])
         self.DPL_MOCK.delete_cgsnapshot.return_value = DATA_OUTPUT
         model_update, snapshots = self.dpldriver.delete_cgsnapshot(
             self.context, DATA_IN_CG_SNAPSHOT)
index b58980b7acfc0ff04f4feddd1ea926f539409100..5f78982f86ac7912f1bd5a7455442e8cb22ad7a0 100644 (file)
@@ -16,6 +16,7 @@
 Implementation of the class of ProphetStor DPL storage adapter of Federator.
     # v2.0.1 Consistency group support
     # v2.0.2 Pool aware scheduler
+    # v2.0.3 Consistency group modification support
 """
 
 import base64
@@ -158,8 +159,8 @@ class DPLCommand(object):
                     retcode = errno.EFAULT
                     break
 
-        if retcode == 0 and response.status in expected_status and\
-                response.status == httplib.NOT_FOUND:
+        if (retcode == 0 and response.status in expected_status
+                and response.status == httplib.NOT_FOUND):
             retcode = errno.ENODATA
         elif retcode == 0 and response.status not in expected_status:
             LOG.error(_LE('%(method)s %(url)s unexpected response status: '
@@ -188,9 +189,9 @@ class DPLCommand(object):
                 LOG.error(_LE('Read response raised an exception: %s.'),
                           e)
                 retcode = errno.ENOEXEC
-        elif retcode == 0 and \
-                response.status in [httplib.OK, httplib.CREATED] and \
-                httplib.NO_CONTENT not in expected_status:
+        elif (retcode == 0 and
+                response.status in [httplib.OK, httplib.CREATED] and
+                httplib.NO_CONTENT not in expected_status):
             try:
                 data = response.read()
                 data = json.loads(data)
@@ -682,8 +683,8 @@ class DPLVolume(object):
 
 
 class DPLCOMMONDriver(driver.VolumeDriver):
-    """class of dpl storage adapter."""
-    VERSION = '2.0.2'
+    """Class of dpl storage adapter."""
+    VERSION = '2.0.3'
 
     def __init__(self, *args, **kwargs):
         super(DPLCOMMONDriver, self).__init__(*args, **kwargs)
@@ -714,10 +715,10 @@ class DPLCOMMONDriver(driver.VolumeDriver):
         ret = 0
         event_uuid = ""
 
-        if type(output) is dict and \
-                output.get("metadata") and output["metadata"]:
-            if output["metadata"].get("event_uuid") and  \
-                    output["metadata"]["event_uuid"]:
+        if (type(output) is dict and
+                output.get("metadata") and output["metadata"]):
+            if (output["metadata"].get("event_uuid") and
+                    output["metadata"]["event_uuid"]):
                 event_uuid = output["metadata"]["event_uuid"]
             else:
                 ret = errno.EINVAL
@@ -773,9 +774,8 @@ class DPLCOMMONDriver(driver.VolumeDriver):
                 break
         return status
 
-    def _join_volume_group(self, volume):
+    def _join_volume_group(self, volume, cgId):
         # Join volume group if consistency group id not empty
-        cgId = volume['consistencygroup_id']
         msg = ''
         try:
             ret, output = self.dpl.join_vg(
@@ -797,6 +797,29 @@ class DPLCOMMONDriver(driver.VolumeDriver):
                          'group %(cgid)s.'),
                      {'id': volume['id'], 'cgid': cgId})
 
+    def _leave_volume_group(self, volume, cgId):
+        # Leave volume group if consistency group id not empty
+        msg = ''
+        try:
+            ret, output = self.dpl.leave_vg(
+                self._conver_uuid2hex(volume['id']),
+                self._conver_uuid2hex(cgId))
+        except Exception as e:
+            ret = errno.EFAULT
+            msg = _('Flexvisor failed to remove volume %(id)s '
+                    'due to %(reason)s.') % {"id": volume['id'],
+                                             "reason": six.text_type(e)}
+        if ret:
+            if not msg:
+                msg = _('Flexvisor failed to remove volume %(id)s '
+                        'from group %(cgid)s.') % {'id': volume['id'],
+                                                   'cgid': cgId}
+            raise exception.VolumeBackendAPIException(data=msg)
+        else:
+            LOG.info(_LI('Flexvisor succeeded to remove volume %(id)s from '
+                         'group %(cgid)s.'),
+                     {'id': volume['id'], 'cgid': cgId})
+
     def _get_snapshotid_of_vgsnapshot(self, vgID, vgsnapshotID, volumeID):
         snapshotID = None
         ret, out = self.dpl.query_vdev_snapshot(vgID, vgsnapshotID, True)
@@ -937,6 +960,51 @@ class DPLCOMMONDriver(driver.VolumeDriver):
         model_update['status'] = 'deleted'
         return model_update, snapshots
 
+    def update_consistencygroup(self, context, group, add_volumes=None,
+                                remove_volumes=None):
+        addvollist = []
+        removevollist = []
+        cgid = group['id']
+        vid = ''
+        model_update = {'status': 'available'}
+        # Get current group info in backend storage.
+        ret, output = self.dpl.get_vg(self._conver_uuid2hex(cgid))
+        if ret == 0:
+            group_members = output.get('children', [])
+        else:
+            group_members = []
+
+        if add_volumes:
+            addvollist = add_volumes
+        if remove_volumes:
+            removevollist = remove_volumes
+
+        # Process join volumes.
+        try:
+            for volume in addvollist:
+                vid = volume['id']
+                # Verify the volume exists in the group or not.
+                if self._conver_uuid2hex(vid) in group_members:
+                    continue
+                self._join_volume_group(volume, cgid)
+        except Exception as e:
+            msg = _("Flexvisor failed to join volume %(vol)s to the "
+                    "group %(group)s due to "
+                    "%(ret)s.") % {"vol": vid, "group": cgid,
+                                   "ret": six.text_type(e)}
+            raise exception.VolumeBackendAPIException(data=msg)
+        # Process leave volumes.
+        try:
+            for volume in removevollist:
+                vid = volume['id']
+                if self._conver_uuid2hex(vid) in group_members:
+                    self._leave_volume_group(volume, cgid)
+        except Exception as e:
+            msg = _("Flexvisor failed to remove volume %(vol)s from the "
+                    "group %(group)s due to "
+                    "%(ret)s.") % {"vol": vid, "group": cgid,
+                                   "ret": six.text_type(e)}
+            raise exception.VolumeBackendAPIException(data=msg)
+        return model_update, None, None
+
     def create_volume(self, volume):
         """Create a volume."""
         pool = volume_utils.extract_host(volume['host'],
@@ -983,7 +1051,7 @@ class DPLCOMMONDriver(driver.VolumeDriver):
 
         if volume.get('consistencygroup_id', None):
             try:
-                self._join_volume_group(volume)
+                self._join_volume_group(volume, volume['consistencygroup_id'])
             except Exception:
                 # Delete volume if volume failed to join group.
                 self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
@@ -1066,7 +1134,7 @@ class DPLCOMMONDriver(driver.VolumeDriver):
 
         if volume.get('consistencygroup_id', None):
             try:
-                self._join_volume_group(volume)
+                self._join_volume_group(volume, volume['consistencygroup_id'])
             except Exception:
                 # Delete volume if volume failed to join group.
                 self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
@@ -1154,7 +1222,7 @@ class DPLCOMMONDriver(driver.VolumeDriver):
 
         if volume.get('consistencygroup_id', None):
             try:
-                self._join_volume_group(volume)
+                self._join_volume_group(volume, volume['consistencygroup_id'])
             except Exception:
                 # Delete volume if volume failed to join group.
                 self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
@@ -1252,14 +1320,14 @@ class DPLCOMMONDriver(driver.VolumeDriver):
                                           snapshot['volume_id'],
                                           event_uuid)
                 if status['state'] != 'available':
-                    msg = _('Flexvisor failed to create snapshot for volume '
-                            '%(id)s: %(status)s.') % \
-                        {'id': snapshot['volume_id'], 'status': ret}
+                    msg = (_('Flexvisor failed to create snapshot for volume '
+                             '%(id)s: %(status)s.') %
+                           {'id': snapshot['volume_id'], 'status': ret})
                     raise exception.VolumeBackendAPIException(data=msg)
             else:
-                msg = _('Flexvisor failed to create snapshot for volume '
-                        '(failed to get event) %(id)s.') % \
-                    {'id': snapshot['volume_id']}
+                msg = (_('Flexvisor failed to create snapshot for volume '
+                         '(failed to get event) %(id)s.') %
+                       {'id': snapshot['volume_id']})
                 raise exception.VolumeBackendAPIException(data=msg)
         elif ret != 0:
             msg = _('Flexvisor failed to create snapshot for volume %(id)s: '
@@ -1334,15 +1402,15 @@ class DPLCOMMONDriver(driver.VolumeDriver):
             if ret == 0:
                 pool = {}
                 pool['pool_name'] = output['metadata']['pool_uuid']
-                pool['total_capacity_gb'] = \
+                pool['total_capacity_gb'] = (
                     self._convert_size_GB(
-                        int(output['metadata']['total_capacity']))
-                pool['free_capacity_gb'] = \
+                        int(output['metadata']['total_capacity'])))
+                pool['free_capacity_gb'] = (
                     self._convert_size_GB(
-                        int(output['metadata']['available_capacity']))
-                pool['allocated_capacity_gb'] = \
+                        int(output['metadata']['available_capacity'])))
+                pool['allocated_capacity_gb'] = (
                     self._convert_size_GB(
-                        int(output['metadata']['used_capacity']))
+                        int(output['metadata']['used_capacity'])))
                 pool['QoS_support'] = False
                 pool['reserved_percentage'] = 0
                 pools.append(pool)
@@ -1358,8 +1426,8 @@ class DPLCOMMONDriver(driver.VolumeDriver):
         """
         data = {}
         pools = self._get_pools()
-        data['volume_backend_name'] = \
-            self.configuration.safe_get('volume_backend_name')
+        data['volume_backend_name'] = (
+            self.configuration.safe_get('volume_backend_name'))
         location_info = '%(driver)s:%(host)s:%(volume)s' % {
             'driver': self.__class__.__name__,
             'host': self.configuration.san_ip,