'status': 'available',
'host': "hostname@backend#%s" % POOLUUID}
+DATA_IN_REMOVE_VOLUME_VG = {
+ 'id': 'fe2dbc515810451dab2f8c8a48d15bee',
+ 'display_name': 'fe2dbc515810451dab2f8c8a48d15bee',
+ 'display_description': '',
+ 'size': 1,
+ 'consistencygroup_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
+ 'status': 'available',
+ 'host': "hostname@backend#%s" % POOLUUID}
+
DATA_IN_VOLUME1 = {'id': 'abc456',
'display_name': 'abc456',
'display_description': '',
'cgsnapshot_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee'}
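+# Mocked get_vg response describing the backend volume group; consumed
+# by the update_consistencygroup tests.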
+DATA_OUT_CG = {
+ "objectType": "application/cdmi-container",
+ "objectID": "fe2dbc515810451dab2f8c8a48d15bee",
+ "objectName": "<new_volume_group_uuid>",
+ "parentURI": "/dpl_volgroup",
+ "parentID": "fe2dbc515810451dab2f8c8a48d15bee",
+ "domainURI": "",
+ "capabilitiesURI": "",
+ "completionStatus": "Complete",
+ "percentComplete": 100,
+ "metadata":
+ {
+ "type": "volume|snapshot|replica",
+ "volume_group_uuid": "<volume_group_uuid>",
+ "origin_uuid": "<origin_uuid>",
+ "snapshot_uuid": "<snapshot_uuid>",
+ "display_name": "<display name>",
+ "display_description": "<display description>",
+ "ctime": 12345678,
+ "total_capacity": 1024,
+ "snapshot_used_capacity": 0,
+ "maximum_snapshot": 1024,
+ "snapshot_quota": 0,
+ "state": "<state>",
+ "properties":
+ {
+ "snapshot_rotation": True,
+ }
+ },
+ "childrenrange": "<range>",
+ "children":
+ [
+ "fe2dbc515810451dab2f8c8a48d15bee",
+ ],
+}
+
class TestProphetStorDPLVolume(test.TestCase):
metadata = {}
params = {}
metadata['display_name'] = DATA_IN_SNAPSHOT['display_name']
- metadata['display_description'] = \
- DATA_IN_SNAPSHOT['display_description']
+ metadata['display_description'] = (
+ DATA_IN_SNAPSHOT['display_description'])
params['metadata'] = metadata
params['snapshot'] = DATA_IN_SNAPSHOT['id']
self.assertDictMatch({'status': 'available'}, model_update)
def test_delete_consistency_group(self):
- self.DB_MOCK.volume_get_all_by_group.return_value = \
- [DATA_IN_VOLUME_VG]
+ self.DB_MOCK.volume_get_all_by_group.return_value = (
+ [DATA_IN_VOLUME_VG])
self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT
self.DPL_MOCK.delete_cg.return_value = DATA_OUTPUT
model_update, volumes = self.dpldriver.delete_consistencygroup(
self._conver_uuid2hex((DATA_IN_VOLUME_VG['id'])))
self.assertDictMatch({'status': 'deleted'}, model_update)
+ def test_update_consistencygroup(self):
+ self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG)
+ self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT
+ self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT
+ add_vol = DATA_IN_VOLUME_VG
+ remove_vol = DATA_IN_REMOVE_VOLUME_VG
+ (model_update, add_vols, remove_vols) = (
+ self.dpldriver.update_consistencygroup(self.context,
+ DATA_IN_GROUP,
+ [add_vol],
+ [remove_vol]))
+ self.DPL_MOCK.join_vg.assert_called_once_with(
+ self._conver_uuid2hex(add_vol['id']),
+ self._conver_uuid2hex(DATA_IN_GROUP['id']))
+ self.DPL_MOCK.leave_vg.assert_called_once_with(
+ self._conver_uuid2hex(remove_vol['id']),
+ self._conver_uuid2hex(DATA_IN_GROUP['id']))
+ self.assertDictMatch({'status': 'available'}, model_update)
+
+ def test_update_consistencygroup_exception_join(self):
+ self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG)
+ self.DPL_MOCK.join_vg.return_value = (-1, None)
+ self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT
+ add_vol = DATA_IN_VOLUME_VG
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.dpldriver.update_consistencygroup,
+ context=None,
+ group=DATA_IN_GROUP,
+ add_volumes=[add_vol],
+ remove_volumes=None)
+
+ def test_update_consistencygroup_exception_leave(self):
+ self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG)
+ self.DPL_MOCK.leave_vg.return_value = (-1, None)
+ remove_vol = DATA_IN_REMOVE_VOLUME_VG
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.dpldriver.update_consistencygroup,
+ context=None,
+ group=DATA_IN_GROUP,
+ add_volumes=None,
+ remove_volumes=[remove_vol])
+
def test_create_consistency_group_snapshot(self):
- self.DB_MOCK.snapshot_get_all_for_cgsnapshot.return_value = \
- [DATA_OUT_SNAPSHOT_CG]
+ self.DB_MOCK.snapshot_get_all_for_cgsnapshot.return_value = (
+ [DATA_OUT_SNAPSHOT_CG])
self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT
model_update, snapshots = self.dpldriver.create_cgsnapshot(
self.context, DATA_IN_CG_SNAPSHOT)
self.assertDictMatch({'status': 'available'}, model_update)
def test_delete_consistency_group_snapshot(self):
- self.DB_MOCK.snapshot_get_all_for_cgsnapshot.return_value = \
- [DATA_OUT_SNAPSHOT_CG]
+ self.DB_MOCK.snapshot_get_all_for_cgsnapshot.return_value = (
+ [DATA_OUT_SNAPSHOT_CG])
self.DPL_MOCK.delete_cgsnapshot.return_value = DATA_OUTPUT
model_update, snapshots = self.dpldriver.delete_cgsnapshot(
self.context, DATA_IN_CG_SNAPSHOT)
Implementation of the ProphetStor DPL storage adapter class for Federator.
# v2.0.1 Consistency group support
# v2.0.2 Pool aware scheduler
+ # v2.0.3 Consistency group modification support
"""
import base64
retcode = errno.EFAULT
break
- if retcode == 0 and response.status in expected_status and\
- response.status == httplib.NOT_FOUND:
+ if (retcode == 0 and response.status in expected_status
+ and response.status == httplib.NOT_FOUND):
retcode = errno.ENODATA
elif retcode == 0 and response.status not in expected_status:
LOG.error(_LE('%(method)s %(url)s unexpected response status: '
LOG.error(_LE('Read response raised an exception: %s.'),
e)
retcode = errno.ENOEXEC
- elif retcode == 0 and \
- response.status in [httplib.OK, httplib.CREATED] and \
- httplib.NO_CONTENT not in expected_status:
+ elif (retcode == 0 and
+ response.status in [httplib.OK, httplib.CREATED] and
+ httplib.NO_CONTENT not in expected_status):
try:
data = response.read()
data = json.loads(data)
class DPLCOMMONDriver(driver.VolumeDriver):
- """class of dpl storage adapter."""
- VERSION = '2.0.2'
+ """Class of dpl storage adapter."""
+ VERSION = '2.0.3'
def __init__(self, *args, **kwargs):
super(DPLCOMMONDriver, self).__init__(*args, **kwargs)
ret = 0
event_uuid = ""
- if type(output) is dict and \
- output.get("metadata") and output["metadata"]:
- if output["metadata"].get("event_uuid") and \
- output["metadata"]["event_uuid"]:
+ if isinstance(output, dict) and output.get("metadata"):
+ if output["metadata"].get("event_uuid"):
event_uuid = output["metadata"]["event_uuid"]
else:
ret = errno.EINVAL
break
return status
- def _join_volume_group(self, volume):
+ def _join_volume_group(self, volume, cgId):
# Join the volume to the backend volume group identified by cgId.
- cgId = volume['consistencygroup_id']
msg = ''
try:
ret, output = self.dpl.join_vg(
'group %(cgid)s.'),
{'id': volume['id'], 'cgid': cgId})
+ def _leave_volume_group(self, volume, cgId):
+ # Remove the volume from the backend volume group identified by cgId.
+ msg = ''
+ try:
+ ret, output = self.dpl.leave_vg(
+ self._conver_uuid2hex(volume['id']),
+ self._conver_uuid2hex(cgId))
+ except Exception as e:
+ ret = errno.EFAULT
+ msg = _('Flexvisor failed to remove volume %(id)s '
+ 'due to %(reason)s.') % {"id": volume['id'],
+ "reason": six.text_type(e)}
+ if ret:
+ if not msg:
+ msg = _('Flexvisor failed to remove volume %(id)s '
+ 'from group %(cgid)s.') % {'id': volume['id'],
+ 'cgid': cgId}
+ raise exception.VolumeBackendAPIException(data=msg)
+ else:
+ LOG.info(_LI('Flexvisor succeeded to remove volume %(id)s from '
+ 'group %(cgid)s.'),
+ {'id': volume['id'], 'cgid': cgId})
+
def _get_snapshotid_of_vgsnapshot(self, vgID, vgsnapshotID, volumeID):
snapshotID = None
ret, out = self.dpl.query_vdev_snapshot(vgID, vgsnapshotID, True)
model_update['status'] = 'deleted'
return model_update, snapshots
+ def update_consistencygroup(self, context, group, add_volumes=None,
+ remove_volumes=None):
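+ """Add volumes to and remove volumes from the consistency group."""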
+ addvollist = []
+ removevollist = []
+ cgid = group['id']
+ vid = ''
+ model_update = {'status': 'available'}
+ # Get current group info in backend storage.
+ ret, output = self.dpl.get_vg(self._conver_uuid2hex(cgid))
+ # Default to an empty membership list so that a failed get_vg
+ # call cannot raise a NameError below.
+ group_members = []
+ if ret == 0:
+ group_members = output.get('children', [])
+
+ if add_volumes:
+ addvollist = add_volumes
+ if remove_volumes:
+ removevollist = remove_volumes
+
+ # Add the requested volumes to the group.
+ try:
+ for volume in addvollist:
+ vid = volume['id']
+ # Skip volumes that already belong to the group.
+ if self._conver_uuid2hex(vid) in group_members:
+ continue
+ self._join_volume_group(volume, cgid)
+ except Exception as e:
+ msg = _("Flexvisor failed to join volume %(vol)s to the "
+ "group %(group)s due to "
+ "%(ret)s.") % {"vol": vid, "group": cgid,
+ "ret": six.text_type(e)}
+ raise exception.VolumeBackendAPIException(data=msg)
+ # Remove the requested volumes from the group.
+ try:
+ for volume in removevollist:
+ vid = volume['id']
+ if self._conver_uuid2hex(vid) in group_members:
+ self._leave_volume_group(volume, cgid)
+ except Exception as e:
+ msg = _("Flexvisor failed to remove volume %(vol)s from the "
+ "group %(group)s due to "
+ "%(ret)s.") % {"vol": vid, "group": cgid,
+ "ret": six.text_type(e)}
+ raise exception.VolumeBackendAPIException(data=msg)
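+ # Cinder expects (model_update, add_volumes_update,
+ # remove_volumes_update); no per-volume updates are needed here.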
+ return model_update, None, None
+
def create_volume(self, volume):
"""Create a volume."""
pool = volume_utils.extract_host(volume['host'],
if volume.get('consistencygroup_id', None):
try:
- self._join_volume_group(volume)
+ self._join_volume_group(volume, volume['consistencygroup_id'])
except Exception:
# Delete volume if volume failed to join group.
self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
if volume.get('consistencygroup_id', None):
try:
- self._join_volume_group(volume)
+ self._join_volume_group(volume, volume['consistencygroup_id'])
except Exception:
# Delete volume if volume failed to join group.
self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
if volume.get('consistencygroup_id', None):
try:
- self._join_volume_group(volume)
+ self._join_volume_group(volume, volume['consistencygroup_id'])
except Exception:
# Delete volume if volume failed to join group.
self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
snapshot['volume_id'],
event_uuid)
if status['state'] != 'available':
- msg = _('Flexvisor failed to create snapshot for volume '
- '%(id)s: %(status)s.') % \
- {'id': snapshot['volume_id'], 'status': ret}
+ msg = (_('Flexvisor failed to create snapshot for volume '
+ '%(id)s: %(status)s.') %
+ {'id': snapshot['volume_id'], 'status': ret})
raise exception.VolumeBackendAPIException(data=msg)
else:
- msg = _('Flexvisor failed to create snapshot for volume '
- '(failed to get event) %(id)s.') % \
- {'id': snapshot['volume_id']}
+ msg = (_('Flexvisor failed to create snapshot for volume '
+ '(failed to get event) %(id)s.') %
+ {'id': snapshot['volume_id']})
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to create snapshot for volume %(id)s: '
if ret == 0:
pool = {}
pool['pool_name'] = output['metadata']['pool_uuid']
- pool['total_capacity_gb'] = \
+ pool['total_capacity_gb'] = (
self._convert_size_GB(
- int(output['metadata']['total_capacity']))
- pool['free_capacity_gb'] = \
+ int(output['metadata']['total_capacity'])))
+ pool['free_capacity_gb'] = (
self._convert_size_GB(
- int(output['metadata']['available_capacity']))
- pool['allocated_capacity_gb'] = \
+ int(output['metadata']['available_capacity'])))
+ pool['allocated_capacity_gb'] = (
self._convert_size_GB(
- int(output['metadata']['used_capacity']))
+ int(output['metadata']['used_capacity'])))
pool['QoS_support'] = False
pool['reserved_percentage'] = 0
pools.append(pool)
"""
data = {}
pools = self._get_pools()
- data['volume_backend_name'] = \
- self.configuration.safe_get('volume_backend_name')
+ data['volume_backend_name'] = (
+ self.configuration.safe_get('volume_backend_name'))
location_info = '%(driver)s:%(host)s:%(volume)s' % {
'driver': self.__class__.__name__,
'host': self.configuration.san_ip,