# Average busy percentage
AVG_BUSY_PERC = 'avg_busy_perc'
+# replication constants
+# Remote-copy CPG names on the destination array; HPE3PAR_CPG_MAP pairs each
+# local CPG with its remote CPG as 'local:remote local2:remote2'.
+HPE3PAR_CPG_REMOTE = 'DestOpenStackCPG'
+HPE3PAR_CPG2_REMOTE = 'destfakepool'
+HPE3PAR_CPG_MAP = 'OpenStackCPG:DestOpenStackCPG fakepool:destfakepool'
+# 3PAR remote-copy mode values as passed to createRemoteCopyGroup.
+SYNC_MODE = 1
+PERIODIC_MODE = 2
+# syncPeriod (seconds) expected for periodic-mode remote copy in these tests.
+SYNC_PERIOD = 900
+
class Comment(object):
def __init__(self, expected):
VOLUME_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'
CLONE_ID = 'd03338a9-9115-48a3-8dfc-000000000000'
+ VOLUME_TYPE_ID_REPLICATED = 'be9181f1-4040-46f2-8298-e7532f2bf9db'
VOLUME_TYPE_ID_DEDUP = 'd03338a9-9115-48a3-8dfc-11111111111'
VOLUME_TYPE_ID_FLASH_CACHE = 'd03338a9-9115-48a3-8dfc-22222222222'
VOLUME_NAME = 'volume-' + VOLUME_ID
SNAPSHOT_NAME = 'snapshot-2f823bdc-e36e-4dc8-bd15-de1c7a28ff31'
VOLUME_3PAR_NAME = 'osv-0DM4qZEVSKON-DXN-NwVpw'
SNAPSHOT_3PAR_NAME = 'oss-L4I73ONuTci9Fd4ceij-MQ'
+ RCG_3PAR_NAME = 'rcg-0DM4qZEVSKON-DXN-N'
CONSIS_GROUP_ID = '6044fedf-c889-4752-900f-2039d247a5df'
CONSIS_GROUP_NAME = 'vvs-YET.38iJR1KQDyA50kel3w'
CGSNAPSHOT_ID = 'e91c5ed5-daee-4e84-8724-1c9e31e7a1f2'
CGSNAPSHOT_BASE_NAME = 'oss-6Rxe1druToSHJByeMeeh8g'
+ CLIENT_ID = "12345"
+ REPLICATION_CLIENT_ID = "54321"
# fake host on the 3par
FAKE_HOST = 'fakehost'
FAKE_CINDER_HOST = 'fakehost@foo#' + HPE3PAR_CPG
+ FAKE_FAILOVER_HOST = 'fakefailover@foo#destfakepool'
USER_ID = '2689d9a913974c008b1d859013f23607'
PROJECT_ID = 'fac88235b9d64685a3530f73e490348f'
VOLUME_ID_SNAP = '761fc5e5-5191-4ec7-aeba-33e36de44156'
'volume_type': None,
'volume_type_id': None}
+    # Fake replicated volume: starts 'disabled' with the primary array id as
+    # its provider_location; tests expect it to flip to 'enabled'.
+    volume_replicated = {'name': VOLUME_NAME,
+                         'id': VOLUME_ID,
+                         'display_name': 'Foo Volume',
+                         'replication_status': 'disabled',
+                         'provider_location': CLIENT_ID,
+                         'size': 2,
+                         'host': FAKE_CINDER_HOST,
+                         'volume_type': 'replicated',
+                         'volume_type_id': VOLUME_TYPE_ID_REPLICATED}
+
+    # One managed replication target; tests set 'replication_mode' on it and
+    # assign the list to configuration.replication_device.
+    replication_targets = [{'target_device_id': 'target',
+                            'cpg_map': HPE3PAR_CPG_MAP,
+                            'hpe3par_api_url': 'https://1.1.1.1/api/v1',
+                            'hpe3par_username': HPE3PAR_USER_NAME,
+                            'hpe3par_password': HPE3PAR_USER_PASS,
+                            'san_ip': HPE3PAR_SAN_IP,
+                            'san_login': HPE3PAR_USER_NAME,
+                            'san_password': HPE3PAR_USER_PASS,
+                            'san_ssh_port': HPE3PAR_SAN_SSH_PORT,
+                            'ssh_conn_timeout': HPE3PAR_SAN_SSH_CON_TIMEOUT,
+                            'san_private_key': HPE3PAR_SAN_SSH_PRIVATE,
+                            'managed_backend_name': FAKE_FAILOVER_HOST}]
+
+    # Expected result shape of list_replication_targets for the target above.
+    list_rep_targets = [{'target_device_id': 'target'}]
+
volume_encrypted = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'deleted_at': None,
'id': 'gold'}
+    # Volume type whose extra spec marks it replication-enabled; returned by
+    # the mocked volume_types.get_volume_type in the replication tests.
+    volume_type_replicated = {'name': 'replicated',
+                              'deleted': False,
+                              'updated_at': None,
+                              'extra_specs':
+                                  {'replication_enabled': '<is> True'},
+                              'deleted_at': None,
+                              'id': VOLUME_TYPE_ID_REPLICATED}
+
volume_type_dedup = {'name': 'dedup',
'deleted': False,
'updated_at': None,
'TASK_ACTIVE': TASK_ACTIVE,
'TASK_DONE': TASK_DONE,
'getTask.return_value': STATUS_DONE,
- 'getStorageSystemInfo.return_value': {'serialNumber': '1234567'},
+ 'getStorageSystemInfo.return_value': {'id': CLIENT_ID,
+ 'serialNumber': '1234567'},
'getVolume.return_value': RETYPE_VOLUME_INFO_0,
'modifyVolume.return_value': ("anyResponse", {'taskid': 1})
}
port=HPE3PAR_SAN_SSH_PORT,
conn_timeout=HPE3PAR_SAN_SSH_CON_TIMEOUT)]
+    # Call sequence for the driver's initial session that fetches the array
+    # id via getStorageSystemInfo; in assertions it is followed by
+    # standard_logout, then standard_login for the operation under test.
+    get_id_login = [
+        mock.call.getWsApiVersion(),
+        mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS),
+        mock.call.setSSHOptions(
+            HPE3PAR_SAN_IP,
+            HPE3PAR_USER_NAME,
+            HPE3PAR_USER_PASS,
+            missing_key_policy='AutoAddPolicy',
+            privatekey=HPE3PAR_SAN_SSH_PRIVATE,
+            known_hosts_file=mock.ANY,
+            port=HPE3PAR_SAN_SSH_PORT,
+            conn_timeout=HPE3PAR_SAN_SSH_CON_TIMEOUT),
+        mock.call.getStorageSystemInfo()]
+
standard_logout = [
mock.call.logout()]
readOnly = False
    def setup_configuration(self):
-        configuration = mock.Mock()
+        # MagicMock (not Mock) so magic-method access on the config works.
+        configuration = mock.MagicMock()
         configuration.hpe3par_debug = False
         configuration.hpe3par_username = HPE3PAR_USER_NAME
         configuration.hpe3par_password = HPE3PAR_USER_PASS
         configuration.goodness_function = GOODNESS_FUNCTION
         configuration.filter_function = FILTER_FUNCTION
         configuration.image_volume_cache_enabled = False
+        # No replication devices by default; replication tests override this.
+        configuration.replication_device = None
         return configuration
@mock.patch(
self.standard_logout)
self.assertIsNone(return_model)
+    @mock.patch('hpe3parclient.version', "4.0.2")
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_create_volume_replicated_managed_periodic(self,
+                                                       _mock_volume_types):
+        """Create a replicated volume on a managed backend, periodic mode.
+
+        The RCG does not exist yet, so the driver must create it with
+        PERIODIC_MODE, set syncPeriod, and start remote copy.
+        """
+        # setup_mock_client drive with default configuration
+        # and return the mock HTTP 3PAR client
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'periodic'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
+        # Remote copy group lookup fails -> driver creates a new one.
+        mock_client.getRemoteCopyGroup.side_effect = (
+            hpeexceptions.HTTPNotFound)
+        mock_client.getCPG.return_value = {'domain': None}
+        mock_replicated_client = self.setup_driver(config=conf)
+        mock_replicated_client.getStorageSystemInfo.return_value = (
+            {'id': self.REPLICATION_CLIENT_ID})
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'replication_enabled': '<is> True',
+                'replication:mode': 'periodic',
+                'replication:sync_period': '900',
+                'volume_type': self.volume_type_replicated}}
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client, \
+            mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_replication_client') as mock_replication_client:
+            mock_create_client.return_value = mock_client
+            mock_replication_client.return_value = mock_replicated_client
+
+            return_model = self.driver.create_volume(self.volume_replicated)
+            comment = Comment({
+                "volume_type_name": "replicated",
+                "display_name": "Foo Volume",
+                "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
+                "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db",
+                "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
+                "qos": {},
+                "type": "OpenStack"})
+
+            target_device_id = self.replication_targets[0]['target_device_id']
+            expected = [
+                mock.call.createVolume(
+                    self.VOLUME_3PAR_NAME,
+                    HPE3PAR_CPG,
+                    2048, {
+                        'comment': comment,
+                        'tpvv': True,
+                        'tdvv': False,
+                        'snapCPG': HPE3PAR_CPG_SNAP}),
+                mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME),
+                mock.call.getCPG(HPE3PAR_CPG),
+                mock.call.createRemoteCopyGroup(
+                    self.RCG_3PAR_NAME,
+                    [{'userCPG': HPE3PAR_CPG_REMOTE,
+                      'targetName': target_device_id,
+                      'mode': PERIODIC_MODE,
+                      'snapCPG': HPE3PAR_CPG_REMOTE}],
+                    {'localUserCPG': HPE3PAR_CPG,
+                     'localSnapCPG': HPE3PAR_CPG_SNAP}),
+                mock.call.addVolumeToRemoteCopyGroup(
+                    self.RCG_3PAR_NAME,
+                    self.VOLUME_3PAR_NAME,
+                    [{'secVolumeName': self.VOLUME_3PAR_NAME,
+                      'targetName': target_device_id}],
+                    optional={'volumeAutoCreation': True}),
+                mock.call.modifyRemoteCopyGroup(
+                    self.RCG_3PAR_NAME,
+                    {'targets': [{'syncPeriod': SYNC_PERIOD,
+                                  'targetName': target_device_id}]}),
+                mock.call.startRemoteCopy(self.RCG_3PAR_NAME)]
+            # get_id_login + logout first: the driver logs in once to read
+            # the array id, logs out, then logs in for the operation itself.
+            mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
+                self.standard_login +
+                expected +
+                self.standard_logout)
+            self.assertEqual({'replication_status': 'enabled',
+                              'provider_location': self.CLIENT_ID},
+                             return_model)
+
+    @mock.patch('hpe3parclient.version', "4.0.2")
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_create_volume_replicated_managed_sync(self,
+                                                   _mock_volume_types):
+        """Create a replicated volume on a managed backend, sync mode.
+
+        Same as the periodic test except mode is SYNC_MODE and no
+        modifyRemoteCopyGroup/syncPeriod call is expected.
+        """
+        # setup_mock_client drive with default configuration
+        # and return the mock HTTP 3PAR client
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'sync'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
+        # Remote copy group lookup fails -> driver creates a new one.
+        mock_client.getRemoteCopyGroup.side_effect = (
+            hpeexceptions.HTTPNotFound)
+        mock_client.getCPG.return_value = {'domain': None}
+        mock_replicated_client = self.setup_driver(config=conf)
+        mock_replicated_client.getStorageSystemInfo.return_value = (
+            {'id': self.REPLICATION_CLIENT_ID})
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'replication_enabled': '<is> True',
+                'replication:mode': 'sync',
+                'volume_type': self.volume_type_replicated}}
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client, \
+            mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_replication_client') as mock_replication_client:
+            mock_create_client.return_value = mock_client
+            mock_replication_client.return_value = mock_replicated_client
+
+            return_model = self.driver.create_volume(self.volume_replicated)
+            comment = Comment({
+                "volume_type_name": "replicated",
+                "display_name": "Foo Volume",
+                "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
+                "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db",
+                "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
+                "qos": {},
+                "type": "OpenStack"})
+
+            target_device_id = self.replication_targets[0]['target_device_id']
+            expected = [
+                mock.call.createVolume(
+                    self.VOLUME_3PAR_NAME,
+                    HPE3PAR_CPG,
+                    2048, {
+                        'comment': comment,
+                        'tpvv': True,
+                        'tdvv': False,
+                        'snapCPG': HPE3PAR_CPG_SNAP}),
+                mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME),
+                mock.call.getCPG(HPE3PAR_CPG),
+                mock.call.createRemoteCopyGroup(
+                    self.RCG_3PAR_NAME,
+                    [{'userCPG': HPE3PAR_CPG_REMOTE,
+                      'targetName': target_device_id,
+                      'mode': SYNC_MODE,
+                      'snapCPG': HPE3PAR_CPG_REMOTE}],
+                    {'localUserCPG': HPE3PAR_CPG,
+                     'localSnapCPG': HPE3PAR_CPG_SNAP}),
+                mock.call.addVolumeToRemoteCopyGroup(
+                    self.RCG_3PAR_NAME,
+                    self.VOLUME_3PAR_NAME,
+                    [{'secVolumeName': self.VOLUME_3PAR_NAME,
+                      'targetName': target_device_id}],
+                    optional={'volumeAutoCreation': True}),
+                mock.call.startRemoteCopy(self.RCG_3PAR_NAME)]
+            # Initial id-fetch session (get_id_login) then re-login for the
+            # operation under test.
+            mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
+                self.standard_login +
+                expected +
+                self.standard_logout)
+            self.assertEqual({'replication_status': 'enabled',
+                              'provider_location': self.CLIENT_ID},
+                             return_model)
+
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_dedup(self, _mock_volume_types):
# setup_mock_client drive with default configuration
# Setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
+ mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
_mock_volume_types.return_value = {
'name': 'flash-cache-on',
'osv-0DM4qZEVSKON-DXN-NwVpw')]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
mock_client.getStorageSystemInfo.return_value = {
+ 'id': self.CLIENT_ID,
'serialNumber': 'XXXXXXX'}
with mock.patch.object(hpecommon.HPE3PARCommon,
expected +
self.standard_logout)
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_delete_volume_replicated(self, _mock_volume_types):
+        """Delete a replicated volume.
+
+        Expects remote copy to be stopped, the volume removed from the
+        RCG, the RCG removed, and finally the volume deleted.
+        """
+        # setup_mock_client drive with default configuration
+        # and return the mock HTTP 3PAR client
+        mock_client = self.setup_driver()
+        mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'cpg': HPE3PAR_CPG_QOS,
+                'snap_cpg': HPE3PAR_CPG_SNAP,
+                'vvs_name': self.VVS_NAME,
+                'qos': self.QOS,
+                'replication_enabled': '<is> True',
+                'replication:mode': 'periodic',
+                'replication:sync_period': '900',
+                'volume_type': self.volume_type_replicated}}
+        with mock.patch.object(hpecommon.HPE3PARCommon,
+                               '_create_client') as mock_create_client:
+            mock_create_client.return_value = mock_client
+            self.driver.delete_volume(self.volume_replicated)
+
+            expected = [
+                mock.call.stopRemoteCopy(self.RCG_3PAR_NAME),
+                mock.call.removeVolumeFromRemoteCopyGroup(
+                    self.RCG_3PAR_NAME,
+                    self.VOLUME_3PAR_NAME,
+                    removeFromTarget=True),
+                mock.call.removeRemoteCopyGroup(self.RCG_3PAR_NAME),
+                mock.call.deleteVolume(self.VOLUME_3PAR_NAME)]
+
+            # Initial id-fetch session (get_id_login) then re-login for the
+            # operation under test.
+            mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
+                self.standard_login +
+                expected +
+                self.standard_logout)
+
def test_create_cloned_volume(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
conf = {
'getStorageSystemInfo.return_value': {
+ 'id': self.CLIENT_ID,
'serialNumber': '1234'},
'getTask.return_value': {
'status': 1},
conf = {
'getStorageSystemInfo.return_value': {
+ 'id': self.CLIENT_ID,
'serialNumber': '1234'},
'getTask.return_value': {
'status': 1},
def test_migrate_volume_diff_host(self):
conf = {
'getStorageSystemInfo.return_value': {
+ 'id': self.CLIENT_ID,
'serialNumber': 'different'},
}
conf = {
'getStorageSystemInfo.return_value': {
+ 'id': self.CLIENT_ID,
'serialNumber': '1234'},
'getTask.return_value': {
'status': 1},
model_update = self.driver.create_volume_from_snapshot(
self.volume,
self.snapshot)
- self.assertIsNone(model_update)
+ self.assertEqual({}, model_update)
comment = Comment({
"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",
self.volume,
str(new_size))
+    @mock.patch('hpe3parclient.version', "4.0.2")
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_extend_volume_replicated(self, _mock_volume_types):
+        """Extend a replicated volume, both success and failure paths.
+
+        Remote copy must be stopped before the grow and restarted after,
+        even when growVolume raises.
+        """
+        # Managed vs. unmanaged and periodic vs. sync are not relevant when
+        # extending a replicated volume type.
+        # We will use managed and periodic as the default.
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'periodic'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'cpg': HPE3PAR_CPG,
+                'snap_cpg': HPE3PAR_CPG_SNAP,
+                'replication_enabled': '<is> True',
+                'replication:mode': 'periodic',
+                'replication:sync_period': '900',
+                'volume_type': self.volume_type_replicated}}
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client:
+            mock_create_client.return_value = mock_client
+
+            grow_size = 3
+            old_size = self.volume_replicated['size']
+            new_size = old_size + grow_size
+
+            # Test a successful extend.
+            self.driver.extend_volume(
+                self.volume_replicated,
+                new_size)
+            # growVolume takes the size delta in MiB (GiB * 1024).
+            expected = [
+                mock.call.stopRemoteCopy(self.RCG_3PAR_NAME),
+                mock.call.growVolume(self.VOLUME_3PAR_NAME, grow_size * 1024),
+                mock.call.startRemoteCopy(self.RCG_3PAR_NAME)]
+            mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
+                self.standard_login +
+                expected +
+                self.standard_logout)
+
+            # Test an unsuccessful extend. growVolume will fail but remote
+            # copy should still be started again.
+            mock_client.growVolume.side_effect = (
+                hpeexceptions.HTTPForbidden("Error: The volume cannot be "
+                                            "extended."))
+            self.assertRaises(
+                hpeexceptions.HTTPForbidden,
+                self.driver.extend_volume,
+                self.volume_replicated,
+                new_size)
+            expected = [
+                mock.call.stopRemoteCopy(self.RCG_3PAR_NAME),
+                mock.call.growVolume(self.VOLUME_3PAR_NAME, grow_size * 1024),
+                mock.call.startRemoteCopy(self.RCG_3PAR_NAME)]
+            mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
+                self.standard_login +
+                expected +
+                self.standard_logout)
+
def test_get_ports(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
def test_create_consistency_group(self):
mock_client = self.setup_driver()
+ mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
comment = Comment({
'display_name': 'cg_name',
comment=comment)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
def test_create_consistency_group_from_src(self):
mock_client = self.setup_driver()
+ mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
volume = self.volume
cgsnap_comment = Comment({
comment=cg_comment)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
self.VOLUME_NAME_3PAR)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
snapshots=[self.snapshot])
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
def test_delete_consistency_group(self):
mock_client = self.setup_driver()
+ mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
comment = Comment({
'display_name': 'cg_name',
comment=comment)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
self.CONSIS_GROUP_NAME)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
def test_update_consistency_group_add_vol(self):
mock_client = self.setup_driver()
+ mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
volume = self.volume
comment = Comment({
comment=comment)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
self.VOLUME_NAME_3PAR)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
def test_update_consistency_group_remove_vol(self):
mock_client = self.setup_driver()
+ mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
volume = self.volume
comment = Comment({
comment=comment)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
self.VOLUME_NAME_3PAR)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
self.VOLUME_NAME_3PAR)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
def test_create_cgsnapshot(self):
mock_client = self.setup_driver()
+ mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
volume = self.volume
cg_comment = Comment({
comment=cg_comment)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
self.VOLUME_NAME_3PAR)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
optional=cgsnap_optional)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
def test_delete_cgsnapshot(self):
mock_client = self.setup_driver()
+ mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
volume = self.volume
cgsnapshot = self.fake_cgsnapshot_object()
comment=cg_comment)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
self.VOLUME_NAME_3PAR)]
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
cgsnapshot, [])
mock_client.assert_has_calls(
- [mock.call.getWsApiVersion()] +
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
+    @mock.patch('hpe3parclient.version', "4.0.2")
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_replication_enable_not_in_rcopy(self, _mock_volume_types):
+        """Enable replication when the volume is not yet in an RCG.
+
+        The driver must create the remote copy group, add the volume,
+        set the sync period, and start remote copy.
+        """
+        # Managed vs. unmanaged and periodic vs. sync are not relevant when
+        # enabling/disabling replication and listing replication targets.
+        # We will use managed and periodic as the default.
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'periodic'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
+        # Remote copy group lookup fails -> driver must create it.
+        mock_client.getRemoteCopyGroup.side_effect = (
+            hpeexceptions.HTTPNotFound)
+        mock_client.getCPG.return_value = {'domain': None}
+        mock_replicated_client = self.setup_driver(config=conf)
+        mock_replicated_client.getStorageSystemInfo.return_value = (
+            {'id': self.REPLICATION_CLIENT_ID})
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'cpg': HPE3PAR_CPG,
+                'snap_cpg': HPE3PAR_CPG_SNAP,
+                'replication_enabled': '<is> True',
+                'replication:mode': 'periodic',
+                'replication:sync_period': '900',
+                'volume_type': self.volume_type_replicated}}
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client, \
+            mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_replication_client') as mock_replication_client:
+            mock_create_client.return_value = mock_client
+            mock_replication_client.return_value = mock_replicated_client
+
+            return_model = self.driver.replication_enable(
+                context.get_admin_context(),
+                self.volume_replicated)
+
+            target_device_id = self.replication_targets[0]['target_device_id']
+            expected = [
+                mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME),
+                mock.call.getCPG(HPE3PAR_CPG),
+                mock.call.getCPG(HPE3PAR_CPG),
+                mock.call.createRemoteCopyGroup(
+                    self.RCG_3PAR_NAME,
+                    [{'userCPG': HPE3PAR_CPG_REMOTE,
+                      'targetName': target_device_id,
+                      'mode': PERIODIC_MODE,
+                      'snapCPG': HPE3PAR_CPG_REMOTE}],
+                    {'localUserCPG': HPE3PAR_CPG,
+                     'localSnapCPG': HPE3PAR_CPG_SNAP}),
+                mock.call.addVolumeToRemoteCopyGroup(
+                    self.RCG_3PAR_NAME,
+                    self.VOLUME_3PAR_NAME,
+                    [{'secVolumeName': self.VOLUME_3PAR_NAME,
+                      'targetName': target_device_id}],
+                    optional={'volumeAutoCreation': True}),
+                mock.call.modifyRemoteCopyGroup(
+                    self.RCG_3PAR_NAME,
+                    {'targets': [{'syncPeriod': SYNC_PERIOD,
+                                  'targetName': target_device_id}]}),
+                mock.call.startRemoteCopy(self.RCG_3PAR_NAME)]
+            mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
+                self.standard_login +
+                expected +
+                self.standard_logout)
+            self.assertEqual({'replication_status': 'enabled',
+                              'provider_location': self.CLIENT_ID},
+                             return_model)
+
+    @mock.patch('hpe3parclient.version', "4.0.2")
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_replication_enable_in_rcopy(self, _mock_volume_types):
+        """Enable replication when the volume is already in an RCG.
+
+        getRemoteCopyGroup succeeds here, so only startRemoteCopy is
+        expected — no group creation calls.
+        """
+        # Managed vs. unmanaged and periodic vs. sync are not relevant when
+        # enabling/disabling replication and listing replication targets.
+        # We will use managed and periodic as the default.
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'periodic'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
+        mock_replicated_client = self.setup_driver(config=conf)
+        mock_replicated_client.getStorageSystemInfo.return_value = (
+            {'id': self.REPLICATION_CLIENT_ID})
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'replication_enabled': '<is> True',
+                'replication:mode': 'periodic',
+                'replication:sync_period': '900',
+                'volume_type': self.volume_type_replicated}}
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client, \
+            mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_replication_client') as mock_replication_client:
+            mock_create_client.return_value = mock_client
+            mock_replication_client.return_value = mock_replicated_client
+
+            return_model = self.driver.replication_enable(
+                context.get_admin_context(),
+                self.volume_replicated)
+
+            expected = [
+                mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME),
+                mock.call.startRemoteCopy(self.RCG_3PAR_NAME)]
+            mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
+                self.standard_login +
+                expected +
+                self.standard_logout)
+            self.assertEqual({'replication_status': 'enabled',
+                              'provider_location': self.CLIENT_ID},
+                             return_model)
+
+    @mock.patch('hpe3parclient.version', "4.0.2")
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_replication_enable_non_replicated_type(self, _mock_volume_types):
+        """replication_enable on a non-replicated type must raise."""
+        # Managed vs. unmanaged and periodic vs. sync are not relevant when
+        # enabling/disabling replication and listing replication targets.
+        # We will use managed and periodic as the default.
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'periodic'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+
+        # Volume type without 'replication_enabled' extra spec.
+        _mock_volume_types.return_value = {
+            'name': 'NOT_replicated',
+            'extra_specs': {
+                'volume_type': self.volume_type}}
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client:
+            mock_create_client.return_value = mock_client
+
+            self.assertRaises(
+                exception.VolumeBackendAPIException,
+                self.driver.replication_enable,
+                context.get_admin_context(),
+                self.volume_replicated)
+
+    @mock.patch('hpe3parclient.version', "4.0.2")
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_replication_disable(self, _mock_volume_types):
+        """Disable replication: stopRemoteCopy and status 'disabled'."""
+        # Managed vs. unmanaged and periodic vs. sync are not relevant when
+        # enabling/disabling replication and listing replication targets.
+        # We will use managed and periodic as the default.
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'periodic'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
+        mock_replicated_client = self.setup_driver(config=conf)
+        mock_replicated_client.getStorageSystemInfo.return_value = (
+            {'id': self.REPLICATION_CLIENT_ID})
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'replication_enabled': '<is> True',
+                'replication:mode': 'periodic',
+                'replication:sync_period': '900',
+                'volume_type': self.volume_type_replicated}}
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client, \
+            mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_replication_client') as mock_replication_client:
+            mock_create_client.return_value = mock_client
+            mock_replication_client.return_value = mock_replicated_client
+
+            return_model = self.driver.replication_disable(
+                context.get_admin_context(),
+                self.volume_replicated)
+
+            expected = [
+                mock.call.stopRemoteCopy(self.RCG_3PAR_NAME)]
+            mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
+                self.standard_login +
+                expected +
+                self.standard_logout)
+            self.assertEqual({'replication_status': 'disabled'},
+                             return_model)
+
+    @mock.patch('hpe3parclient.version', "4.0.2")
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_replication_disable_fail(self, _mock_volume_types):
+        """Disable replication when stopRemoteCopy raises.
+
+        The driver should not propagate the error; it reports status
+        'disable_failed' instead.
+        """
+        # Managed vs. unmanaged and periodic vs. sync are not relevant when
+        # enabling/disabling replication and listing replication targets.
+        # We will use managed and periodic as the default.
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'periodic'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+        mock_client.stopRemoteCopy.side_effect = (
+            Exception("Error: Remote Copy could not be stopped."))
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
+        mock_replicated_client = self.setup_driver(config=conf)
+        mock_replicated_client.getStorageSystemInfo.return_value = (
+            {'id': self.REPLICATION_CLIENT_ID})
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'replication_enabled': '<is> True',
+                'replication:mode': 'periodic',
+                'replication:sync_period': '900',
+                'volume_type': self.volume_type_replicated}}
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client, \
+            mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_replication_client') as mock_replication_client:
+            mock_create_client.return_value = mock_client
+            mock_replication_client.return_value = mock_replicated_client
+
+            return_model = self.driver.replication_disable(
+                context.get_admin_context(),
+                self.volume_replicated)
+
+            expected = [
+                mock.call.stopRemoteCopy(self.RCG_3PAR_NAME)]
+            mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
+                self.standard_login +
+                expected +
+                self.standard_logout)
+            self.assertEqual({'replication_status': 'disable_failed'},
+                             return_model)
+
+    @mock.patch('hpe3parclient.version', "4.0.2")
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_replication_disable_non_replicated_type(self, _mock_volume_types):
+        """replication_disable on a non-replicated type must raise."""
+        # Managed vs. unmanaged and periodic vs. sync are not relevant when
+        # enabling/disabling replication and listing replication targets.
+        # We will use managed and periodic as the default.
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'periodic'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+
+        # Volume type without 'replication_enabled' extra spec.
+        _mock_volume_types.return_value = {
+            'name': 'NOT_replicated',
+            'extra_specs': {
+                'volume_type': self.volume_type}}
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client:
+            mock_create_client.return_value = mock_client
+
+            self.assertRaises(
+                exception.VolumeBackendAPIException,
+                self.driver.replication_disable,
+                context.get_admin_context(),
+                self.volume_replicated)
+
+    @mock.patch('hpe3parclient.version', "4.0.2")
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_list_replication_targets(self, _mock_volume_types):
+        """List replication targets for a replicated volume.
+
+        The RCG's targets are mapped back to target_device_id entries
+        (see list_rep_targets).
+        """
+        # Managed vs. unmanaged and periodic vs. sync are not relevant when
+        # enabling/disabling replication and listing replication targets.
+        # We will use managed and periodic as the default.
+        target_device_id = self.replication_targets[0]['target_device_id']
+        conf = self.setup_configuration()
+        self.replication_targets[0]['replication_mode'] = 'periodic'
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf)
+        mock_client.getRemoteCopyGroup.return_value = (
+            {'targets': [{'targetName': target_device_id}]})
+        mock_client.getStorageSystemInfo.return_value = (
+            {'id': self.CLIENT_ID})
+        mock_client.getCPG.return_value = {'domain': None}
+        mock_replicated_client = self.setup_driver(config=conf)
+        mock_replicated_client.getStorageSystemInfo.return_value = (
+            {'id': self.REPLICATION_CLIENT_ID})
+
+        _mock_volume_types.return_value = {
+            'name': 'replicated',
+            'extra_specs': {
+                'replication_enabled': '<is> True',
+                'replication:mode': 'periodic',
+                'replication:sync_period': '900',
+                'volume_type': self.volume_type_replicated}}
+
+        with mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_client') as mock_create_client, \
+            mock.patch.object(
+                hpecommon.HPE3PARCommon,
+                '_create_replication_client') as mock_replication_client:
+            mock_create_client.return_value = mock_client
+            mock_replication_client.return_value = mock_replicated_client
+
+            return_model = self.driver.list_replication_targets(
+                context.get_admin_context(),
+                self.volume_replicated)
+
+            expected = [
+                mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME)]
+            mock_client.assert_has_calls(
+                self.get_id_login +
+                self.standard_logout +
+                self.standard_login +
+                expected +
+                self.standard_logout)
+
+            targets = self.list_rep_targets
+            self.assertEqual({'volume_id': self.volume_replicated['id'],
+                              'targets': targets},
+                             return_model)
+
+ @mock.patch('hpe3parclient.version', "4.0.2")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ def test_list_replication_targets_non_replicated_type(self,
+ _mock_volume_types):
+ """A volume whose type is not replication-enabled yields no targets."""
+ # Managed vs. unmanaged and periodic vs. sync are not relevant when
+ # enabling/disabling replication and listing replication targets.
+ # We will use managed and periodic as the default.
+ conf = self.setup_configuration()
+ self.replication_targets[0]['replication_mode'] = 'periodic'
+ conf.replication_device = self.replication_targets
+ mock_client = self.setup_driver(config=conf)
+ mock_client.getStorageSystemInfo.return_value = (
+ {'id': self.CLIENT_ID})
+
+ # Volume type lacks the replication_enabled extra spec.
+ _mock_volume_types.return_value = {
+ 'name': 'NOT_replicated',
+ 'extra_specs': {
+ 'volume_type': self.volume_type}}
+
+ with mock.patch.object(
+ hpecommon.HPE3PARCommon,
+ '_create_client') as mock_create_client:
+ mock_create_client.return_value = mock_client
+
+ return_model = self.driver.list_replication_targets(
+ context.get_admin_context(),
+ self.volume_replicated)
+
+ # No RCG lookup should occur; only login/logout traffic.
+ mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
+ self.standard_login +
+ self.standard_logout)
+
+ self.assertEqual([], return_model)
+
+ @mock.patch('hpe3parclient.version', "4.0.2")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ def test_replication_failover_managed(self, _mock_volume_types):
+ """Failover to a managed target: invalid/missing/valid/failed cases."""
+ # periodic vs. sync is not relevant when conducting a failover. We
+ # will just use periodic.
+ provider_location = self.CLIENT_ID + ":" + self.REPLICATION_CLIENT_ID
+ conf = self.setup_configuration()
+ self.replication_targets[0]['replication_mode'] = 'periodic'
+ conf.replication_device = self.replication_targets
+ mock_client = self.setup_driver(config=conf)
+ mock_client.getStorageSystemInfo.return_value = (
+ {'id': self.CLIENT_ID})
+ mock_replicated_client = self.setup_driver(config=conf)
+ mock_replicated_client.getStorageSystemInfo.return_value = (
+ {'id': self.REPLICATION_CLIENT_ID})
+
+ _mock_volume_types.return_value = {
+ 'name': 'replicated',
+ 'extra_specs': {
+ 'replication_enabled': '<is> True',
+ 'replication:mode': 'periodic',
+ 'replication:sync_period': '900',
+ 'volume_type': self.volume_type_replicated}}
+
+ with mock.patch.object(
+ hpecommon.HPE3PARCommon,
+ '_create_client') as mock_create_client, \
+ mock.patch.object(
+ hpecommon.HPE3PARCommon,
+ '_create_replication_client') as mock_replication_client:
+ mock_create_client.return_value = mock_client
+ mock_replication_client.return_value = mock_replicated_client
+ valid_target_device_id = (
+ self.replication_targets[0]['target_device_id'])
+ invalid_target_device_id = 'INVALID'
+
+ # test invalid secondary target
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.driver.replication_failover,
+ context.get_admin_context(),
+ self.volume_replicated,
+ invalid_target_device_id)
+
+ # test no secondary target
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.driver.replication_failover,
+ context.get_admin_context(),
+ self.volume_replicated,
+ None)
+
+ # test a successful failover
+ volume = self.volume_replicated
+ volume['provider_location'] = self.CLIENT_ID
+ return_model = self.driver.replication_failover(
+ context.get_admin_context(),
+ volume,
+ valid_target_device_id)
+ # Remote copy must be stopped on the primary before failing over.
+ expected = [
+ mock.call.stopRemoteCopy(self.RCG_3PAR_NAME)]
+ mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
+ self.standard_login +
+ expected +
+ self.standard_logout)
+ # provider_location becomes "<primary id>:<secondary id>" and the
+ # host moves to the managed failover backend.
+ self.assertEqual({'replication_status': 'inactive',
+ 'provider_location': provider_location,
+ 'host': self.FAKE_FAILOVER_HOST},
+ return_model)
+
+ # test a unsuccessful failover
+ mock_replicated_client.recoverRemoteCopyGroupFromDisaster.\
+ side_effect = (
+ exception.VolumeBackendAPIException(
+ "Error: Failover was unsuccessful."))
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.driver.replication_failover,
+ context.get_admin_context(),
+ self.volume_replicated,
+ valid_target_device_id)
+
class TestHPE3PARFCDriver(HPE3PARBaseDriver, test.TestCase):
mock_client = self.setup_driver(config=config)
mock_client.getCPG.return_value = self.cpgs[0]
mock_client.getStorageSystemInfo.return_value = {
+ 'id': self.CLIENT_ID,
'serialNumber': '1234'
}
mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)]
mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
mock_client = self.setup_driver(config=config, wsapi_version=wsapi)
mock_client.getCPG.return_value = self.cpgs[0]
mock_client.getStorageSystemInfo.return_value = {
+ 'id': self.CLIENT_ID,
'serialNumber': '1234'
}
mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)]
mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
wsapi_version=self.wsapi_version_312)
mock_client.getCPG.return_value = self.cpgs[0]
mock_client.getStorageSystemInfo.return_value = {
+ 'id': self.CLIENT_ID,
'serialNumber': '1234'
}
mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)]
mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
mock_client = self.setup_driver(config=config)
mock_client.getCPG.return_value = self.cpgs[0]
mock_client.getStorageSystemInfo.return_value = {
+ 'id': self.CLIENT_ID,
'serialNumber': '1234'
}
# cpg has no limit
mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)]
mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
mock_client = self.setup_driver(config=config, wsapi_version=wsapi)
mock_client.getCPG.return_value = self.cpgs[0]
mock_client.getStorageSystemInfo.return_value = {
+ 'id': self.CLIENT_ID,
'serialNumber': '1234'
}
mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)]
mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
wsapi_version=self.wsapi_version_312)
mock_client.getCPG.return_value = self.cpgs[0]
mock_client.getStorageSystemInfo.return_value = {
+ 'id': self.CLIENT_ID,
'serialNumber': '1234'
}
mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)]
mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
self.standard_login +
expected +
self.standard_logout)
LOG = logging.getLogger(__name__)
MIN_CLIENT_VERSION = '4.0.0'
+MIN_REP_CLIENT_VERSION = '4.0.2'
DEDUP_API_VERSION = 30201120
FLASH_CACHE_API_VERSION = 30201200
SRSTATLD_API_VERSION = 30201200
3.0.1 - Fixed find_existing_vluns bug #1515033
3.0.2 - Python 3 support
3.0.3 - Remove db access for consistency groups
+ 3.0.4 - Adds v2 managed replication support
"""
- VERSION = "3.0.3"
+ VERSION = "3.0.4"
stats = {}
CONVERT_TO_FULL = 2
CONVERT_TO_DEDUP = 3
+ # v2 replication constants
+ SYNC = 1
+ PERIODIC = 2
+ EXTRA_SPEC_REP_MODE = "replication:mode"
+ EXTRA_SPEC_REP_SYNC_PERIOD = "replication:sync_period"
+
# Valid values for volume type extra specs
# The first value in the list is the default value
valid_prov_values = ['thin', 'full', 'dedup']
self.config = config
self.client = None
self.uuid = uuid.uuid4()
+ self._replication_targets = []
+ self._replication_enabled = False
def get_version(self):
return self.VERSION
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
- def _create_client(self):
- cl = client.HPE3ParClient(self.config.hpe3par_api_url)
+ def _create_client(self, timeout=None):
+ # Timeout is only supported in version 4.0.2 and greater of the
+ # python-3parclient.
+ if hpe3parclient.version >= MIN_REP_CLIENT_VERSION:
+ cl = client.HPE3ParClient(self.config.hpe3par_api_url,
+ timeout=timeout)
+ else:
+ cl = client.HPE3ParClient(self.config.hpe3par_api_url)
client_version = hpe3parclient.version
if client_version < MIN_CLIENT_VERSION:
LOG.debug("Disconnect from 3PAR REST and SSH %s", self.uuid)
self.client.logout()
- def do_setup(self, context):
def _create_replication_client(self, remote_array):
    """Create and log in a 3PAR client for one replication target.

    Performs the WSAPI login and then configures SSH options from the
    target's replication_device entry in cinder.conf.

    :param remote_array: dict of settings for one replication target
    :returns: a logged-in HPE3ParClient for the remote array
    :raises: InvalidInput when the WSAPI login is rejected
    """
    try:
        cl = client.HPE3ParClient(remote_array['hpe3par_api_url'])
        cl.login(remote_array['hpe3par_username'],
                 remote_array['hpe3par_password'])
    except hpeexceptions.HTTPUnauthorized as ex:
        msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") %
               {'url': remote_array['hpe3par_api_url'], 'err': ex})
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)

    # Unknown SSH hosts are rejected only under strict host key checking;
    # otherwise they are added automatically.
    policy = ("RejectPolicy" if CONF.strict_ssh_host_key_policy
              else "AutoAddPolicy")
    cl.setSSHOptions(
        remote_array['san_ip'],
        remote_array['san_login'],
        remote_array['san_password'],
        port=remote_array['san_ssh_port'],
        conn_timeout=remote_array['ssh_conn_timeout'],
        privatekey=remote_array['san_private_key'],
        missing_key_policy=policy,
        known_hosts_file=CONF.ssh_hosts_key_file)
    return cl
+
+ def _destroy_replication_client(self, client):
+ """Log out a client created by _create_replication_client."""
+ client.logout()
+
+ def do_setup(self, context, timeout=None):
if hpe3parclient is None:
msg = _('You must install hpe3parclient before using 3PAR'
' drivers. Run "pip install python-3parclient" to'
' install the hpe3parclient.')
raise exception.VolumeBackendAPIException(data=msg)
+
try:
- self.client = self._create_client()
+ self.client = self._create_client(timeout=timeout)
wsapi_version = self.client.getWsApiVersion()
self.API_VERSION = wsapi_version['build']
except hpeexceptions.UnsupportedVersion as ex:
+ # In the event we cannot contact the configured primary array,
+ # we want to allow a failover if replication is enabled.
+ if hpe3parclient.version >= MIN_REP_CLIENT_VERSION:
+ self._do_replication_setup()
+ if self._replication_enabled:
+ self.client = None
raise exception.InvalidInput(ex)
if context:
LOG.error(msg)
raise exception.InvalidInput(message=msg)
- def check_for_setup_error(self):
- self.client_login()
+ # get the client ID for provider_location
try:
- cpg_names = self.config.hpe3par_cpg
- for cpg_name in cpg_names:
- self.validate_cpg(cpg_name)
-
+ self.client_login()
+ info = self.client.getStorageSystemInfo()
+ self.client.id = six.text_type(info['id'])
+ except Exception:
+ self.client.id = 0
finally:
self.client_logout()
+ # v2 replication setup
+ if not self._replication_enabled and (
+ hpe3parclient.version >= MIN_REP_CLIENT_VERSION):
+ self._do_replication_setup()
+
def check_for_setup_error(self):
    """Validate every configured CPG on the primary array.

    Skipped entirely when no primary client exists (e.g. the primary
    array was unreachable during setup and only replication targets
    are available).
    """
    if not self.client:
        return
    self.client_login()
    try:
        for cpg_name in self.config.hpe3par_cpg:
            self.validate_cpg(cpg_name)
    finally:
        self.client_logout()
+
def validate_cpg(self, cpg_name):
try:
self.client.getCPG(cpg_name)
def _extend_volume(self, volume, volume_name, growth_size_mib,
_convert_to_base=False):
model_update = None
+ rcg_name = self._get_3par_rcg_name(volume['id'])
+ is_volume_replicated = self._volume_of_replicated_type(volume)
try:
if _convert_to_base:
LOG.debug("Converting to base volume prior to growing.")
model_update = self._convert_to_base_volume(volume)
+ # If the volume is replicated and we are not failed over,
+ # remote copy has to be stopped before the volume can be extended.
+ failed_over = volume.get("replication_status", None)
+ is_failed_over = failed_over == "failed-over"
+ if is_volume_replicated and not is_failed_over:
+ self.client.stopRemoteCopy(rcg_name)
self.client.growVolume(volume_name, growth_size_mib)
+ if is_volume_replicated and not is_failed_over:
+ self.client.startRemoteCopy(rcg_name)
except Exception as ex:
+ # If the extend fails, we must restart remote copy.
+ if is_volume_replicated:
+ self.client.startRemoteCopy(rcg_name)
with excutils.save_and_reraise_exception() as ex_ctxt:
if (not _convert_to_base and
isinstance(ex, hpeexceptions.HTTPForbidden) and
unm_name = self._encode_name(volume_id)
return "unm-%s" % unm_name
+ # v2 replication conversion
+ def _get_3par_rcg_name(self, volume_id):
+ rcg_name = self._encode_name(volume_id)
+ rcg = "rcg-%s" % rcg_name
+ return rcg[:22]
+
def _get_3par_remote_rcg_name(self, volume_id, provider_location):
    """Return the remote-side RCG name: local name + '.r<array id>'."""
    suffix = ".r%s" % six.text_type(provider_location)
    return self._get_3par_rcg_name(volume_id) + suffix
+
def _encode_name(self, name):
uuid_str = name.replace("-", "")
vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)
'consistencygroup_support': True,
}
+ if hpe3parclient.version >= MIN_REP_CLIENT_VERSION:
+ pool['replication_enabled'] = self._replication_enabled
+ pool['replication_type'] = ['sync', 'periodic']
+ pool['replication_count'] = len(self._replication_targets)
+
pools.append(pool)
self.stats = {'driver_version': '3.0',
self.client.deleteVolume(volume_name)
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
+
+ # v2 replication check
+ replication_flag = False
+ if self._volume_of_replicated_type(volume) and (
+ self._do_volume_replication_setup(volume)):
+ replication_flag = True
+
except hpeexceptions.HTTPConflict:
msg = _("Volume (%s) already exists on array") % volume_name
LOG.error(msg)
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
- return self._get_model_update(volume['host'], cpg)
+ return self._get_model_update(volume['host'], cpg,
+ replication=replication_flag,
+ provider_location=self.client.id)
def _copy_volume(self, src_name, dest_name, cpg, snap_cpg=None,
tpvv=True, tdvv=False):
return comment_dict[key]
return None
def _get_model_update(self, volume_host, cpg, replication=False,
                      provider_location=None):
    """Get model_update dict to use when we select a pool.

    The pools implementation uses a volume['host'] suffix of :poolname.

    :param volume_host: the original volume['host']
    :param cpg: The actual pool (cpg) used, for example from the type.
    :param replication: True when replication was set up for the volume
    :param provider_location: primary array id to record, if replicated
    :return: dict Model update if we need to update volume host, else None
    """
    updates = {}
    backend = volume_utils.extract_host(volume_host, 'backend')
    pool_host = volume_utils.append_host(backend, cpg)
    if volume_host != pool_host:
        # Since we selected a pool based on type, update the model.
        updates['host'] = pool_host
    if replication:
        updates['replication_status'] = 'enabled'
        if provider_location:
            updates['provider_location'] = provider_location
    return updates or None
def create_cloned_volume(self, volume, src_vref):
tpvv=type_info['tpvv'],
tdvv=type_info['tdvv'])
- return self._get_model_update(volume['host'], cpg)
+ # v2 replication check
+ replication_flag = False
+ if self._volume_of_replicated_type(volume) and (
+ self._do_volume_replication_setup(volume)):
+ replication_flag = True
+
+ return self._get_model_update(volume['host'], cpg,
+ replication=replication_flag,
+ provider_location=self.client.id)
except hpeexceptions.HTTPForbidden:
raise exception.NotAuthorized()
raise exception.CinderException(ex)
def delete_volume(self, volume):
+ # v2 replication check
+ # If the volume type is replication enabled, we want to call our own
+ # method of deconstructing the volume and its dependencies
+ if self._volume_of_replicated_type(volume):
+ replication_status = volume.get('replication_status', None)
+ if replication_status and replication_status == "failed-over":
+ self._delete_replicated_failed_over_volume(volume)
+ else:
+ self._do_volume_replication_destroy(volume)
+ return
+
try:
volume_name = self._get_3par_vol_name(volume['id'])
# Try and delete the volume, it might fail here because
{'vol_name': pprint.pformat(volume['display_name']),
'ss_name': pprint.pformat(snapshot['display_name'])})
- model_update = None
+ model_update = {}
if volume['size'] < snapshot['volume_size']:
err = ("You cannot reduce size of the volume. It must "
"be greater than or equal to the snapshot.")
self.client.deleteVolume(volume_name)
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
+
+ # v2 replication check
+ if self._volume_of_replicated_type(volume) and (
+ self._do_volume_replication_setup(volume)):
+ model_update['replication_status'] = 'enabled'
+ model_update['provider_location'] = self.client.id
+
except hpeexceptions.HTTPForbidden as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized()
pass
return existing_vluns
# v2 replication methods
def get_replication_updates(self, context):
    """Return replication error updates for the manager.

    TODO(aorourke): the manager does not do anything with these updates.
    When that is changed, this will be modified as well.
    """
    return []
+
def replication_enable(self, context, volume):
    """Enable replication on a replication capable volume."""
    if not self._volume_of_replicated_type(volume):
        msg = _("Unable to enable volume replication because volume is "
                "not of replicated type.")
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    model_update = {"provider_location": self.client.id}
    # If replication is not enabled and the volume is of replicated type,
    # we treat this as an error.
    if self._replication_enabled and (
            self._do_volume_replication_setup(volume)):
        model_update['replication_status'] = "enabled"
    else:
        if not self._replication_enabled:
            msg = _LE("Enabling replication failed because replication is "
                      "not properly configured.")
            LOG.error(msg)
        model_update['replication_status'] = "error"

    return model_update
+
def replication_disable(self, context, volume):
    """Disable replication on the specified volume.

    :param context: request context (unused)
    :param volume: the replicated volume
    :returns: model update containing the new 'replication_status'
    :raises: VolumeBackendAPIException when the volume's type is not
             replication enabled
    """
    if not self._volume_of_replicated_type(volume):
        msg = _("Unable to disable volume replication because volume is "
                "not of replicated type.")
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    model_update = {}
    # If replication is not enabled and the volume is of replicated type,
    # we treat this as an error.
    if self._replication_enabled:
        model_update['replication_status'] = 'disabled'
        rcg_name = self._get_3par_rcg_name(volume['id'])
        vol_name = self._get_3par_vol_name(volume['id'])

        try:
            self.client.stopRemoteCopy(rcg_name)
        except Exception as ex:
            msg = (_LE("There was a problem disabling replication on "
                       "volume '%(name)s': %(error)s") %
                   {'name': vol_name,
                    'error': six.text_type(ex)})
            LOG.error(msg)
            model_update['replication_status'] = 'disable_failed'
    else:
        # BUGFIX: this else must pair with the replication-enabled check
        # above, NOT with the try/except; otherwise a successful
        # stopRemoteCopy would incorrectly report status 'error'.
        msg = _LE("Disabling replication failed because replication is "
                  "not properly configured.")
        LOG.error(msg)
        model_update['replication_status'] = 'error'

    return model_update
+
def replication_failover(self, context, volume, secondary):
    """Force failover to a secondary replication target.

    :param context: request context (unused)
    :param volume: the replicated volume
    :param secondary: target_device_id of the array to fail over to
    :returns: model update with the new provider_location
              ("<primary id>:<secondary id>"), replication_status, and,
              for a managed target, the new host
    :raises: VolumeBackendAPIException when the volume is not of
             replicated type, the secondary target is invalid, or the
             failover operation itself fails
    """
    if not self._volume_of_replicated_type(volume):
        msg = _("Unable to failover because volume is not of "
                "replicated type.")
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    # If replication is not enabled and the volume is of replicated type,
    # we treat this as an error.
    if not self._replication_enabled:
        msg = _LE("Issuing a fail-over failed because replication is "
                  "not properly configured.")
        LOG.error(msg)
        model_update = {"replication_status": "error"}
        return model_update

    failover_target = None
    for target in self._replication_targets:
        if target['target_device_id'] == secondary:
            failover_target = target
            break

    if not failover_target:
        msg = _("A valid secondary target MUST be specified in order "
                "to failover.")
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    if self.client is not None and failover_target['id'] == self.client.id:
        msg = _("The failover array cannot be the primary array.")
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    try:
        # Try and stop remote-copy on main array. Best-effort: the
        # primary may already be unreachable.
        rcg_name = self._get_3par_rcg_name(volume['id'])
        self.client.stopRemoteCopy(rcg_name)
    except Exception:
        pass

    cl = None
    try:
        # Failover to secondary array.
        remote_rcg_name = self._get_3par_remote_rcg_name(
            volume['id'], volume['provider_location'])
        cl = self._create_replication_client(failover_target)
        cl.recoverRemoteCopyGroupFromDisaster(
            remote_rcg_name, self.client.RC_ACTION_CHANGE_TO_PRIMARY)
        new_location = volume['provider_location'] + ":" + (
            failover_target['id'])

        model_update = {"provider_location": new_location,
                        "replication_status": "inactive"}
        if failover_target['managed_backend_name']:
            # We want to update the volumes host if our target is managed.
            model_update['host'] = failover_target['managed_backend_name']

    except Exception as ex:
        msg = _("There was a problem with the failover (%s) and it was "
                "unsuccessful.") % six.text_type(ex)
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    finally:
        # BUGFIX: 'cl' is unbound when _create_replication_client itself
        # raised; the old unconditional logout hit a NameError here that
        # masked the original failure.
        if cl is not None:
            self._destroy_replication_client(cl)

    return model_update
+
def list_replication_targets(self, context, volume):
    """Provides a means to obtain replication targets for a volume.

    This will query all enabled targets on a 3PAR backend and cross
    reference them with all entries in cinder.conf. It will return
    only those that appear on both, aka enabled replication targets.
    """
    if not self._volume_of_replicated_type(volume):
        return []

    # If the primary target is offline we can not ask it what targets are
    # available. Our only option is to list all cinder.conf entries.
    allowed_names = []
    try:
        rcg = self.client.getRemoteCopyGroup(
            self._get_3par_rcg_name(volume['id']))
        allowed_names = [t['targetName'] for t in rcg['targets']]
    except Exception:
        LOG.warning(_LW("The primary array is currently unreachable. All "
                        "targets returned from list_replication_targets "
                        "are pulled directly from cinder.conf and are not "
                        "guarenteed to be available because they could "
                        "not be verified with the primary array."))

    replication_targets = [
        {'target_device_id': target['target_device_id']}
        for target in self._replication_targets
        if not allowed_names or (
            target['target_device_id'] in allowed_names)]

    return {'volume_id': volume['id'],
            'targets': replication_targets}
+
+ def _do_replication_setup(self):
+ replication_devices = self.config.replication_device
+ if replication_devices:
+ for dev in replication_devices:
+ remote_array = {}
+ is_managed = dev.get('managed_backend_name')
+ if not is_managed:
+ msg = _("Unmanaged replication is not supported at this "
+ "time. Please configure cinder.conf for managed "
+ "replication.")
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ remote_array['managed_backend_name'] = is_managed
+ remote_array['replication_mode'] = (
+ self._get_remote_copy_mode_num(
+ dev.get('replication_mode')))
+ remote_array['target_device_id'] = (
+ dev.get('target_device_id'))
+ remote_array['cpg_map'] = (
+ dev.get('cpg_map'))
+ remote_array['hpe3par_api_url'] = (
+ dev.get('hpe3par_api_url'))
+ remote_array['hpe3par_username'] = (
+ dev.get('hpe3par_username'))
+ remote_array['hpe3par_password'] = (
+ dev.get('hpe3par_password'))
+ remote_array['san_ip'] = (
+ dev.get('san_ip'))
+ remote_array['san_login'] = (
+ dev.get('san_login'))
+ remote_array['san_password'] = (
+ dev.get('san_password'))
+ remote_array['san_ssh_port'] = (
+ dev.get('san_ssh_port', self.config.san_ssh_port))
+ remote_array['ssh_conn_timeout'] = (
+ dev.get('ssh_conn_timeout', self.config.ssh_conn_timeout))
+ remote_array['san_private_key'] = (
+ dev.get('san_private_key', self.config.san_private_key))
+ array_name = remote_array['target_device_id']
+
+ # Make sure we can log into the client, that it has been
+ # correctly configured, and it its version matches the
+ # primary arrarys version.
+ try:
+ cl = self._create_replication_client(remote_array)
+ array_id = six.text_type(cl.getStorageSystemInfo()['id'])
+ remote_array['id'] = array_id
+ wsapi_version = cl.getWsApiVersion()['build']
+
+ if self.client is not None and (
+ wsapi_version != self.API_VERSION):
+ msg = (_LW("The target array and all of its secondary "
+ "arrays must be on the same API version. "
+ "Array '%(target)s' is on %(target_ver)s "
+ "while the primary array is on "
+ "%(primary_ver)s, therefore it will not "
+ "be added as a valid replication target.") %
+ {'target': array_name,
+ 'target_ver': wsapi_version,
+ 'primary_ver': self.API_VERSION})
+ LOG.warning(msg)
+ elif not self._is_valid_replication_array(remote_array):
+ msg = (_LW("'%s' is not a valid replication array. "
+ "In order to be valid, target_device_id, "
+ "replication_mode, "
+ "hpe3par_api_url, hpe3par_username, "
+ "hpe3par_password, cpg_map, and "
+ "must be specified. If the target is "
+ "managed, managed_backend_name must be set "
+ "as well.") % array_name)
+ LOG.warning(msg)
+ else:
+ self._replication_targets.append(remote_array)
+ except Exception:
+ msg = (_LE("Could not log in to 3PAR array (%s) with the "
+ "provided credentials.") % array_name)
+ LOG.error(msg)
+ finally:
+ self._destroy_replication_client(cl)
+
+ if self._is_replication_configured_correct():
+ self._replication_enabled = True
+
+ def _is_valid_replication_array(self, target):
+ for k, v in target.items():
+ if v is None:
+ return False
+ return True
+
+ def _is_replication_configured_correct(self):
+ rep_flag = True
+ # Make sure there is at least one replication target.
+ if len(self._replication_targets) < 1:
+ LOG.error(_LE("There must be at least one valid replication "
+ "device configured."))
+ rep_flag = False
+ return rep_flag
+
+ def _is_replication_mode_correct(self, mode, sync_num):
+ rep_flag = True
+ # Make sure replication_mode is set to either sync|periodic.
+ mode = self._get_remote_copy_mode_num(mode)
+ if not mode:
+ LOG.error(_LE("Extra spec replication:mode must be set and must "
+ "be either 'sync' or 'periodic'."))
+ rep_flag = False
+ else:
+ # If replication:mode is periodic, replication_sync_period must be
+ # set between 300 - 31622400 seconds.
+ if mode == self.PERIODIC and (
+ sync_num < 300 or sync_num > 31622400):
+ LOG.error(_LE("Extra spec replication:sync_period must be "
+ "greater than 299 and less than 31622401 "
+ "seconds."))
+ rep_flag = False
+ return rep_flag
+
+ def _volume_of_replicated_type(self, volume):
+ replicated_type = False
+ volume_type_id = volume.get('volume_type_id')
+ if volume_type_id:
+ volume_type = self._get_volume_type(volume_type_id)
+
+ extra_specs = volume_type.get('extra_specs')
+ if extra_specs and 'replication_enabled' in extra_specs:
+ rep_val = extra_specs['replication_enabled']
+ replicated_type = (rep_val == "<is> True")
+
+ return replicated_type
+
+ def _is_volume_in_remote_copy_group(self, volume):
+ rcg_name = self._get_3par_rcg_name(volume['id'])
+ try:
+ self.client.getRemoteCopyGroup(rcg_name)
+ return True
+ except hpeexceptions.HTTPNotFound:
+ return False
+
+ def _get_remote_copy_mode_num(self, mode):
+ ret_mode = None
+ if mode == "sync":
+ ret_mode = self.SYNC
+ if mode == "periodic":
+ ret_mode = self.PERIODIC
+ return ret_mode
+
+ def _get_cpg_from_cpg_map(self, cpg_map, target_cpg):
+ ret_target_cpg = None
+ cpg_pairs = cpg_map.split(' ')
+ for cpg_pair in cpg_pairs:
+ cpgs = cpg_pair.split(':')
+ cpg = cpgs[0]
+ dest_cpg = cpgs[1]
+ if cpg == target_cpg:
+ ret_target_cpg = dest_cpg
+
+ return ret_target_cpg
+
+ def _do_volume_replication_setup(self, volume):
+ """This function will do or ensure the following:
+
+ -Create volume on main array (already done in create_volume)
+ -Create Remote Copy Group on main array
+ -Add volume to Remote Copy Group on main array
+ -Start remote copy
+
+ If anything here fails, we will need to clean everything up in
+ reverse order, including the original volume.
+ """
+
+ rcg_name = self._get_3par_rcg_name(volume['id'])
+ # If the volume is already in a remote copy group, return True
+ # after starting remote copy. If remote copy is already started,
+ # issuing this command again will be fine.
+ if self._is_volume_in_remote_copy_group(volume):
+ try:
+ self.client.startRemoteCopy(rcg_name)
+ except Exception:
+ pass
+ return True
+
+ try:
+ # Grab the extra_spec entries for replication and make sure they
+ # are set correctly.
+ volume_type = self._get_volume_type(volume["volume_type_id"])
+ extra_specs = volume_type.get("extra_specs")
+ replication_mode = extra_specs.get(self.EXTRA_SPEC_REP_MODE)
+ replication_mode_num = self._get_remote_copy_mode_num(
+ replication_mode)
+ # sync_period arrives as a string extra spec; convert to int
+ # before range validation.
+ replication_sync_period = extra_specs.get(
+ self.EXTRA_SPEC_REP_SYNC_PERIOD)
+ if replication_sync_period:
+ replication_sync_period = int(replication_sync_period)
+ if not self._is_replication_mode_correct(replication_mode,
+ replication_sync_period):
+ msg = _("The replication mode was not configured correctly "
+ "in the volume type extra_specs. If replication:mode "
+ "is periodic, replication:sync_period must also be "
+ "specified and be between 300 and 31622400 seconds.")
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ vol_settings = self.get_volume_settings_from_type(volume)
+ local_cpg = vol_settings['cpg']
+ vol_name = self._get_3par_vol_name(volume['id'])
+
+ # Create remote copy group on main array.
+ rcg_targets = []
+ sync_targets = []
+ for target in self._replication_targets:
+ # Only add targets that match the volumes replication mode.
+ if target['replication_mode'] == replication_mode_num:
+ cpg = self._get_cpg_from_cpg_map(target['cpg_map'],
+ local_cpg)
+ rcg_target = {'targetName': target['target_device_id'],
+ 'mode': replication_mode_num,
+ 'snapCPG': cpg,
+ 'userCPG': cpg}
+ rcg_targets.append(rcg_target)
+ sync_target = {'targetName': target['target_device_id'],
+ 'syncPeriod': replication_sync_period}
+ sync_targets.append(sync_target)
+
+ optional = {'localSnapCPG': vol_settings['snap_cpg'],
+ 'localUserCPG': local_cpg}
+ pool = volume_utils.extract_host(volume['host'], level='pool')
+ domain = self.get_domain(pool)
+ if domain:
+ optional["domain"] = domain
+ try:
+ self.client.createRemoteCopyGroup(rcg_name, rcg_targets,
+ optional)
+ except Exception as ex:
+ msg = (_("There was an error creating the remote copy "
+ "group: %s.") %
+ six.text_type(ex))
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ # Add volume to remote copy group.
+ rcg_targets = []
+ for target in self._replication_targets:
+ # Only add targets that match the volumes replication mode.
+ if target['replication_mode'] == replication_mode_num:
+ rcg_target = {'targetName': target['target_device_id'],
+ 'secVolumeName': vol_name}
+ rcg_targets.append(rcg_target)
+ optional = {'volumeAutoCreation': True}
+ try:
+ self.client.addVolumeToRemoteCopyGroup(rcg_name, vol_name,
+ rcg_targets,
+ optional=optional)
+ except Exception as ex:
+ msg = (_("There was an error adding the volume to the remote "
+ "copy group: %s.") %
+ six.text_type(ex))
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ # Check and see if we are in periodic mode. If we are, update
+ # Remote Copy Group to have a sync period.
+ if replication_sync_period and (
+ replication_mode_num == self.PERIODIC):
+ opt = {'targets': sync_targets}
+ try:
+ self.client.modifyRemoteCopyGroup(rcg_name, opt)
+ except Exception as ex:
+ msg = (_("There was an error setting the sync period for "
+ "the remote copy group: %s.") %
+ six.text_type(ex))
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ # Start the remote copy.
+ try:
+ self.client.startRemoteCopy(rcg_name)
+ except Exception as ex:
+ msg = (_("There was an error starting remote copy: %s.") %
+ six.text_type(ex))
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ return True
+ except Exception as ex:
+ # Roll back everything (including the base volume) so the volume
+ # is not left half-configured for replication.
+ self._do_volume_replication_destroy(volume)
+ msg = (_("There was an error setting up a remote copy group "
+ "on the 3PAR arrays: ('%s'). The volume will not be "
+ "recognized as replication type.") %
+ six.text_type(ex))
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ def _do_volume_replication_destroy(self, volume, rcg_name=None):
+ """This will completely remove all traces of a remote copy group.
+
+ It should be used when deleting a replication enabled volume
+ or if setting up a remote copy group fails. It will try and do the
+ following:
+ -Stop remote copy
+ -Remove volume from Remote Copy Group on main array
+ -Delete Remote Copy Group from main array
+ -Delete volume from main array
+ """
+ if not rcg_name:
+ rcg_name = self._get_3par_rcg_name(volume['id'])
+ vol_name = self._get_3par_vol_name(volume['id'])
+
+ # Stop remote copy.
+ try:
+ self.client.stopRemoteCopy(rcg_name)
+ except Exception:
+ pass
+
+ # Delete volume from remote copy group on main array.
+ try:
+ self.client.removeVolumeFromRemoteCopyGroup(
+ rcg_name, vol_name, removeFromTarget=True)
+ except Exception:
+ pass
+
+ # Delete remote copy group on main array.
+ try:
+ self.client.removeRemoteCopyGroup(rcg_name)
+ except Exception:
+ pass
+
+ # Delete volume on the main array.
+ try:
+ self.client.deleteVolume(vol_name)
+ except Exception:
+ pass
+
+ def _delete_replicated_failed_over_volume(self, volume):
+ old_location, new_location = volume['provider_location'].split(':')
+ rcg_name = self._get_3par_remote_rcg_name(volume['id'], old_location)
+ targets = self.client.getRemoteCopyGroup(rcg_name)['targets']
+ # When failed over, we want to temporarily disable config mirroring
+ # in order to be allowed to delete the volume and remote copy group
+ for target in targets:
+ target_name = target['targetName']
+ self.client.toggleRemoteCopyConfigMirror(target_name,
+ mirror_config=False)
+
+ # Do regular volume replication destroy now config mirroring is off
+ try:
+ self._do_volume_replication_destroy(volume, rcg_name)
+ except Exception:
+ msg = (_("The failed-over volume could not be deleted."))
+ LOG.error(msg)
+ raise exception.VolumeIsBusy(message=msg)
+ finally:
+ # Turn config mirroring back on
+ for target in targets:
+ target_name = target['targetName']
+ self.client.toggleRemoteCopyConfigMirror(target_name,
+ mirror_config=True)
+
class TaskWaiter(object):
"""TaskWaiter waits for task to be not active and returns status."""