list_rep_targets = [{'target_device_id': 'target'}]
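+ # Unmanaged replication target fixture: managed_backend_name is deliberately
+ # left as None, which is what marks a replication_device entry as unmanaged
+ # in the driver's replication setup.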
+ replication_devs_unmgd = [{'target_device_id': 'target',
+ 'cpg_map': HPE3PAR_CPG_MAP,
+ 'hpe3par_api_url': 'https://1.1.1.1/api/v1',
+ 'hpe3par_username': HPE3PAR_USER_NAME,
+ 'hpe3par_password': HPE3PAR_USER_PASS,
+ 'san_ip': HPE3PAR_SAN_IP,
+ 'san_login': HPE3PAR_USER_NAME,
+ 'san_password': HPE3PAR_USER_PASS,
+ 'san_ssh_port': HPE3PAR_SAN_SSH_PORT,
+ 'ssh_conn_timeout': HPE3PAR_SAN_SSH_CON_TIMEOUT,
+ 'san_private_key': HPE3PAR_SAN_SSH_PRIVATE,
+ 'managed_backend_name': None}]
+
volume_encrypted = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'progress': '0%',
'volume_size': 2,
'display_name': 'fakesnap',
- 'display_description': FAKE_DESC}
+ 'display_description': FAKE_DESC,
+ 'volume': volume}
wwn = ["123456789012345", "123456789054321"]
'provider_location': self.CLIENT_ID},
return_model)
+ @mock.patch('hpe3parclient.version', "4.0.2")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ def test_create_volume_replicated_unmanaged_periodic(self,
+ _mock_volume_types):
+ # setup_mock_client driver with default configuration
+ # and return the mock HTTP 3PAR client
+ conf = self.setup_configuration()
+ self.replication_devs_unmgd[0]['replication_mode'] = 'periodic'
+ conf.replication_device = self.replication_devs_unmgd
+ mock_client = self.setup_driver(config=conf)
+ mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
+ mock_client.getRemoteCopyGroup.side_effect = hpeexceptions.HTTPNotFound
+ mock_client.getCPG.return_value = {'domain': None}
+ mock_replicated_client = self.setup_driver(config=conf)
+ mock_replicated_client.getStorageSystemInfo.return_value = (
+ {'id': self.REPLICATION_CLIENT_ID})
+
+ _mock_volume_types.return_value = {
+ 'name': 'replicated',
+ 'extra_specs': {
+ 'cpg': HPE3PAR_CPG,
+ 'snap_cpg': HPE3PAR_CPG_SNAP,
+ 'replication_enabled': '<is> True',
+ 'replication:mode': 'periodic',
+ 'replication:sync_period': '900',
+ 'volume_type': self.volume_type_replicated}}
+
+ with mock.patch.object(
+ hpecommon.HPE3PARCommon,
+ '_create_client') as mock_create_client, \
+ mock.patch.object(
+ hpecommon.HPE3PARCommon,
+ '_create_replication_client') as mock_replication_client:
+ mock_create_client.return_value = mock_client
+ mock_replication_client.return_value = mock_replicated_client
+
+ return_model = self.driver.create_volume(self.volume_replicated)
+ comment = Comment({
+ "volume_type_name": "replicated",
+ "display_name": "Foo Volume",
+ "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
+ "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db",
+ "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
+ "qos": {},
+ "type": "OpenStack"})
+
+ target_device_id = self.replication_targets[0]['target_device_id']
+ expected = [
+ mock.call.getCPG(HPE3PAR_CPG),
+ mock.call.createVolume(
+ self.VOLUME_3PAR_NAME,
+ HPE3PAR_CPG,
+ 2048, {
+ 'comment': comment,
+ 'tpvv': True,
+ 'tdvv': False,
+ 'snapCPG': HPE3PAR_CPG_SNAP}),
+ mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME),
+ mock.call.getCPG(HPE3PAR_CPG),
+ mock.call.getCPG(HPE3PAR_CPG),
+ mock.call.createRemoteCopyGroup(
+ self.RCG_3PAR_NAME,
+ [{'userCPG': HPE3PAR_CPG_REMOTE,
+ 'targetName': target_device_id,
+ 'mode': PERIODIC_MODE,
+ 'snapCPG': HPE3PAR_CPG_REMOTE}],
+ {'localUserCPG': HPE3PAR_CPG,
+ 'localSnapCPG': HPE3PAR_CPG_SNAP}),
+ mock.call.addVolumeToRemoteCopyGroup(
+ self.RCG_3PAR_NAME,
+ self.VOLUME_3PAR_NAME,
+ [{'secVolumeName': self.VOLUME_3PAR_NAME,
+ 'targetName': target_device_id}],
+ optional={'volumeAutoCreation': True}),
+ mock.call.modifyRemoteCopyGroup(
+ self.RCG_3PAR_NAME,
+ {'targets': [{'syncPeriod': SYNC_PERIOD,
+ 'targetName': target_device_id}]}),
+ mock.call.startRemoteCopy(self.RCG_3PAR_NAME)]
+ mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
+ self.standard_login +
+ expected +
+ self.standard_logout)
+ self.assertEqual({'replication_status': 'enabled',
+ 'provider_location': self.CLIENT_ID},
+ return_model)
+
+ @mock.patch('hpe3parclient.version', "4.0.2")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ def test_create_volume_replicated_unmanaged_sync(self,
+ _mock_volume_types):
+ # setup_mock_client driver with default configuration
+ # and return the mock HTTP 3PAR client
+ conf = self.setup_configuration()
+ self.replication_devs_unmgd[0]['replication_mode'] = 'sync'
+ conf.replication_device = self.replication_devs_unmgd
+ mock_client = self.setup_driver(config=conf)
+ mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
+ mock_client.getRemoteCopyGroup.side_effect = hpeexceptions.HTTPNotFound
+ mock_client.getCPG.return_value = {'domain': None}
+ mock_replicated_client = self.setup_driver(config=conf)
+ mock_replicated_client.getStorageSystemInfo.return_value = (
+ {'id': self.REPLICATION_CLIENT_ID})
+
+ _mock_volume_types.return_value = {
+ 'name': 'replicated',
+ 'extra_specs': {
+ 'cpg': HPE3PAR_CPG,
+ 'snap_cpg': HPE3PAR_CPG_SNAP,
+ 'replication_enabled': '<is> True',
+ 'replication:mode': 'sync',
+ 'volume_type': self.volume_type_replicated}}
+
+ with mock.patch.object(
+ hpecommon.HPE3PARCommon,
+ '_create_client') as mock_create_client, \
+ mock.patch.object(
+ hpecommon.HPE3PARCommon,
+ '_create_replication_client') as mock_replication_client:
+ mock_create_client.return_value = mock_client
+ mock_replication_client.return_value = mock_replicated_client
+
+ return_model = self.driver.create_volume(self.volume_replicated)
+ comment = Comment({
+ "volume_type_name": "replicated",
+ "display_name": "Foo Volume",
+ "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
+ "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db",
+ "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
+ "qos": {},
+ "type": "OpenStack"})
+
+ target_device_id = self.replication_targets[0]['target_device_id']
+ expected = [
+ mock.call.getCPG(HPE3PAR_CPG),
+ mock.call.createVolume(
+ self.VOLUME_3PAR_NAME,
+ HPE3PAR_CPG,
+ 2048, {
+ 'comment': comment,
+ 'tpvv': True,
+ 'tdvv': False,
+ 'snapCPG': HPE3PAR_CPG_SNAP}),
+ mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME),
+ mock.call.getCPG(HPE3PAR_CPG),
+ mock.call.getCPG(HPE3PAR_CPG),
+ mock.call.createRemoteCopyGroup(
+ self.RCG_3PAR_NAME,
+ [{'userCPG': HPE3PAR_CPG_REMOTE,
+ 'targetName': target_device_id,
+ 'mode': SYNC_MODE,
+ 'snapCPG': HPE3PAR_CPG_REMOTE}],
+ {'localUserCPG': HPE3PAR_CPG,
+ 'localSnapCPG': HPE3PAR_CPG_SNAP}),
+ mock.call.addVolumeToRemoteCopyGroup(
+ self.RCG_3PAR_NAME,
+ self.VOLUME_3PAR_NAME,
+ [{'secVolumeName': self.VOLUME_3PAR_NAME,
+ 'targetName': target_device_id}],
+ optional={'volumeAutoCreation': True}),
+ mock.call.startRemoteCopy(self.RCG_3PAR_NAME)]
+ mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
+ self.standard_login +
+ expected +
+ self.standard_logout)
+ self.assertEqual({'replication_status': 'enabled',
+ 'provider_location': self.CLIENT_ID},
+ return_model)
+
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_dedup(self, _mock_volume_types):
# setup_mock_client driver with default configuration
self.volume_replicated,
valid_target_device_id)
+ @mock.patch('hpe3parclient.version', "4.0.2")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ def test_replication_failover_unmanaged(self, _mock_volume_types):
+ # periodic vs. sync is not relevant when conducting a failover. We
+ # will just use periodic.
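+ # A successful failover is expected to report provider_location as
+ # "<primary array id>:<secondary array id>".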
+ provider_location = self.CLIENT_ID + ":" + self.REPLICATION_CLIENT_ID
+ conf = self.setup_configuration()
+ self.replication_devs_unmgd[0]['replication_mode'] = 'periodic'
+ conf.replication_device = self.replication_devs_unmgd
+ mock_client = self.setup_driver(config=conf)
+ mock_client.getStorageSystemInfo.return_value = (
+ {'id': self.CLIENT_ID})
+ mock_replicated_client = self.setup_driver(config=conf)
+ mock_replicated_client.getStorageSystemInfo.return_value = (
+ {'id': self.REPLICATION_CLIENT_ID})
+
+ _mock_volume_types.return_value = {
+ 'name': 'replicated',
+ 'extra_specs': {
+ 'replication_enabled': '<is> True',
+ 'replication:mode': 'periodic',
+ 'replication:sync_period': '900',
+ 'volume_type': self.volume_type_replicated}}
+
+ with mock.patch.object(
+ hpecommon.HPE3PARCommon,
+ '_create_client') as mock_create_client, \
+ mock.patch.object(
+ hpecommon.HPE3PARCommon,
+ '_create_replication_client') as mock_replication_client:
+ mock_create_client.return_value = mock_client
+ mock_replication_client.return_value = mock_replicated_client
+ valid_target_device_id = (
+ self.replication_targets[0]['target_device_id'])
+ invalid_target_device_id = 'INVALID'
+
+ # test invalid secondary target
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.driver.replication_failover,
+ context.get_admin_context(),
+ self.volume_replicated,
+ invalid_target_device_id)
+
+ # test no secondary target
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.driver.replication_failover,
+ context.get_admin_context(),
+ self.volume_replicated,
+ None)
+
+ # test a successful failover
+ volume = self.volume_replicated
+ volume['provider_location'] = self.CLIENT_ID
+ return_model = self.driver.replication_failover(
+ context.get_admin_context(),
+ volume,
+ valid_target_device_id)
+ expected = [
+ mock.call.stopRemoteCopy(self.RCG_3PAR_NAME)]
+ mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
+ self.standard_login +
+ expected +
+ self.standard_logout)
+ self.assertEqual({'replication_status': 'inactive',
+ 'provider_location': provider_location},
+ return_model)
+
+ # test an unsuccessful failover
+ mock_replicated_client.recoverRemoteCopyGroupFromDisaster.\
+ side_effect = (
+ exception.VolumeBackendAPIException(
+ "Error: Failover was unsuccessful."))
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.driver.replication_failover,
+ context.get_admin_context(),
+ self.volume_replicated,
+ valid_target_device_id)
+
class TestHPE3PARFCDriver(HPE3PARBaseDriver, test.TestCase):
def test_get_least_used_nsp_for_host_single(self):
# setup_mock_client driver with default configuration
# and return the mock HTTP 3PAR client
- mock_client = self.setup_driver()
+
+ # Setup a single ISCSI IP
+ conf = self.setup_configuration()
+ conf.hpe3par_iscsi_ips = ["10.10.220.253"]
+ mock_client = self.setup_driver(config=conf)
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
mock_create_client.return_value = mock_client
common = self.driver._login()
- # Setup a single ISCSI IP
- iscsi_ips = ["10.10.220.253"]
- self.driver.configuration.hpe3par_iscsi_ips = iscsi_ips
-
self.driver.initialize_iscsi_ports(common)
nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
def test_get_least_used_nsp_for_host_new(self):
# setup_mock_client driver with default configuration
# and return the mock HTTP 3PAR client
- mock_client = self.setup_driver()
+
+ # Setup two ISCSI IPs
+ conf = self.setup_configuration()
+ conf.hpe3par_iscsi_ips = ["10.10.220.252", "10.10.220.253"]
+ mock_client = self.setup_driver(config=conf)
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
mock_create_client.return_value = mock_client
common = self.driver._login()
- # Setup two ISCSI IPs
- iscsi_ips = ["10.10.220.252", "10.10.220.253"]
- self.driver.configuration.hpe3par_iscsi_ips = iscsi_ips
-
self.driver.initialize_iscsi_ports(common)
# Host 'newhost' does not yet have any iscsi paths,
def test_get_least_used_nsp_for_host_reuse(self):
# setup_mock_client driver with default configuration
# and return the mock HTTP 3PAR client
- mock_client = self.setup_driver()
+
+ # Setup two ISCSI IPs
+ conf = self.setup_configuration()
+ conf.hpe3par_iscsi_ips = ["10.10.220.252", "10.10.220.253"]
+ mock_client = self.setup_driver(config=conf)
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
mock_create_client.return_value = mock_client
common = self.driver._login()
- # Setup two ISCSI IPs
- iscsi_ips = ["10.10.220.252", "10.10.220.253"]
- self.driver.configuration.hpe3par_iscsi_ips = iscsi_ips
-
self.driver.initialize_iscsi_ports(common)
# hosts 'foo' and 'bar' already have active iscsi paths
3.0.2 - Python 3 support
3.0.3 - Remove db access for consistency groups
3.0.4 - Adds v2 managed replication support
+ 3.0.5 - Adds v2 unmanaged replication support
"""
- VERSION = "3.0.4"
+ VERSION = "3.0.5"
stats = {}
PERIODIC = 2
EXTRA_SPEC_REP_MODE = "replication:mode"
EXTRA_SPEC_REP_SYNC_PERIOD = "replication:sync_period"
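+ # Remote copy recovery action for promoting the secondary array to primary,
+ # used by replication_failover.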
+ RC_ACTION_CHANGE_TO_PRIMARY = 7
# Valid values for volume type extra specs
# The first value in the list is the default value
self.config = config
self.client = None
self.uuid = uuid.uuid4()
+ self._client_conf = {}
self._replication_targets = []
self._replication_enabled = False
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
+ def check_replication_flags(self, options, required_flags):
+ for flag in required_flags:
+ if not options.get(flag, None):
+ msg = (_('%s is not set and is required for the replication '
+ 'device to be valid.') % flag)
+ LOG.error(msg)
+ raise exception.InvalidInput(reason=msg)
+
def _create_client(self, timeout=None):
+ hpe3par_api_url = self._client_conf['hpe3par_api_url']
# Timeout is only supported in version 4.0.2 and greater of the
# python-3parclient.
if hpe3parclient.version >= MIN_REP_CLIENT_VERSION:
- cl = client.HPE3ParClient(self.config.hpe3par_api_url,
- timeout=timeout)
+ cl = client.HPE3ParClient(hpe3par_api_url, timeout=timeout)
else:
- cl = client.HPE3ParClient(self.config.hpe3par_api_url)
+ cl = client.HPE3ParClient(hpe3par_api_url)
client_version = hpe3parclient.version
if client_version < MIN_CLIENT_VERSION:
def client_login(self):
try:
LOG.debug("Connecting to 3PAR")
- self.client.login(self.config.hpe3par_username,
- self.config.hpe3par_password)
+ self.client.login(self._client_conf['hpe3par_username'],
+ self._client_conf['hpe3par_password'])
except hpeexceptions.HTTPUnauthorized as ex:
msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") %
- {'url': self.config.hpe3par_api_url, 'err': ex})
+ {'url': self._client_conf['hpe3par_api_url'], 'err': ex})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if CONF.strict_ssh_host_key_policy:
policy = "RejectPolicy"
self.client.setSSHOptions(
- self.config.san_ip,
- self.config.san_login,
- self.config.san_password,
- port=self.config.san_ssh_port,
- conn_timeout=self.config.ssh_conn_timeout,
- privatekey=self.config.san_private_key,
+ self._client_conf['san_ip'],
+ self._client_conf['san_login'],
+ self._client_conf['san_password'],
+ port=self._client_conf['san_ssh_port'],
+ conn_timeout=self._client_conf['ssh_conn_timeout'],
+ privatekey=self._client_conf['san_private_key'],
missing_key_policy=policy,
known_hosts_file=known_hosts_file)
return cl
def _destroy_replication_client(self, client):
- client.logout()
+ if client is not None:
+ client.logout()
- def do_setup(self, context, timeout=None):
+ def do_setup(self, context, volume=None, timeout=None):
if hpe3parclient is None:
msg = _('You must install hpe3parclient before using 3PAR'
' drivers. Run "pip install python-3parclient" to'
raise exception.VolumeBackendAPIException(data=msg)
try:
+ # This will set self._client_conf with the proper credentials
+ # to communicate with the 3PAR array. It will contain either
+ # the values for the primary array or secondary array in the
+ # case of a fail-over.
+ self._get_3par_config(volume)
self.client = self._create_client(timeout=timeout)
wsapi_version = self.client.getWsApiVersion()
self.API_VERSION = wsapi_version['build']
finally:
self.client_logout()
- # v2 replication setup
- if not self._replication_enabled and (
- hpe3parclient.version >= MIN_REP_CLIENT_VERSION):
- self._do_replication_setup()
-
def check_for_setup_error(self):
if self.client:
self.client_login()
if 'must be in the same domain' in e.get_description():
LOG.error(e.get_description())
raise exception.Invalid3PARDomain(err=e.get_description())
+ else:
+ raise exception.VolumeBackendAPIException(
+ data=e.get_description())
def _safe_hostname(self, hostname):
"""We have to use a safe hostname length for 3PAR host names."""
volume['id'], volume['provider_location'])
cl = self._create_replication_client(failover_target)
cl.recoverRemoteCopyGroupFromDisaster(
- remote_rcg_name, self.client.RC_ACTION_CHANGE_TO_PRIMARY)
+ remote_rcg_name, self.RC_ACTION_CHANGE_TO_PRIMARY)
new_location = volume['provider_location'] + ":" + (
failover_target['id'])
"not be verified with the primary array."))
replication_targets = []
+ volume_type = self._get_volume_type(volume["volume_type_id"])
+ extra_specs = volume_type.get("extra_specs")
+ replication_mode = extra_specs.get(self.EXTRA_SPEC_REP_MODE)
+ replication_mode_num = self._get_remote_copy_mode_num(
+ replication_mode)
+
for target in self._replication_targets:
- if not allowed_names or (
- target['target_device_id'] in allowed_names):
+ if not allowed_names and replication_mode_num == (
+ target['replication_mode']) or (
+ target['target_device_id'] in allowed_names):
list_vals = {'target_device_id': target['target_device_id']}
replication_targets.append(list_vals)
'targets': replication_targets}
def _do_replication_setup(self):
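+ # Valid targets are collected in a local list and self._replication_targets
+ # is replaced wholesale at the end, so repeated setup runs (setup now happens
+ # on every login) do not accumulate duplicate entries.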
+ replication_targets = []
replication_devices = self.config.replication_device
if replication_devices:
for dev in replication_devices:
- remote_array = {}
- is_managed = dev.get('managed_backend_name')
- if not is_managed:
- msg = _("Unmanaged replication is not supported at this "
- "time. Please configure cinder.conf for managed "
- "replication.")
- LOG.error(msg)
- raise exception.VolumeBackendAPIException(data=msg)
-
- remote_array['managed_backend_name'] = is_managed
+ remote_array = dict(dev.items())
+ # Override and set defaults for certain entries
+ remote_array['managed_backend_name'] = (
+ dev.get('managed_backend_name'))
remote_array['replication_mode'] = (
self._get_remote_copy_mode_num(
dev.get('replication_mode')))
- remote_array['target_device_id'] = (
- dev.get('target_device_id'))
- remote_array['cpg_map'] = (
- dev.get('cpg_map'))
- remote_array['hpe3par_api_url'] = (
- dev.get('hpe3par_api_url'))
- remote_array['hpe3par_username'] = (
- dev.get('hpe3par_username'))
- remote_array['hpe3par_password'] = (
- dev.get('hpe3par_password'))
- remote_array['san_ip'] = (
- dev.get('san_ip'))
- remote_array['san_login'] = (
- dev.get('san_login'))
- remote_array['san_password'] = (
- dev.get('san_password'))
remote_array['san_ssh_port'] = (
dev.get('san_ssh_port', self.config.san_ssh_port))
remote_array['ssh_conn_timeout'] = (
dev.get('ssh_conn_timeout', self.config.ssh_conn_timeout))
remote_array['san_private_key'] = (
dev.get('san_private_key', self.config.san_private_key))
+ # hpe3par_iscsi_ips in a replication_device entry is a space-separated
+ # string; convert it to a list to match the primary backend's config
+ iscsi_ips = dev.get('hpe3par_iscsi_ips')
+ if iscsi_ips:
+ remote_array['hpe3par_iscsi_ips'] = iscsi_ips.split(' ')
+ # Format hpe3par_iscsi_chap_enabled as a bool
+ remote_array['hpe3par_iscsi_chap_enabled'] = (
+ dev.get('hpe3par_iscsi_chap_enabled') == 'True')
array_name = remote_array['target_device_id']
# Make sure we can log into the client, that it has been
# correctly configured, and that its version matches the
# primary array's version.
+ cl = None
try:
cl = self._create_replication_client(remote_array)
array_id = six.text_type(cl.getStorageSystemInfo()['id'])
"In order to be valid, target_device_id, "
"replication_mode, "
"hpe3par_api_url, hpe3par_username, "
- "hpe3par_password, cpg_map, and "
+ "hpe3par_password, cpg_map, san_ip, "
+ "san_login, and san_password "
"must be specified. If the target is "
"managed, managed_backend_name must be set "
"as well.") % array_name)
LOG.warning(msg)
else:
- self._replication_targets.append(remote_array)
+ replication_targets.append(remote_array)
except Exception:
msg = (_LE("Could not log in to 3PAR array (%s) with the "
"provided credentials.") % array_name)
finally:
self._destroy_replication_client(cl)
+ self._replication_targets = replication_targets
if self._is_replication_configured_correct():
self._replication_enabled = True
def _is_valid_replication_array(self, target):
- for k, v in target.items():
- if v is None:
- return False
- return True
+ required_flags = ['hpe3par_api_url', 'hpe3par_username',
+ 'hpe3par_password', 'san_ip', 'san_login',
+ 'san_password', 'target_device_id',
+ 'replication_mode', 'cpg_map']
+ try:
+ self.check_replication_flags(target, required_flags)
+ return True
+ except Exception:
+ return False
def _is_replication_configured_correct(self):
rep_flag = True
ret_mode = self.PERIODIC
return ret_mode
+ def _get_3par_config(self, volume):
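+ # Runs replication setup and then builds self._client_conf, pointing it at
+ # either the primary array or, for a failed-over volume, the secondary array.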
+ if hpe3parclient.version >= MIN_REP_CLIENT_VERSION:
+ self._do_replication_setup()
+ conf = None
+ if self._replication_enabled and volume:
+ provider_location = volume.get('provider_location')
+ if provider_location:
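+ # A failed-over volume stores provider_location as
+ # "<primary array id>:<secondary array id>"; keep only the secondary
+ # id so it can be matched against the configured targets below.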
+ if volume.get('replication_status') == 'failed-over':
+ _, provider_location = provider_location.split(':')
+
+ for target in self._replication_targets:
+ if target['id'] == provider_location:
+ conf = target
+ break
+ self._build_3par_config(conf)
+
+ def _build_3par_config(self, conf=None):
+ """Build 3PAR client config dictionary.
+
+ self._client_conf will contain values from self.config if the volume
+ is located on the primary array in order to properly contact it. If
+ the volume has been failed over and therefore on a secondary array,
+ self._client_conf will contain values on how to contact that array.
+ The only time we will return with entries from a secondary array is
+ with unmanaged replication.
+ """
+ if conf:
+ self._client_conf['hpe3par_username'] = (
+ conf.get('hpe3par_username'))
+ self._client_conf['hpe3par_password'] = (
+ conf.get('hpe3par_password'))
+ self._client_conf['san_ip'] = conf.get('san_ip')
+ self._client_conf['san_login'] = conf.get('san_login')
+ self._client_conf['san_password'] = conf.get('san_password')
+ self._client_conf['san_ssh_port'] = conf.get('san_ssh_port')
+ self._client_conf['ssh_conn_timeout'] = (
+ conf.get('ssh_conn_timeout'))
+ self._client_conf['san_private_key'] = conf.get('san_private_key')
+ self._client_conf['hpe3par_api_url'] = conf.get('hpe3par_api_url')
+ self._client_conf['hpe3par_iscsi_ips'] = (
+ conf.get('hpe3par_iscsi_ips'))
+ self._client_conf['hpe3par_iscsi_chap_enabled'] = (
+ conf.get('hpe3par_iscsi_chap_enabled'))
+ self._client_conf['iscsi_ip_address'] = (
+ conf.get('iscsi_ip_address'))
+ self._client_conf['iscsi_port'] = conf.get('iscsi_port')
+ else:
+ self._client_conf['hpe3par_username'] = (
+ self.config.hpe3par_username)
+ self._client_conf['hpe3par_password'] = (
+ self.config.hpe3par_password)
+ self._client_conf['san_ip'] = self.config.san_ip
+ self._client_conf['san_login'] = self.config.san_login
+ self._client_conf['san_password'] = self.config.san_password
+ self._client_conf['san_ssh_port'] = self.config.san_ssh_port
+ self._client_conf['ssh_conn_timeout'] = (
+ self.config.ssh_conn_timeout)
+ self._client_conf['san_private_key'] = self.config.san_private_key
+ self._client_conf['hpe3par_api_url'] = self.config.hpe3par_api_url
+ self._client_conf['hpe3par_iscsi_ips'] = (
+ self.config.hpe3par_iscsi_ips)
+ self._client_conf['hpe3par_iscsi_chap_enabled'] = (
+ self.config.hpe3par_iscsi_chap_enabled)
+ self._client_conf['iscsi_ip_address'] = (
+ self.config.iscsi_ip_address)
+ self._client_conf['iscsi_port'] = self.config.iscsi_port
+
def _get_cpg_from_cpg_map(self, cpg_map, target_cpg):
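# cpg_map is a space-separated list of <source CPG>:<destination CPG> pairs
# mapping local CPGs to CPGs on the replication target.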
ret_target_cpg = None
cpg_pairs = cpg_map.split(' ')
# Do regular volume replication destroy now that config mirroring is off
try:
self._do_volume_replication_destroy(volume, rcg_name)
- except Exception:
- msg = (_("The failed-over volume could not be deleted."))
+ except Exception as ex:
+ msg = (_("The failed-over volume could not be deleted: %s") %
+ six.text_type(ex))
LOG.error(msg)
raise exception.VolumeIsBusy(message=msg)
finally:
3.0.0 - Rebranded HP to HPE.
3.0.1 - Remove db access for consistency groups
3.0.2 - Adds v2 managed replication support
+ 3.0.3 - Adds v2 unmanaged replication support
"""
- VERSION = "3.0.2"
+ VERSION = "3.0.3"
def __init__(self, *args, **kwargs):
super(HPE3PARFCDriver, self).__init__(*args, **kwargs)
def _init_common(self):
return hpecommon.HPE3PARCommon(self.configuration)
- def _login(self, timeout=None):
+ def _login(self, volume=None, timeout=None):
common = self._init_common()
# If replication is enabled and we cannot login, we do not want to
# raise an exception so a failover can still be executed.
try:
- common.do_setup(None, timeout=timeout)
+ common.do_setup(None, volume, timeout=timeout)
common.client_login()
except Exception:
if common._replication_enabled:
pass
def create_volume(self, volume):
- common = self._login()
+ common = self._login(volume)
try:
return common.create_volume(volume)
finally:
self._logout(common)
def create_cloned_volume(self, volume, src_vref):
- common = self._login()
+ common = self._login(volume)
try:
return common.create_cloned_volume(volume, src_vref)
finally:
self._logout(common)
def delete_volume(self, volume):
- common = self._login()
+ common = self._login(volume)
try:
common.delete_volume(volume)
finally:
TODO: support using the size from the user.
"""
- common = self._login()
+ common = self._login(volume)
try:
return common.create_volume_from_snapshot(volume, snapshot)
finally:
self._logout(common)
def create_snapshot(self, snapshot):
- common = self._login()
+ common = self._login(snapshot['volume'])
try:
common.create_snapshot(snapshot)
finally:
self._logout(common)
def delete_snapshot(self, snapshot):
- common = self._login()
+ common = self._login(snapshot['volume'])
try:
common.delete_snapshot(snapshot)
finally:
* Create a VLUN for that HOST with the volume we want to export.
"""
- common = self._login()
+ common = self._login(volume)
try:
# we have to make sure we have a host
host = self._create_host(common, volume, connector)
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance."""
- common = self._login()
+ common = self._login(volume)
try:
hostname = common._safe_hostname(connector['host'])
common.terminate_connection(volume, hostname,
pass
def extend_volume(self, volume, new_size):
- common = self._login()
+ common = self._login(volume)
try:
common.extend_volume(volume, new_size)
finally:
self._logout(common)
def manage_existing(self, volume, existing_ref):
- common = self._login()
+ common = self._login(volume)
try:
return common.manage_existing(volume, existing_ref)
finally:
self._logout(common)
def manage_existing_get_size(self, volume, existing_ref):
- common = self._login()
+ common = self._login(volume)
try:
return common.manage_existing_get_size(volume, existing_ref)
finally:
self._logout(common)
def unmanage(self, volume):
- common = self._login()
+ common = self._login(volume)
try:
common.unmanage(volume)
finally:
def attach_volume(self, context, volume, instance_uuid, host_name,
mountpoint):
- common = self._login()
+ common = self._login(volume)
try:
common.attach_volume(volume, instance_uuid)
finally:
self._logout(common)
def detach_volume(self, context, volume, attachment=None):
- common = self._login()
+ common = self._login(volume)
try:
common.detach_volume(volume, attachment)
finally:
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
- common = self._login()
+ common = self._login(volume)
try:
return common.retype(volume, new_type, diff, host)
finally:
"to a host with storage_protocol=%s.", protocol)
return False, None
- common = self._login()
+ common = self._login(volume)
try:
return common.migrate_volume(volume, host)
finally:
def update_migrated_volume(self, context, volume, new_volume,
original_volume_status):
"""Update the name of the migrated volume to it's new ID."""
- common = self._login()
+ common = self._login(volume)
try:
return common.update_migrated_volume(context, volume, new_volume,
original_volume_status)
self._logout(common)
def get_pool(self, volume):
- common = self._login()
+ common = self._login(volume)
try:
return common.get_cpg(volume)
except hpeexceptions.HTTPNotFound:
def replication_enable(self, context, volume):
"""Enable replication on a replication capable volume."""
- common = self._login()
+ common = self._login(volume)
try:
return common.replication_enable(context, volume)
finally:
def replication_disable(self, context, volume):
"""Disable replication on the specified volume."""
- common = self._login()
+ common = self._login(volume)
try:
return common.replication_disable(context, volume)
finally:
def replication_failover(self, context, volume, secondary):
"""Force failover to a secondary replication target."""
- common = self._login(timeout=30)
+ common = self._login(volume, timeout=30)
try:
return common.replication_failover(context, volume, secondary)
finally:
def list_replication_targets(self, context, volume):
"""Provides a means to obtain replication targets for a volume."""
- common = self._login(timeout=30)
+ common = self._login(volume, timeout=30)
try:
return common.list_replication_targets(context, volume)
finally:
3.0.2 - Remove db access for consistency groups
3.0.3 - Fix multipath dictionary key error. bug #1522062
3.0.4 - Adds v2 managed replication support
+ 3.0.5 - Adds v2 unmanaged replication support
"""
- VERSION = "3.0.4"
+ VERSION = "3.0.5"
def __init__(self, *args, **kwargs):
super(HPE3PARISCSIDriver, self).__init__(*args, **kwargs)
def _init_common(self):
return hpecommon.HPE3PARCommon(self.configuration)
- def _login(self, timeout=None):
+ def _login(self, volume=None, timeout=None):
common = self._init_common()
- common.do_setup(None, timeout=timeout)
# If replication is enabled and we cannot login, we do not want to
# raise an exception so a failover can still be executed.
try:
+ common.do_setup(None, volume, timeout=timeout)
common.client_login()
except Exception:
if common._replication_enabled:
self._check_flags(common)
common.check_for_setup_error()
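+ # self.iscsi_ips is keyed by array WSAPI URL so that, after a failover,
+ # initialize_connection can look up the iSCSI ports of the secondary array.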
+ self.iscsi_ips = {}
common.client_login()
try:
self.initialize_iscsi_ports(common)
# map iscsi_ip-> ip_port
# -> iqn
# -> nsp
- self.iscsi_ips = {}
+ iscsi_ip_list = {}
temp_iscsi_ip = {}
# use the 3PAR ip_addr list for iSCSI configuration
- if len(self.configuration.hpe3par_iscsi_ips) > 0:
+ if len(common._client_conf['hpe3par_iscsi_ips']) > 0:
# add port values to ip_addr, if necessary
- for ip_addr in self.configuration.hpe3par_iscsi_ips:
+ for ip_addr in common._client_conf['hpe3par_iscsi_ips']:
ip = ip_addr.split(':')
if len(ip) == 1:
temp_iscsi_ip[ip_addr] = {'ip_port': DEFAULT_ISCSI_PORT}
# add the single value iscsi_ip_address option to the IP dictionary.
# This way we can see if it's a valid iSCSI IP. If it's not valid,
# we won't use it and won't bother to report it, see below
- if (self.configuration.iscsi_ip_address not in temp_iscsi_ip):
- ip = self.configuration.iscsi_ip_address
- ip_port = self.configuration.iscsi_port
+ if (common._client_conf['iscsi_ip_address'] not in temp_iscsi_ip):
+ ip = common._client_conf['iscsi_ip_address']
+ ip_port = common._client_conf['iscsi_port']
temp_iscsi_ip[ip] = {'ip_port': ip_port}
# get all the valid iSCSI ports from 3PAR
ip = port['IPAddr']
if ip in temp_iscsi_ip:
ip_port = temp_iscsi_ip[ip]['ip_port']
- self.iscsi_ips[ip] = {'ip_port': ip_port,
- 'nsp': port['nsp'],
- 'iqn': port['iSCSIName']
- }
+ iscsi_ip_list[ip] = {'ip_port': ip_port,
+ 'nsp': port['nsp'],
+ 'iqn': port['iSCSIName']}
del temp_iscsi_ip[ip]
# if the single value iscsi_ip_address option is still in the
# temp dictionary it's because it defaults to $my_ip which doesn't
# make sense in this context. So, if present, remove it and move on.
- if (self.configuration.iscsi_ip_address in temp_iscsi_ip):
- del temp_iscsi_ip[self.configuration.iscsi_ip_address]
+ if common._client_conf['iscsi_ip_address'] in temp_iscsi_ip:
+ del temp_iscsi_ip[common._client_conf['iscsi_ip_address']]
# lets see if there are invalid iSCSI IPs left in the temp dict
if len(temp_iscsi_ip) > 0:
"iscsi_ip_address '%s.'"),
(", ".join(temp_iscsi_ip)))
- if not len(self.iscsi_ips) > 0:
+ if not len(iscsi_ip_list) > 0:
msg = _('At least one valid iSCSI IP address must be set.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
+ self.iscsi_ips[common._client_conf['hpe3par_api_url']] = iscsi_ip_list
def check_for_setup_error(self):
"""Setup errors are already checked for in do_setup so return pass."""
pass
def create_volume(self, volume):
- common = self._login()
+ common = self._login(volume)
try:
return common.create_volume(volume)
finally:
def create_cloned_volume(self, volume, src_vref):
"""Clone an existing volume."""
- common = self._login()
+ common = self._login(volume)
try:
return common.create_cloned_volume(volume, src_vref)
finally:
self._logout(common)
def delete_volume(self, volume):
- common = self._login()
+ common = self._login(volume)
try:
common.delete_volume(volume)
finally:
TODO: support using the size from the user.
"""
- common = self._login()
+ common = self._login(volume)
try:
return common.create_volume_from_snapshot(volume, snapshot)
finally:
self._logout(common)
def create_snapshot(self, snapshot):
- common = self._login()
+ common = self._login(snapshot['volume'])
try:
common.create_snapshot(snapshot)
finally:
self._logout(common)
def delete_snapshot(self, snapshot):
- common = self._login()
+ common = self._login(snapshot['volume'])
try:
common.delete_snapshot(snapshot)
finally:
* Create a host on the 3par
* create vlun on the 3par
"""
- common = self._login()
+ common = self._login(volume)
try:
+ # If the volume has been failed over, we need to reinitialize
+ # iSCSI ports so they represent the new array.
+ if volume.get('replication_status') == 'failed-over' and (
+ common._client_conf['hpe3par_api_url'] not in self.iscsi_ips):
+ self.initialize_iscsi_ports(common)
+
+ # Grab the correct iSCSI ports
+ iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']]
+
# we have to make sure we have a host
host, username, password = self._create_host(
common,
target_luns = []
# Target portal ips are defined in cinder.conf.
- target_portal_ips = self.iscsi_ips.keys()
+ target_portal_ips = iscsi_ips.keys()
# Collect all existing VLUNs for this volume/host combination.
existing_vluns = common.find_existing_vluns(volume, host)
# instead of creating a new VLUN.
for v in existing_vluns:
portPos = common.build_portPos(
- self.iscsi_ips[iscsi_ip]['nsp'])
+ iscsi_ips[iscsi_ip]['nsp'])
if v['portPos'] == portPos:
vlun = v
break
else:
vlun = common.create_vlun(
- volume, host, self.iscsi_ips[iscsi_ip]['nsp'])
+ volume, host, iscsi_ips[iscsi_ip]['nsp'])
iscsi_ip_port = "%s:%s" % (
- iscsi_ip, self.iscsi_ips[iscsi_ip]['ip_port'])
+ iscsi_ip, iscsi_ips[iscsi_ip]['ip_port'])
target_portals.append(iscsi_ip_port)
target_iqns.append(port['iSCSIName'])
target_luns.append(vlun['lun'])
if least_used_nsp is None:
LOG.warning(_LW("Least busy iSCSI port not found, "
"using first iSCSI port in list."))
- iscsi_ip = self.iscsi_ips.keys()[0]
+ iscsi_ip = iscsi_ips.keys()[0]
else:
- iscsi_ip = self._get_ip_using_nsp(least_used_nsp)
+ iscsi_ip = self._get_ip_using_nsp(least_used_nsp, common)
- iscsi_ip_port = self.iscsi_ips[iscsi_ip]['ip_port']
- iscsi_target_iqn = self.iscsi_ips[iscsi_ip]['iqn']
+ iscsi_ip_port = iscsi_ips[iscsi_ip]['ip_port']
+ iscsi_target_iqn = iscsi_ips[iscsi_ip]['iqn']
info = {'driver_volume_type': 'iscsi',
'data': {'target_portal': "%s:%s" %
(iscsi_ip, iscsi_ip_port),
}
}
- if self.configuration.hpe3par_iscsi_chap_enabled:
+ if common._client_conf['hpe3par_iscsi_chap_enabled']:
info['data']['auth_method'] = 'CHAP'
info['data']['auth_username'] = username
info['data']['auth_password'] = password
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance."""
- common = self._login()
+ common = self._login(volume)
try:
hostname = common._safe_hostname(connector['host'])
common.terminate_connection(
def _set_3par_chaps(self, common, hostname, volume, username, password):
"""Sets a 3PAR host's CHAP credentials."""
- if not self.configuration.hpe3par_iscsi_chap_enabled:
+ if not common._client_conf['hpe3par_iscsi_chap_enabled']:
return
mod_request = {'chapOperation': common.client.HOST_EDIT_ADD,
domain = common.get_domain(cpg)
# Get the CHAP secret if CHAP is enabled
- if self.configuration.hpe3par_iscsi_chap_enabled:
+ if common._client_conf['hpe3par_iscsi_chap_enabled']:
vol_name = common._get_3par_vol_name(volume['id'])
username = common.client.getVolumeMetaData(
vol_name, CHAP_USER_KEY)['value']
password)
host = common._get_3par_host(hostname)
elif (not host['initiatorChapEnabled'] and
- self.configuration.hpe3par_iscsi_chap_enabled):
+ common._client_conf['hpe3par_iscsi_chap_enabled']):
LOG.warning(_LW("Host exists without CHAP credentials set and "
"has iSCSI attachments but CHAP is enabled. "
"Updating host with new CHAP credentials."))
"""Gets the associated account, generates CHAP info and updates."""
model_update = {}
- if not self.configuration.hpe3par_iscsi_chap_enabled:
+ if not common._client_conf['hpe3par_iscsi_chap_enabled']:
model_update['provider_auth'] = None
return model_update
return model_update
def create_export(self, context, volume, connector):
- common = self._login()
+ common = self._login(volume)
try:
return self._do_export(common, volume)
finally:
Also retrieves CHAP credentials, if present on the volume
"""
- common = self._login()
+ common = self._login(volume)
try:
vol_name = common._get_3par_vol_name(volume['id'])
common.client.getVolume(vol_name)
* Return NSP with fewest active vluns
"""
- iscsi_nsps = self._get_iscsi_nsps()
+ iscsi_nsps = self._get_iscsi_nsps(common)
# If there's only one path, use it
if len(iscsi_nsps) == 1:
return iscsi_nsps[0]
# Calculate the least used iscsi nsp
least_used_nsp = self._get_least_used_nsp(common,
vluns['members'],
- self._get_iscsi_nsps())
+ self._get_iscsi_nsps(common))
return least_used_nsp
- def _get_iscsi_nsps(self):
+ def _get_iscsi_nsps(self, common):
"""Return the list of candidate nsps."""
nsps = []
- for value in self.iscsi_ips.values():
+ iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']]
+ for value in iscsi_ips.values():
nsps.append(value['nsp'])
return nsps
- def _get_ip_using_nsp(self, nsp):
+ def _get_ip_using_nsp(self, nsp, common):
"""Return IP associated with given nsp."""
- for (key, value) in self.iscsi_ips.items():
+ iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']]
+ for (key, value) in iscsi_ips.items():
if value['nsp'] == nsp:
return key
return current_least_used_nsp
def extend_volume(self, volume, new_size):
- common = self._login()
+ common = self._login(volume)
try:
common.extend_volume(volume, new_size)
finally:
self._logout(common)
def manage_existing(self, volume, existing_ref):
- common = self._login()
+ common = self._login(volume)
try:
return common.manage_existing(volume, existing_ref)
finally:
self._logout(common)
def manage_existing_get_size(self, volume, existing_ref):
- common = self._login()
+ common = self._login(volume)
try:
return common.manage_existing_get_size(volume, existing_ref)
finally:
self._logout(common)
def unmanage(self, volume):
- common = self._login()
+ common = self._login(volume)
try:
common.unmanage(volume)
finally:
def attach_volume(self, context, volume, instance_uuid, host_name,
mountpoint):
- common = self._login()
+ common = self._login(volume)
try:
common.attach_volume(volume, instance_uuid)
finally:
self._logout(common)
def detach_volume(self, context, volume, attachment=None):
- common = self._login()
+ common = self._login(volume)
try:
common.detach_volume(volume, attachment)
finally:
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
- common = self._login()
+ common = self._login(volume)
try:
return common.retype(volume, new_type, diff, host)
finally:
"to a host with storage_protocol=%s.", protocol)
return False, None
- common = self._login()
+ common = self._login(volume)
try:
return common.migrate_volume(volume, host)
finally:
def update_migrated_volume(self, context, volume, new_volume,
original_volume_status):
"""Update the name of the migrated volume to it's new ID."""
- common = self._login()
+ common = self._login(volume)
try:
return common.update_migrated_volume(context, volume, new_volume,
original_volume_status)
self._logout(common)
def get_pool(self, volume):
- common = self._login()
+ common = self._login(volume)
try:
return common.get_cpg(volume)
except hpeexceptions.HTTPNotFound:
def replication_enable(self, context, volume):
"""Enable replication on a replication capable volume."""
- common = self._login()
+ common = self._login(volume)
try:
return common.replication_enable(context, volume)
finally:
def replication_disable(self, context, volume):
"""Disable replication on the specified volume."""
- common = self._login()
+ common = self._login(volume)
try:
return common.replication_disable(context, volume)
finally:
def replication_failover(self, context, volume, secondary):
"""Force failover to a secondary replication target."""
- common = self._login(timeout=30)
+ common = self._login(volume, timeout=30)
try:
return common.replication_failover(context, volume, secondary)
finally:
def list_replication_targets(self, context, volume):
"""Provides a means to obtain replication targets for a volume."""
- common = self._login(timeout=30)
+ common = self._login(volume, timeout=30)
try:
return common.list_replication_targets(context, volume)
finally:
--- /dev/null
+---
+features:
+ - Added unmanaged v2 replication support to the HPE 3PAR driver.