HPELEFTHAND_CLUSTER_NAME = 'CloudCluster1'
VOLUME_TYPE_ID_REPLICATED = 'be9181f1-4040-46f2-8298-e7532f2bf9db'
FAKE_FAILOVER_HOST = 'fakefailover@foo#destfakepool'
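+# Matches the 'backend_id' set on the replication_device entries below.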
+REPLICATION_BACKEND_ID = 'target'
class HPELeftHandBaseDriver(object):
'replication_driver_data': ('{"location": "' + HPELEFTHAND_API_URL +
'"}')}
- repl_targets = [{'target_device_id': 'target',
+ repl_targets = [{'backend_id': 'target',
'managed_backend_name': FAKE_FAILOVER_HOST,
'hpelefthand_api_url': HPELEFTHAND_API_URL2,
'hpelefthand_username': HPELEFTHAND_USERNAME,
'cluster_id': 6,
'cluster_vip': '10.0.1.6'}]
- repl_targets_unmgd = [{'target_device_id': 'target',
+ repl_targets_unmgd = [{'backend_id': 'target',
'hpelefthand_api_url': HPELEFTHAND_API_URL2,
'hpelefthand_username': HPELEFTHAND_USERNAME,
'hpelefthand_password': HPELEFTHAND_PASSWORD,
'cluster_id': 6,
'cluster_vip': '10.0.1.6'}]
- list_rep_targets = [{'target_device_id': 'target'}]
+ list_rep_targets = [{'backend_id': REPLICATION_BACKEND_ID}]
serverName = 'fakehost'
server_id = 0
self.assertEqual('deleting', cgsnap['status'])
@mock.patch.object(volume_types, 'get_volume_type')
- def test_create_volume_replicated_managed(self, _mock_get_volume_type):
- # set up driver with default config
- conf = self.default_mock_conf()
- conf.replication_device = self.repl_targets
- mock_client = self.setup_driver(config=conf)
- mock_client.createVolume.return_value = {
- 'iscsiIqn': self.connector['initiator']}
- mock_client.doesRemoteSnapshotScheduleExist.return_value = False
- mock_replicated_client = self.setup_driver(config=conf)
-
- _mock_get_volume_type.return_value = {
- 'name': 'replicated',
- 'extra_specs': {
- 'replication_enabled': '<is> True'}}
-
- with mock.patch.object(
- hpe_lefthand_iscsi.HPELeftHandISCSIDriver,
- '_create_client') as mock_do_setup, \
- mock.patch.object(
- hpe_lefthand_iscsi.HPELeftHandISCSIDriver,
- '_create_replication_client') as mock_replication_client:
- mock_do_setup.return_value = mock_client
- mock_replication_client.return_value = mock_replicated_client
- return_model = self.driver.create_volume(self.volume_replicated)
-
- expected = [
- mock.call.createVolume(
- 'fakevolume_replicated',
- 1,
- units.Gi,
- {'isThinProvisioned': True,
- 'clusterName': 'CloudCluster1'}),
- mock.call.doesRemoteSnapshotScheduleExist(
- 'fakevolume_replicated_SCHED_Pri'),
- mock.call.createRemoteSnapshotSchedule(
- 'fakevolume_replicated',
- 'fakevolume_replicated_SCHED',
- 1800,
- '1970-01-01T00:00:00Z',
- 5,
- 'CloudCluster1',
- 5,
- 'fakevolume_replicated',
- '1.1.1.1',
- 'foo1',
- 'bar2'),
- mock.call.logout()]
-
- mock_client.assert_has_calls(
- self.driver_startup_call_stack +
- expected)
- prov_location = '10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0'
- rep_data = json.dumps({"location": HPELEFTHAND_API_URL})
- self.assertEqual({'replication_status': 'enabled',
- 'replication_driver_data': rep_data,
- 'provider_location': prov_location},
- return_model)
-
- @mock.patch.object(volume_types, 'get_volume_type')
- def test_create_volume_replicated_unmanaged(self, _mock_get_volume_type):
+ def test_create_volume_replicated(self, _mock_get_volume_type):
# set up driver with default config
conf = self.default_mock_conf()
conf.replication_device = self.repl_targets_unmgd
expected)
@mock.patch.object(volume_types, 'get_volume_type')
- def test_replication_enable_no_snapshot_schedule(self,
- _mock_get_volume_type):
- # set up driver with default config
- conf = self.default_mock_conf()
- conf.replication_device = self.repl_targets
- mock_client = self.setup_driver(config=conf)
- mock_client.doesRemoteSnapshotScheduleExist.return_value = False
- mock_replicated_client = self.setup_driver(config=conf)
-
- _mock_get_volume_type.return_value = {
- 'name': 'replicated',
- 'extra_specs': {
- 'replication_enabled': '<is> True'}}
-
- with mock.patch.object(
- hpe_lefthand_iscsi.HPELeftHandISCSIDriver,
- '_create_client') as mock_do_setup, \
- mock.patch.object(
- hpe_lefthand_iscsi.HPELeftHandISCSIDriver,
- '_create_replication_client') as mock_replication_client:
- mock_do_setup.return_value = mock_client
- mock_replication_client.return_value = mock_replicated_client
- return_model = self.driver.replication_enable(
- context.get_admin_context(),
- self.volume_replicated)
-
- expected = [
- mock.call.doesRemoteSnapshotScheduleExist(
- 'fakevolume_replicated_SCHED_Pri'),
- mock.call.createRemoteSnapshotSchedule(
- 'fakevolume_replicated',
- 'fakevolume_replicated_SCHED',
- 1800,
- '1970-01-01T00:00:00Z',
- 5,
- 'CloudCluster1',
- 5,
- 'fakevolume_replicated',
- '1.1.1.1',
- 'foo1',
- 'bar2')]
- mock_client.assert_has_calls(
- self.driver_startup_call_stack +
- expected)
-
- self.assertEqual({'replication_status': 'enabled'},
- return_model)
-
- @mock.patch.object(volume_types, 'get_volume_type')
- def test_replication_enable_with_snapshot_schedule(self,
- _mock_get_volume_type):
- # set up driver with default config
- conf = self.default_mock_conf()
- conf.replication_device = self.repl_targets
- mock_client = self.setup_driver(config=conf)
- mock_client.doesRemoteSnapshotScheduleExist.return_value = True
- mock_replicated_client = self.setup_driver(config=conf)
-
- _mock_get_volume_type.return_value = {
- 'name': 'replicated',
- 'extra_specs': {
- 'replication_enabled': '<is> True'}}
-
- with mock.patch.object(
- hpe_lefthand_iscsi.HPELeftHandISCSIDriver,
- '_create_client') as mock_do_setup, \
- mock.patch.object(
- hpe_lefthand_iscsi.HPELeftHandISCSIDriver,
- '_create_replication_client') as mock_replication_client:
- mock_do_setup.return_value = mock_client
- mock_replication_client.return_value = mock_replicated_client
- return_model = self.driver.replication_enable(
- context.get_admin_context(),
- self.volume_replicated)
-
- expected = [
- mock.call.doesRemoteSnapshotScheduleExist(
- 'fakevolume_replicated_SCHED_Pri'),
- mock.call.startRemoteSnapshotSchedule(
- 'fakevolume_replicated_SCHED_Pri')]
- mock_client.assert_has_calls(
- self.driver_startup_call_stack +
- expected)
-
- self.assertEqual({'replication_status': 'enabled'},
- return_model)
-
- @mock.patch.object(volume_types, 'get_volume_type')
- def test_replication_disable(self, _mock_get_volume_type):
- # set up driver with default config
- conf = self.default_mock_conf()
- conf.replication_device = self.repl_targets
- mock_client = self.setup_driver(config=conf)
- mock_replicated_client = self.setup_driver(config=conf)
-
- _mock_get_volume_type.return_value = {
- 'name': 'replicated',
- 'extra_specs': {
- 'replication_enabled': '<is> True'}}
-
- with mock.patch.object(
- hpe_lefthand_iscsi.HPELeftHandISCSIDriver,
- '_create_client') as mock_do_setup, \
- mock.patch.object(
- hpe_lefthand_iscsi.HPELeftHandISCSIDriver,
- '_create_replication_client') as mock_replication_client:
- mock_do_setup.return_value = mock_client
- mock_replication_client.return_value = mock_replicated_client
- return_model = self.driver.replication_disable(
- context.get_admin_context(),
- self.volume_replicated)
-
- expected = [
- mock.call.stopRemoteSnapshotSchedule(
- 'fakevolume_replicated_SCHED_Pri')]
- mock_client.assert_has_calls(
- self.driver_startup_call_stack +
- expected)
-
- self.assertEqual({'replication_status': 'disabled'},
- return_model)
-
- @mock.patch.object(volume_types, 'get_volume_type')
- def test_replication_disable_fail(self, _mock_get_volume_type):
+ def test_failover_host(self, _mock_get_volume_type):
+ ctxt = context.get_admin_context()
# set up driver with default config
conf = self.default_mock_conf()
conf.replication_device = self.repl_targets
mock_client = self.setup_driver(config=conf)
- mock_client.stopRemoteSnapshotSchedule.side_effect = (
- Exception("Error: Could not stop remote snapshot schedule."))
mock_replicated_client = self.setup_driver(config=conf)
+ mock_replicated_client.getVolumeByName.return_value = {
+ 'iscsiIqn': self.connector['initiator']}
_mock_get_volume_type.return_value = {
'name': 'replicated',
'_create_replication_client') as mock_replication_client:
mock_do_setup.return_value = mock_client
mock_replication_client.return_value = mock_replicated_client
- return_model = self.driver.replication_disable(
- context.get_admin_context(),
- self.volume_replicated)
-
- expected = [
- mock.call.stopRemoteSnapshotSchedule(
- 'fakevolume_replicated_SCHED_Pri')]
- mock_client.assert_has_calls(
- self.driver_startup_call_stack +
- expected)
-
- self.assertEqual({'replication_status': 'disable_failed'},
- return_model)
+ invalid_backend_id = 'INVALID'
- @mock.patch.object(volume_types, 'get_volume_type')
- def test_list_replication_targets(self, _mock_get_volume_type):
- # set up driver with default config
- conf = self.default_mock_conf()
- conf.replication_device = self.repl_targets
- mock_client = self.setup_driver(config=conf)
- mock_replicated_client = self.setup_driver(config=conf)
-
- _mock_get_volume_type.return_value = {
- 'name': 'replicated',
- 'extra_specs': {
- 'replication_enabled': '<is> True'}}
+ # Test invalid secondary target.
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.driver.failover_host,
+ ctxt,
+ [self.volume_replicated],
+ invalid_backend_id)
- with mock.patch.object(
- hpe_lefthand_iscsi.HPELeftHandISCSIDriver,
- '_create_client') as mock_do_setup, \
- mock.patch.object(
- hpe_lefthand_iscsi.HPELeftHandISCSIDriver,
- '_create_replication_client') as mock_replication_client:
- mock_do_setup.return_value = mock_client
- mock_replication_client.return_value = mock_replicated_client
- return_model = self.driver.list_replication_targets(
+ # Test a successful failover.
+ return_model = self.driver.failover_host(
context.get_admin_context(),
- self.volume_replicated)
-
- targets = self.list_rep_targets
- self.assertEqual({'volume_id': 1,
- 'targets': targets},
- return_model)
+ [self.volume_replicated],
+ REPLICATION_BACKEND_ID)
+ prov_location = '10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0'
+ expected_model = (REPLICATION_BACKEND_ID,
+ [{'updates': {'replication_status':
+ 'failed-over',
+ 'provider_location':
+ prov_location},
+ 'volume_id': 1}])
+ self.assertEqual(expected_model, return_model)
@mock.patch.object(volume_types, 'get_volume_type')
- def test_replication_failover_managed(self, _mock_get_volume_type):
- ctxt = context.get_admin_context()
+ def test_replication_failback_host_ready(self, _mock_get_volume_type):
# set up driver with default config
conf = self.default_mock_conf()
- conf.replication_device = self.repl_targets
+ conf.replication_device = self.repl_targets_unmgd
mock_client = self.setup_driver(config=conf)
mock_replicated_client = self.setup_driver(config=conf)
mock_replicated_client.getVolumeByName.return_value = {
- 'iscsiIqn': self.connector['initiator']}
+ 'iscsiIqn': self.connector['initiator'],
+ 'isPrimary': True}
+ mock_replicated_client.getRemoteSnapshotSchedule.return_value = (
+ ['',
+ 'HP StoreVirtual LeftHand OS Command Line Interface',
+ '(C) Copyright 2007-2016',
+ '',
+ 'RESPONSE',
+ ' result 0',
+ ' period 1800',
+ ' paused false'])
_mock_get_volume_type.return_value = {
'name': 'replicated',
'_create_replication_client') as mock_replication_client:
mock_do_setup.return_value = mock_client
mock_replication_client.return_value = mock_replicated_client
- valid_target_device_id = (self.repl_targets[0]['target_device_id'])
- invalid_target_device_id = 'INVALID'
- # test invalid secondary target
- self.assertRaises(
- exception.VolumeBackendAPIException,
- self.driver.replication_failover,
- ctxt,
- self.volume_replicated,
- invalid_target_device_id)
-
- # test a successful failover
- return_model = self.driver.replication_failover(
+ volume = self.volume_replicated.copy()
+ rep_data = json.dumps({"primary_config_group": "failover_group"})
+ volume['replication_driver_data'] = rep_data
+ return_model = self.driver.failover_host(
context.get_admin_context(),
- self.volume_replicated,
- valid_target_device_id)
- rep_data = json.dumps({"location": HPELEFTHAND_API_URL2})
+ [volume],
+ 'default')
prov_location = '10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0'
- self.assertEqual({'provider_location': prov_location,
- 'replication_driver_data': rep_data,
- 'host': FAKE_FAILOVER_HOST},
- return_model)
+ expected_model = (None,
+ [{'updates': {'replication_status':
+ 'available',
+ 'provider_location':
+ prov_location},
+ 'volume_id': 1}])
+ self.assertEqual(expected_model, return_model)
@mock.patch.object(volume_types, 'get_volume_type')
- def test_replication_failover_unmanaged(self, _mock_get_volume_type):
- ctxt = context.get_admin_context()
+ def test_replication_failback_host_not_ready(self,
+ _mock_get_volume_type):
# set up driver with default config
conf = self.default_mock_conf()
conf.replication_device = self.repl_targets_unmgd
mock_client = self.setup_driver(config=conf)
mock_replicated_client = self.setup_driver(config=conf)
mock_replicated_client.getVolumeByName.return_value = {
- 'iscsiIqn': self.connector['initiator']}
+ 'iscsiIqn': self.connector['initiator'],
+ 'isPrimary': False}
+ mock_replicated_client.getRemoteSnapshotSchedule.return_value = (
+ ['',
+ 'HP StoreVirtual LeftHand OS Command Line Interface',
+ '(C) Copyright 2007-2016',
+ '',
+ 'RESPONSE',
+ ' result 0',
+ ' period 1800',
+ ' paused true'])
_mock_get_volume_type.return_value = {
'name': 'replicated',
'_create_replication_client') as mock_replication_client:
mock_do_setup.return_value = mock_client
mock_replication_client.return_value = mock_replicated_client
- valid_target_device_id = (self.repl_targets[0]['target_device_id'])
- invalid_target_device_id = 'INVALID'
- # test invalid secondary target
+ volume = self.volume_replicated.copy()
self.assertRaises(
- exception.VolumeBackendAPIException,
- self.driver.replication_failover,
- ctxt,
- self.volume_replicated,
- invalid_target_device_id)
-
- # test a successful failover
- return_model = self.driver.replication_failover(
+ exception.VolumeDriverException,
+ self.driver.failover_host,
context.get_admin_context(),
- self.volume_replicated,
- valid_target_device_id)
- rep_data = json.dumps({"location": HPELEFTHAND_API_URL2})
- prov_location = '10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0'
- self.assertEqual({'provider_location': prov_location,
- 'replication_driver_data': rep_data},
- return_model)
+ [volume],
+ 'default')
2.0.3 - Adds v2 unmanaged replication support
2.0.4 - Add manage/unmanage snapshot support
2.0.5 - Changed minimum client version to be 2.1.0
+ 2.0.6 - Update replication to version 2.1
"""
- VERSION = "2.0.5"
+ VERSION = "2.0.6"
device_stats = {}
MAX_REMOTE_RETENTION_COUNT = 50
REP_SNAPSHOT_SUFFIX = "_SS"
REP_SCHEDULE_SUFFIX = "_SCHED"
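+ # Cinder passes 'default' as the secondary backend id when it wants
+ # the driver to fail back to the primary array.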
+ FAILBACK_VALUE = 'default'
def __init__(self, *args, **kwargs):
super(HPELeftHandISCSIDriver, self).__init__(*args, **kwargs)
self._client_conf = {}
self._replication_targets = []
self._replication_enabled = False
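+ # Populated from the driver kwargs when the backend has already been
+ # failed over; None means the primary array is active (used by
+ # _get_lefthand_config to pick the right target config).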
+ self._active_backend_id = kwargs.get('active_backend_id', None)
- def _login(self, volume=None, timeout=None):
- conf = self._get_lefthand_config(volume)
+ def _login(self, timeout=None):
+ conf = self._get_lefthand_config()
if conf:
self._client_conf['hpelefthand_username'] = (
conf['hpelefthand_username'])
def create_volume(self, volume):
"""Creates a volume."""
- client = self._login(volume)
+ client = self._login()
try:
# get the extra specs of interest from this volume's volume type
volume_extra_specs = self._get_volume_extra_specs(volume)
def delete_volume(self, volume):
"""Deletes a volume."""
- client = self._login(volume)
+ client = self._login()
# v2 replication check
# If the volume type is replication enabled, we want to call our own
# method of deconstructing the volume and its dependencies
def extend_volume(self, volume, new_size):
"""Extend the size of an existing volume."""
- client = self._login(volume)
+ client = self._login()
try:
volume_info = client.getVolumeByName(volume['name'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
- client = self._login(snapshot['volume'])
+ client = self._login()
try:
volume_info = client.getVolumeByName(snapshot['volume_name'])
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
- client = self._login(snapshot['volume'])
+ client = self._login()
try:
snap_info = client.getSnapshotByName(snapshot['name'])
client.deleteSnapshot(snap_info['id'])
data['replication_enabled'] = self._replication_enabled
data['replication_type'] = ['periodic']
data['replication_count'] = len(self._replication_targets)
+ data['replication_targets'] = self._get_replication_targets()
self.device_stats = data
used from that host. HPE VSA requires a volume to be assigned
to a server.
"""
- client = self._login(volume)
+ client = self._login()
try:
server_info = self._create_server(connector, client)
volume_info = client.getVolumeByName(volume['name'])
def terminate_connection(self, volume, connector, **kwargs):
"""Unassign the volume from the host."""
- client = self._login(volume)
+ client = self._login()
try:
volume_info = client.getVolumeByName(volume['name'])
server_info = client.getServerByName(connector['host'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
- client = self._login(volume)
+ client = self._login()
try:
snap_info = client.getSnapshotByName(snapshot['name'])
volume_info = client.cloneSnapshot(
self._logout(client)
def create_cloned_volume(self, volume, src_vref):
- client = self._login(volume)
+ client = self._login()
try:
volume_info = client.getVolumeByName(src_vref['name'])
clone_info = client.cloneVolume(volume['name'], volume_info['id'])
'new_type': new_type,
'diff': diff,
'host': host})
- client = self._login(volume)
+ client = self._login()
try:
volume_info = client.getVolumeByName(volume['name'])
host_location = host['capabilities']['location_info']
(driver, cluster, vip) = host_location.split(' ')
- client = self._login(volume)
+ client = self._login()
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, '
'cluster=%(cluster)s', {
'id': volume['id'],
# volume isn't attached and can be updated
original_name = CONF.volume_name_template % volume['id']
current_name = CONF.volume_name_template % new_volume['id']
- client = self._login(volume)
+ client = self._login()
try:
volume_info = client.getVolumeByName(current_name)
volumeMods = {'name': original_name}
target_vol_name = self._get_existing_volume_ref_name(existing_ref)
# Check for the existence of the virtual volume.
- client = self._login(volume)
+ client = self._login()
try:
volume_info = client.getVolumeByName(target_vol_name)
except hpeexceptions.HTTPNotFound:
reason=reason)
# Check for the existence of the virtual volume.
- client = self._login(volume)
+ client = self._login()
try:
volume_info = client.getVolumeByName(target_vol_name)
except hpeexceptions.HTTPNotFound:
# Rename the volume's name to unm-* format so that it can be
# easily found later.
- client = self._login(volume)
+ client = self._login()
try:
volume_info = client.getVolumeByName(volume['name'])
new_vol_name = 'unm-' + six.text_type(volume['id'])
return volume_types.get_volume_type(ctxt, type_id)
# v2 replication methods
- def replication_enable(self, context, volume):
- """Enable replication on a replication capable volume."""
- model_update = {}
- # If replication is not enabled and the volume is of replicated type,
- # we treat this as an error.
- if not self._replication_enabled:
- msg = _LE("Enabling replication failed because replication is "
- "not properly configured.")
- LOG.error(msg)
- model_update['replication_status'] = "error"
+ def failover_host(self, context, volumes, secondary_backend_id):
+ """Force failover to a secondary replication target."""
+ if secondary_backend_id == self.FAILBACK_VALUE:
+ volume_update_list = self._replication_failback(volumes)
+ target_id = None
else:
- client = self._login(volume)
- try:
- if self._do_volume_replication_setup(volume, client):
- model_update['replication_status'] = "enabled"
- else:
- model_update['replication_status'] = "error"
- finally:
- self._logout(client)
-
- return model_update
-
- def replication_disable(self, context, volume):
- """Disable replication on the specified volume."""
- model_update = {}
- # If replication is not enabled and the volume is of replicated type,
- # we treat this as an error.
- if self._replication_enabled:
- model_update['replication_status'] = 'disabled'
- vol_name = volume['name']
-
- client = self._login(volume)
- try:
- name = vol_name + self.REP_SCHEDULE_SUFFIX + "_Pri"
- client.stopRemoteSnapshotSchedule(name)
- except Exception as ex:
- msg = (_LE("There was a problem disabling replication on "
- "volume '%(name)s': %(error)s") %
- {'name': vol_name,
- 'error': six.text_type(ex)})
+ failover_target = None
+ for target in self._replication_targets:
+ if target['backend_id'] == secondary_backend_id:
+ failover_target = target
+ break
+ if not failover_target:
+ msg = _("A valid secondary target MUST be specified in order "
+ "to failover.")
LOG.error(msg)
- model_update['replication_status'] = 'disable_failed'
- finally:
- self._logout(client)
- else:
- msg = _LE("Disabling replication failed because replication is "
- "not properly configured.")
- LOG.error(msg)
- model_update['replication_status'] = 'error'
-
- return model_update
-
- def replication_failover(self, context, volume, secondary):
- """Force failover to a secondary replication target."""
- failover_target = None
- for target in self._replication_targets:
- if target['target_device_id'] == secondary:
- failover_target = target
- break
-
- if not failover_target:
- msg = _("A valid secondary target MUST be specified in order "
- "to failover.")
- LOG.error(msg)
- raise exception.VolumeBackendAPIException(data=msg)
-
- # Try and stop the remote snapshot schedule. If the priamry array is
- # down, we will continue with the failover.
- client = None
- try:
- client = self._login(timeout=30)
- name = volume['name'] + self.REP_SCHEDULE_SUFFIX + "_Pri"
- client.stopRemoteSnapshotSchedule(name)
- except Exception:
- LOG.warning(_LW("The primary array is currently offline, remote "
- "copy has been automatically paused."))
- pass
- finally:
- self._logout(client)
-
- # Update provider location to the new array.
- cl = None
- model_update = {}
- try:
- cl = self._create_replication_client(failover_target)
- # Make the volume primary so it can be attached after a fail-over.
- cl.makeVolumePrimary(volume['name'])
- # Stop snapshot schedule
- try:
- name = volume['name'] + self.REP_SCHEDULE_SUFFIX + "_Rmt"
- cl.stopRemoteSnapshotSchedule(name)
- except Exception:
- pass
- # Update the provider info for a proper fail-over.
- volume_info = cl.getVolumeByName(volume['name'])
- model_update = self._update_provider(
- volume_info, cluster_vip=failover_target['cluster_vip'])
- except Exception as ex:
- msg = (_("The fail-over was unsuccessful: %s") %
- six.text_type(ex))
- LOG.error(msg)
- raise exception.VolumeBackendAPIException(data=msg)
- finally:
- self._destroy_replication_client(cl)
-
- rep_data = json.loads(volume['replication_driver_data'])
- rep_data['location'] = failover_target['hpelefthand_api_url']
- replication_driver_data = json.dumps(rep_data)
- model_update['replication_driver_data'] = replication_driver_data
- if failover_target['managed_backend_name']:
- # We want to update the volumes host if our target is managed.
- model_update['host'] = failover_target['managed_backend_name']
-
- return model_update
+ raise exception.VolumeBackendAPIException(data=msg)
- def list_replication_targets(self, context, volume):
- """Provides a means to obtain replication targets for a volume."""
- client = None
- try:
- client = self._login(timeout=30)
- except Exception:
- pass
- finally:
- self._logout(client)
+ target_id = failover_target['backend_id']
+ self._active_backend_id = target_id
+ volume_update_list = []
+ for volume in volumes:
+ if self._volume_of_replicated_type(volume):
+ # Try to stop the remote snapshot schedule. If the primary
+ # array is down, we will continue with the failover.
+ client = None
+ try:
+ client = self._login(timeout=30)
+ name = volume['name'] + self.REP_SCHEDULE_SUFFIX + (
+ "_Pri")
+ client.stopRemoteSnapshotSchedule(name)
+ except Exception:
+ LOG.warning(_LW("The primary array is currently "
+ "offline, remote copy has been "
+ "automatically paused."))
+ finally:
+ self._logout(client)
- replication_targets = []
- for target in self._replication_targets:
- list_vals = {}
- list_vals['target_device_id'] = (
- target.get('target_device_id'))
- replication_targets.append(list_vals)
+ # Update provider location to the new array.
+ cl = None
+ try:
+ cl = self._create_replication_client(failover_target)
+ # Make the volume primary so it can be attached after a
+ # fail-over.
+ cl.makeVolumePrimary(volume['name'])
+ # Stop snapshot schedule
+ try:
+ name = volume['name'] + (
+ self.REP_SCHEDULE_SUFFIX + "_Rmt")
+ cl.stopRemoteSnapshotSchedule(name)
+ except Exception:
+ pass
+
+ # Update the provider info for a proper fail-over.
+ volume_info = cl.getVolumeByName(volume['name'])
+ prov_location = self._update_provider(
+ volume_info,
+ cluster_vip=failover_target['cluster_vip'])
+ volume_update_list.append(
+ {'volume_id': volume['id'],
+ 'updates': {'replication_status': 'failed-over',
+ 'provider_location':
+ prov_location['provider_location']}})
+ except Exception as ex:
+ msg = (_LE("There was a problem with the failover "
+ "(%(error)s) and it was unsuccessful. "
+ "Volume '%(volume)s will not be available "
+ "on the failed over target."),
+ {'error': six.text_type(ex),
+ 'volume': volume['id']})
+ LOG.error(msg)
+ volume_update_list.append(
+ {'volume_id': volume['id'],
+ 'updates': {'replication_status': 'error'}})
+ finally:
+ self._destroy_replication_client(cl)
+ else:
+ # If the volume is not of replicated type, we need to
+ # force the status into error state so a user knows they
+ # do not have access to the volume.
+ volume_update_list.append(
+ {'volume_id': volume['id'],
+ 'updates': {'status': 'error'}})
- return {'volume_id': volume['id'],
- 'targets': replication_targets}
+ return target_id, volume_update_list
def _do_replication_setup(self):
default_san_ssh_port = self.configuration.hpelefthand_ssh_port
dev.get('hpelefthand_iscsi_chap_enabled') == 'True')
remote_array['cluster_id'] = None
remote_array['cluster_vip'] = None
- array_name = remote_array['target_device_id']
+ array_name = remote_array['backend_id']
# Make sure we can log into the array, that it has been
# correctly configured, and its API version meets the
LOG.warning(msg)
elif not self._is_valid_replication_array(remote_array):
msg = (_LW("'%s' is not a valid replication array. "
- "In order to be valid, target_device_id, "
+ "In order to be valid, backend_id, "
"hpelefthand_api_url, "
"hpelefthand_username, "
"hpelefthand_password, and "
if self._is_replication_configured_correct():
self._replication_enabled = True
+ def _replication_failback(self, volumes):
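+ """Fail the given volumes back to the primary (default) backend."""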
+ array_config = {'hpelefthand_api_url':
+ self.configuration.hpelefthand_api_url,
+ 'hpelefthand_username':
+ self.configuration.hpelefthand_username,
+ 'hpelefthand_password':
+ self.configuration.hpelefthand_password,
+ 'hpelefthand_ssh_port':
+ self.configuration.hpelefthand_ssh_port}
+
+ # Make sure the proper steps on the backend have been completed before
+ # we allow a failback.
+ if not self._is_host_ready_for_failback(volumes, array_config):
+ msg = _("The host is not ready to be failed back. Please "
+ "resynchronize the volumes and resume replication on the "
+ "LeftHand backends.")
+ LOG.error(msg)
+ raise exception.VolumeDriverException(message=msg)
+
+ cl = None
+ volume_update_list = []
+ for volume in volumes:
+ if self._volume_of_replicated_type(volume):
+ try:
+ cl = self._create_replication_client(array_config)
+ # Update the provider info for a proper fail-back.
+ volume_info = cl.getVolumeByName(volume['name'])
+ cluster_info = cl.getClusterByName(
+ self.configuration.hpelefthand_clustername)
+ virtual_ips = cluster_info['virtualIPAddresses']
+ cluster_vip = virtual_ips[0]['ipV4Address']
+ provider_location = self._update_provider(
+ volume_info, cluster_vip=cluster_vip)
+ volume_update_list.append(
+ {'volume_id': volume['id'],
+ 'updates': {'replication_status': 'available',
+ 'provider_location':
+ provider_location['provider_location']}})
+ except Exception as ex:
+ # The primary array was not able to execute the fail-back
+ # properly. The replication status is now in an unknown
+ # state, so we will treat it as an error.
+ msg = (_LE("There was a problem with the failover "
+ "(%(error)s) and it was unsuccessful. "
+ "Volume '%(volume)s will not be available "
+ "on the failed over target."),
+ {'error': six.text_type(ex),
+ 'volume': volume['id']})
+ LOG.error(msg)
+ volume_update_list.append(
+ {'volume_id': volume['id'],
+ 'updates': {'replication_status': 'error'}})
+ finally:
+ self._destroy_replication_client(cl)
+ else:
+ # Upon failing back, we can move the non-replicated volumes
+ # back into available state.
+ volume_update_list.append(
+ {'volume_id': volume['id'],
+ 'updates': {'status': 'available'}})
+
+ return volume_update_list
+
+ def _is_host_ready_for_failback(self, volumes, array_config):
+ """Checks to make sure the volumes have been synchronized
+
+ This entails ensuring the remote snapshot schedule has been resumed
+ on the backends and the secondary volume's data has been copied back
+ to the primary.
+ """
+ is_ready = True
+ cl = None
+ try:
+ for volume in volumes:
+ if self._volume_of_replicated_type(volume):
+ schedule_name = volume['name'] + (
+ self.REP_SCHEDULE_SUFFIX + "_Pri")
+ cl = self._create_replication_client(array_config)
+ schedule = cl.getRemoteSnapshotSchedule(schedule_name)
+ schedule = ''.join(schedule)
+ # We need to check the status of the schedule to make sure
+ # it is not paused.
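+ # e.g. the joined CLI response contains "paused false" when
+ # the schedule is running.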
+ result = re.search(".*paused\s+(\w+)", schedule)
+ is_schedule_active = result.group(1) == 'false'
+
+ volume_info = cl.getVolumeByName(volume['name'])
+ if not volume_info['isPrimary'] or not is_schedule_active:
+ is_ready = False
+ break
+ except Exception as ex:
+ LOG.error(_LW("There was a problem when trying to determine if "
+ "the volume can be failed-back: %s") %
+ six.text_type(ex))
+ is_ready = False
+ finally:
+ self._destroy_replication_client(cl)
+
+ return is_ready
+
+ def _get_replication_targets(self):
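+ """Return the backend_id of each configured replication target."""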
+ replication_targets = []
+ for target in self._replication_targets:
+ replication_targets.append(target['backend_id'])
+
+ return replication_targets
+
def _is_valid_replication_array(self, target):
required_flags = ['hpelefthand_api_url', 'hpelefthand_username',
- 'hpelefthand_password', 'target_device_id',
+ 'hpelefthand_password', 'backend_id',
'hpelefthand_clustername']
try:
self.check_replication_flags(target, required_flags)
exists = False
return exists
- def _get_lefthand_config(self, volume):
+ def _get_lefthand_config(self):
conf = None
- if volume:
- rep_location = None
- rep_data = volume.get('replication_driver_data')
- if rep_data:
- rep_data = json.loads(rep_data)
- rep_location = rep_data.get('location')
- if rep_location:
- for target in self._replication_targets:
- if target['hpelefthand_api_url'] == rep_location:
- conf = target
- break
+ for target in self._replication_targets:
+ if target['backend_id'] == self._active_backend_id:
+ conf = target
+ break
return conf