'vendor_name': 'Dell',
'storage_protocol': 'FC'}
+ # Start with none. Add in the specific tests later.
+ # Mock tests bozo this.
+ self.driver.backends = None
+ self.driver.replication_enabled = False
+
self.volid = '5729f1db-4c45-416c-bc15-c8ea13a4465d'
self.volume_name = "volume" + self.volid
self.connector = {'ip': '192.168.0.77',
from cinder import exception
from cinder import test
from cinder.volume.drivers.dell import dell_storagecenter_api
-from cinder.volume.drivers.dell import dell_storagecenter_common
from cinder.volume.drivers.dell import dell_storagecenter_iscsi
from cinder.volume import volume_types
'vendor_name': 'Dell',
'storage_protocol': 'iSCSI'}
+ # Start with none. Add in the specific tests later.
+ # Mock tests bozo this.
+ self.driver.backends = None
+ self.driver.replication_enabled = False
+
self.volid = str(uuid.uuid4())
self.volume_name = "volume" + self.volid
self.connector = {
# self.configuration.eqlx_chap_password)
}
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_get_volume_extra_specs')
+    def test__create_replications(self,
+                                  mock_get_volume_extra_specs,
+                                  mock_close_connection,
+                                  mock_open_connection,
+                                  mock_init):
+        """_create_replications issues one create_replication per backend."""
+        # Save the fixture's backends so we can restore them at the end.
+        backends = self.driver.backends
+        mock_get_volume_extra_specs.return_value = {
+            'replication_enabled': '<is> True'}
+        # Expected model update names both target SSNs.
+        model_update = {'replication_status': 'enabled',
+                        'replication_driver_data': '12345,67890'}
+
+        vol = {'id': 'guid', 'replication_driver_data': ''}
+        scvol = {'name': 'guid'}
+        self.driver.backends = [{'target_device_id': '12345',
+                                 'managed_backend_name': 'host@dell1',
+                                 'qosnode': 'cinderqos'},
+                                {'target_device_id': '67890',
+                                 'managed_backend_name': 'host@dell2',
+                                 'qosnode': 'otherqos'}]
+        mock_api = mock.MagicMock()
+        mock_api.create_replication = mock.MagicMock(
+            return_value={'instanceId': '1'})
+        # Create regular replication test.
+        res = self.driver._create_replications(mock_api, vol, scvol)
+        # Positional args appear to be (scvol, destssn, qosnode, sync,
+        # diskfolder, activereplay) -- confirm against the driver.
+        mock_api.create_replication.assert_any_call(
+            scvol, '12345', 'cinderqos', False, None, False)
+        mock_api.create_replication.assert_any_call(
+            scvol, '67890', 'otherqos', False, None, False)
+        self.assertEqual(model_update, res)
+        # Create replication with activereplay set.
+        mock_get_volume_extra_specs.return_value = {
+            'replication:activereplay': '<is> True',
+            'replication_enabled': '<is> True'}
+        res = self.driver._create_replications(mock_api, vol, scvol)
+        mock_api.create_replication.assert_any_call(
+            scvol, '12345', 'cinderqos', False, None, True)
+        mock_api.create_replication.assert_any_call(
+            scvol, '67890', 'otherqos', False, None, True)
+        self.assertEqual(model_update, res)
+        # Create replication with sync set.
+        mock_get_volume_extra_specs.return_value = {
+            'replication:activereplay': '<is> True',
+            'replication_enabled': '<is> True',
+            'replication_type': '<in> sync'}
+        res = self.driver._create_replications(mock_api, vol, scvol)
+        mock_api.create_replication.assert_any_call(
+            scvol, '12345', 'cinderqos', True, None, True)
+        mock_api.create_replication.assert_any_call(
+            scvol, '67890', 'otherqos', True, None, True)
+        self.assertEqual(model_update, res)
+        # Create replication with disk folder set.
+        self.driver.backends = [{'target_device_id': '12345',
+                                 'managed_backend_name': 'host@dell1',
+                                 'qosnode': 'cinderqos',
+                                 'diskfolder': 'ssd'},
+                                {'target_device_id': '67890',
+                                 'managed_backend_name': 'host@dell2',
+                                 'qosnode': 'otherqos',
+                                 'diskfolder': 'ssd'}]
+        mock_get_volume_extra_specs.return_value = {
+            'replication:activereplay': '<is> True',
+            'replication_enabled': '<is> True',
+            'replication_type': '<in> sync'}
+        res = self.driver._create_replications(mock_api, vol, scvol)
+        mock_api.create_replication.assert_any_call(
+            scvol, '12345', 'cinderqos', True, 'ssd', True)
+        mock_api.create_replication.assert_any_call(
+            scvol, '67890', 'otherqos', True, 'ssd', True)
+        self.assertEqual(model_update, res)
+        # Failed to create replication test.
+        mock_api.create_replication.return_value = None
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver._create_replications,
+                          mock_api,
+                          vol,
+                          scvol)
+        # Replication not enabled test
+        mock_get_volume_extra_specs.return_value = {}
+        res = self.driver._create_replications(mock_api, vol, scvol)
+        self.assertEqual({}, res)
+        # Restore the fixture's backends.
+        self.driver.backends = backends
+
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_get_volume_extra_specs')
+    def test__delete_replications(self,
+                                  mock_get_volume_extra_specs,
+                                  mock_close_connection,
+                                  mock_open_connection,
+                                  mock_init):
+        """_delete_replications removes each SSN in replication_driver_data."""
+        backends = self.driver.backends
+        vol = {'id': 'guid'}
+        scvol = {'instanceId': '1'}
+        mock_api = mock.MagicMock()
+        mock_api.delete_replication = mock.MagicMock()
+        mock_api.find_volume = mock.MagicMock(return_value=scvol)
+        # Start replication disabled. Should fail immediately.
+        mock_get_volume_extra_specs.return_value = {}
+        self.driver._delete_replications(mock_api, vol)
+        self.assertFalse(mock_api.delete_replication.called)
+        # Replication enabled. No replications listed.
+        mock_get_volume_extra_specs.return_value = {
+            'replication_enabled': '<is> True'}
+        vol = {'id': 'guid', 'replication_driver_data': ''}
+        self.driver._delete_replications(mock_api, vol)
+        self.assertFalse(mock_api.delete_replication.called)
+        # Something to call.
+        vol = {'id': 'guid', 'replication_driver_data': '12345,67890'}
+        self.driver._delete_replications(mock_api, vol)
+        # Note the SSNs are passed down as ints, not strings.
+        mock_api.delete_replication.assert_any_call(scvol, 12345)
+        mock_api.delete_replication.assert_any_call(scvol, 67890)
+        self.driver.backends = backends
+
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_volume',
return_value=VOLUME)
self.driver.create_volume(volume)
mock_create_volume.assert_called_once_with(self.volume_name,
1,
+ None,
None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
self.driver.create_volume(volume)
mock_create_volume.assert_called_once_with(self.volume_name,
1,
+ None,
None)
self.assertTrue(mock_find_replay_profile.called)
self.assertTrue(mock_update_cg_volumes.called)
self.driver.create_volume(volume)
mock_create_volume.assert_called_once_with(self.volume_name,
1,
- "HighPriority")
+ "HighPriority",
+ None)
+
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'create_volume',
+                       return_value=VOLUME)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_sc',
+                       return_value=12345)
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'storagetype:replayprofiles': 'Daily'})
+    def test_create_volume_replay_profiles(self,
+                                           mock_extra,
+                                           mock_find_sc,
+                                           mock_create_volume,
+                                           mock_close_connection,
+                                           mock_open_connection,
+                                           mock_init):
+        """Volume-type replay profiles are passed through to create_volume."""
+        volume = {'id': self.volume_name, 'size': 1, 'volume_type_id': 'abc'}
+        self.driver.create_volume(volume)
+        # Args appear to be (name, size, storage profile, replay profiles).
+        mock_create_volume.assert_called_once_with(self.volume_name,
+                                                   1,
+                                                   None,
+                                                   'Daily')
+
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'create_volume',
+                       return_value=VOLUME)
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_create_replications',
+                       return_value={'replication_status': 'enabled',
+                                     'replication_driver_data': 'ssn'})
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_sc',
+                       return_value=12345)
+    def test_create_volume_replication(self,
+                                       mock_find_sc,
+                                       mock_create_replications,
+                                       mock_create_volume,
+                                       mock_close_connection,
+                                       mock_open_connection,
+                                       mock_init):
+        """create_volume returns the model update from _create_replications."""
+        volume = {'id': self.volume_name, 'size': 1}
+        ret = self.driver.create_volume(volume)
+        self.assertEqual({'replication_status': 'enabled',
+                          'replication_driver_data': 'ssn'}, ret)
+
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'create_volume',
+                       return_value=VOLUME)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'delete_volume')
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_create_replications')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_sc',
+                       return_value=12345)
+    def test_create_volume_replication_raises(self,
+                                              mock_find_sc,
+                                              mock_create_replications,
+                                              mock_delete_volume,
+                                              mock_create_volume,
+                                              mock_close_connection,
+                                              mock_open_connection,
+                                              mock_init):
+        """A replication failure during create_volume deletes the volume."""
+        volume = {'id': self.volume_name, 'size': 1}
+        mock_create_replications.side_effect = (
+            exception.VolumeBackendAPIException(data='abc'))
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_volume,
+                          volume)
+        # The half-created volume must be cleaned up on failure.
+        self.assertTrue(mock_delete_volume.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_volume',
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, volume)
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_delete_replications')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'delete_volume',
return_value=True)
def test_delete_volume(self,
mock_find_sc,
mock_delete_volume,
+ mock_delete_replications,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name, 'size': 1}
self.driver.delete_volume(volume)
mock_delete_volume.assert_called_once_with(self.volume_name)
+ self.assertTrue(mock_delete_replications.called)
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_delete_replications')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'delete_volume',
return_value=False)
def test_delete_volume_failure(self,
mock_find_sc,
mock_delete_volume,
+ mock_delete_replications,
mock_close_connection,
mock_open_connection,
mock_init):
self.assertRaises(exception.VolumeIsBusy,
self.driver.delete_volume,
volume)
+ self.assertTrue(mock_delete_replications.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
self.assertEqual('iscsi', data['driver_volume_type'])
# verify find_volume has been called and that is has been called twice
mock_find_volume.assert_any_call(self.volume_name)
- assert mock_find_volume.call_count == 2
+ self.assertEqual(2, mock_find_volume.call_count)
expected = {'data': self.ISCSI_PROPERTIES,
'driver_volume_type': 'iscsi'}
self.assertEqual(expected, data, 'Unexpected return value')
self.assertEqual('iscsi', data['driver_volume_type'])
# verify find_volume has been called and that is has been called twice
mock_find_volume.assert_any_call(self.volume_name)
- assert mock_find_volume.call_count == 2
+ self.assertEqual(2, mock_find_volume.call_count)
props = self.ISCSI_PROPERTIES
expected = {'data': props,
'driver_volume_type': 'iscsi'}
self.driver.create_snapshot,
snapshot)
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_create_replications')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
mock_find_volume,
mock_find_sc,
mock_find_replay_profile,
+ mock_create_replications,
mock_close_connection,
mock_open_connection,
mock_init):
+ model_update = {'something': 'something'}
+ mock_create_replications.return_value = model_update
volume = {'id': 'fake'}
snapshot = {'id': 'fake', 'volume_id': 'fake'}
- self.driver.create_volume_from_snapshot(volume, snapshot)
+ res = self.driver.create_volume_from_snapshot(volume, snapshot)
mock_create_view_volume.assert_called_once_with('fake',
- 'fake')
+ 'fake',
+ None)
self.assertTrue(mock_find_replay.called)
self.assertTrue(mock_find_volume.called)
self.assertFalse(mock_find_replay_profile.called)
+ # This just makes sure that we created
+ self.assertTrue(mock_create_replications.called)
+ self.assertEqual(model_update, res)
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_create_replications')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value='fake')
mock_find_sc,
mock_update_cg_volumes,
mock_find_replay_profile,
+ mock_create_replications,
mock_close_connection,
mock_open_connection,
mock_init):
+ model_update = {'something': 'something'}
+ mock_create_replications.return_value = model_update
volume = {'id': 'fake', 'consistencygroup_id': 'guid'}
snapshot = {'id': 'fake', 'volume_id': 'fake'}
- self.driver.create_volume_from_snapshot(volume, snapshot)
+ res = self.driver.create_volume_from_snapshot(volume, snapshot)
mock_create_view_volume.assert_called_once_with('fake',
- 'fake')
+ 'fake',
+ None)
self.assertTrue(mock_find_replay.called)
self.assertTrue(mock_find_volume.called)
self.assertTrue(mock_find_replay_profile.called)
self.assertTrue(mock_update_cg_volumes.called)
+ # This just makes sure that we created
+ self.assertTrue(mock_create_replications.called)
+ self.assertEqual(model_update, res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value='fake')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'find_replay_profile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_view_volume',
return_value=None)
def test_create_volume_from_snapshot_failed(self,
mock_create_view_volume,
+ mock_find_replay_profile,
mock_find_replay,
mock_find_volume,
mock_find_sc,
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
volume, snapshot)
+ self.assertTrue(mock_find_replay.called)
+ self.assertTrue(mock_find_volume.called)
+ self.assertFalse(mock_find_replay_profile.called)
+
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_create_replications')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_sc',
+                       return_value=12345)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_volume',
+                       return_value=VOLUME)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_replay',
+                       return_value='fake')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'create_view_volume',
+                       return_value=VOLUME)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'delete_volume')
+    def test_create_volume_from_snapshot_failed_replication(
+            self,
+            mock_delete_volume,
+            mock_create_view_volume,
+            mock_find_replay,
+            mock_find_volume,
+            mock_find_sc,
+            mock_create_replications,
+            mock_close_connection,
+            mock_open_connection,
+            mock_init):
+        """A replication failure after create-from-snapshot cleans up."""
+        mock_create_replications.side_effect = (
+            exception.VolumeBackendAPIException(data='abc'))
+        volume = {'id': 'fake'}
+        snapshot = {'id': 'fake', 'volume_id': 'fake'}
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_volume_from_snapshot,
+                          volume, snapshot)
+        # The new view volume must be deleted on failure.
+        self.assertTrue(mock_delete_volume.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
self.assertTrue(mock_find_replay.called)
self.assertFalse(mock_create_view_volume.called)
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_create_replications',
+ return_value={})
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
mock_create_cloned_volume,
mock_find_volume,
mock_find_sc,
+ mock_create_replications,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': self.volume_name + '_clone'}
src_vref = {'id': self.volume_name}
- self.driver.create_cloned_volume(volume, src_vref)
+ ret = self.driver.create_cloned_volume(volume, src_vref)
mock_create_cloned_volume.assert_called_once_with(
self.volume_name + '_clone',
- self.VOLUME)
+ self.VOLUME,
+ None)
self.assertTrue(mock_find_volume.called)
+ self.assertEqual({}, ret)
+
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'delete_volume')
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_create_replications')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_sc',
+                       return_value=12345)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_volume',
+                       return_value=VOLUME)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'create_cloned_volume',
+                       return_value=VOLUME)
+    def test_create_cloned_volume_replication_fail(self,
+                                                   mock_create_cloned_volume,
+                                                   mock_find_volume,
+                                                   mock_find_sc,
+                                                   mock_create_replications,
+                                                   mock_delete_volume,
+                                                   mock_close_connection,
+                                                   mock_open_connection,
+                                                   mock_init):
+        """A replication failure after clone creation deletes the clone."""
+        mock_create_replications.side_effect = (
+            exception.VolumeBackendAPIException(data='abc'))
+        volume = {'id': self.volume_name + '_clone'}
+        src_vref = {'id': self.volume_name}
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_cloned_volume,
+                          volume, src_vref)
+        # The new clone must be cleaned up on failure.
+        self.assertTrue(mock_delete_volume.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
self.driver.create_cloned_volume(volume, src_vref)
mock_create_cloned_volume.assert_called_once_with(
self.volume_name + '_clone',
- self.VOLUME)
+ self.VOLUME,
+ None)
self.assertTrue(mock_find_volume.called)
self.assertTrue(mock_find_replay_profile.called)
self.assertTrue(mock_update_cg_volumes.called)
mock_init):
stats = self.driver.get_volume_stats(True)
self.assertEqual('iSCSI', stats['storage_protocol'])
- mock_get_storage_usage.called_once_with(64702)
+ self.assertTrue(mock_get_storage_usage.called)
+
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_sc',
+                       return_value=64702)
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'get_storage_usage',
+                       return_value={'availableSpace': 100, 'freeSpace': 50})
+    def test_update_volume_stats_with_refresh_and_repl(
+            self,
+            mock_get_storage_usage,
+            mock_find_sc,
+            mock_close_connection,
+            mock_open_connection,
+            mock_init):
+        """Refreshed stats report replication count/types when enabled."""
+        # Save driver state so other tests are unaffected.
+        backends = self.driver.backends
+        replication_enabled = self.driver.replication_enabled
+        self.driver.backends = [{'a': 'a'}, {'b': 'b'}, {'c': 'c'}]
+        self.driver.replication_enabled = True
+        stats = self.driver.get_volume_stats(True)
+        # One replication per configured backend.
+        self.assertEqual(3, stats['replication_count'])
+        self.assertEqual(['async', 'sync'], stats['replication_type'])
+        self.assertTrue(stats['replication_enabled'])
+        self.assertTrue(mock_get_storage_usage.called)
+        # Restore saved driver state.
+        self.driver.backends = backends
+        self.driver.replication_enabled = replication_enabled
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
mock_init):
stats = self.driver.get_volume_stats(False)
self.assertEqual('iSCSI', stats['storage_protocol'])
- assert mock_get_storage_usage.called is False
+ self.assertFalse(mock_get_storage_usage.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
cgsnap['id'])
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'find_volume',
+ return_value={'id': 'guid'})
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_create_replications',
+ return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'manage_existing')
def test_manage_existing(self,
mock_manage_existing,
+ mock_create_replications,
+ mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
mock_manage_existing.assert_called_once_with(volume['id'],
existing_ref)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'find_volume',
+ return_value={'id': 'guid'})
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_create_replications',
+ return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'manage_existing')
def test_manage_existing_id(self,
mock_manage_existing,
+ mock_create_replications,
+ mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
volume,
existing_ref)
- def test_retype_not_extra_specs(self,
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'find_volume',
+ return_value=VOLUME)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'update_storage_profile')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'update_replay_profiles')
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_create_replications')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'update_replicate_active_replay')
+ def test_retype_not_our_extra_specs(self,
+ mock_update_replicate_active_replay,
+ mock_create_replications,
+ mock_update_replay_profile,
+ mock_update_storage_profile,
+ mock_find_volume,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ res = self.driver.retype(
+ None, {'id': 'guid'}, None, {'extra_specs': None}, None)
+ self.assertTrue(res)
+ self.assertFalse(mock_update_replicate_active_replay.called)
+ self.assertFalse(mock_create_replications.called)
+ self.assertFalse(mock_update_replay_profile.called)
+ self.assertFalse(mock_update_storage_profile.called)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'find_volume',
+ return_value=VOLUME)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'update_replay_profiles')
+ def test_retype_replay_profiles(self,
+ mock_update_replay_profiles,
+ mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
+ mock_update_replay_profiles.side_effect = [True, False]
+ # Normal successful run.
res = self.driver.retype(
- None, None, None, {'extra_specs': None}, None)
+ None, {'id': 'guid'}, None,
+ {'extra_specs': {'storagetype:replayprofiles': ['A', 'B']}},
+ None)
+ mock_update_replay_profiles.assert_called_once_with(self.VOLUME, 'B')
+ self.assertTrue(res)
+ # Run fails. Make sure this returns False.
+ res = self.driver.retype(
+ None, {'id': 'guid'}, None,
+ {'extra_specs': {'storagetype:replayprofiles': ['B', 'A']}},
+ None)
self.assertFalse(res)
- def test_retype_not_storage_profile(self,
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'find_volume',
+ return_value=VOLUME)
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_create_replications',
+ return_value={'replication_status': 'enabled',
+ 'replication_driver_data': '54321'})
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_delete_replications')
+ def test_retype_create_replications(self,
+ mock_delete_replications,
+ mock_create_replications,
+ mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
+
+ res = self.driver.retype(
+ None, {'id': 'guid'}, None,
+ {'extra_specs': {'replication_enabled': [False, True]}},
+ None)
+ self.assertTrue(mock_create_replications.called)
+ self.assertFalse(mock_delete_replications.called)
+ self.assertEqual({'replication_status': 'enabled',
+ 'replication_driver_data': '54321'}, res)
res = self.driver.retype(
- None, None, None, {'extra_specs': {'something': 'else'}}, None)
+ None, {'id': 'guid'}, None,
+ {'extra_specs': {'replication_enabled': [True, False]}},
+ None)
+ self.assertTrue(mock_delete_replications.called)
+ self.assertEqual({'replication_status': 'disabled',
+ 'replication_driver_data': ''}, res)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'update_replicate_active_replay')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'find_volume',
+ return_value=VOLUME)
+ def test_retype_active_replay(self,
+ mock_find_volume,
+ mock_update_replicate_active_replay,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ # Success, Success, Not called and fail.
+ mock_update_replicate_active_replay.side_effect = [True, True, False]
+ res = self.driver.retype(
+ None, {'id': 'guid'}, None,
+ {'extra_specs': {'replication:activereplay': ['', '<is> True']}},
+ None)
+ self.assertTrue(res)
+ res = self.driver.retype(
+ None, {'id': 'guid'}, None,
+ {'extra_specs': {'replication:activereplay': ['<is> True', '']}},
+ None)
+ self.assertTrue(res)
+ res = self.driver.retype(
+ None, {'id': 'guid'}, None,
+ {'extra_specs': {'replication:activereplay': ['', '']}},
+ None)
+ self.assertTrue(res)
+ res = self.driver.retype(
+ None, {'id': 'guid'}, None,
+ {'extra_specs': {'replication:activereplay': ['', '<is> True']}},
+ None)
self.assertFalse(res)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'find_volume',
+ return_value=VOLUME)
def test_retype_same(self,
+ mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.driver.retype(
- None, None, None,
+ None, {'id': 'guid'}, None,
{'extra_specs': {'storagetype:storageprofile': ['A', 'A']}},
None)
self.assertTrue(res)
- def test_retype_malformed(self,
- mock_close_connection,
- mock_open_connection,
- mock_init):
- LOG = self.mock_object(dell_storagecenter_common, "LOG")
- res = self.driver.retype(
- None, None, None,
- {'extra_specs': {
- 'storagetype:storageprofile': ['something',
- 'not',
- 'right']}},
- None)
- self.assertFalse(res)
- self.assertEqual(1, LOG.warning.call_count)
-
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
mock_update_storage_profile.ssert_called_once_with(
self.VOLUME, 'B')
self.assertTrue(res)
+
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'resume_replication')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_volume',
+                       return_value=VOLUME)
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_do_repl')
+    def test_replication_enable(self,
+                                mock_do_repl,
+                                mock_find_volume,
+                                mock_resume_replication,
+                                mock_close_connection,
+                                mock_open_connection,
+                                mock_init):
+        """replication_enable resumes each replication in driver_data."""
+        # Note that since we do nothing with sync or async here
+        # at all we do not bother testing it.
+        # The two side_effect lists below are consumed in lock step with the
+        # five sub-scenarios; keep the ordering comments in sync.
+        mock_do_repl.side_effect = [(False, False),  # No run.
+                                    (True, False),   # Good run.
+                                    (True, False),   # Bad run.
+                                    (True, False),   # Multiple replications.
+                                    (True, False)]   # Multiple fail.
+        mock_resume_replication.side_effect = [True,   # Good run.
+                                               False,  # Bad run.
+                                               True,   # Multiple replications.
+                                               True,
+                                               False]  # Multiple fail.
+        vref = {'replication_driver_data': '',
+                'id': 'guid'}
+        model_update = {}
+        # No run
+        ret = self.driver.replication_enable({}, vref)
+        self.assertEqual(model_update, ret)
+        # we didn't try to resume, right?
+        self.assertEqual(0, mock_resume_replication.call_count)
+        # Good run
+        vref = {'replication_driver_data': '12345',
+                'id': 'guid'}
+        ret = self.driver.replication_enable({}, vref)
+        self.assertEqual(model_update, ret)
+        # Hard to distinguish good from bad. Make sure we tried.
+        self.assertEqual(1, mock_resume_replication.call_count)
+        # Bad run
+        model_update = {'replication_status': 'error'}
+        ret = self.driver.replication_enable({}, vref)
+        self.assertEqual(model_update, ret)
+        # Make sure we actually sent this down.
+        self.assertEqual(2, mock_resume_replication.call_count)
+        mock_resume_replication.assert_called_with(self.VOLUME, 12345)
+        # Multiple replications.
+        vref = {'replication_driver_data': '12345,67890',
+                'id': 'guid'}
+        model_update = {}
+        ret = self.driver.replication_enable({}, vref)
+        self.assertEqual(model_update, ret)
+        # Should be called two more times.
+        self.assertEqual(4, mock_resume_replication.call_count)
+        # This checks the last call
+        mock_resume_replication.assert_called_with(self.VOLUME, 67890)
+        # Multiple fail.
+        model_update = {'replication_status': 'error'}
+        ret = self.driver.replication_enable({}, vref)
+        self.assertEqual(model_update, ret)
+        # We are set to fail on the first call so one more.
+        self.assertEqual(5, mock_resume_replication.call_count)
+        # This checks the last call.
+        mock_resume_replication.assert_called_with(self.VOLUME, 12345)
+
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'pause_replication')
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_volume',
+                       return_value=VOLUME)
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       '_do_repl')
+    def test_replication_disable(self,
+                                 mock_do_repl,
+                                 mock_find_volume,
+                                 mock_pause_replication,
+                                 mock_close_connection,
+                                 mock_open_connection,
+                                 mock_init):
+        """replication_disable pauses each replication in driver_data."""
+        # Note that since we do nothing with sync or async here
+        # at all we do not bother testing it.
+        # Mirrors test_replication_enable but exercises pause_replication.
+        mock_do_repl.side_effect = [(False, False),  # No run.
+                                    (True, False),   # Good run.
+                                    (True, False),   # Bad run.
+                                    (True, False),   # Multiple replications.
+                                    (True, False)]   # Multiple fail.
+        mock_pause_replication.side_effect = [True,   # Good run.
+                                              False,  # Bad run.
+                                              True,   # Multiple replications.
+                                              True,
+                                              False]  # Multiple fail.
+        vref = {'replication_driver_data': '',
+                'id': 'guid'}
+        model_update = {}
+        # No run
+        ret = self.driver.replication_disable({}, vref)
+        self.assertEqual(model_update, ret)
+        # we didn't try to pause, right?
+        self.assertEqual(0, mock_pause_replication.call_count)
+        # Good run
+        vref = {'replication_driver_data': '12345',
+                'id': 'guid'}
+        ret = self.driver.replication_disable({}, vref)
+        self.assertEqual(model_update, ret)
+        # Hard to distinguish good from bad. Make sure we tried.
+        self.assertEqual(1, mock_pause_replication.call_count)
+        # Bad run
+        model_update = {'replication_status': 'error'}
+        ret = self.driver.replication_disable({}, vref)
+        self.assertEqual(model_update, ret)
+        # Make sure we actually sent this down.
+        self.assertEqual(2, mock_pause_replication.call_count)
+        mock_pause_replication.assert_called_with(self.VOLUME, 12345)
+        # Multiple replications.
+        vref = {'replication_driver_data': '12345,67890',
+                'id': 'guid'}
+        model_update = {}
+        ret = self.driver.replication_disable({}, vref)
+        self.assertEqual(model_update, ret)
+        # Should be called two more times.
+        self.assertEqual(4, mock_pause_replication.call_count)
+        # This checks the last call
+        mock_pause_replication.assert_called_with(self.VOLUME, 67890)
+        # Multiple fail.
+        model_update = {'replication_status': 'error'}
+        ret = self.driver.replication_disable({}, vref)
+        self.assertEqual(model_update, ret)
+        # We are set to fail on the first call so one more.
+        self.assertEqual(5, mock_pause_replication.call_count)
+        # This checks the last call.
+        mock_pause_replication.assert_called_with(self.VOLUME, 12345)
+
+    def test__find_host(self,
+                        mock_close_connection,
+                        mock_open_connection,
+                        mock_init):
+        """_find_host maps a target_device_id to its managed backend name."""
+        backends = self.driver.backends
+        self.driver.backends = [{'target_device_id': '12345',
+                                 'managed_backend_name': 'host@dell1',
+                                 'qosnode': 'cinderqos'},
+                                {'target_device_id': '67890',
+                                 'managed_backend_name': 'host@dell2',
+                                 'qosnode': 'cinderqos'}]
+        # Just make sure we are returning the correct backend entry.
+        # Good run
+        expected = 'host@dell2'
+        ret = self.driver._find_host('67890')
+        self.assertEqual(expected, ret)
+        # Bad run
+        ret = self.driver._find_host('54321')
+        self.assertIsNone(ret)
+        self.driver.backends = backends
+
+    def test__parse_secondary(self,
+                              mock_close_connection,
+                              mock_open_connection,
+                              mock_init):
+        """_parse_secondary resolves (destssn, host) for a failover target."""
+        backends = self.driver.backends
+        vref = {'id': 'guid', 'replication_driver_data': '67890'}
+        self.driver.backends = [{'target_device_id': '12345',
+                                 'managed_backend_name': 'host@dell1',
+                                 'qosnode': 'cinderqos'},
+                                {'target_device_id': '67890',
+                                 'managed_backend_name': 'host@dell2',
+                                 'qosnode': 'cinderqos'}]
+        mock_api = mock.MagicMock()
+        # Good run. Secondary in replication_driver_data and backend. sc up.
+        destssn, host = self.driver._parse_secondary(mock_api, vref, '67890')
+        self.assertEqual(67890, destssn)
+        self.assertEqual('host@dell2', host)
+        # Bad run. Secondary not in replication_driver_data
+        destssn, host = self.driver._parse_secondary(mock_api, vref, '12345')
+        self.assertIsNone(destssn)
+        self.assertIsNone(host)
+        # Bad run. Secondary not in backend.
+        vref['replication_driver_data'] = '67891'
+        destssn, host = self.driver._parse_secondary(mock_api, vref, '67890')
+        self.assertIsNone(destssn)
+        self.assertIsNone(host)
+        # Bad run. no driver data
+        vref['replication_driver_data'] = ''
+        destssn, host = self.driver._parse_secondary(mock_api, vref, '67890')
+        self.assertIsNone(destssn)
+        self.assertIsNone(host)
+        # Good run. No secondary selected.
+        vref['replication_driver_data'] = '12345'
+        destssn, host = self.driver._parse_secondary(mock_api, vref, '12345')
+        self.assertEqual(12345, destssn)
+        self.assertEqual('host@dell1', host)
+        self.driver.backends = backends
+
+    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+                       'find_sc')
+    def test__parse_secondary_sc_down(self,
+                                      mock_find_sc,
+                                      mock_close_connection,
+                                      mock_open_connection,
+                                      mock_init):
+        """_parse_secondary returns (None, None) if the secondary SC is down."""
+        backends = self.driver.backends
+        vref = {'id': 'guid', 'replication_driver_data': '12345'}
+        self.driver.backends = [{'target_device_id': '12345',
+                                 'managed_backend_name': 'host@dell1',
+                                 'qosnode': 'cinderqos'},
+                                {'target_device_id': '67890',
+                                 'managed_backend_name': 'host@dell2',
+                                 'qosnode': 'cinderqos'}]
+        mock_api = mock.MagicMock()
+        # Bad run. Good selection. SC down.
+        vref['replication_driver_data'] = '12345'
+        mock_api.find_sc = mock.MagicMock(
+            side_effect=exception.VolumeBackendAPIException(data='1234'))
+        destssn, host = self.driver._parse_secondary(mock_api, vref, '12345')
+        self.assertIsNone(destssn)
+        self.assertIsNone(host)
+        self.driver.backends = backends
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'break_replication')
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_parse_secondary')
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_do_repl')
+ def test_replication_failover(self,
+ mock_do_repl,
+ mock_parse_secondary,
+ mock_break_replication,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ mock_parse_secondary.side_effect = [(12345, 'host@host#be'), # Good.
+ (12345, 'host@host#be'), # Bad.
+ (None, None)] # Not found.
+ mock_break_replication.side_effect = [True, # Good run.
+ False] # Bad run.
+ mock_do_repl.side_effect = [(False, False), # No run.
+ (True, False), # Good run.
+ (True, False), # Bad run.
+ (True, False)] # Secondary not found.
+ vref = {'id': 'guid'}
+ # No run. Not doing repl. Should raise.
+ self.assertRaises(exception.ReplicationError,
+ self.driver.replication_failover,
+ {},
+ vref,
+ '12345')
+ # Good run
+ expected = {'host': 'host@host#be',
+ 'replication_driver_data': None}
+ ret = self.driver.replication_failover({}, vref, '12345')
+ self.assertEqual(expected, ret)
+ # Bad run. (break_replication fails)
+ self.assertRaises(exception.ReplicationError,
+ self.driver.replication_failover,
+ {},
+ vref,
+ '12345')
+ # Secondary not found.
+ self.assertRaises(exception.ReplicationError,
+ self.driver.replication_failover,
+ {},
+ vref,
+ '54321')
+
+ @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+ '_do_repl')
+ def test_list_replication_targets(self,
+ mock_do_repl,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ mock_do_repl.side_effect = [(False, False), # No repl.
+ (True, False), # Good run.
+ (True, False)] # Target not found.
+ backends = self.driver.backends
+ self.driver.backends = [{'target_device_id': '12345',
+ 'managed_backend_name': 'host@dell1',
+ 'qosnode': 'cinderqos'},
+ {'target_device_id': '67890',
+ 'managed_backend_name': 'host@dell2',
+ 'qosnode': 'cinderqos'}]
+ # No repl.
+ expected = {'volume_id': 'guid',
+ 'targets': []}
+ vref = {'replication_driver_data': '',
+ 'id': 'guid'}
+ ret = self.driver.list_replication_targets({}, vref)
+ self.assertEqual(expected, ret)
+ # Good run.
+ expected = {'volume_id': 'guid',
+ 'targets': [{'type': 'managed',
+ 'target_device_id': '12345',
+ 'backend_name': 'host@dell1'},
+ {'type': 'managed',
+ 'target_device_id': '67890',
+ 'backend_name': 'host@dell2'}]}
+ vref = {'replication_driver_data': '12345,67890',
+ 'id': 'guid'}
+ ret = self.driver.list_replication_targets({}, vref)
+ self.assertEqual(expected, ret)
+ # Target not found.
+ # We find one target but not another. This could happen for a variety
+ # of reasons, most of them administrator negligence. But the main one
+ # is that someone reconfigured their backends without taking into
+ # account how this would affect the children.
+ expected = {'volume_id': 'guid',
+ 'targets': [{'type': 'managed',
+ 'target_device_id': '12345',
+ 'backend_name': 'host@dell1'}]}
+ vref = {'replication_driver_data': '12345,99999',
+ 'id': 'guid'}
+ ret = self.driver.list_replication_targets({}, vref)
+ self.assertEqual(expected, ret)
+
+ self.driver.backends = backends
+
+ def test_get_replication_updates(self,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ ret = self.driver.get_replication_updates({})
+ self.assertEqual([], ret)
u'chapSecret': u'',
u'maximumTransmissionUnit': 1500}
+ SCQOS = {u'linkSpeed': u'1 Gbps',
+ u'numberDevices': 1,
+ u'bandwidthLimited': False,
+ u'name': u'Cinder QoS',
+ u'instanceId': u'64702.2',
+ u'scName': u'Storage Center 64702',
+ u'scSerialNumber': 64702,
+ u'instanceName': u'Cinder QoS',
+ u'advancedSettings': {u'globalMaxSectorPerIo': 512,
+ u'destinationMaxSectorCount': 65536,
+ u'queuePassMaxSectorCount': 65536,
+ u'destinationMaxIoCount': 18,
+ u'globalMaxIoCount': 32,
+ u'queuePassMaxIoCount': 8},
+ u'objectType': u'ScReplicationQosNode'}
+
+ SCREPL = [{u'destinationVolume': {u'instanceId': u'65495.167',
+ u'instanceName': u'Cinder repl of abcd9'
+ u'5b2-1284-4cf0-a397-9'
+ u'70fa6c68092',
+ u'objectType': u'ScVolume'},
+ u'instanceId': u'64702.9',
+ u'scSerialNumber': 64702,
+ u'syncStatus': u'NotApplicable',
+ u'objectType': u'ScReplication',
+ u'sourceStorageCenter': {u'instanceId': u'64702',
+ u'instanceName': u'Storage Center '
+ '64702',
+ u'objectType': u'StorageCenter'},
+ u'secondaryTransportTypes': [],
+ u'dedup': False,
+ u'state': u'Up',
+ u'replicateActiveReplay': False,
+ u'qosNode': {u'instanceId': u'64702.2',
+ u'instanceName': u'Cinder QoS',
+ u'objectType': u'ScReplicationQosNode'},
+ u'sourceVolume': {u'instanceId': u'64702.13108',
+ u'instanceName': u'abcd95b2-1284-4cf0-a397-'
+ u'970fa6c68092',
+ u'objectType': u'ScVolume'},
+ u'type': u'Asynchronous',
+ u'statusMessage': u'',
+ u'status': u'Up',
+ u'syncMode': u'None',
+ u'stateMessage': u'',
+ u'managedByLiveVolume': False,
+ u'destinationScSerialNumber': 65495,
+ u'pauseAllowed': True,
+ u'instanceName': u"Replication of 'abcd95b2-1284-4cf0-"
+ u"a397-970fa6c68092'",
+ u'simulation': False,
+ u'transportTypes': [u'FibreChannel'],
+ u'replicateStorageToLowestTier': True,
+ u'scName': u'Storage Center 64702',
+ u'destinationStorageCenter': {u'instanceId': u'65495',
+ u'instanceName': u'Storage Center'
+ u' 65495',
+ u'objectType': u'StorageCenter'}}]
+
IQN = 'iqn.2002-03.com.compellent:5000D31000000001'
WWN = u'21000024FF30441C'
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
- return_value=RESPONSE_204)
+ return_value=RESPONSE_400)
def test_init_volume_failure(self,
mock_post,
mock_close_connection,
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
- return_value=RESPONSE_204)
+ return_value=RESPONSE_400)
def test_create_volume_failure(self,
mock_post,
mock_find_volume_folder,
mock_open_connection,
mock_init):
# Test calling find_volume with result of no volume found
- mock_get_volume_list.side_effect = [[], []]
+ mock_get_volume_list.side_effect = [[], [], []]
res = self.scapi.find_volume(self.volume_name)
self.assertIsNone(res, 'None expected')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_import_one',
+ return_value=VOLUME)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_volume_list')
+ def test_find_volume_complete_replication(self,
+ mock_get_volume_list,
+ mock_import_one,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ mock_get_volume_list.side_effect = [[], [], self.VOLUME_LIST]
+ res = self.scapi.find_volume(self.volume_name)
+ self.assertEqual(self.VOLUME, res, 'Unexpected volume')
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_import_one',
+ return_value=None)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_volume_list')
+ def test_find_volume_complete_replication_fail(self,
+ mock_get_volume_list,
+ mock_import_one,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ mock_get_volume_list.side_effect = [[], [], self.VOLUME_LIST]
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.scapi.find_volume, self.volume_name)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_volume_list')
+ def test_find_volume_complete_replication_multi(self,
+ mock_get_volume_list,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ # Test case where multiple repl volumes are found.
+ mock_get_volume_list.side_effect = [[],
+ [],
+ self.VOLUME_LIST_MULTI_VOLS]
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.scapi.find_volume, self.volume_name)
+
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=VOLUME_LIST_MULTI_VOLS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
- return_value=RESPONSE_204)
+ return_value=RESPONSE_400)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
- return_value=RESPONSE_204)
+ return_value=RESPONSE_400)
def test_add_hba_failure(self,
mock_post,
mock_close_connection,
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
- return_value=RESPONSE_204)
+ return_value=RESPONSE_400)
def test_find_serveros_failed(self,
mock_post,
mock_close_connection,
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
- return_value=RESPONSE_204)
+ return_value=RESPONSE_400)
def test_create_server_failure(self,
mock_post,
mock_find_serveros,
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
- return_value=RESPONSE_204)
+ return_value=RESPONSE_400)
def test_find_fc_initiators_error(self,
mock_get,
mock_close_connection,
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
- return_value=RESPONSE_204)
+ return_value=RESPONSE_400)
def test_get_volume_count_failure(self,
mock_get,
mock_close_connection,
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
- return_value=RESPONSE_204)
+ return_value=RESPONSE_400)
def test_find_mappings_failure(self,
mock_get,
mock_close_connection,
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
- return_value=RESPONSE_204)
+ return_value=RESPONSE_400)
def test_find_active_controller_failure(self,
mock_get,
mock_close_connection,
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mapping_profiles',
return_value=MAP_PROFILES)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json',
+ return_value={'result': True})
def test_unmap_volume(self,
+ mock_get_json,
mock_find_mapping_profiles,
mock_delete,
mock_close_connection,
return_value=TST_RPLAY)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
- return_value=RESPONSE_200)
+ return_value=RESPONSE_400)
def test_delete_replay_failure(self,
mock_post,
mock_find_replay,
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
- self.TST_RPLAY)
+ self.TST_RPLAY,
+ None)
self.assertTrue(mock_post.called)
mock_find_volume_folder.assert_called_once_with(True)
self.assertTrue(mock_first_result.called)
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
- self.TST_RPLAY)
+ self.TST_RPLAY,
+ None)
self.assertTrue(mock_post.called)
mock_find_volume_folder.assert_called_once_with(True)
self.assertTrue(mock_first_result.called)
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
- self.TST_RPLAY)
+ self.TST_RPLAY,
+ None)
self.assertTrue(mock_post.called)
mock_find_volume_folder.assert_called_once_with(True)
self.assertTrue(mock_first_result.called)
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
- self.TST_RPLAY)
+ self.TST_RPLAY,
+ None)
self.assertTrue(mock_post.called)
mock_find_volume_folder.assert_called_once_with(True)
self.assertIsNone(res, 'Expected None')
vol_name = u'Test_create_clone_vol'
res = self.scapi.create_cloned_volume(
vol_name,
- self.VOLUME)
+ self.VOLUME,
+ ['Daily'])
mock_create_replay.assert_called_once_with(self.VOLUME,
'Cinder Clone Replay',
60)
mock_create_view_volume.assert_called_once_with(
vol_name,
- self.RPLAY)
+ self.RPLAY,
+ ['Daily'])
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
- 'create_replay',
+ 'create_view_volume',
return_value=None)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'create_replay')
def test_create_cloned_volume_failure(self,
mock_create_replay,
+ mock_create_view_volume,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where create cloned volumes fails because create_replay
# fails
vol_name = u'Test_create_clone_vol'
+ mock_create_replay.return_value = None
res = self.scapi.create_cloned_volume(
vol_name,
- self.VOLUME)
+ self.VOLUME,
+ ['Daily'])
mock_create_replay.assert_called_once_with(self.VOLUME,
'Cinder Clone Replay',
60)
+ self.assertFalse(mock_create_view_volume.called)
self.assertIsNone(res, 'Expected None')
+ # Again, but let create_view_volume fail.
+ mock_create_replay.return_value = self.RPLAY
+ res = self.scapi.create_cloned_volume(
+ vol_name,
+ self.VOLUME,
+ ['Daily'])
+ mock_create_view_volume.assert_called_once_with(
+ vol_name,
+ self.RPLAY,
+ ['Daily'])
+ self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
- return_value=RESPONSE_204)
+ return_value=RESPONSE_400)
def test_rename_volume_failure(self,
mock_post,
mock_close_connection,
mock_close_connection,
mock_open_connection,
mock_init):
- LOG = self.mock_object(dell_storagecenter_api, "LOG")
res = self.scapi._get_user_preferences()
self.assertEqual({}, res)
- self.assertTrue(LOG.error.call_count > 0)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_user_preferences',
self.assertEqual(100, rem)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
- '_get_volume_list',
- return_value=[{'configuredSize':
- '1.073741824E9 Bytes'}])
- @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
- '_size_to_gb',
- return_value=(1, 0))
- @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
- '_find_mappings',
- return_value=[])
- @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
- '_find_volume_folder',
- return_value={'id': '1'})
+ '_find_volume_folder')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'put',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
- '_get_id')
- def test_manage_existing(self,
- mock_get_id,
- mock_put,
- mock_find_volume_folder,
- mock_find_mappings,
- mock_size_to_gb,
- mock_get_volume_list,
- mock_close_connection,
- mock_open_connection,
- mock_init):
+ '_get_json',
+ return_value=VOLUME)
+ def test_import_one(self,
+ mock_get_json,
+ mock_put,
+ mock_find_volume_folder,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
newname = 'guid'
- existing = {'source-name': 'scvolname'}
- # First call is foldername, second is vollist. This is reflected
- # in the payload.
- mock_get_id.side_effect = ['1', '100']
+ # First test: the volume folder is found. Second test: it is not found.
+ mock_find_volume_folder.side_effect = [{'instanceId': '1'}, None]
expected_url = 'StorageCenter/ScVolume/100'
expected_payload = {'Name': newname,
'VolumeFolder': '1'}
- self.scapi.manage_existing(newname, existing)
- mock_get_volume_list.asert_called_once_with(existing, False)
- self.assertTrue(mock_get_id.called)
+ self.scapi._import_one({'instanceId': '100'}, newname)
mock_put.assert_called_once_with(expected_url, expected_payload)
self.assertTrue(mock_find_volume_folder.called)
- self.assertTrue(mock_find_mappings.called)
- self.assertTrue(mock_size_to_gb.called)
+ expected_payload = {'Name': newname}
+ self.scapi._import_one({'instanceId': '100'}, newname)
+ mock_put.assert_any_call(expected_url, expected_payload)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
'_find_mappings',
return_value=[])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
- '_find_volume_folder',
- return_value=None)
- @mock.patch.object(dell_storagecenter_api.HttpClient,
- 'put',
- return_value=RESPONSE_200)
- @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
- '_get_id',
- return_value='100')
- def test_manage_existing_folder_not_found(self,
- mock_get_id,
- mock_put,
- mock_find_volume_folder,
- mock_find_mappings,
- mock_size_to_gb,
- mock_get_volume_list,
- mock_close_connection,
- mock_open_connection,
- mock_init):
- # Same as above only we don't have a volume folder.
+ '_import_one',
+ return_value=VOLUME)
+ def test_manage_existing(self,
+ mock_import_one,
+ mock_find_mappings,
+ mock_size_to_gb,
+ mock_get_volume_list,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
newname = 'guid'
existing = {'source-name': 'scvolname'}
- expected_url = 'StorageCenter/ScVolume/100'
- expected_payload = {'Name': newname}
self.scapi.manage_existing(newname, existing)
- mock_get_volume_list.asert_called_once_with(
- existing.get('source-name'),
- existing.get('source-id'),
- False)
- mock_put.assert_called_once_with(expected_url, expected_payload)
- self.assertTrue(mock_get_id.called)
- self.assertTrue(mock_find_volume_folder.called)
+ mock_get_volume_list.assert_called_once_with(
+ existing.get('source-name'),
+ existing.get('source-id'),
+ False)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_size_to_gb.called)
'_find_mappings',
return_value=[])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
- '_find_volume_folder',
+ '_import_one',
return_value=None)
- @mock.patch.object(dell_storagecenter_api.HttpClient,
- 'put',
- return_value=RESPONSE_400)
- @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
- '_get_id',
- return_value='100')
- def test_manage_existing_rename_fail(self,
- mock_get_id,
- mock_put,
- mock_find_volume_folder,
+ def test_manage_existing_import_fail(self,
+ mock_import_one,
mock_find_mappings,
mock_size_to_gb,
mock_get_volume_list,
# We fail on the _find_volume_folder to make this easier.
newname = 'guid'
existing = {'source-name': 'scvolname'}
- expected_url = 'StorageCenter/ScVolume/100'
- expected_payload = {'Name': newname}
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.manage_existing,
newname,
existing.get('source-name'),
existing.get('source-id'),
False)
- self.assertTrue(mock_get_id.called)
- mock_put.assert_called_once_with(expected_url, expected_payload)
- self.assertTrue(mock_find_volume_folder.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_size_to_gb.called)
self.assertTrue(mock_get_id.called)
mock_put.assert_called_once_with(expected_url, expected_payload)
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post',
+ return_value=RESPONSE_200)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json',
+ return_value=[SCQOS])
+ # def _find_qos(self, qosnode):
+ def test__find_qos(self,
+ mock_get_json,
+ mock_post,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ ret = self.scapi._find_qos('Cinder QoS')
+ self.assertDictEqual(self.SCQOS, ret)
+
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post',
+ return_value=RESPONSE_200)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json')
+ # def _find_qos(self, qosnode):
+ def test__find_qos_not_found(self,
+ mock_get_json,
+ mock_post,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ # Set side effects for _get_json:
+ # first an empty list, second returns the qosnode.
+ mock_get_json.side_effect = [[], self.SCQOS]
+ ret = self.scapi._find_qos('Cinder QoS')
+ self.assertDictEqual(self.SCQOS, ret)
+
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post',
+ return_value=RESPONSE_400)
+ # def _find_qos(self, qosnode):
+ def test__find_qos_find_fail(self,
+ mock_post,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.scapi._find_qos,
+ 'Cinder QoS')
+
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json',
+ return_value=[])
+ # def _find_qos(self, qosnode):
+ def test__find_qos_create_fail(self,
+ mock_get_json,
+ mock_post,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ mock_post.side_effect = [self.RESPONSE_200, self.RESPONSE_400]
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.scapi._find_qos,
+ 'Cinder QoS')
+
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'get',
+ return_value=RESPONSE_200)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json',
+ return_value=SCREPL)
+ def test_get_screplication(self,
+ mock_get_json,
+ mock_get,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ ret = self.scapi.get_screplication({'instanceId': '1'}, 65495)
+ self.assertDictEqual(self.SCREPL[0], ret)
+
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'get',
+ return_value=RESPONSE_200)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json',
+ return_value=[])
+ def test_get_screplication_not_found(self,
+ mock_get_json,
+ mock_get,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ ret = self.scapi.get_screplication({'instanceId': '1'}, 65496)
+ self.assertIsNone(ret)
+
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'get',
+ return_value=RESPONSE_400)
+ def test_get_screplication_error(self,
+ mock_get,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ ret = self.scapi.get_screplication({'instanceId': '1'}, 65495)
+ self.assertIsNone(ret)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'get_screplication',
+ return_value=SCREPL[0])
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'delete',
+ return_value=RESPONSE_200)
+ def test_delete_replication(self,
+ mock_delete,
+ mock_get_screplication,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ destssn = 65495
+ expected = 'StorageCenter/ScReplication/%s' % (
+ self.SCREPL[0]['instanceId'])
+ ret = self.scapi.delete_replication(self.VOLUME, destssn)
+ mock_delete.assert_any_call(expected)
+ self.assertTrue(ret)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'get_screplication',
+ return_value=None)
+ def test_delete_replication_not_found(self,
+ mock_get_screplication,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ destssn = 65495
+ ret = self.scapi.delete_replication(self.VOLUME, destssn)
+ self.assertFalse(ret)
+ ret = self.scapi.delete_replication(self.VOLUME, destssn)
+ self.assertFalse(ret)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'get_screplication',
+ return_value=SCREPL[0])
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'delete',
+ return_value=RESPONSE_400)
+ def test_delete_replication_error(self,
+ mock_delete,
+ mock_get_screplication,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ destssn = 65495
+ expected = 'StorageCenter/ScReplication/%s' % (
+ self.SCREPL[0]['instanceId'])
+ ret = self.scapi.delete_replication(self.VOLUME, destssn)
+ mock_delete.assert_any_call(expected)
+ self.assertFalse(ret)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_find_qos',
+ return_value=SCQOS)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'find_sc')
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post',
+ return_value=RESPONSE_200)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json',
+ return_value=SCREPL[0])
+ def test_create_replication(self,
+ mock_get_json,
+ mock_post,
+ mock_find_sc,
+ mock_find_qos,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ # We don't test diskfolder. If one is found we include it. If not
+ # then we leave it out. Checking for disk folder is tested elsewhere.
+ ssn = 64702
+ destssn = 65495
+ qosnode = 'Cinder QoS'
+ notes = 'Created by Dell Cinder Driver'
+ repl_prefix = 'Cinder repl of '
+
+ mock_find_sc.side_effect = [destssn, ssn, destssn, ssn, destssn, ssn]
+ payload = {'DestinationStorageCenter': destssn,
+ 'QosNode': self.SCQOS['instanceId'],
+ 'SourceVolume': self.VOLUME['instanceId'],
+ 'StorageCenter': ssn,
+ 'ReplicateActiveReplay': False,
+ 'Type': 'Asynchronous',
+ 'DestinationVolumeAttributes':
+ {'CreateSourceVolumeFolderPath': True,
+ 'Notes': notes,
+ 'Name': repl_prefix + self.VOLUME['name']}
+ }
+ ret = self.scapi.create_replication(self.VOLUME,
+ str(destssn),
+ qosnode,
+ False,
+ None,
+ False)
+ mock_post.assert_any_call('StorageCenter/ScReplication', payload)
+ self.assertDictEqual(self.SCREPL[0], ret)
+ payload['Type'] = 'Synchronous'
+ payload['ReplicateActiveReplay'] = True
+ ret = self.scapi.create_replication(self.VOLUME,
+ str(destssn),
+ qosnode,
+ True,
+ None,
+ False)
+ mock_post.assert_any_call('StorageCenter/ScReplication', payload)
+ self.assertDictEqual(self.SCREPL[0], ret)
+ ret = self.scapi.create_replication(self.VOLUME,
+ str(destssn),
+ qosnode,
+ True,
+ None,
+ True)
+ mock_post.assert_any_call('StorageCenter/ScReplication', payload)
+ self.assertDictEqual(self.SCREPL[0], ret)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_find_qos',
+ return_value=SCQOS)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'find_sc')
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json',
+ return_value=SCREPL[0])
+ def test_create_replication_error(self,
+ mock_get_json,
+ mock_post,
+ mock_find_sc,
+ mock_find_qos,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ ssn = 64702
+ destssn = 65495
+ qosnode = 'Cinder QoS'
+ notes = 'Created by Dell Cinder Driver'
+ repl_prefix = 'Cinder repl of '
+
+ mock_find_sc.side_effect = [destssn, ssn, destssn, ssn]
+ mock_post.side_effect = [self.RESPONSE_400, self.RESPONSE_400,
+ self.RESPONSE_400, self.RESPONSE_400]
+ payload = {'DestinationStorageCenter': destssn,
+ 'QosNode': self.SCQOS['instanceId'],
+ 'SourceVolume': self.VOLUME['instanceId'],
+ 'StorageCenter': ssn,
+ 'ReplicateActiveReplay': False,
+ 'Type': 'Asynchronous',
+ 'DestinationVolumeAttributes':
+ {'CreateSourceVolumeFolderPath': True,
+ 'Notes': notes,
+ 'Name': repl_prefix + self.VOLUME['name']}
+ }
+ ret = self.scapi.create_replication(self.VOLUME,
+ str(destssn),
+ qosnode,
+ False,
+ None,
+ False)
+ mock_post.assert_any_call('StorageCenter/ScReplication', payload)
+ self.assertIsNone(ret)
+
+ payload['Type'] = 'Synchronous'
+ payload['ReplicateActiveReplay'] = True
+ ret = self.scapi.create_replication(self.VOLUME,
+ str(destssn),
+ qosnode,
+ True,
+ None,
+ True)
+ mock_post.assert_any_call('StorageCenter/ScReplication', payload)
+ self.assertIsNone(ret)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'get_screplication',
+ return_value=SCREPL)
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post',
+ return_value=RESPONSE_200)
+ def test_pause_replication(self,
+ mock_post,
+ mock_get_screplication,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ # Not much to test here without an SC.
+ ret = self.scapi.pause_replication(self.VOLUME, 65495)
+ self.assertTrue(ret)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'get_screplication',
+ return_value=SCREPL)
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post',
+ return_value=RESPONSE_400)
+ def test_pause_replication_error(self,
+ mock_post,
+ mock_get_screplication,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ # Not much to test here without an SC.
+ ret = self.scapi.pause_replication(self.VOLUME, 65495)
+ self.assertFalse(ret)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'get_screplication',
+ return_value=None)
+ def test_pause_replication_not_found(self,
+ mock_get_screplication,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ # Not much to test here without an SC.
+ ret = self.scapi.pause_replication(self.VOLUME, 65495)
+ self.assertFalse(ret)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'get_screplication',
+ return_value=SCREPL)
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post',
+ return_value=RESPONSE_200)
+ def test_resume_replication(self,
+ mock_post,
+ mock_get_screplication,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ # Not much to test here without an SC.
+ ret = self.scapi.resume_replication(self.VOLUME, 65495)
+ self.assertTrue(ret)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'get_screplication',
+ return_value=SCREPL)
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post',
+ return_value=RESPONSE_400)
+ def test_resume_replication_error(self,
+ mock_post,
+ mock_get_screplication,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ # Not much to test here without an SC.
+ ret = self.scapi.resume_replication(self.VOLUME, 65495)
+ self.assertFalse(ret)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'get_screplication',
+ return_value=None)
+ def test_resume_replication_not_found(self,
+ mock_get_screplication,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ # Not much to test here without an SC.
+ ret = self.scapi.resume_replication(self.VOLUME, 65495)
+ self.assertFalse(ret)
+
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post',
+ return_value=RESPONSE_200)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json',
+ return_value=SCREPL)
+ def test_find_repl_volume(self,
+ mock_get_json,
+ mock_post,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ ret = self.scapi.find_repl_volume('guid', 65495)
+ self.assertDictEqual(self.SCREPL[0], ret)
+
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post',
+ return_value=RESPONSE_200)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json',
+ return_value=[])
+ def test_find_repl_volume_empty_list(self,
+ mock_get_json,
+ mock_post,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ ret = self.scapi.find_repl_volume('guid', 65495)
+ self.assertIsNone(ret)
+
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post',
+ return_value=RESPONSE_200)
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json',
+ return_value=[{'instanceId': '1'}, {'instanceId': '2'}])
+ def test_find_repl_volume_multiple_results(self,
+ mock_get_json,
+ mock_post,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ ret = self.scapi.find_repl_volume('guid', 65495)
+ self.assertIsNone(ret)
+
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post',
+ return_value=RESPONSE_400)
+ def test_find_repl_volume_error(self,
+ mock_post,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ ret = self.scapi.find_repl_volume('guid', 65495)
+ self.assertIsNone(ret)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'get_screplication')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'rename_volume')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'find_repl_volume')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ 'find_volume')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_remove_mappings')
+ def test_break_replication(self,
+ mock_remove_mappings,
+ mock_find_volume,
+ mock_find_repl_volume,
+ mock_rename_volume,
+ mock_get_screplication,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ # Find_volume doesn't actually matter. We do not gate on this.
+ # Switch it up just to prove that.
+ mock_find_volume.side_effect = [self.VOLUME, # 1
+ self.VOLUME, # 2
+ None, # 3
+ None, # 4
+ None] # 5
+ # Much like find volume we do not gate on this.
+ mock_get_screplication.side_effect = [self.SCREPL[0], # 1
+ None, # 2
+ None, # 3
+ None, # 4
+ None] # 5
+ # This we do gate on: find_repl_volume must succeed.
+ mock_find_repl_volume.side_effect = [self.VOLUME, # 1
+ self.VOLUME, # 2
+ self.VOLUME, # 3
+ self.VOLUME, # 4
+ None] # 5
+ mock_remove_mappings.side_effect = [True, # 1
+ True,
+ True, # 2
+ False,
+ True, # 3
+ True,
+ True, # 4
+ True,
+ False] # 5
+
+ mock_rename_volume.side_effect = [True, # 1
+ True, # 2
+ True, # 3
+ False] # 4
+ # Good path.
+ ret = self.scapi.break_replication('name', 65495)
+ self.assertTrue(ret)
+ self.assertEqual(1, mock_rename_volume.call_count)
+ # Source found, screpl not found.
+ ret = self.scapi.break_replication('name', 65495)
+ self.assertTrue(ret)
+ self.assertEqual(2, mock_rename_volume.call_count)
+ # No source vol good path.
+ ret = self.scapi.break_replication('name', 65495)
+ self.assertTrue(ret)
+ self.assertEqual(3, mock_rename_volume.call_count)
+ # rename fail
+ ret = self.scapi.break_replication('name', 65495)
+ self.assertFalse(ret)
+ self.assertEqual(4, mock_rename_volume.call_count)
+ # fail remove mappings
+ ret = self.scapi.break_replication('name', 65495)
+ self.assertFalse(ret)
+ self.assertEqual(4, mock_rename_volume.call_count)
+
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_user_preferences')
+ def test__find_user_replay_profiles(self,
+ mock_get_user_preferences,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ mock_get_user_preferences.return_value = {}
+ ret = self.scapi._find_user_replay_profiles()
+ self.assertEqual([], ret)
+ mock_get_user_preferences.return_value = {'test': 'test',
+ 'replayProfileList': []}
+ ret = self.scapi._find_user_replay_profiles()
+ self.assertEqual([], ret)
+ mock_get_user_preferences.return_value = {
+ 'test': 'test', 'replayProfileList': [{'instanceId': 'a'},
+ {'instanceId': 'b'}]}
+ ret = self.scapi._find_user_replay_profiles()
+ self.assertEqual(['a', 'b'], ret)
+
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json')
+ def test__find_daily_replay_profile(self,
+ mock_get_json,
+ mock_post,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ mock_post.return_value = self.RESPONSE_200
+ mock_get_json.return_value = [{'instanceId': 'a'}]
+ ret = self.scapi._find_daily_replay_profile()
+ self.assertEqual('a', ret)
+ mock_get_json.return_value = []
+ ret = self.scapi._find_daily_replay_profile()
+ self.assertIsNone(ret)
+ mock_get_json.return_value = None
+ ret = self.scapi._find_daily_replay_profile()
+ self.assertIsNone(ret)
+ mock_post.return_value = self.RESPONSE_400
+ ret = self.scapi._find_daily_replay_profile()
+ self.assertIsNone(ret)
+
+ @mock.patch.object(dell_storagecenter_api.HttpClient,
+ 'post')
+ @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
+ '_get_json')
+ def test__find_replay_profiles(self,
+ mock_get_json,
+ mock_post,
+ mock_close_connection,
+ mock_open_connection,
+ mock_init):
+ # Good run.
+ rps = 'a,b'
+ mock_post.return_value = self.RESPONSE_200
+ mock_get_json.return_value = [{'name': 'a', 'instanceId': 'a'},
+ {'name': 'b', 'instanceId': 'b'},
+ {'name': 'c', 'instanceId': 'c'}]
+ reta, retb = self.scapi._find_replay_profiles(rps)
+ self.assertEqual(['a', 'b'], reta)
+ self.assertEqual(['c'], retb)
+ # Looking for profile that doesn't exist.
+ rps = 'a,b,d'
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.scapi._find_replay_profiles,
+ rps)
+ # Looking for nothing.
+ rps = ''
+ reta, retb = self.scapi._find_replay_profiles(rps)
+ self.assertEqual([], reta)
+ self.assertEqual([], retb)
+ # Still Looking for nothing.
+ rps = None
+ reta, retb = self.scapi._find_replay_profiles(rps)
+ self.assertEqual([], reta)
+ self.assertEqual([], retb)
+ # Bad call.
+ rps = 'a,b'
+ mock_post.return_value = self.RESPONSE_400
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.scapi._find_replay_profiles,
+ rps)
+
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_replay_profiles')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_user_replay_profiles')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_daily_replay_profile')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_update_volume_profiles')
    def test_update_replay_profiles(self,
                                    mock_update_volume_profiles,
                                    mock_find_daily_replay_profile,
                                    mock_find_user_replay_profiles,
                                    mock_find_replay_profiles,
                                    mock_close_connection,
                                    mock_open_connection,
                                    mock_init):
        """update_replay_profiles add/remove bookkeeping and failure modes.

        ``mock_update_volume_profiles.call_count`` is cumulative across
        the scenarios below, so each assert is the new calls plus the
        running total from the scenarios before it.
        """
        scvol = {}
        mock_find_replay_profiles.return_value = (['a', 'b'], ['c'])
        # One entry per expected _update_volume_profiles call, grouped by
        # scenario: 3, 1, 3, 5, 4 (final False is never consumed because
        # the last scenario raises before any update call).
        mock_update_volume_profiles.side_effect = [
            True, True, True,
            False,
            True, True, False,
            True, True, True, True, True,
            True, True, True, True,
            False]
        ret = self.scapi.update_replay_profiles(scvol, 'a,b')
        # Two adds and one remove
        self.assertEqual(3, mock_update_volume_profiles.call_count)
        self.assertTrue(ret)
        # Now update fails.
        ret = self.scapi.update_replay_profiles(scvol, 'a,b')
        # 1 failed update plus 3 from before.
        self.assertEqual(4, mock_update_volume_profiles.call_count)
        self.assertFalse(ret)
        # Fail adding Ids..
        ret = self.scapi.update_replay_profiles(scvol, 'a,b')
        # 3 more 4 from before.
        self.assertEqual(7, mock_update_volume_profiles.call_count)
        self.assertFalse(ret)
        # User clearing profiles.
        mock_find_replay_profiles.return_value = ([], ['a', 'b', 'c'])
        mock_find_user_replay_profiles.return_value = ['d', 'u']
        ret = self.scapi.update_replay_profiles(scvol, '')
        # 3 removes and 2 adds plus 7 from before
        self.assertEqual(12, mock_update_volume_profiles.call_count)
        self.assertTrue(ret)
        # User clearing profiles and no defaults. (Probably not possible.)
        mock_find_user_replay_profiles.return_value = []
        mock_find_daily_replay_profile.return_value = 'd'
        ret = self.scapi.update_replay_profiles(scvol, '')
        # 3 removes and 1 add plus 12 from before.
        self.assertEqual(16, mock_update_volume_profiles.call_count)
        self.assertTrue(ret)
        # _find_replay_profiles blows up so we do too.
        mock_find_replay_profiles.side_effect = (
            exception.VolumeBackendAPIException('aaa'))
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.scapi.update_replay_profiles,
                          scvol,
                          'a,b')
+
class DellSCSanAPIConnectionTestCase(test.TestCase):
response_ok.reason = u'ok'
RESPONSE_200 = response_ok
- # Create a Response object that indicates a failure (no content)
+ # Create a Response object with no content
response_nc = models.Response()
response_nc.status_code = 204
response_nc.reason = u'duplicate'
RESPONSE_204 = response_nc
+ # Create a Response object that is a pure error.
+ response_bad = models.Response()
+ response_bad.status_code = 400
+ response_bad.reason = u'bad request'
+ RESPONSE_400 = response_bad
+
APIDICT = {u'instanceId': u'0',
u'hostName': u'192.168.0.200',
u'userId': 434226,
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
- return_value=RESPONSE_204)
+ return_value=RESPONSE_400)
def test_open_connection_failure(self,
mock_post):
self.assertRaises(exception.VolumeBackendAPIException,
from oslo_log import log as logging
import requests
+from simplejson import scanner
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
-
LOG = logging.getLogger(__name__)
class PayloadFilter(object):
-
"""PayloadFilter
Simple class for creating filters for interacting with the Dell
- Storage API DropTop2 and later.
+ Storage API 15.3 and later.
"""
def __init__(self, filtertype='AND'):
class LegacyPayloadFilter(object):
-
"""LegacyPayloadFilter
Simple class for creating filters for interacting with the Dell
- Storage API pre DropTop2.
+ Storage API 15.1 and 15.2.
"""
def __init__(self, filter_type='AND'):
class HttpClient(object):
-
"""HttpClient
Helper for making the REST calls.
should be turned on or not.
"""
self.baseUrl = 'https://%s:%s/api/rest/' % (host, port)
+
self.session = requests.Session()
self.session.auth = (user, password)
+
self.header = {}
self.header['Content-Type'] = 'application/json; charset=utf-8'
+ self.header['Accept'] = 'application/json'
self.header['x-dell-api-version'] = '2.0'
self.verify = verify
def __formatUrl(self, url):
return '%s%s' % (self.baseUrl, url if url[0] != '/' else url[1:])
- @utils.retry(exceptions=(requests.ConnectionError, ))
+ @utils.retry(exceptions=(requests.ConnectionError,))
def get(self, url):
return self.session.get(
self.__formatUrl(url),
headers=self.header,
verify=self.verify)
- @utils.retry(exceptions=(requests.ConnectionError, ))
+ @utils.retry(exceptions=(requests.ConnectionError,))
def post(self, url, payload):
return self.session.post(
self.__formatUrl(url),
headers=self.header,
verify=self.verify)
- @utils.retry(exceptions=(requests.ConnectionError, ))
+ @utils.retry(exceptions=(requests.ConnectionError,))
def put(self, url, payload):
return self.session.put(
self.__formatUrl(url),
headers=self.header,
verify=self.verify)
- @utils.retry(exceptions=(requests.ConnectionError, ))
+ @utils.retry(exceptions=(requests.ConnectionError,))
def delete(self, url):
return self.session.delete(
self.__formatUrl(url),
class StorageCenterApiHelper(object):
-
"""StorageCenterApiHelper
Helper class for API access. Handles opening and closing the
- connection to the Dell Enterprise Manager.
+ connection to the Dell REST API.
"""
def __init__(self, config):
{'ssn': ssn,
'ip': self.config.san_ip})
if ssn:
- """Open connection to Enterprise Manager."""
+ """Open connection to REST API."""
connection = StorageCenterApi(self.config.san_ip,
self.config.dell_sc_api_port,
self.config.san_login,
connection.sfname = self.config.dell_sc_server_folder
connection.open_connection()
else:
- raise exception.VolumeBackendAPIException('Configuration error. '
- 'dell_sc_ssn not set.')
+ raise exception.VolumeBackendAPIException(
+ data=_('Configuration error: dell_sc_ssn not set.'))
return connection
class StorageCenterApi(object):
-
"""StorageCenterApi
- Handles calls to Dell Enterprise Manager (EM) via the REST API interface.
+ Handles calls to Dell SC and EM via the REST API interface.
Version history:
1.0.0 - Initial driver
2.2.0 - Added API 2.2 support.
2.3.0 - Added Legacy Port Mode Support
2.3.1 - Updated error handling.
+ 2.4.0 - Added Replication V2 support.
"""
- APIVERSION = '2.3.1'
+ APIVERSION = '2.4.0'
def __init__(self, host, port, user, password, verify):
- """This creates a connection to Dell Enterprise Manager.
+ """This creates a connection to Dell SC or EM.
- :param host: IP address of the Dell Data Collector.
- :param port: Port the Data Collector is listening on.
+ :param host: IP address of the REST interface.
+ :param port: Port the REST interface is listening on.
:param user: User account to login with.
:param password: Password.
:param verify: Boolean indicating whether certificate verification
should be turned on or not.
"""
self.notes = 'Created by Dell Cinder Driver'
+ self.repl_prefix = 'Cinder repl of '
+ self.failover_prefix = 'Cinder failover '
self.ssn = None
self.vfname = 'openstack'
self.sfname = 'openstack'
self.legacypayloadfilters = False
self.consisgroups = True
+ # Nothing other than Replication should care if we are direct connect
+ # or not.
+ self.is_direct_connect = False
self.client = HttpClient(host,
port,
user,
def __exit__(self, type, value, traceback):
self.close_connection()
- def _path_to_array(self, path):
+ @staticmethod
+ def _check_result(rest_response):
+ """Checks and logs API responses.
+
+ :param rest_response: The result from a REST API call.
+ :param expected_response: The expected result.
+ :returns: ``True`` if success, ``False`` otherwise.
+ """
+ if 200 <= rest_response.status_code < 300:
+ # API call was a normal success
+ return True
+
+ LOG.debug('REST call result:\n'
+ '\tUrl: %(url)s\n'
+ '\tCode: %(code)d\n'
+ '\tReason: %(reason)s\n'
+ '\tText: %(text)s',
+ {'url': rest_response.url,
+ 'code': rest_response.status_code,
+ 'reason': rest_response.reason,
+ 'text': rest_response.text})
+ return False
+
+ @staticmethod
+ def _path_to_array(path):
"""Breaks a path into a reversed string array.
:param path: Path to a folder on the Storage Center.
return array
array.append(tail)
+ @staticmethod
+ def _lower_first(s):
+ return s[:1].lower() + s[1:] if s else ''
+
+ def _lower_key(self, in_dict):
+ if type(in_dict) is dict:
+ out_dict = {}
+ for key, item in in_dict.items():
+ out_dict[self._lower_first(key)] = self._lower_key(item)
+ return out_dict
+ elif type(in_dict) is list:
+ return [self._lower_key(obj) for obj in in_dict]
+ else:
+ return in_dict
+
def _first_result(self, blob):
"""Get the first result from the JSON return value.
:returns: JSON or None on error.
"""
try:
- return blob.json()
+ return self._lower_key(blob.json())
except AttributeError:
LOG.error(_LE('Error invalid json: %s'),
blob)
+ except TypeError as ex:
+ LOG.error(_LE('Error TypeError. %s'), ex)
+ except scanner.JSONDecodeError as ex:
+ LOG.error(_LE('Error JSONDecodeError. %s'), ex)
+ # We are here so this went poorly. Log our blob.
+ LOG.debug('_get_json blob %s', blob)
return None
def _get_id(self, blob):
except AttributeError:
LOG.error(_LE('Invalid API object: %s'),
blob)
+ except TypeError as ex:
+ LOG.error(_LE('Error TypeError. %s'), ex)
+ except scanner.JSONDecodeError as ex:
+ LOG.error(_LE('Error JSONDecodeError. %s'), ex)
+ LOG.debug('_get_json blob %s', blob)
return None
def _get_payload_filter(self, filterType='AND'):
return PayloadFilter(filterType)
def open_connection(self):
- """Authenticate against Dell Enterprise Manager.
+ """Authenticate with Dell REST interface.
:raises: VolumeBackendAPIException.
"""
r = self.client.post('ApiConnection/Login',
payload)
- if r.status_code == 200:
- # We should be logged in. Try to grab the api version out of the
- # response.
- try:
- apidict = self._get_json(r)
- version = apidict['apiVersion']
- splitver = version.split('.')
- if splitver[0] == '2':
- if splitver[1] == '0':
- self.consisgroups = False
- self.legacypayloadfilters = True
-
- elif splitver[1] == '1':
- self.legacypayloadfilters = True
- return
-
- except Exception:
- # Good return but not the login response we were expecting.
- # Log it and error out.
- LOG.error(_LE('Unrecognized Login Response: %s'), r)
- else:
- # Call error.
- LOG.error(_LE('Login error: %(code)d %(reason)s'),
- {'code': r.status_code,
- 'reason': r.reason})
+ if not self._check_result(r):
+ raise exception.VolumeBackendAPIException(
+ data=_('Failed to connect to Dell REST API'))
- # Bad request.
- # TODO(Swanson): Should add this to all returns.
- if r.status_code == 400:
- LOG.debug('Bad Request. Return text: %s', r.text)
+ # We should be logged in. Try to grab the api version out of the
+ # response.
+ try:
+ apidict = self._get_json(r)
+ version = apidict['apiVersion']
+ self.is_direct_connect = apidict['provider'] == 'StorageCenter'
+ splitver = version.split('.')
+ if splitver[0] == '2':
+ if splitver[1] == '0':
+ self.consisgroups = False
+ self.legacypayloadfilters = True
+
+ elif splitver[1] == '1':
+ self.legacypayloadfilters = True
+ return
- # If we fell to this point then raise an exception.
- raise exception.VolumeBackendAPIException(
- _('Failed to connect to Enterprise Manager'))
+ except Exception:
+ # Good return but not the login response we were expecting.
+ # Log it and error out.
+ LOG.error(_LE('Unrecognized Login Response: %s'), r)
def close_connection(self):
- """Logout of Dell Enterprise Manager."""
+ """Logout of Dell REST API."""
r = self.client.post('ApiConnection/Logout',
{})
- if r.status_code != 204:
- LOG.warning(_LW('Logout error: %(code)d %(reason)s'),
- {'code': r.status_code,
- 'reason': r.reason})
+ # 204 expected.
+ self._check_result(r)
self.client = None
- def find_sc(self):
+ def find_sc(self, ssn=-1):
"""Check that the SC is there and being managed by EM.
:returns: The SC SSN.
:raises: VolumeBackendAPIException
"""
+ # We might be looking for another ssn. If not then
+ # look for our default.
+ if ssn == -1:
+ ssn = self.ssn
+
r = self.client.get('StorageCenter/StorageCenter')
result = self._get_result(r,
'scSerialNumber',
- self.ssn)
+ ssn)
if result is None:
LOG.error(_LE('Failed to find %(s)s. Result %(r)s'),
- {'s': self.ssn,
+ {'s': ssn,
'r': r})
raise exception.VolumeBackendAPIException(
- _('Failed to find Storage Center'))
+ data=_('Failed to find Storage Center'))
return self._get_id(result)
r = self.client.post(url,
payload)
- if r.status_code != 201:
- LOG.debug('%(url)s error: %(code)d %(reason)s',
- {'url': url,
- 'code': r.status_code,
- 'reason': r.reason})
- else:
+ # 201 expected.
+ if self._check_result(r):
scfolder = self._first_result(r)
return scfolder
folder = None
r = self.client.post(url,
pf.payload)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
folder = self._get_result(r,
'folderPath',
folderpath)
- else:
- LOG.debug('%(url)s error: %(code)d %(reason)s',
- {'url': url,
- 'code': r.status_code,
- 'reason': r.reason})
return folder
def _find_volume_folder(self, create=False):
pf = self._get_payload_filter()
pf.append('scSerialNumber', scvolume.get('scSerialNumber'), 'Equals')
r = self.client.post('StorageCenter/ScServer/GetList', pf.payload)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
scservers = self._get_json(r)
# Sort through the servers looking for one with connectivity.
for scserver in scservers:
pf.append('scSerialNumber', self.ssn, 'Equals')
r = self.client.post(
'StorageCenter/ScStorageProfile/GetList', pf.payload)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
profiles = self._get_json(r)
for profile in profiles:
# Look for the stripped, case insensitive match
return profile
return None
- def create_volume(self, name, size, storage_profile=None):
+ def _find_user_replay_profiles(self):
+ """Find user default profiles.
+
+ Note that this only deals with standard and not cg profiles.
+
+ :return: List of replay profiles.
+ """
+ user_prefs = self._get_user_preferences()
+ if user_prefs:
+ profileids = [profile['instanceId'] for profile in
+ user_prefs['replayProfileList']]
+ return profileids
+ return []
+
+ def _find_daily_replay_profile(self):
+ """Find the system replay profile named "Daily".
+
+ :return: Profile instanceId or None.
+ """
+ pf = self._get_payload_filter()
+ pf.append('scSerialNumber', self.ssn)
+ pf.append('instanceName', 'Daily')
+ r = self.client.post('StorageCenter/ScReplayProfile/GetList',
+ pf.payload)
+ if self._check_result(r):
+ profiles = self._get_json(r)
+ if profiles:
+ return profiles[0]['instanceId']
+ return None
+
    def _find_replay_profiles(self, replay_profile_string):
        """Find our replay profiles.

        Note that if called on volume creation the removeids list can be
        safely ignored.

        :param replay_profile_string: Comma separated list of profile names.
        :return: List replication profiles to use, List to remove.
        :raises VolumeBackendAPIException: If we can't find our profiles.
        """
        addids = []
        removeids = []
        replay_profiles = []
        if replay_profile_string:
            replay_profiles = replay_profile_string.split(',')
        # Most of the time they will not specify this so don't call anything.
        if replay_profiles:
            pf = self._get_payload_filter()
            pf.append('scSerialNumber', self.ssn)
            pf.append('type', 'Standard')
            r = self.client.post('StorageCenter/ScReplayProfile/GetList',
                                 pf.payload)
            if self._check_result(r):
                profiles = self._get_json(r)
                for profile in profiles:
                    if replay_profiles.count(profile['name']) > 0:
                        addids.append(profile['instanceId'])
                    else:
                        # Not one we asked for; flag it for removal if found
                        # in the volume.
                        removeids.append(profile['instanceId'])
            # Check that we've found what we are looking for if anything
            if len(addids) != len(replay_profiles):
                msg = (_('Unable to locate specified replay profiles %s ') %
                       replay_profile_string)
                raise exception.VolumeBackendAPIException(data=msg)

        return addids, removeids
+
+ def update_replay_profiles(self, scvolume, replay_profile_string):
+ """Update our replay profiles.
+
+ If the replay_profile_string is empty we look for the user's default
+ profiles. If those aren't found we look for the Daily profile.
+
+ Note that this is in addition to the CG profiles which we do not touch.
+
+ :param scvolume: SC Volume object.
+ :param replay_profile_string: Comma separated string of replay profile
+ names.
+ :return: True/False.
+ """
+ # Find our replay_profiles.
+ addids, removeids = self._find_replay_profiles(replay_profile_string)
+ # We either found what we were looking for.
+ # If we are clearing out our ids then find a default.
+ if not addids:
+ # if no replay profiles specified we must be clearing out.
+ addids = self._find_user_replay_profiles()
+ if not addids:
+ addids = [self._find_daily_replay_profile()]
+ # Do any removals first.
+ for id in removeids:
+ # We might have added to the addids list after creating removeids.
+ # User preferences or the daily profile could have been added.
+ # If our id is in both lists just skip it and remove it from
+ # The add list.
+ if addids.count(id):
+ addids.remove(id)
+ elif not self._update_volume_profiles(
+ scvolume, addid=None, removeid=id):
+ return False
+ # Add anything new.
+ for id in addids:
+ if not self._update_volume_profiles(
+ scvolume, addid=id, removeid=None):
+ return False
+ return True
+
+ def create_volume(self, name, size, storage_profile=None,
+ replay_profile_string=None):
"""Creates a new volume on the Storage Center.
It will create it in a folder called self.vfname. If self.vfname
This is the cinder volume ID.
:param size: The size of the volume to be created in GB.
:param storage_profile: Optional storage profile to set for the volume.
+ :param replay_profile_string: Optional comma separated list of replay profile names to set for the volume.
:returns: Dell Volume object or None.
"""
LOG.debug('Create Volume %(name)s %(ssn)s %(folder)s %(profile)s',
'ssn': self.ssn,
'folder': self.vfname,
'profile': storage_profile,
+ 'replay': replay_profile_string
})
# Find our folder
profile = self._find_storage_profile(storage_profile)
if storage_profile and profile is None:
msg = _('Storage Profile %s not found.') % storage_profile
- raise exception.VolumeBackendAPIException(
- data=msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ # Find our replay_profiles.
+ addids, removeids = self._find_replay_profiles(replay_profile_string)
# Init our return.
scvolume = None
payload['VolumeFolder'] = self._get_id(folder)
if profile:
payload['StorageProfile'] = self._get_id(profile)
+ # This is a new volume so there is nothing to remove.
+ if addids:
+ payload['ReplayProfileList'] = addids
r = self.client.post('StorageCenter/ScVolume',
payload)
- if r.status_code == 201:
+ # 201 expected.
+ if self._check_result(r):
scvolume = self._get_json(r)
if scvolume:
LOG.info(_LI('Created volume %(instanceId)s: %(name)s'),
# Try one last time to find it before returning.
scvolume = self.find_volume(name)
else:
- LOG.error(_LE('ScVolume create error '
- '%(name)s: %(code)d %(reason)s'),
- {'name': name,
- 'code': r.status_code,
- 'reason': r.reason})
+ LOG.error(_LE('Unable to create volume on SC: %s'), name)
return scvolume
pf.append('volumeFolderPath', vfname)
r = self.client.post('StorageCenter/ScVolume/GetList',
pf.payload)
- if r.status_code != 200:
- LOG.debug('ScVolume GetList error '
- '%(name)s: %(code)d %(reason)s',
- {'name': name,
- 'code': r.status_code,
- 'reason': r.reason})
- else:
+ # 200 expected.
+ if self._check_result(r):
result = self._get_json(r)
# We return None if there was an error and a list if the command
# succeeded. It might be an empty list.
return None
# Look for our volume in our folder.
- vollist = self._get_volume_list(name,
- None,
- True)
+ vollist = self._get_volume_list(name, None, True)
# If an empty list was returned they probably moved the volumes or
# changed the folder name so try again without the folder.
if not vollist:
LOG.debug('Cannot find volume %(n)s in %(v)s. Searching SC.',
{'n': name,
'v': self.vfname})
- vollist = self._get_volume_list(name,
- None,
- False)
+ vollist = self._get_volume_list(name, None, False)
+ # Failover Check.
+ # If an empty list was returned then either there is no such volume
+ # or we are in a failover state. Look for failover volume.
+ if not vollist:
+ LOG.debug('Unable to locate volume. Checking for failover.')
+ # Get our failover name.
+ fn = self._failover_name(name)
+ vollist = self._get_volume_list(fn, None, False)
+ # Same deal as the rest of these. If 0 not found. If greater than
+ # one we have multiple copies and cannot return a valid result.
+ if len(vollist) == 1:
+ # So we are in failover. Rename the volume and move it to our
+ # volume folder.
+ LOG.info(_LI('Found failover volume. Competing failover.'))
+ # Import our found volume. This completes our failover.
+ scvolume = self._import_one(vollist[0], name)
+ if scvolume:
+ LOG.info(_LI('Imported %(fail)s to %(guid)s.'),
+ {'fail': fn,
+ 'guid': name})
+ return scvolume
+ msg = _('Unable to complete import of %s.') % fn
+ raise exception.VolumeBackendAPIException(data=msg)
# If multiple volumes of the same name are found we need to error.
if len(vollist) > 1:
# blow up
- raise exception.VolumeBackendAPIException(
- _('Multiple copies of volume %s found.') % name)
+ msg = _('Multiple copies of volume %s found.') % name
+ raise exception.VolumeBackendAPIException(data=msg)
# We made it and should have a valid volume.
return None if not vollist else vollist[0]
if vol is not None:
r = self.client.delete('StorageCenter/ScVolume/%s'
% self._get_id(vol))
- if r.status_code != 200:
- raise exception.VolumeBackendAPIException(
- _('Error deleting volume '
- '%(ssn)s: %(volume)s: %(code)d %(reason)s') %
- {'ssn': self.ssn,
- 'volume': name,
- 'code': r.status_code,
- 'reason': r.reason})
+ # 200 expected
+ if not self._check_result(r):
+ msg = _('Error deleting volume %(ssn)s: %(volume)s') % {
+ 'ssn': self.ssn,
+ 'volume': name}
+ raise exception.VolumeBackendAPIException(data=msg)
+
# json return should be true or false
return self._get_json(r)
+
+ # If we can't find the volume then it is effectively gone.
LOG.warning(_LW('delete_volume: unable to find volume %s'),
name)
- # If we can't find the volume then it is effectively gone.
return True
def _find_server_folder(self, create=False):
r = self.client.post('StorageCenter/ScPhysicalServer/%s/AddHba'
% self._get_id(scserver),
payload)
- if r.status_code != 200:
- LOG.error(_LE('AddHba error: '
- '%(wwn)s to %(srvname)s : %(code)d %(reason)s'),
+ # 200 expected.
+ if not self._check_result(r):
+ LOG.error(_LE('AddHba error: %(wwn)s to %(srvname)s'),
{'wwn': wwnoriscsiname,
- 'srvname': scserver['name'],
- 'code': r.status_code,
- 'reason': r.reason})
+ 'srvname': scserver['name']})
return False
return True
pf.append('scSerialNumber', self.ssn)
r = self.client.post('StorageCenter/ScServerOperatingSystem/GetList',
pf.payload)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
oslist = self._get_json(r)
for srvos in oslist:
name = srvos.get('name', 'nope')
# Found it return the id
return self._get_id(srvos)
- LOG.warning(_LW('ScServerOperatingSystem GetList return: '
- '%(code)d %(reason)s'),
- {'code': r.status_code,
- 'reason': r.reason})
+ LOG.warning(_LW('Unable to find appropriate OS %s'), osname)
+
return None
def create_server_multiple_hbas(self, wwns):
:param isfc: Boolean indicating whether this is an FC HBA or not.
:returns: Dell server object.
"""
+
+ LOG.info(_LI('Creating server %s'), wwnoriscsiname)
+
scserver = None
payload = {}
payload['Name'] = 'Server_' + wwnoriscsiname
# create our server
r = self.client.post('StorageCenter/ScPhysicalServer',
payload)
- if r.status_code != 201:
- LOG.error(_LE('ScPhysicalServer create error: '
- '%(wwn)s: %(code)d %(reason)s'),
- {'wwn': wwnoriscsiname,
- 'code': r.status_code,
- 'reason': r.reason})
- else:
+ # 201 expected.
+ if self._check_result(r):
# Server was created
+ LOG.info(_LI('SC server created %s'), scserver)
scserver = self._first_result(r)
# Add hba to our server
# Can't have a server without an HBA
self._delete_server(scserver)
scserver = None
+
# Success or failure is determined by the caller
return scserver
pf.append('instanceId', self._get_id(hba['server']))
r = self.client.post('StorageCenter/ScServer/GetList',
pf.payload)
- if r.status_code != 200:
- LOG.error(_LE('ScServer error: %(code)d %(reason)s'),
- {'code': r.status_code,
- 'reason': r.reason})
- else:
+ # 200 expected.
+ if self._check_result(r):
scserver = self._first_result(r)
+
if scserver is None:
- LOG.debug('Server (%s) not found.',
- instance_name)
+ LOG.debug('Server (%s) not found.', instance_name)
return scserver
def _find_serverhba(self, instance_name):
pf.append('instanceName', instance_name)
r = self.client.post('StorageCenter/ScServerHba/GetList',
pf.payload)
- if r.status_code != 200:
- LOG.debug('ScServerHba error: %(code)d %(reason)s',
- {'code': r.status_code,
- 'reason': r.reason})
- else:
+ # 200 expected.
+ if self._check_result(r):
scserverhba = self._first_result(r)
return scserverhba
"""
r = self.client.get('StorageCenter/ScControllerPort/%s/FaultDomainList'
% cportid)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
domains = self._get_json(r)
return domains
- else:
- LOG.debug('FaultDomainList error: %(code)d %(reason)s',
- {'code': r.status_code,
- 'reason': r.reason})
- LOG.error(_LE('Error getting FaultDomainList'))
+
+ LOG.error(_LE('Error getting FaultDomainList for %s'), cportid)
return None
def _find_fc_initiators(self, scserver):
initiators = []
r = self.client.get('StorageCenter/ScServer/%s/HbaList'
% self._get_id(scserver))
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
hbas = self._get_json(r)
for hba in hbas:
wwn = hba.get('instanceName')
wwn is not None):
initiators.append(wwn)
else:
- LOG.debug('HbaList error: %(code)d %(reason)s',
- {'code': r.status_code,
- 'reason': r.reason})
LOG.error(_LE('Unable to find FC initiators'))
- LOG.debug(initiators)
+ LOG.debug('fc_initiators: %s', initiators)
return initiators
def get_volume_count(self, scserver):
"""
r = self.client.get('StorageCenter/ScServer/%s/MappingList'
% self._get_id(scserver))
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
mappings = self._get_json(r)
return len(mappings)
# Panic mildly but do not return 0.
if scvolume.get('active', False):
r = self.client.get('StorageCenter/ScVolume/%s/MappingList'
% self._get_id(scvolume))
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
mappings = self._get_json(r)
- else:
- LOG.debug('MappingList error: %(code)d %(reason)s',
- {'code': r.status_code,
- 'reason': r.reason})
- LOG.error(_LE('Unable to find volume mappings: %s'),
- scvolume.get('name'))
else:
LOG.error(_LE('_find_mappings: volume is not active'))
- LOG.debug(mappings)
+ LOG.info(_LI('Volume mappings for %(name)s: %(mappings)s'),
+ {'name': scvolume.get('name'),
+ 'mappings': mappings})
return mappings
def _find_mapping_profiles(self, scvolume):
mapping_profiles = []
r = self.client.get('StorageCenter/ScVolume/%s/MappingProfileList'
% self._get_id(scvolume))
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
mapping_profiles = self._get_json(r)
else:
- LOG.debug('MappingProfileList error: %(code)d %(reason)s',
- {'code': r.status_code,
- 'reason': r.reason})
+ LOG.error(_LE('Unable to find mapping profiles: %s'),
+ scvolume.get('name'))
LOG.debug(mapping_profiles)
return mapping_profiles
controllerport = None
r = self.client.get('StorageCenter/ScControllerPort/%s'
% cportid)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
controllerport = self._first_result(r)
else:
- LOG.debug('ScControllerPort error: %(code)d %(reason)s',
- {'code': r.status_code,
- 'reason': r.reason})
LOG.error(_LE('Unable to find controller port: %s'),
cportid)
LOG.debug(controllerport)
# TODO(Swanson): We have a function that gets this. Call that.
r = self.client.get('StorageCenter/ScVolume/%s/VolumeConfiguration'
% self._get_id(scvolume))
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
volconfig = self._first_result(r)
controller = volconfig.get('controller')
actvctrl = self._get_id(controller)
else:
- LOG.debug('VolumeConfiguration error: %(code)d %(reason)s',
- {'code': r.status_code,
- 'reason': r.reason})
LOG.error(_LE('Unable to retrieve VolumeConfiguration: %s'),
self._get_id(scvolume))
LOG.debug('activecontroller %s', actvctrl)
def _is_virtualport_mode(self):
isvpmode = False
r = self.client.get('StorageCenter/ScConfiguration/%s' % self.ssn)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
scconfig = self._get_json(r)
if scconfig:
isvpmode = True if (scconfig['iscsiTransportMode'] ==
r = self.client.get('StorageCenter/'
'ScControllerPortIscsiConfiguration/%s'
% cportid)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
controllerport = self._first_result(r)
else:
- LOG.debug('ScControllerPortIscsiConfiguration error: '
- '%(code)d %(reason)s',
- {'code': r.status_code,
- 'reason': r.reason})
LOG.error(_LE('Unable to find controller '
'port iscsi configuration: %s'),
cportid)
# Since we just mapped this and can't find that mapping the world
# is wrong so we raise exception.
raise exception.VolumeBackendAPIException(
- _('Unable to find iSCSI mappings.'))
+ data=_('Unable to find iSCSI mappings.'))
# Make sure we point to the best portal we can. This means it is
# on the active controller and, preferably, up. If it isn't return
r = self.client.post('StorageCenter/ScVolume/%s/MapToServer'
% volumeid,
payload)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
# We just return our mapping
return self._first_result(r)
- # Should not be here.
- LOG.debug('MapToServer error: %(code)d %(reason)s',
- {'code': r.status_code,
- 'reason': r.reason})
+
# Error out
LOG.error(_LE('Unable to map %(vol)s to %(srv)s'),
{'vol': scvolume['name'],
if prosrv is not None and self._get_id(prosrv) == serverid:
r = self.client.delete('StorageCenter/ScMappingProfile/%s'
% self._get_id(profile))
- if (r.status_code != 200 or r.ok is False):
- LOG.debug('ScMappingProfile error: '
- '%(code)d %(reason)s',
- {'code': r.status_code,
- 'reason': r.reason})
- LOG.error(_LE('Unable to unmap Volume %s'),
- volumeid)
- # 1 failed unmap is as good as 100.
- # Fail it and leave
- rtn = False
- break
- LOG.debug('Volume %(vol)s unmapped from %(srv)s',
- {'vol': volumeid,
- 'srv': serverid})
+ # 200 expected.
+ if self._check_result(r):
+ # Check our result in the json.
+ result = self._get_json(r)
+ # EM 15.1 and 15.2 return a boolean directly.
+ # 15.3 on up return it in a dict under 'result'.
+                    if result is True or (isinstance(result, dict) and
+                                          result.get('result')):
+ LOG.debug('Volume %(vol)s unmapped from %(srv)s',
+ {'vol': volumeid,
+ 'srv': serverid})
+ continue
+
+ LOG.error(_LE('Unable to unmap Volume %s'),
+ volumeid)
+ # 1 failed unmap is as good as 100.
+ # Fail it and leave
+ rtn = False
+ break
+ # return true/false.
return rtn
def get_storage_usage(self):
if self.ssn is not None:
r = self.client.get('StorageCenter/StorageCenter/%s/StorageUsage'
% self.ssn)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
storageusage = self._get_json(r)
- else:
- LOG.debug('StorageUsage error: %(code)d %(reason)s',
- {'code': r.status_code,
- 'reason': r.reason})
return storageusage
r = self.client.post('StorageCenter/ScVolume/%s/CreateReplay'
% self._get_id(scvolume),
payload)
- if r.status_code != 200:
- LOG.error(_LE('CreateReplay error: %(code)d %(reason)s'),
- {'code': r.status_code,
- 'reason': r.reason})
- else:
+ # 200 expected.
+ if self._check_result(r):
replay = self._first_result(r)
# Quick double check.
r = self.client.post('StorageCenter/ScReplay/%s/Expire'
% self._get_id(replay),
{})
- if r.status_code != 204:
- LOG.error(_LE('ScReplay Expire error: %(code)d %(reason)s'),
- {'code': r.status_code,
- 'reason': r.reason})
+ # 204 expected.
+ if not self._check_result(r):
return False
# We either couldn't find it or expired it.
return True
- def create_view_volume(self, volname, screplay):
+ def create_view_volume(self, volname, screplay, replay_profile_string):
"""Creates a new volume named volname from the screplay.
:param volname: Name of new volume. This is the cinder volume ID.
:param screplay: Dell replay object from which to make a new volume.
+ :param replay_profile_string: Profiles to be applied to the volume
:returns: Dell volume object or None.
"""
folder = self._find_volume_folder(True)
+ # Find our replay_profiles.
+ addids, removeids = self._find_replay_profiles(replay_profile_string)
+
# payload is just the volume name and folder if we have one.
payload = {}
payload['Name'] = volname
payload['Notes'] = self.notes
if folder is not None:
payload['VolumeFolder'] = self._get_id(folder)
+ if addids:
+ payload['ReplayProfileList'] = addids
r = self.client.post('StorageCenter/ScReplay/%s/CreateView'
% self._get_id(screplay),
payload)
volume = None
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
volume = self._first_result(r)
- else:
- LOG.error(_LE('ScReplay CreateView error: %(code)d %(reason)s'),
- {'code': r.status_code,
- 'reason': r.reason})
if volume is None:
LOG.error(_LE('Unable to create volume %s from replay'),
return volume
- def create_cloned_volume(self, volumename, scvolume):
+ def create_cloned_volume(self, volumename, scvolume, replay_profile_list):
"""Creates a volume named volumename from a copy of scvolume.
This is done by creating a replay and then a view volume from
:param volumename: Name of new volume. This is the cinder volume ID.
:param scvolume: Dell volume object.
+ :param replay_profile_list: List of snapshot profiles.
:returns: The new volume's Dell volume object.
"""
clone = None
60)
if replay is not None:
clone = self.create_view_volume(volumename,
- replay)
+ replay,
+ replay_profile_list)
else:
LOG.error(_LE('Error: unable to snap replay'))
return clone
% self._get_id(scvolume),
payload)
vol = None
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
vol = self._get_json(r)
- else:
- LOG.error(_LE('Error expanding volume '
- '%(name)s: %(code)d %(reason)s'),
- {'name': scvolume['name'],
- 'code': r.status_code,
- 'reason': r.reason})
+
+ # More info might be good.
if vol is not None:
LOG.debug('Volume expanded: %(name)s %(size)s',
{'name': vol['name'],
'size': vol['configuredSize']})
+ else:
+ LOG.error(_LE('Error expanding volume %s.'),
+ scvolume['name'])
return vol
def rename_volume(self, scvolume, name):
r = self.client.post('StorageCenter/ScVolume/%s/Modify'
% self._get_id(scvolume),
payload)
- if r.status_code != 200:
- LOG.error(_LE('Error renaming volume '
- '%(original)s to %(name)s: %(code)d %(reason)s'),
+ # 200 expected.
+ if not self._check_result(r):
+ LOG.error(_LE('Error renaming volume %(original)s to %(name)s'),
{'original': scvolume['name'],
- 'name': name,
- 'code': r.status_code,
- 'reason': r.reason})
+ 'name': name})
return False
return True
'/%s/Modify'
% self._get_id(scvolume),
payload)
- if r.status_code != 200:
+ # 200 expected.
+ if not self._check_result(r):
LOG.error(_LE('Error changing Storage Profile for volume '
- '%(original)s to %(name)s: %(code)d %(reason)s '
- '%(text)s'),
+ '%(original)s to %(name)s'),
{'original': scvolume['name'],
- 'name': storage_profile,
- 'code': r.status_code,
- 'reason': r.reason,
- 'text': r.text})
+ 'name': storage_profile})
return False
return True
"""
r = self.client.get('StorageCenter/StorageCenter/%s/UserPreferences' %
self.ssn)
- if r.status_code != 200:
- LOG.error(_LE('Error getting user preferences: '
- '%(code)d %(reason)s %(text)s'),
- {'code': r.status_code,
- 'reason': r.reason,
- 'text': r.text})
+ # 200 expected.
+ if not self._check_result(r):
return {}
return self._get_json(r)
:param scserver: Dell server object to delete.
:returns: Nothing. Only logs messages.
"""
+ LOG.debug('ScServer delete %s', self._get_id(scserver))
if scserver.get('deleteAllowed') is True:
r = self.client.delete('StorageCenter/ScServer/%s'
% self._get_id(scserver))
- LOG.debug('ScServer %(id)s delete return: %(code)d %(reason)s',
- {'id': self._get_id(scserver),
- 'code': r.status_code,
- 'reason': r.reason})
+ if self._check_result(r):
+ LOG.debug('ScServer deleted.')
else:
LOG.debug('_delete_server: deleteAllowed is False.')
pf.append('Name', name)
r = self.client.post('StorageCenter/ScReplayProfile/GetList',
pf.payload)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
profilelist = self._get_json(r)
if profilelist:
if len(profilelist) > 1:
LOG.error(_LE('Multiple replay profiles under name %s'),
name)
raise exception.VolumeBackendAPIException(
- _('Multiple profiles found.'))
+ data=_('Multiple profiles found.'))
return profilelist[0]
- else:
- LOG.error(_LE('find_replay_profile error %s'), r)
return None
def create_replay_profile(self, name):
payload['Notes'] = self.notes
r = self.client.post('StorageCenter/ScReplayProfile',
payload)
- if r.status_code == 201:
+ # 201 expected.
+ if self._check_result(r):
profile = self._first_result(r)
- else:
- LOG.error(_LE('create_replay_profile failed %s'), r)
return profile
def delete_replay_profile(self, profile):
r = self.client.delete('StorageCenter/ScReplayProfile/%s' %
self._get_id(profile))
# 200 is a good return. Log and leave.
- if r.status_code == 200:
+ if self._check_result(r):
LOG.info(_LI('Profile %s has been deleted.'),
profile.get('name'))
else:
# We failed due to a failure to delete an existing profile.
# This is reason to raise an exception.
- LOG.error(_LE('Unable to delete profile %(cg)s : %(reason)s'),
- {'cg': profile.get('name'),
- 'reason': r})
+ LOG.error(_LE('Unable to delete profile %s.'), profile.get('name'))
raise exception.VolumeBackendAPIException(
- _('Error deleting replay profile.'))
+ data=_('Error deleting replay profile.'))
def _get_volume_configuration(self, scvolume):
"""Get the ScVolumeConfiguration object.
"""
r = self.client.get('StorageCenter/ScVolume/%s/VolumeConfiguration' %
self._get_id(scvolume))
- if r.status_code == 200:
- LOG.debug('get_volume_configuration %s', r)
+ # 200 expected.
+ if self._check_result(r):
return self._first_result(r)
return None
# Make sure it isn't one we want removed and that we
# haven't already added it. (IE it isn't the addid.)
if (profileid != removeid and
- newprofilelist.count(profileid) == 0):
+ newprofilelist.count(profileid) == 0):
newprofilelist.append(profileid)
# Update our volume configuration.
payload = {}
self._get_id(scvolume),
profilelist,
r)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
return True
return False
"""
r = self.client.get('StorageCenter/ScReplayProfile/%s/VolumeList' %
profileid)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
vols = self._get_json(r)
for vol in vols:
if (vol.get('active') is not True or
- vol.get('replayAllowed') is not True):
+ vol.get('replayAllowed') is not True):
self._init_volume(vol)
def snap_cg_replay(self, profile, replayid, expire):
'CreateReplay'
% self._get_id(profile),
payload)
- # 204 appears to be the correct return.
- if r.status_code == 204:
- LOG.debug('CreateReplay result %s', r)
+ # 204 expected.
+ if self._check_result(r):
+ LOG.info(_LI('CreateReplay success %s'), replayid)
return True
- LOG.error(_LE('snap_cg error: %(code)d %(reason)s'),
- {'code': r.status_code,
- 'reason': r.reason})
return False
def _find_sc_cg(self, profile, replayid):
r = self.client.get(
'StorageCenter/ScReplayProfile/%s/ConsistencyGroupList'
% self._get_id(profile))
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
cglist = self._get_json(r)
if cglist and isinstance(cglist, list):
for cg in cglist:
r = self.client.post('StorageCenter/ScReplay/%s/Expire'
% instanceid,
{})
- if r.status_code != 204:
- LOG.error(_LE('ScReplay Expire error: %(code)d %(reason)s'),
- {'code': r.status_code,
- 'reason': r.reason})
+ # 204 expected.
+ if not self._check_result(r):
return False
# We either couldn't find it or expired it.
return True
if not self.consisgroups:
msg = _('Dell API 2.1 or later required'
' for Consistency Group support')
- raise NotImplementedError(msg)
+            raise NotImplementedError(msg)
def _size_to_gb(self, spacestring):
"""Splits a SC size string into GB and a remainder.
except Exception:
# We received an invalid size string. Blow up.
raise exception.VolumeBackendAPIException(
- _('Error retrieving volume size'))
+ data=_('Error retrieving volume size'))
+
+ def _import_one(self, scvolume, newname):
+ # Find our folder
+ folder = self._find_volume_folder(True)
+
+ # If we actually have a place to put our volume create it
+ if folder is None:
+ LOG.warning(_LW('Unable to create folder %s'),
+ self.vfname)
+
+ # Rename and move our volume.
+ payload = {}
+ payload['Name'] = newname
+ if folder:
+ payload['VolumeFolder'] = self._get_id(folder)
+
+ r = self.client.put('StorageCenter/ScVolume/%s' %
+ self._get_id(scvolume),
+ payload)
+ # 200 expected.
+ if self._check_result(r):
+ return self._get_json(r)
+ return None
def manage_existing(self, newname, existing):
"""Finds the volume named existing and renames it.
sz, rem = self._size_to_gb(vollist[0]['configuredSize'])
if rem > 0:
raise exception.VolumeBackendAPIException(
- _('Volume size must multiple of 1 GB.'))
+ data=_('Volume size must multiple of 1 GB.'))
# We only want to grab detached volumes.
mappings = self._find_mappings(vollist[0])
if len(mappings) > 0:
- raise exception.VolumeBackendAPIException(
- _('Volume is attached to a server. (%s)') % existing)
-
- # Find our folder
- folder = self._find_volume_folder(True)
+ msg = _('Volume is attached to a server. (%s)') % existing
+ raise exception.VolumeBackendAPIException(data=msg)
- # If we actually have a place to put our volume create it
- if folder is None:
- LOG.warning(_LW('Unable to create folder %s'),
- self.vfname)
+ scvolume = self._import_one(vollist[0], newname)
- # Rename and move our volume.
- payload = {}
- payload['Name'] = newname
- if folder:
- payload['VolumeFolder'] = self._get_id(folder)
-
- r = self.client.put('StorageCenter/ScVolume/%s' %
- self._get_id(vollist[0]),
- payload)
- if r.status_code != 200:
- LOG.error(_LE('ScVolume error on rename: %(code)d %(reason)s'),
- {'code': r.status_code,
- 'reason': r.reason})
- raise exception.VolumeBackendAPIException(
- _('Unable to manage volume %s') % existing)
+ if not scvolume:
+ msg = _('Unable to manage volume %s') % existing
+ raise exception.VolumeBackendAPIException(data=msg)
elif count > 1:
raise exception.ManageExistingInvalidReference(
- _('Volume not unique. (%s)') % existing)
+ existing_ref=existing, reason=_('Volume not unique.'))
else:
raise exception.ManageExistingInvalidReference(
- _('Volume not found. (%s)') % existing)
+ existing_ref=existing, reason=_('Volume not found.'))
def get_unmanaged_volume_size(self, existing):
"""Looks up the volume named existing and returns its size string.
sz, rem = self._size_to_gb(vollist[0]['configuredSize'])
if rem > 0:
raise exception.VolumeBackendAPIException(
- _('Volume size must multiple of 1 GB.'))
+ data=_('Volume size must multiple of 1 GB.'))
return sz
elif count > 1:
raise exception.ManageExistingInvalidReference(
- _('Volume not unique. (%s)') % existing)
+ existing_ref=existing, reason=_('Volume not unique.'))
else:
raise exception.ManageExistingInvalidReference(
- _('Volume not found. (%s)') % existing)
+ existing_ref=existing, reason=_('Volume not found.'))
def unmanage(self, scvolume):
"""Unmanage our volume.
r = self.client.put('StorageCenter/ScVolume/%s' %
self._get_id(scvolume),
payload)
- if r.status_code == 200:
+ # 200 expected.
+ if self._check_result(r):
LOG.info(_LI('Volume %s unmanaged.'), scvolume['name'])
else:
- LOG.error(_LE('ScVolume error on rename: %(code)d %(reason)s'),
- {'code': r.status_code,
- 'reason': r.reason})
- raise exception.VolumeBackendAPIException(
- _('Unable to rename volume %(existing)s to %(newname)s') %
- {'existing': scvolume['name'],
- 'newname': newname})
+ msg = _('Unable to rename volume %(existing)s to %(newname)s') % {
+ 'existing': scvolume['name'],
+ 'newname': newname}
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ def _find_qos(self, qosnode):
+ """Find Dell SC QOS Node entry for replication.
+
+ :param qosnode: Name of qosnode.
+ :return: scqos node object.
+ """
+ pf = self._get_payload_filter()
+ pf.append('scSerialNumber', self.ssn)
+ pf.append('name', qosnode)
+ r = self.client.post('StorageCenter/ScReplicationQosNode/GetList',
+ pf.payload)
+ # 200 expected.
+ if self._check_result(r):
+ nodes = self._get_json(r)
+ if len(nodes) > 0:
+ return nodes[0]
+ else:
+ payload = {}
+ payload['LinkSpeed'] = '1 Gbps'
+ payload['Name'] = qosnode
+ payload['StorageCenter'] = self.ssn
+ payload['BandwidthLimited'] = False
+ r = self.client.post('StorageCenter/ScReplicationQosNode',
+ payload)
+ # 201 expected.
+ if self._check_result(r):
+ return self._get_json(r)
+
+ LOG.error(_LE('Unable to find or create QoS Node named %s'), qosnode)
+ raise exception.VolumeBackendAPIException(
+ data=_('Failed to find QoSnode'))
+
+ def update_replicate_active_replay(self, scvolume, replactive):
+ """Enables or disables replicating the active replay for given vol.
+
+ :param scvolume: SC Volume object.
+ :param replactive: True or False
+ :return: True or False
+ """
+ r = self.client.get('StorageCenter/ScVolume/%s/ReplicationSourceList' %
+ self._get_id(scvolume))
+ # 200 expected.
+ if self._check_result(r):
+ replications = self._get_json(r)
+ for replication in replications:
+ if replication['replicateActiveReplay'] != replactive:
+ payload = {'ReplicateActiveReplay': replactive}
+ r = self.client.put('StorageCenter/ScReplication/%s' %
+ replication['instanceId'],
+ payload)
+ if not self._check_result(r):
+ return False
+ return True
+
+ def get_screplication(self, scvolume, destssn):
+ """Find the screplication object for the volume on the dest backend.
+
+        :param scvolume: Dell SC Volume object.
+        :param destssn: SSN of the destination Storage Center.
+        :return: Dell SC replication object or None if not found.
+ """
+ LOG.debug('get_screplication')
+ r = self.client.get('StorageCenter/ScVolume/%s/ReplicationSourceList' %
+ self._get_id(scvolume))
+ # 200 expected.
+ if self._check_result(r):
+ replications = self._get_json(r)
+ for replication in replications:
+ # So we need to find the replication we are looking for.
+ LOG.debug(replication)
+ LOG.debug('looking for %s', destssn)
+ if replication.get('destinationScSerialNumber') == destssn:
+ return replication
+ # Unable to locate replication.
+ LOG.warning(_LW('Unable to locate replication %(vol)s to %(ssn)s'),
+ {'vol': scvolume.get('name'),
+ 'ssn': destssn})
+ return None
+
+ def delete_replication(self, scvolume, destssn):
+ """Deletes the SC replication object from scvolume to the destssn.
+
+ :param scvolume: Dell SC Volume object.
+        :param destssn: SC the replication is replicating to.
+ :return: True on success. False on fail.
+ """
+ replication = self.get_screplication(scvolume, destssn)
+ if replication:
+ # TODO(tswanson): Sort out why we cannot send down attributes.
+ r = self.client.delete('StorageCenter/ScReplication/%s' %
+ self._get_id(replication))
+ if self._check_result(r):
+ # check that we whacked the dest volume
+                LOG.info(_LI('Replication %(vol)s to %(dest)s deleted.'),
+ {'vol': scvolume.get('name'),
+ 'dest': destssn})
+
+ return True
+ else:
+ LOG.error(_LE('Unable to delete replication for '
+ '%(vol)s to %(dest)s.'),
+ {'vol': scvolume.get('name'),
+ 'dest': destssn})
+ return False
+
+ def _repl_name(self, name):
+ return self.repl_prefix + name
+
+ def _failover_name(self, name):
+ return self.failover_prefix + name
+
+ def _get_disk_folder(self, ssn, foldername):
+ # TODO(tswanson): Harden this.
+ diskfolder = None
+ # If no folder name we just pass through this.
+ if foldername:
+ pf = self._get_payload_filter()
+ pf.append('scSerialNumber', ssn)
+ pf.append('name', foldername)
+ r = self.client.post('StorageCenter/ScDiskFolder/GetList',
+ pf.payload)
+ if self._check_result(r):
+ try:
+ # Go for broke.
+ diskfolder = self._get_json(r)[0]
+ except Exception:
+ # We just log this as an error and return nothing.
+ LOG.error(_LE('Unable to find '
+ 'disk folder %(name)s on %(ssn)s'),
+ {'name': foldername,
+ 'ssn': ssn})
+ return diskfolder
+
+ def create_replication(self, scvolume, destssn, qosnode,
+ synchronous, diskfolder, replicate_active):
+ """Create repl from scvol to destssn.
+
+ :param scvolume: Dell SC volume object.
+ :param destssn: Destination SSN string.
+ :param qosnode: Name of Dell SC QOS Node for this replication.
+ :param synchronous: Boolean.
+ :param diskfolder: optional disk folder name.
+ :param replicate_active: replicate active replay.
+ :return: Dell SC replication object.
+ """
+ screpl = None
+ ssn = self.find_sc(int(destssn))
+ payload = {}
+ payload['DestinationStorageCenter'] = ssn
+ payload['QosNode'] = self._get_id(self._find_qos(qosnode))
+ payload['SourceVolume'] = self._get_id(scvolume)
+ payload['StorageCenter'] = self.find_sc()
+ # Have to replicate the active replay.
+ payload['ReplicateActiveReplay'] = replicate_active or synchronous
+ payload['Type'] = 'Synchronous' if synchronous else 'Asynchronous'
+ destinationvolumeattributes = {}
+ destinationvolumeattributes['CreateSourceVolumeFolderPath'] = True
+ destinationvolumeattributes['Notes'] = self.notes
+ destinationvolumeattributes['Name'] = self._repl_name(scvolume['name'])
+ # Find our disk folder. If they haven't specified one this will just
+ # drop through. If they have specified one and it can't be found the
+ # error will be logged but this will keep going.
+ df = self._get_disk_folder(destssn, diskfolder)
+ if df:
+ destinationvolumeattributes['DiskFolder'] = self._get_id(df)
+ payload['DestinationVolumeAttributes'] = destinationvolumeattributes
+ r = self.client.post('StorageCenter/ScReplication', payload)
+ # 201 expected.
+ if self._check_result(r):
+ LOG.info(_LI('Replication created for %(volname)s to %(destsc)s'),
+ {'volname': scvolume.get('name'),
+ 'destsc': destssn})
+ screpl = self._get_json(r)
+
+ # Check we did something.
+ if not screpl:
+ # Failed to launch. Inform user. Throw.
+ LOG.error(_LE('Unable to replicate %(volname)s to %(destsc)s'),
+ {'volname': scvolume.get('name'),
+ 'destsc': destssn})
+ return screpl
+
+ def pause_replication(self, scvolume, destssn):
+ # destssn should probably be part of the object.
+ replication = self.get_screplication(scvolume, destssn)
+ if replication:
+ r = self.client.post('StorageCenter/ScReplication/%s/Pause' %
+ self._get_id(replication), {})
+ if self._check_result(r):
+ return True
+ return False
+
+ def resume_replication(self, scvolume, destssn):
+ # destssn should probably be part of the object.
+ replication = self.get_screplication(scvolume, destssn)
+ if replication:
+ r = self.client.post('StorageCenter/ScReplication/%s/Resume' %
+ self._get_id(replication), {})
+ if self._check_result(r):
+ return True
+ return False
+
+ def find_repl_volume(self, guid, destssn, instance_id=None):
+ # Do a normal volume search.
+ pf = self._get_payload_filter()
+ pf.append('scSerialNumber', destssn)
+ pf.append('ReplicationDestination', True)
+ # There is a chance we know the exact volume. If so then use that.
+ if instance_id:
+ pf.append('instanceId', instance_id)
+ else:
+ # Try the name.
+ pf.append('Name', self.repl_prefix + guid)
+ r = self.client.post('StorageCenter/ScVolume/GetList',
+ pf.payload)
+ if self._check_result(r):
+ volumes = self._get_json(r)
+ if len(volumes) == 1:
+ return volumes[0]
+ return None
+
+ def _remove_mappings(self, scvol):
+ """Peels all the mappings off of scvol.
+
+        :param scvol: Dell SC Volume object.
+        :return: True/False on unmap success/failure; None if scvol is None.
+ """
+ if scvol:
+ r = self.client.post('StorageCenter/ScVolume/%s/Unmap' %
+ self._get_id(scvol),
+ {})
+ return self._check_result(r)
+ return None
+
+ def break_replication(self, volumename, destssn):
+ """This just breaks the replication.
+
+ If we find the source we just delete the replication. If the source
+ is down then we find the destination and unmap it. Fail pretty much
+ every time this goes south.
+
+ :param volumename:
+ :param destssn:
+ :return: True False
+ """
+ ret = False
+ replid = None
+ scvolume = self.find_volume(volumename)
+ screplication = self.get_screplication(scvolume, destssn)
+ # if we got our replication volume we can do this nicely.
+ if screplication:
+ replid = screplication['destinationVolume']['instanceId']
+ screplvol = self.find_repl_volume(volumename, destssn, replid)
+ # delete_replication fails to delete replication without also
+ # stuffing it into the recycle bin.
+ # Instead we try to unmap the destination volume which will break
+ # the replication but leave the replication object on the SC.
+ ret = self._remove_mappings(screplvol)
+ # If the volume is free of replication.
+ if ret:
+ # Move and rename it.
+ ret = self.rename_volume(screplvol,
+ self._failover_name(volumename))
+ # Try to kill mappings on the source.
+ # We don't care that this succeeded or failed. Just move on.
+ self._remove_mappings(scvolume)
+
+ return ret
class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
- driver.ExtendVD, driver.SnapshotVD, driver.BaseVD):
+ driver.ExtendVD, driver.ReplicaV2VD,
+ driver.SnapshotVD, driver.BaseVD):
def __init__(self, *args, **kwargs):
super(DellCommonDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(san_opts)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'Dell'
+ self.backends = self.configuration.safe_get('replication_device')
+ self.replication_enabled = True if self.backends else False
+ self.is_direct_connect = False
def _bytes_to_gb(self, spacestring):
"""Space is returned in a string like ...
"""Validates the configuration information."""
with self._client.open_connection() as api:
api.find_sc()
+ self.is_direct_connect = api.is_direct_connect
+ if self.is_direct_connect and self.replication_enabled:
+ msg = _('Dell Cinder driver configuration error replication '
+ 'not supported with direct connect.')
+ raise exception.InvalidHost(reason=msg)
+
+ if self.replication_enabled:
+ # Check that our replication destinations are available.
+ # TODO(tswanson): Check if we need a diskfolder. (Or not.)
+ # TODO(tswanson): Can we check that the backend specifies
+ # TODO(tswanson): the same ssn as target_device_id.
+ for backend in self.backends:
+ replssn = backend['target_device_id']
+ try:
+ # Just do a find_sc on it. If it raises we catch
+ # that and raise with a correct exception.
+ api.find_sc(int(replssn))
+ except exception.VolumeBackendAPIException:
+ msg = _('Dell Cinder driver configuration error '
+ 'replication_device %s not found') % replssn
+ raise exception.InvalidHost(reason=msg)
def _get_volume_extra_specs(self, volume):
"""Gets extra specs for the given volume."""
:param api: Dell SC API opbject.
:param scvolume: Dell SC Volume object.
:param volume: Cinder Volume object.
- :return: Nothing.
+ :returns: Nothing.
"""
if scvolume and volume.get('consistencygroup_id'):
profile = api.find_replay_profile(
if profile:
api.update_cg_volumes(profile, [volume])
+ def _do_repl(self, api, volume):
+ """Checks if we can do replication.
+
+ Need the extra spec set and we have to be talking to EM.
+
+ :param api: Dell REST API object.
+ :param volume: Cinder Volume object.
+ :return: Boolean (True if replication enabled), Boolean (True if
+ replication type is sync.
+ """
+ do_repl = False
+ sync = False
+ if not self.is_direct_connect:
+ specs = self._get_volume_extra_specs(volume)
+ do_repl = specs.get('replication_enabled') == '<is> True'
+ sync = specs.get('replication_type') == '<in> sync'
+ return do_repl, sync
+
+ def _create_replications(self, api, volume, scvolume):
+ """Creates any appropriate replications for a given volume.
+
+ :param api: Dell REST API object.
+ :param volume: Cinder volume object.
+ :param scvolume: Dell Storage Center Volume object.
+ :return: model_update
+ """
+ # Replication V2
+ # for now we assume we have an array named backends.
+ replication_driver_data = None
+ # Replicate if we are supposed to.
+ do_repl, sync = self._do_repl(api, volume)
+        if do_repl:
+            # Check if we are to replicate the active replay or not.
+            # Extra specs are per-volume, so look them up once, not per
+            # backend.
+            specs = self._get_volume_extra_specs(volume)
+            replact = specs.get('replication:activereplay') == '<is> True'
+            for backend in self.backends:
+ if not api.create_replication(scvolume,
+ backend['target_device_id'],
+ backend.get('qosnode',
+ 'cinderqos'),
+ sync,
+ backend.get('diskfolder', None),
+ replact):
+ # Create replication will have printed a better error.
+ msg = _('Replication %(name)s to %(ssn)s failed.') % {
+ 'name': volume['id'],
+ 'ssn': backend['target_device_id']}
+ raise exception.VolumeBackendAPIException(data=msg)
+ if not replication_driver_data:
+ replication_driver_data = backend['target_device_id']
+ else:
+ replication_driver_data += ','
+ replication_driver_data += backend['target_device_id']
+ # If we did something return model update.
+ model_update = {}
+ if replication_driver_data:
+ model_update = {'replication_status': 'enabled',
+ 'replication_driver_data': replication_driver_data}
+ return model_update
+
def create_volume(self, volume):
"""Create a volume."""
+ model_update = {}
# We use id as our name as it is unique.
volume_name = volume.get('id')
+ # Look for our volume
volume_size = volume.get('size')
# See if we have any extra specs.
specs = self._get_volume_extra_specs(volume)
storage_profile = specs.get('storagetype:storageprofile')
+ replay_profile_string = specs.get('storagetype:replayprofiles')
LOG.debug('Creating volume %(name)s of size %(size)s',
{'name': volume_name,
if api.find_sc():
scvolume = api.create_volume(volume_name,
volume_size,
- storage_profile)
+ storage_profile,
+ replay_profile_string)
+
# Update Consistency Group
self._add_volume_to_consistency_group(api, scvolume, volume)
+
+ # Create replications. (Or not. It checks.)
+ model_update = self._create_replications(api, volume, scvolume)
+
except Exception:
+ # if we actually created a volume but failed elsewhere
+ # clean up the volume now.
+ if scvolume:
+ api.delete_volume(volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
- volume['name'])
+ volume_name)
if scvolume is None:
raise exception.VolumeBackendAPIException(
- _('Unable to create volume'))
+ data=_('Unable to create volume'))
+
+ return model_update
+
+ def _split(self, replication_driver_data):
+ ssnstrings = []
+ if replication_driver_data:
+            for segment in replication_driver_data.split(','):
+                ssnstring = segment.strip()
+ if ssnstring:
+ ssnstrings.append(ssnstring)
+ return ssnstrings
+
+ def _delete_replications(self, api, volume):
+ """Delete replications associated with a given volume.
+
+ We should be able to roll through the replication_driver_data list
+ of SSNs and delete replication objects between them and the source
+ volume.
+
+ :param api: Dell REST API object.
+ :param volume: Cinder Volume object
+ :return:
+ """
+ do_repl, sync = self._do_repl(api, volume)
+ if do_repl:
+ volume_name = volume.get('id')
+ scvol = api.find_volume(volume_name)
+ replication_driver_data = volume.get('replication_driver_data')
+ # This is just a string of ssns separated by commas.
+ ssnstrings = self._split(replication_driver_data)
+ # Trundle through these and delete them all.
+ for ssnstring in ssnstrings:
+ ssn = int(ssnstring)
+ if not api.delete_replication(scvol, ssn):
+ LOG.warning(_LW('Unable to delete replication of '
+ 'Volume %(vname)s to Storage Center '
+ '%(sc)s.'),
+ {'vname': volume_name,
+ 'sc': ssnstring})
+ # If none of that worked or there was nothing to do doesn't matter.
+ # Just move on.
def delete_volume(self, volume):
deleted = False
with self._client.open_connection() as api:
try:
if api.find_sc():
+ self._delete_replications(api, volume)
deleted = api.delete_volume(volume_name)
except Exception:
with excutils.save_and_reraise_exception():
volume_name)
snapshot['status'] = 'error_creating'
- raise exception.VolumeBackendAPIException(
- _('Failed to create snapshot %s') %
- snapshot_id)
+ msg = _('Failed to create snapshot %s') % snapshot_id
+ raise exception.VolumeBackendAPIException(data=msg)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other volume's snapshot on appliance."""
+ model_update = {}
scvolume = None
src_volume_name = snapshot.get('volume_id')
# This snapshot could have been created on its own or as part of a
snapshot_id)
if replay is not None:
volume_name = volume.get('id')
- scvolume = api.create_view_volume(volume_name,
- replay)
+ # See if we have any extra specs.
+ specs = self._get_volume_extra_specs(volume)
+ replay_profile_string = specs.get(
+ 'storagetype:replayprofiles')
+ scvolume = api.create_view_volume(
+ volume_name, replay, replay_profile_string)
+
# Update Consistency Group
self._add_volume_to_consistency_group(api,
scvolume,
volume)
+ # Replicate if we are supposed to.
+ model_update = self._create_replications(api,
+ volume,
+ scvolume)
+
except Exception:
+ # Clean up after ourselves.
+ if scvolume:
+ api.delete_volume(volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
{'vol': volume_name,
'snap': snapshot_id})
else:
- raise exception.VolumeBackendAPIException(
- _('Failed to create volume %s') % volume_name)
+ msg = _('Failed to create volume %s') % volume_name
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ return model_update
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
+ model_update = {}
scvolume = None
src_volume_name = src_vref.get('id')
volume_name = volume.get('id')
if api.find_sc():
srcvol = api.find_volume(src_volume_name)
if srcvol is not None:
- scvolume = api.create_cloned_volume(volume_name,
- srcvol)
+ # See if we have any extra specs.
+ specs = self._get_volume_extra_specs(volume)
+ replay_profile_string = specs.get(
+ 'storagetype:replayprofiles')
+ # Create our volume
+ scvolume = api.create_cloned_volume(
+ volume_name, srcvol, replay_profile_string)
+
# Update Consistency Group
self._add_volume_to_consistency_group(api,
scvolume,
volume)
+ # Replicate if we are supposed to.
+ model_update = self._create_replications(api,
+ volume,
+ scvolume)
except Exception:
+ # Clean up after ourselves.
+ if scvolume:
+ api.delete_volume(volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
{'vol': volume_name,
'src': src_volume_name})
else:
- raise exception.VolumeBackendAPIException(
- _('Failed to create volume %s') % volume_name)
+ msg = _('Failed to create volume %s') % volume_name
+ raise exception.VolumeBackendAPIException(data=msg)
+ return model_update
def delete_snapshot(self, snapshot):
"""delete_snapshot"""
return
# if we are here things went poorly.
snapshot['status'] = 'error_deleting'
- raise exception.VolumeBackendAPIException(
- _('Failed to delete snapshot %s') % snapshot_id)
+ msg = _('Failed to delete snapshot %s') % snapshot_id
+ raise exception.VolumeBackendAPIException(data=msg)
def create_export(self, context, volume, connector):
"""Create an export of a volume.
The volume exists on creation and will be visible on
initialize connection. So nothing to do here.
"""
+ # TODO(tswanson): Move mapping code here.
pass
def ensure_export(self, context, volume):
LOG.error(_LE('Failed to ensure export of volume %s'),
volume_name)
if scvolume is None:
- raise exception.VolumeBackendAPIException(
- _('Unable to find volume %s') % volume_name)
+ msg = _('Unable to find volume %s') % volume_name
+ raise exception.VolumeBackendAPIException(data=msg)
def remove_export(self, context, volume):
"""Remove an export of a volume.
if api.expand_volume(scvolume, new_size) is not None:
return
# If we are here nothing good happened.
- raise exception.VolumeBackendAPIException(
- _('Unable to extend volume %s') % volume_name)
+ msg = _('Unable to extend volume %s') % volume_name
+ raise exception.VolumeBackendAPIException(data=msg)
def get_volume_stats(self, refresh=False):
"""Get volume status.
freespacegb = self._bytes_to_gb(freespace)
data['free_capacity_gb'] = freespacegb
data['QoS_support'] = False
+ data['replication_enabled'] = self.replication_enabled
+ if self.replication_enabled:
+ data['replication_type'] = ['async', 'sync']
+ data['replication_count'] = len(self.backends)
+
self._stats = data
LOG.debug('Total cap %(total)s Free cap %(free)s',
{'total': data['total_capacity_gb'],
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
- :return model_update to update DB with any needed changes
+ :returns: model_update to update DB with any needed changes
"""
# We use id as our volume name so we need to rename the backend
# volume to the original volume name.
scvolume = api.find_volume(current_name)
if (scvolume and
api.rename_volume(scvolume, original_volume_name)):
- model_update = {'_name_id': None}
+ # Replicate if we are supposed to.
+ model_update = self._create_replications(api,
+ new_volume,
+ scvolume)
+ model_update['_name_id'] = None
+
return model_update
# The world was horrible to us so we should error and leave.
LOG.error(_LE('Unable to rename the logical volume for volume: %s'),
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be created.
- :return: Nothing on success.
+ :returns: Nothing on success.
:raises: VolumeBackendAPIException
"""
gid = group['id']
if cgroup:
LOG.info(_LI('Created Consistency Group %s'), gid)
return
- raise exception.VolumeBackendAPIException(
- _('Unable to create consistency group %s') % gid)
+ msg = _('Unable to create consistency group %s') % gid
+ raise exception.VolumeBackendAPIException(data=msg)
def delete_consistencygroup(self, context, group, volumes):
"""Delete the Dell SC profile associated with this consistency group.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be created.
- :return: Updated model_update, volumes.
+ :returns: Updated model_update, volumes.
"""
gid = group['id']
with self._client.open_connection() as api:
:param group: the dictionary of the consistency group to be updated.
:param add_volumes: a list of volume dictionaries to be added.
:param remove_volumes: a list of volume dictionaries to be removed.
- :return model_update, add_volumes_update, remove_volumes_update
+ :returns: model_update, add_volumes_update, remove_volumes_update
model_update is a dictionary that the driver wants the manager
to update upon a successful return. If None is returned, the manager
# we need nothing updated above us so just return None.
return None, None, None
# Things did not go well so throw.
- raise exception.VolumeBackendAPIException(
- _('Unable to update consistency group %s') % gid)
+ msg = _('Unable to update consistency group %s') % gid
+ raise exception.VolumeBackendAPIException(data=msg)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Takes a snapshot of the consistency group.
:param context: the context of the caller.
:param cgsnapshot: Information about the snapshot to take.
- :return: Updated model_update, snapshots.
+ :returns: Updated model_update, snapshots.
:raises: VolumeBackendAPIException.
"""
cgid = cgsnapshot['consistencygroup_id']
else:
LOG.error(_LE('Cannot find Consistency Group %s'), cgid)
- raise exception.VolumeBackendAPIException(
- _('Unable to snap Consistency Group %s') % cgid)
+ msg = _('Unable to snap Consistency Group %s') % cgid
+ raise exception.VolumeBackendAPIException(data=msg)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot.
:param context: the context of the caller.
:param cgsnapshot: Information about the snapshot to delete.
- :return: Updated model_update, snapshots.
+ :returns: Updated model_update, snapshots.
:raises: VolumeBackendAPIException.
"""
cgid = cgsnapshot['consistencygroup_id']
{'ss': snapshotid,
'pro': profile})
if not api.delete_cg_replay(profile, snapshotid):
- raise exception.VolumeBackendAPIException(
- _('Unable to delete Consistency Group snapshot %s') %
- snapshotid)
+ msg = (_('Unable to delete Consistency Group snapshot %s')
+ % snapshotid)
+ raise exception.VolumeBackendAPIException(data=msg)
snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
context, snapshotid)
if existing_ref.get('source-name') or existing_ref.get('source-id'):
with self._client.open_connection() as api:
api.manage_existing(volume['id'], existing_ref)
+ # Replicate if we are supposed to.
+ scvolume = api.find_volume(volume['id'])
+ model_update = self._create_replications(api, volume, scvolume)
+ if model_update:
+ return model_update
else:
+ msg = _('Must specify source-name or source-id.')
raise exception.ManageExistingInvalidReference(
- _('Must specify source-name or source-id. (%s)') %
- existing_ref)
+ existing_ref=existing_ref, reason=msg)
+ # Only return a model_update if we have replication info to add.
+ return None
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
with self._client.open_connection() as api:
return api.get_unmanaged_volume_size(existing_ref)
else:
+ msg = _('Must specify source-name or source-id.')
raise exception.ManageExistingInvalidReference(
- _('Must specify source-name or source-id. (%s)') %
- existing_ref)
+ existing_ref=existing_ref, reason=msg)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
if scvolume:
api.unmanage(scvolume)
+    def _get_retype_spec(self, diff, volume_name, specname, spectype):
+        """Helper function to get current and requested spec.
+
+        :param diff: A difference dictionary.
+        :param volume_name: The volume name we are working with.
+        :param specname: The pretty name of the parameter.
+        :param spectype: The actual spec string.
+        :return: current, requested spec.
+        :raises: VolumeBackendAPIException
+        """
+        specvalues = diff['extra_specs'].get(spectype)
+        if not specvalues:
+            # No change requested for this spec type.
+            LOG.info(_LI('Retype was to same Storage Profile.'))
+            return None, None
+
+        if len(specvalues) != 2:
+            msg = _('Unable to retype %(specname)s, expected to receive '
+                    'current and requested %(spectype)s values. Value '
+                    'received: %(spec)s') % {'specname': specname,
+                                             'spectype': spectype,
+                                             'spec': specvalues}
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        current, requested = specvalues
+        if current != requested:
+            LOG.debug('Retyping volume %(vol)s to use %(specname)s '
+                      '%(spec)s.',
+                      {'vol': volume_name,
+                       'specname': specname,
+                       'spec': requested})
+        return current, requested
+
    def retype(self, ctxt, volume, new_type, diff, host):
        """Convert the volume to be of the new type.
        host['host'] is its name, and host['capabilities'] is a
        dictionary of its reported capabilities (Not Used).
        """
-        # We currently only support retyping for the Storage Profile extra spec
+        # model_update is returned (instead of a plain True) only when the
+        # replication state of the volume changed below.
+        model_update = None
+        # Any spec changes?
        if diff['extra_specs']:
-            storage_profiles = diff['extra_specs'].get(
-                'storagetype:storageprofile')
-            if storage_profiles:
-                if len(storage_profiles) != 2:
-                    LOG.warning(_LW('Unable to retype Storage Profile, '
-                                    'expected to receive current and '
-                                    'requested storagetype:storageprofile '
-                                    'values. Value received: %s'),
-                                storage_profiles)
+            volume_name = volume.get('id')
+            with self._client.open_connection() as api:
+                try:
+                    # Get our volume
+                    scvolume = api.find_volume(volume_name)
+                    if scvolume is None:
+                        LOG.error(_LE('Retype unable to find volume %s.'),
+                                  volume_name)
+                        return False
+                    # Check our specs.
+                    # Storage profiles.
+                    current, requested = (
+                        self._get_retype_spec(diff, volume_name,
+                                              'Storage Profile',
+                                              'storagetype:storageprofile'))
+                    # if there is a change and it didn't work fast fail.
+                    if (current != requested and not
+                            api.update_storage_profile(scvolume, requested)):
+                        LOG.error(_LE('Failed to update storage profile'))
+                        return False
+
+                    # Replay profiles.
+                    current, requested = (
+                        self._get_retype_spec(diff, volume_name,
+                                              'Replay Profiles',
+                                              'storagetype:replayprofiles'))
+                    # if there is a change and it didn't work fast fail.
+                    if requested and not api.update_replay_profiles(scvolume,
+                                                                    requested):
+                        LOG.error(_LE('Failed to update replay profiles'))
+                        return False
+
+                    # Replication_enabled.
+                    current, requested = (
+                        self._get_retype_spec(diff,
+                                              volume_name,
+                                              'replication_enabled',
+                                              'replication_enabled'))
+                    # if there is a change and it didn't work fast fail.
+                    if current != requested:
+                        # NOTE(review): requested is the raw spec string, so
+                        # any non-empty value (including '<is> False') is
+                        # truthy here. This appears to rely on
+                        # _create_replications re-checking the extra specs
+                        # itself -- confirm.
+                        if requested:
+                            model_update = self._create_replications(api,
+                                                                     volume,
+                                                                     scvolume)
+                        else:
+                            self._delete_replications(api, volume)
+                            model_update = {'replication_status': 'disabled',
+                                            'replication_driver_data': ''}
+
+                    # Active Replay
+                    current, requested = (
+                        self._get_retype_spec(diff, volume_name,
+                                              'Replicate Active Replay',
+                                              'replication:activereplay'))
+                    if current != requested and not (
+                        api.update_replicate_active_replay(
+                            scvolume, requested == '<is> True')):
+                        LOG.error(_LE('Failed to apply '
+                                      'replication:activereplay setting'))
+                        return False
+
+                    # TODO(tswanson): replaytype once it actually works.
+
+                except exception.VolumeBackendAPIException:
+                    # We do nothing with this. We simply return failure.
+                    return False
+        # If we have something to send down...
+        if model_update:
+            return model_update
+        return True
+
+    def replication_enable(self, context, vref):
+        """Re-enable replication on vref.
+
+        :param context: NA
+        :param vref: Cinder volume reference.
+        :return: model_update.
+        """
+        volumename = vref.get('id')
+        LOG.info(_LI('Enabling replication on %s'), volumename)
+        model_update = {}
+        with self._client.open_connection() as api:
+            destssns = self._split(vref.get('replication_driver_data'))
+            do_repl, sync = self._do_repl(api, vref)
+            if not (destssns and do_repl):
+                LOG.error(_LE('Replication not enabled or no replication '
+                              'destinations found. %s'),
+                          volumename)
+                return model_update
+            scvolume = api.find_volume(volumename)
+            if not scvolume:
+                LOG.error(_LE('Volume %s not found'), volumename)
+                return model_update
+            # Resume each replication; stop at the first failure.
+            for destssn in destssns:
+                if not api.resume_replication(scvolume, int(destssn)):
+                    LOG.error(_LE('Unable to resume replication on '
+                                  'volume %(vol)s to SC %(ssn)s'),
+                              {'vol': volumename,
+                               'ssn': destssn})
+                    model_update['replication_status'] = 'error'
+                    break
+        return model_update
+
+    def replication_disable(self, context, vref):
+        """Disable replication on vref.
+
+        :param context: NA
+        :param vref: Cinder volume reference.
+        :return: model_update.
+        """
+        volumename = vref.get('id')
+        LOG.info(_LI('Disabling replication on %s'), volumename)
+        model_update = {}
+        with self._client.open_connection() as api:
+            destssns = self._split(vref.get('replication_driver_data'))
+            do_repl, sync = self._do_repl(api, vref)
+            if not (destssns and do_repl):
+                LOG.error(_LE('Replication not enabled or no replication '
+                              'destinations found. %s'),
+                          volumename)
+                return model_update
+            scvolume = api.find_volume(volumename)
+            if not scvolume:
+                LOG.error(_LE('Volume %s not found'), volumename)
+                return model_update
+            # Pause each replication; stop at the first failure.
+            for destssn in destssns:
+                if not api.pause_replication(scvolume, int(destssn)):
+                    LOG.error(_LE('Unable to pause replication on '
+                                  'volume %(vol)s to SC %(ssn)s'),
+                              {'vol': volumename,
+                               'ssn': destssn})
+                    model_update['replication_status'] = 'error'
+                    break
+        return model_update
- current = storage_profiles[0]
- requested = storage_profiles[1]
-
- if current != requested:
- volume_name = volume.get('id')
- LOG.debug('Retyping volume %(vol)s to use storage '
- 'profile %(profile)s.',
- {'vol': volume_name,
- 'profile': requested})
- with self._client.open_connection() as api:
- if api.find_sc():
- scvolume = api.find_volume(volume_name)
- return api.update_storage_profile(
- scvolume, requested)
+    def _find_host(self, ssnstring):
+        """Find the backend associated with this ssnstring.
+
+        :param ssnstring: The ssn of the storage center we are looking for.
+        :return: The managed_backend_name associated with said storage center.
+        """
+        # First configured backend whose target matches wins; None when no
+        # backend is configured for this SSN.
+        return next((backend['managed_backend_name']
+                     for backend in self.backends
+                     if backend['target_device_id'] == ssnstring),
+                    None)
+
+    def _parse_secondary(self, api, vref, secondary):
+        """Find the replication destination associated with secondary.
+
+        :param api: Dell StorageCenterApi
+        :param vref: Cinder Volume
+        :param secondary: String indicating the secondary to failover to.
+        :return: Destination SSN and the host string for the given secondary.
+        """
+        LOG.debug('_parse_secondary. Looking for %s.', secondary)
+        destssn = None
+        host = None
+        # replication_driver_data is a comma separated list of SSNs.
+        for ssnstring in self._split(vref['replication_driver_data']):
+            # A requested secondary must match; with no secondary requested
+            # the first working, configured replication wins.
+            if secondary and secondary != ssnstring:
+                continue
+            # Is a string. Need an int.
+            ssn = int(ssnstring)
+            try:
+                # Without the source being up we have no good way to pick a
+                # destination to failover to. So just look for one that is
+                # up and that we are configured to use.
+                if api.find_sc(ssn):
+                    host = self._find_host(ssnstring)
+                    # If host then we are configured.
+                    if host:
+                        # Save our ssn and get out of here.
+                        destssn = ssn
+                        break
+            except exception.VolumeBackendAPIException:
+                LOG.warning(_LW('SSN %s appears to be down.'), ssn)
+        LOG.info(_LI('replication failover secondary is %(ssn)s %(host)s'),
+                 {'ssn': destssn,
+                  'host': host})
+        return destssn, host
+
+    def replication_failover(self, context, vref, secondary):
+        """Failover to secondary.
+
+        The flow is as follows.
+            1.The user explicitly requests a failover of a replicated volume.
+            2.Driver breaks replication.
+                a. Neatly by deleting the SCReplication object if the
+                   primary is still up.
+                b. Brutally by unmapping the replication volume if it isn't.
+            3.We rename the volume to "Cinder failover <Volume GUID>"
+            4.Change Cinder DB entry for which backend controls the volume
+              to the backend listed in the replication_device.
+            5.That's it.
+
+        Completion of the failover is done on first use on the new backend.
+        We do this by modifying the find_volume function.
+
+        Find volume searches the following places in order:
+            1. "<Volume GUID>" in the backend's volume folder.
+            2. "<Volume GUID>" outside of the volume folder.
+            3. "Cinder failover <Volume GUID>" anywhere on the system.
+
+        If "Cinder failover <Volume GUID>" was found:
+            1.Volume is renamed to "<Volume GUID>".
+            2.Volume is moved to the new backend's volume folder.
+            3.The volume is now available on the secondary backend.
+
+        :param context: NA
+        :param vref: Cinder volume reference.
+        :param secondary: SSN of the destination Storage Center
+        :return: model_update on failover.
+        """
+        LOG.info(_LI('Failing replication %(vol)s to %(sec)s'),
+                 {'vol': vref.get('id'),
+                  'sec': secondary})
+        # If we fall through this is our error.
+        msg = _('Unable to failover replication.')
+        with self._client.open_connection() as api:
+            # Basic check. We should never get here.
+            do_repl, sync = self._do_repl(api, vref)
+            if not do_repl:
+                # If we did get here then there is a disconnect. Set our
+                # message and raise (below).
+                msg = _('Unable to failover unreplicated volume.')
+            else:
+                # Look for the specified secondary.
+                destssn, host = self._parse_secondary(api, vref, secondary)
+                if destssn and host:
+                    volumename = vref.get('id')
+                    # This will break the replication on the SC side. At the
+                    # conclusion of this the destination volume will be
+                    # renamed to indicate failover is in progress. We will
+                    # pick the volume up on the destination backend later.
+                    if api.break_replication(volumename, destssn):
+                        model_update = {}
+                        model_update['host'] = host
+                        model_update['replication_driver_data'] = None
+                        return model_update
+                    # We are here. Nothing went well.
+                    LOG.error(_LE('Unable to break replication from '
+                                  '%(from)s to %(to)d.'),
+                              {'from': volumename,
+                               'to': destssn})
                else:
-                # We only support retype of Storage Profile and they are
-                # the same, so just return True to avoid unnecessary data
-                # migration.
-                LOG.info(_LI('Retype was to same Storage Profile.'))
-                return True
+                    LOG.error(_LE('Unable to find valid destination.'))
+
+        # We raise to indicate something bad happened.
+        raise exception.ReplicationError(volume_id=vref.get('id'),
+                                         reason=msg)
+
+    def list_replication_targets(self, context, vref):
+        """Lists replication targets for the given vref.
-        return False
+        We return targets the volume has been setup to replicate to and that
+        are configured on this backend.
+
+        :param context: NA
+        :param vref: Cinder volume object.
+        :return: A dict of the form {'volume_id': id,
+                                     'targets': [ {'type': xxx,
+                                                   'target_device_id': xxx,
+                                                   'backend_name': xxx}]}
+        """
+        LOG.debug('list_replication_targets for volume %s', vref.get('id'))
+        targets = []
+        with self._client.open_connection() as api:
+            do_repl, sync = self._do_repl(api, vref)
+            # If we have no replication_driver_data then we have no
+            # replication targets.
+            replication_driver_data = vref.get('replication_driver_data')
+            ssnstrings = self._split(replication_driver_data)
+            # If we have data.
+            if ssnstrings:
+                # Trundle through our backends.
+                for backend in self.backends:
+                    # If we find a backend then we report it.
+                    if ssnstrings.count(backend['target_device_id']):
+                        target = {}
+                        target['type'] = 'managed'
+                        target['target_device_id'] = (
+                            backend['target_device_id'])
+                        target['backend_name'] = (
+                            backend['managed_backend_name'])
+                        targets.append(target)
+                    else:
+                        # We note if the source is not replicated to a
+                        # configured destination for the backend.
+                        LOG.info(_LI('Volume %(guid)s not replicated to '
+                                     'backend %(name)s'),
+                                 {'guid': vref['id'],
+                                  'name': backend['managed_backend_name']})
+                # At this point we note that what we found and what we
+                # expected to find were two different things.
+                # (Fixed message: this warning fires when the counts differ,
+                # so it must read 'does not match'.)
+                if len(ssnstrings) != len(targets):
+                    LOG.warning(_LW('Expected replication count %(rdd)d does '
+                                    'not match configured replication count '
+                                    '%(tgt)d.'),
+                                {'rdd': len(ssnstrings),
+                                 'tgt': len(targets)})
+        # Format response.
+        replication_targets = {'volume_id': vref.get('id'), 'targets': targets}
+        LOG.info(_LI('list_replication_targets: %s'), replication_targets)
+        return replication_targets
+
+    def get_replication_updates(self, context):
+        """Return replication status updates.
+
+        :param context: NA
+        :return: Always an empty list; no updates are reported yet.
+        """
+        # TODO(tswanson): Determine what (if anything) should be reported
+        # here once the manager's expectations for this call are clear.
+        return []
2.2.0 - Driver retype support for switching volume's Storage Profile
2.3.0 - Added Legacy Port Mode Support
2.3.1 - Updated error handling.
+ 2.4.0 - Added Replication V2 support.
"""
- VERSION = '2.3.1'
+ VERSION = '2.4.0'
def __init__(self, *args, **kwargs):
super(DellStorageCenterFCDriver, self).__init__(*args, **kwargs)
Added API 2.2 support.
2.3.0 - Added Legacy Port Mode Support
2.3.1 - Updated error handling.
+ 2.4.0 - Added Replication V2 support.
"""
- VERSION = '2.3.1'
+ VERSION = '2.4.0'
def __init__(self, *args, **kwargs):
super(DellStorageCenterISCSIDriver, self).__init__(*args, **kwargs)
--- /dev/null
+---
+features:
+ - Added replication v2 support to the Dell Storage Center drivers.