from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp.eseries import utils
import cinder.volume.drivers.netapp.options as na_opts
+import cinder.volume.drivers.netapp.utils as na_utils
+
+# NOTE(review): FAKE_CINDER_VOLUME and FAKE_CINDER_SNAPSHOT were added twice
+# by this patch. The canonical definitions (whose FAKE_CINDER_SNAPSHOT also
+# carries the 'provider_id' the snapshot tests depend on) appear further down
+# in this module, so the duplicate earlier copies are dropped here.
MULTIATTACH_HOST_GROUP = {
'clusterRef': '8500000060080E500023C7340036035F515B78FC',
}
+# Fake E-Series snapshot (PiT) group. 'snapshotCount' feeds the
+# group-capacity checks in the library tests and 'baseVolume' ties the
+# group to a fake backend volume.
SNAPSHOT_GROUP = {
+ 'id': '3300000060080E500023C7340000098D5294AC9A',
'status': 'optimal',
'autoDeleteLimit': 0,
'maxRepositoryCapacity': '-65536',
'rollbackStatus': 'none',
'unusableRepositoryCapacity': '0',
- 'pitGroupRef':
- '3300000060080E500023C7340000098D5294AC9A',
+ 'pitGroupRef': '3300000060080E500023C7340000098D5294AC9A',
'clusterSize': 65536,
- 'label': 'C6JICISVHNG2TFZX4XB5ZWL7O',
+ 'label': 'C6JICISVHNG2TFZX4XB5ZWL7F',
'maxBaseCapacity': '476187142128128',
'repositoryVolume': '3600000060080E500023BB3400001FA952CEF12C',
'fullWarnThreshold': 99,
'consistencyGroupRef': '0000000000000000000000000000000000000000',
'volumeHandle': 49153,
'consistencyGroup': False,
- 'baseVolume': '0200000060080E500023C734000009825294A534'
+ 'baseVolume': '0200000060080E500023C734000009825294A534',
+ 'snapshotCount': 32
}
+# Fake point-in-time snapshot image; 'baseVol' matches SNAPSHOT_GROUP's
+# 'baseVolume' so image/group lookups in the tests line up.
SNAPSHOT_IMAGE = {
+ 'id': '3400000060080E500023BB3400631F335294A5A8',
+ 'baseVol': '0200000060080E500023C734000009825294A534',
'status': 'optimal',
'pitCapacity': '2147483648',
'pitTimestamp': '1389315375',
'pitSequenceNumber': '19'
}
+# Fake snapshot (PiT view) volume as returned by the E-Series REST API.
+# An all-zero 'basePIT' with boundToPIT=False models a view not bound to a
+# snapshot image; the view is read-only and currently stopped.
+SNAPSHOT_VOLUME = {
+ 'id': '35000000600A0980006077F80000F8BF566581AA',
+ 'viewRef': '35000000600A0980006077F80000F8BF566581AA',
+ 'worldWideName': '600A0980006077F80000F8BF566581AA',
+ 'baseVol': '02000000600A0980006077F80000F89B56657E26',
+ 'basePIT': '0000000000000000000000000000000000000000',
+ 'boundToPIT': False,
+ 'accessMode': 'readOnly',
+ 'label': 'UZJ45SLUKNGWRF3QZHBTOG4C4E_DEL',
+ 'status': 'stopped',
+ 'currentManager': '070000000000000000000001',
+ 'preferredManager': '070000000000000000000001',
+ 'repositoryVolume': '0000000000000000000000000000000000000000',
+ 'fullWarnThreshold': 0,
+ 'viewTime': '1449453419',
+ 'viewSequenceNumber': '2104',
+ 'volumeHandle': 16510,
+ 'clusterSize': 0,
+ 'maxRepositoryCapacity': '0',
+ 'unusableRepositoryCapacity': '0',
+ 'membership': {
+ 'viewType': 'individual',
+ 'cgViewRef': None
+ },
+ 'mgmtClientAttribute': 0,
+ 'offline': False,
+ 'volumeFull': False,
+ 'repositoryCapacity': '0',
+ 'baseVolumeCapacity': '1073741824',
+ 'totalSizeInBytes': '0',
+ 'consistencyGroupId': None,
+ 'volumeCopyTarget': False,
+ 'cloneCopy': False,
+ 'volumeCopySource': False,
+ 'pitBaseVolume': False,
+ 'asyncMirrorTarget': False,
+ 'asyncMirrorSource': False,
+ 'protectionType': 'type0Protection',
+ 'remoteMirrorSource': False,
+ 'remoteMirrorTarget': False,
+ 'wwn': '600A0980006077F80000F8BF566581AA',
+ 'listOfMappings': [],
+ 'mapped': False,
+ 'currentControllerId': '070000000000000000000001',
+ 'preferredControllerId': '070000000000000000000001',
+ 'onlineVolumeCopy': False,
+ 'objectType': 'pitView',
+ 'name': 'UZJ45SLUKNGWRF3QZHBTOG4C4E',
+}
+
+# Fake entry from the backend's persistent key/value store; 'value' is a
+# JSON-encoded mapping keyed by snapshot-group ref (presumably a usage
+# count per group — verify against the driver's store consumers).
+FAKE_BACKEND_STORE = {
+ 'key': 'cinder-snapshots',
+ 'value': '{"3300000060080E50003416400000E90D56B047E5":"2"}'
+}
+
+# Canonical fake Cinder volume dict, used as a driver-call parameter.
+FAKE_CINDER_VOLUME = {
+ 'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef',
+ 'size': 1,
+ 'volume_name': 'lun1',
+ 'host': 'hostname@backend#DDP',
+ 'os_type': 'linux',
+ 'provider_location': 'lun1',
+ 'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef',
+ 'provider_auth': 'provider a b',
+ 'project_id': 'project',
+ 'display_name': None,
+ 'display_description': 'lun1',
+ 'volume_type_id': None,
+ 'migration_status': None,
+ 'attach_status': "detached"
+}
+
+# Canonical fake Cinder snapshot; 'provider_id' matches SNAPSHOT_IMAGE['id']
+# so backend image lookups in the tests resolve.
+FAKE_CINDER_SNAPSHOT = {
+ 'id': '78f95b9d-3f02-4781-a512-1a1c921d48a1',
+ 'volume': FAKE_CINDER_VOLUME,
+ 'provider_id': '3400000060080E500023BB3400631F335294A5A8'
+}
+
HARDWARE_INVENTORY_SINGLE_CONTROLLER = {
'controllers': [
{
}
+def list_snapshot_groups(num_groups):
+    """Return *num_groups* fake snapshot groups with distinct labels.
+
+    Each group is a deep copy of SNAPSHOT_GROUP whose label's final
+    character is replaced with the group index (labels are only
+    single-character-distinct for num_groups <= 10).
+    """
+    groups = []
+    for index in range(num_groups):
+        group = copy.deepcopy(SNAPSHOT_GROUP)
+        group['label'] = group['label'][:-1] + str(index)
+        groups.append(group)
+    return groups
+
+
def create_configuration_eseries():
config = conf.Configuration(None)
config.append_config_values(na_opts.netapp_connection_opts)
@deepcopy_return_value_class_decorator
class FakeEseriesClient(object):
- features = mock.Mock()
+ features = na_utils.Features()
def __init__(self, *args, **kwargs):
- pass
+ # Enable every optional client capability by default so feature-gated
+ # driver code paths get exercised by the tests.
+ self.features.add_feature('AUTOSUPPORT')
+ self.features.add_feature('SSC_API_V2')
+ self.features.add_feature('REST_1_3_RELEASE')
+ self.features.add_feature('REST_1_4_RELEASE')
def list_storage_pools(self):
return STORAGE_POOLS
def get_host(self, *args, **kwargs):
return HOST
+ def create_volume(self, *args, **kwargs):
+ return VOLUME
+
def create_volume_mapping(self, *args, **kwargs):
return VOLUME_MAPPING
def list_snapshot_images(self):
return [SNAPSHOT_IMAGE]
+ def list_snapshot_image(self, *args, **kwargs):
+     # Accept the object-id the library passes (the real client requires
+     # one); the fake ignores it and returns the canned image.
+     return SNAPSHOT_IMAGE
+
def list_host_types(self):
return [
{
def delete_vol_copy_job(self, *args, **kwargs):
pass
+ def create_snapshot_image(self, *args, **kwargs):
+ return SNAPSHOT_IMAGE
+
+ def create_snapshot_volume(self, *args, **kwargs):
+ return SNAPSHOT_VOLUME
+
+ def list_snapshot_volumes(self, *args, **kwargs):
+ return [SNAPSHOT_VOLUME]
+
+ def list_snapshot_volume(self, *args, **kwargs):
+     # Singular lookup returns a snapshot *volume*, for consistency with
+     # list_snapshot_volumes above (previously returned SNAPSHOT_IMAGE).
+     return SNAPSHOT_VOLUME
+
+ def create_snapshot_group(self, *args, **kwargs):
+ return SNAPSHOT_GROUP
+
+ def list_snapshot_group(self, *args, **kwargs):
+ return SNAPSHOT_GROUP
+
def delete_snapshot_volume(self, *args, **kwargs):
pass
def update_stored_system_password(self, *args, **kwargs):
pass
+
+ def update_snapshot_volume(self, *args, **kwargs):
+ return SNAPSHOT_VOLUME
+
+ def delete_snapshot_image(self, *args, **kwargs):
+ pass
+
+ def delete_snapshot_group(self, *args, **kwargs):
+ pass
+
+ def restart_snapshot_volume(self, *args, **kwargs):
+ pass
+
+ def list_backend_store(self, key):
+ # Empty persistent store by default; tests patch in data when needed.
+ return {}
+
+ def save_backend_store(self, key, val):
+ pass
import copy
import ddt
+import json
import mock
from simplejson import scanner
**{'object-id':
fake_volume['id']})
+ def test_list_snapshot_group(self):
+ """Retrieving one snapshot group issues GET on its resource path."""
+ grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
+ return_value=grp))
+ fake_ref = 'fake'
+
+ result = self.my_client.list_snapshot_group(fake_ref)
+
+ self.assertEqual(grp, result)
+ invoke.assert_called_once_with(
+ 'GET', self.my_client.RESOURCE_PATHS['snapshot_group'],
+ **{'object-id': fake_ref})
+
+ def test_list_snapshot_groups(self):
+ grps = [copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)]
+ invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
+ return_value=grps))
+
+ result = self.my_client.list_snapshot_groups()
+
+ self.assertEqual(grps, result)
+ invoke.assert_called_once_with(
+ 'GET', self.my_client.RESOURCE_PATHS['snapshot_groups'])
+
+ def test_delete_snapshot_group(self):
+ invoke = self.mock_object(self.my_client, '_invoke')
+ fake_ref = 'fake'
+
+ self.my_client.delete_snapshot_group(fake_ref)
+
+ invoke.assert_called_once_with(
+ 'DELETE', self.my_client.RESOURCE_PATHS['snapshot_group'],
+ **{'object-id': fake_ref})
+
+ @ddt.data((None, None, None, None, None), ('1', 50, 75, 32, 'purgepit'))
+ @ddt.unpack
+ def test_create_snapshot_group(self, pool_id, repo, warn, limit, policy):
+ """Group creation POSTs every tunable to the snapshot-groups path."""
+ # NOTE(review): the mocked _invoke return value is opaque to this
+ # test, so reusing SNAPSHOT_VOLUME as the canned response is harmless.
+ vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
+ invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
+ return_value=vol))
+ snap_grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+
+ result = self.my_client.create_snapshot_group(
+ snap_grp['label'], snap_grp['id'], pool_id, repo, warn, limit,
+ policy)
+
+ self.assertEqual(vol, result)
+ invoke.assert_called_once_with(
+ 'POST', self.my_client.RESOURCE_PATHS['snapshot_groups'],
+ {'baseMappableObjectId': snap_grp['id'], 'name': snap_grp['label'],
+ 'storagePoolId': pool_id, 'repositoryPercentage': repo,
+ 'warningThreshold': warn, 'autoDeleteLimit': limit,
+ 'fullPolicy': policy})
+
+ def test_list_snapshot_volumes(self):
+ vols = [copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)]
+ invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
+ return_value=vols))
+
+ result = self.my_client.list_snapshot_volumes()
+
+ self.assertEqual(vols, result)
+ invoke.assert_called_once_with(
+ 'GET', self.my_client.RESOURCE_PATHS['snapshot_volumes'])
+
+ def test_delete_snapshot_volume(self):
+ invoke = self.mock_object(self.my_client, '_invoke')
+ fake_ref = 'fake'
+
+ self.my_client.delete_snapshot_volume(fake_ref)
+
+ invoke.assert_called_once_with(
+ 'DELETE', self.my_client.RESOURCE_PATHS['snapshot_volume'],
+ **{'object-id': fake_ref})
+
+ @ddt.data((None, None, None, None), ('1', 50, 75, 'readWrite'))
+ @ddt.unpack
+ def test_create_snapshot_volume(self, pool_id, repo, warn, mode):
+ """Snapshot-volume creation POSTs to the snapshot-volumes path."""
+ vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
+ invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
+ return_value=vol))
+
+ result = self.my_client.create_snapshot_volume(
+ vol['basePIT'], vol['label'], vol['id'], pool_id,
+ repo, warn, mode)
+
+ self.assertEqual(vol, result)
+ # The request body is assembled internally from the optional args, so
+ # only the HTTP method and resource path are pinned here.
+ invoke.assert_called_once_with(
+ 'POST', self.my_client.RESOURCE_PATHS['snapshot_volumes'],
+ mock.ANY)
+
+ def test_update_snapshot_volume(self):
+ snap_id = '1'
+ label = 'name'
+ pct = 99
+ vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
+ invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
+ return_value=vol))
+
+ result = self.my_client.update_snapshot_volume(snap_id, label, pct)
+
+ self.assertEqual(vol, result)
+ invoke.assert_called_once_with(
+ 'POST', self.my_client.RESOURCE_PATHS['snapshot_volume'],
+ {'name': label, 'fullThreshold': pct}, **{'object-id': snap_id})
+
+ def test_create_snapshot_image(self):
+ img = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
+ return_value=img))
+ grp_id = '1'
+
+ result = self.my_client.create_snapshot_image(grp_id)
+
+ self.assertEqual(img, result)
+ invoke.assert_called_once_with(
+ 'POST', self.my_client.RESOURCE_PATHS['snapshot_images'],
+ {'groupId': grp_id})
+
+ def test_list_snapshot_image(self):
+ img = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
+ return_value=img))
+ fake_ref = 'fake'
+
+ result = self.my_client.list_snapshot_image(fake_ref)
+
+ self.assertEqual(img, result)
+ invoke.assert_called_once_with(
+ 'GET', self.my_client.RESOURCE_PATHS['snapshot_image'],
+ **{'object-id': fake_ref})
+
+ def test_list_snapshot_images(self):
+ imgs = [copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)]
+ invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
+ return_value=imgs))
+
+ result = self.my_client.list_snapshot_images()
+
+ self.assertEqual(imgs, result)
+ invoke.assert_called_once_with(
+ 'GET', self.my_client.RESOURCE_PATHS['snapshot_images'])
+
+ def test_delete_snapshot_image(self):
+ invoke = self.mock_object(self.my_client, '_invoke')
+ fake_ref = 'fake'
+
+ self.my_client.delete_snapshot_image(fake_ref)
+
+ invoke.assert_called_once_with(
+ 'DELETE', self.my_client.RESOURCE_PATHS['snapshot_image'],
+ **{'object-id': fake_ref})
+
@ddt.data('00.00.00.00', '01.52.9000.2', '01.52.9001.2', '01.51.9000.3',
'01.51.9001.3', '01.51.9010.5', '0.53.9000.3', '0.53.9001.4')
def test_api_version_not_support_asup(self, api_version):
self.assertTrue(self.my_client.features.SSC_API_V2.supported)
+ @ddt.data('00.00.00.00', '01.52.9000.5', '01.52.9001.2', '00.53.9001.3',
+ '01.52.9090.1', '1.52.9010.7', '0.53.9011.7')
+ def test_api_version_not_support_1_3(self, api_version):
+ """Firmware below the 01.53.9000 line must not enable REST 1.3."""
+
+ self.mock_object(client.RestClient,
+ 'get_eseries_api_info',
+ mock.Mock(return_value=('proxy', api_version)))
+
+ client.RestClient._init_features(self.my_client)
+
+ self.assertFalse(self.my_client.features.REST_1_3_RELEASE.supported)
+
+ @ddt.data('01.53.9000.1', '01.53.9000.5', '01.53.8999.1',
+ '01.54.9010.20', '01.54.9000.1', '02.51.9000.3',
+ '02.52.8999.3', '02.51.8999.2')
+ def test_api_version_1_3(self, api_version):
+
+ self.mock_object(client.RestClient,
+ 'get_eseries_api_info',
+ mock.Mock(return_value=('proxy', api_version)))
+
+ client.RestClient._init_features(self.my_client)
+
+ self.assertTrue(self.my_client.features.REST_1_3_RELEASE.supported)
+
def test_invoke_bad_content_type(self):
"""Tests the invoke behavior with a non-JSON response"""
fake_response = mock.Mock()
self.my_client._invoke, 'GET',
eseries_fake.FAKE_ENDPOINT_HTTP)
+ def test_list_backend_store(self):
+ """The JSON-encoded 'value' payload is decoded before returning."""
+ path = self.my_client.RESOURCE_PATHS.get('persistent-store')
+ fake_store = copy.deepcopy(eseries_fake.FAKE_BACKEND_STORE)
+ invoke = self.mock_object(
+ self.my_client, '_invoke', mock.Mock(
+ return_value=fake_store))
+ expected = json.loads(fake_store.get('value'))
+
+ result = self.my_client.list_backend_store('key')
+
+ self.assertEqual(expected, result)
+ invoke.assert_called_once_with('GET', path, key='key')
+
+ def test_save_backend_store(self):
+ """Saving a store entry POSTs to the persistent-stores path."""
+ path = self.my_client.RESOURCE_PATHS.get('persistent-stores')
+ fake_store = copy.deepcopy(eseries_fake.FAKE_BACKEND_STORE)
+ key = 'key'
+ invoke = self.mock_object(
+ self.my_client, '_invoke',
+ mock.Mock())
+
+ self.my_client.save_backend_store(key, fake_store)
+
+ # The payload is serialized internally; only method and path matter.
+ invoke.assert_called_once_with('POST', path, mock.ANY)
+
@ddt.ddt
class TestWebserviceClientTestCase(test.TestCase):
self.library = self.driver.library
self.mock_object(self.library,
'_check_mode_get_or_register_storage_system')
+ self.mock_object(self.library, '_version_check')
self.mock_object(self.driver.library, '_check_storage_system')
self.driver.do_setup(context='context')
self.driver.library._client._endpoint = fakes.FAKE_ENDPOINT_HTTP
+ self.driver.library._client.features = mock.Mock()
+ self.driver.library._client.features.REST_1_4_RELEASE = True
def _set_config(self, configuration):
configuration.netapp_storage_family = 'eseries'
pass
def test_embedded_mode(self):
- self.mock_object(self.driver.library,
- '_check_mode_get_or_register_storage_system')
self.mock_object(client.RestClient, '_init_features')
configuration = self._set_config(self.create_configuration())
configuration.netapp_controller_ips = '127.0.0.1,127.0.0.3'
-
driver = common.NetAppDriver(configuration=configuration)
+ self.mock_object(driver.library, '_version_check')
self.mock_object(client.RestClient, 'list_storage_systems', mock.Mock(
return_value=[fakes.STORAGE_SYSTEM]))
driver.do_setup(context='context')
import copy
import ddt
import time
+import uuid
import mock
from oslo_utils import units
from six.moves import range
from six.moves import reduce
+from cinder import context
from cinder import exception
from cinder import test
def get_fake_volume():
+ """Return a fake Cinder Volume that can be used a parameter"""
return {
'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1,
'volume_name': 'lun1', 'host': 'hostname@backend#DDP',
# Deprecated Option
self.library.configuration.netapp_storage_pools = None
self.library._client = eseries_fake.FakeEseriesClient()
+
+ self.mock_object(self.library, '_start_periodic_tasks',
+ new_attr=mock.Mock())
+
+ self.mock_object(library.cinder_utils, 'synchronized',
+ mock.Mock(return_value=lambda f: f))
+
with mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new = cinder_utils.ZeroIntervalLoopingCall):
self.library.check_for_setup_error()
+ self.ctxt = context.get_admin_context()
+
def test_do_setup(self):
self.mock_object(self.library,
'_check_mode_get_or_register_storage_system')
self.library = library.NetAppESeriesLibrary("FAKE", **kwargs)
self.library._client = eseries_fake.FakeEseriesClient()
- # We don't want the looping calls to run
+ self.mock_object(library.cinder_utils, 'synchronized',
+ mock.Mock(return_value=lambda f: f))
self.mock_object(self.library, '_start_periodic_tasks',
new_attr=mock.Mock())
+ self.ctxt = context.get_admin_context()
+
with mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new = cinder_utils.ZeroIntervalLoopingCall):
self.library.check_for_setup_error()
get_fake_volume())
self.assertFalse(self.library._client.create_volume.call_count)
+ @ddt.data(0, 1, 2)
+ def test_create_snapshot(self, group_count):
+     """Successful snapshot creation with 0-2 pre-existing groups."""
+     fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
+     self.library._get_volume = mock.Mock(return_value=fake_eseries_volume)
+     fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
+     self.library._get_storage_pools = mock.Mock(return_value=[fake_pool])
+     fake_cinder_snapshot = copy.deepcopy(
+         eseries_fake.FAKE_CINDER_SNAPSHOT)
+     fake_snapshot_group_list = eseries_fake.list_snapshot_groups(
+         group_count)
+     fake_snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+     fake_snapshot_image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+     self.library._client.create_snapshot_group = mock.Mock(
+         return_value=fake_snapshot_group)
+     self.library._client.list_snapshot_groups = mock.Mock(
+         return_value=fake_snapshot_group_list)
+     self.library._client.create_snapshot_image = mock.Mock(
+         return_value=fake_snapshot_image)
+
+     model_update = self.library.create_snapshot(fake_cinder_snapshot)
+
+     # The driver must report the new backend image id back to Cinder
+     # (previously this test called create_snapshot but asserted nothing).
+     self.assertEqual({'provider_id': fake_snapshot_image['id']},
+                      model_update)
+
+ @ddt.data(0, 1, 3)
+ def test_create_cloned_volume(self, snapshot_group_count):
+     """Test creating a cloned volume with varying existing group counts."""
+     fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
+     self.library._get_volume = mock.Mock(return_value=fake_eseries_volume)
+     fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
+     self.library._get_storage_pools = mock.Mock(return_value=[fake_pool])
+     fake_snapshot_group_list = eseries_fake.list_snapshot_groups(
+         snapshot_group_count)
+     self.library._client.list_snapshot_groups = mock.Mock(
+         return_value=fake_snapshot_group_list)
+     fake_snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+     self.library._client.create_snapshot_group = mock.Mock(
+         return_value=fake_snapshot_group)
+     fake_snapshot_image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+     self.library._client.create_snapshot_image = mock.Mock(
+         return_value=fake_snapshot_image)
+     self.library._get_snapshot_group_for_snapshot = mock.Mock(
+         return_value=copy.deepcopy(eseries_fake.SNAPSHOT_GROUP))
+     fake_created_volume = copy.deepcopy(eseries_fake.VOLUMES[1])
+     # PEP 8: no spaces around '=' for keyword arguments.
+     self.library.create_volume_from_snapshot = mock.Mock(
+         return_value=fake_created_volume)
+     fake_cinder_volume = copy.deepcopy(eseries_fake.FAKE_CINDER_VOLUME)
+     extend_vol = {'id': uuid.uuid4(), 'size': 10}
+
+     self.library.create_cloned_volume(extend_vol, fake_cinder_volume)
+
+     # The clone must be built from an intermediate snapshot of the source
+     # (previously this test made the call but asserted nothing).
+     self.assertEqual(
+         1, self.library.create_volume_from_snapshot.call_count)
+
def test_create_volume_from_snapshot(self):
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
+ fake_snap = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT)
self.mock_object(self.library, "_schedule_and_create_volume",
mock.Mock(return_value=fake_eseries_volume))
- self.mock_object(self.library, "_create_snapshot_volume",
- mock.Mock(return_value=fake_eseries_volume))
- self.mock_object(self.library._client, "delete_snapshot_volume")
+ self.mock_object(self.library, "_get_snapshot",
+ mock.Mock(return_value=copy.deepcopy(
+ eseries_fake.SNAPSHOT_IMAGE)))
self.library.create_volume_from_snapshot(
- get_fake_volume(), fake_snapshot.fake_snapshot_obj(None))
+ get_fake_volume(), fake_snap)
self.assertEqual(
1, self.library._schedule_and_create_volume.call_count)
- self.assertEqual(1, self.library._create_snapshot_volume.call_count)
- self.assertEqual(
- 1, self.library._client.delete_snapshot_volume.call_count)
def test_create_volume_from_snapshot_create_fails(self):
fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
self.mock_object(self.library, "_schedule_and_create_volume",
mock.Mock(return_value=fake_dest_eseries_volume))
- self.mock_object(self.library, "_create_snapshot_volume",
- mock.Mock(side_effect=exception.NetAppDriverException)
- )
- self.mock_object(self.library._client, "delete_snapshot_volume")
self.mock_object(self.library._client, "delete_volume")
+ self.mock_object(self.library._client, "delete_snapshot_volume")
+ self.mock_object(self.library, "_get_snapshot",
+ mock.Mock(return_value=copy.deepcopy(
+ eseries_fake.SNAPSHOT_IMAGE)))
+ self.mock_object(self.library._client, "create_snapshot_volume",
+ mock.Mock(
+ side_effect=exception.NetAppDriverException))
self.assertRaises(exception.NetAppDriverException,
self.library.create_volume_from_snapshot,
self.assertEqual(
1, self.library._schedule_and_create_volume.call_count)
- self.assertEqual(1, self.library._create_snapshot_volume.call_count)
- self.assertEqual(
- 0, self.library._client.delete_snapshot_volume.call_count)
# Ensure the volume we were going to copy to is cleaned up
self.library._client.delete_volume.assert_called_once_with(
fake_dest_eseries_volume['volumeRef'])
mock.Mock(return_value=fake_dest_eseries_volume))
self.mock_object(self.library, "_create_snapshot_volume",
mock.Mock(return_value=fake_dest_eseries_volume))
- self.mock_object(self.library._client, "delete_snapshot_volume")
self.mock_object(self.library._client, "delete_volume")
+ self.mock_object(self.library, "_get_snapshot",
+ mock.Mock(return_value=copy.deepcopy(
+ eseries_fake.SNAPSHOT_IMAGE)))
fake_failed_volume_copy_job = copy.deepcopy(
eseries_fake.VOLUME_COPY_JOB)
self.assertEqual(
1, self.library._schedule_and_create_volume.call_count)
- self.assertEqual(1, self.library._create_snapshot_volume.call_count)
- self.assertEqual(
- 1, self.library._client.delete_snapshot_volume.call_count)
# Ensure the volume we were going to copy to is cleaned up
self.library._client.delete_volume.assert_called_once_with(
fake_dest_eseries_volume['volumeRef'])
fake_dest_eseries_volume['volumeRef'] = 'fake_volume_ref'
self.mock_object(self.library, "_schedule_and_create_volume",
mock.Mock(return_value=fake_dest_eseries_volume))
+ self.mock_object(self.library, "_get_snapshot",
+ mock.Mock(return_value=copy.deepcopy(
+ eseries_fake.SNAPSHOT_IMAGE)))
+ self.mock_object(self.library, '_create_snapshot_volume',
+ mock.Mock(return_value=copy.deepcopy(
+ eseries_fake.SNAPSHOT_VOLUME)))
self.mock_object(self.library, "_create_snapshot_volume",
mock.Mock(return_value=copy.deepcopy(
eseries_fake.VOLUME)))
self.assertEqual(
1, self.library._schedule_and_create_volume.call_count)
- self.assertEqual(1, self.library._create_snapshot_volume.call_count)
self.assertEqual(
1, self.library._client.delete_snapshot_volume.call_count)
# Ensure the volume we created is not cleaned up
self.assertEqual(0, self.library._client.delete_volume.call_count)
+ def test_create_snapshot_group(self):
+ label = 'label'
+
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ snapshot_group['baseVolume'] = vol['id']
+ get_call = self.mock_object(
+ self.library, '_get_storage_pools', mock.Mock(return_value=None))
+ create_call = self.mock_object(
+ self.library._client, 'create_snapshot_group',
+ mock.Mock(return_value=snapshot_group))
+
+ actual = self.library._create_snapshot_group(label, vol)
+
+ get_call.assert_not_called()
+ create_call.assert_called_once_with(label, vol['id'], repo_percent=20)
+ self.assertEqual(snapshot_group, actual)
+
+ def test_create_snapshot_group_legacy_ddp(self):
+ self.library._client.features.REST_1_3_RELEASE = False
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ pools = copy.deepcopy(eseries_fake.STORAGE_POOLS)
+ pool = pools[-1]
+ snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ snapshot_group['baseVolume'] = vol['id']
+ vol['volumeGroupRef'] = pool['id']
+ pool['raidLevel'] = 'raidDiskPool'
+ get_call = self.mock_object(
+ self.library, '_get_storage_pools', mock.Mock(return_value=pools))
+ create_call = self.mock_object(
+ self.library._client, 'create_snapshot_group',
+ mock.Mock(return_value=snapshot_group))
+
+ actual = self.library._create_snapshot_group('label', vol)
+
+ create_call.assert_called_with('label', vol['id'],
+ vol['volumeGroupRef'],
+ repo_percent=mock.ANY)
+ get_call.assert_called_once_with()
+ self.assertEqual(snapshot_group, actual)
+
+ def test_create_snapshot_group_legacy_vg(self):
+ self.library._client.features.REST_1_3_RELEASE = False
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ vol_size_gb = int(vol['totalSizeInBytes']) / units.Gi
+ pools = copy.deepcopy(eseries_fake.STORAGE_POOLS)
+ pool = pools[0]
+ pool['raidLevel'] = 'raid6'
+ snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ snapshot_group['baseVolume'] = vol['id']
+ vol['volumeGroupRef'] = pool['id']
+
+ get_call = self.mock_object(
+ self.library, '_get_sorted_available_storage_pools',
+ mock.Mock(return_value=pools))
+ self.mock_object(self.library._client, 'create_snapshot_group',
+ mock.Mock(return_value=snapshot_group))
+ actual = self.library._create_snapshot_group('label', vol)
+
+ get_call.assert_called_once_with(vol_size_gb)
+ self.assertEqual(snapshot_group, actual)
+
+ def test_get_snapshot(self):
+ """_get_snapshot resolves the E-Series image via Cinder provider_id."""
+ fake_snap = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT)
+ snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ get_snap = self.mock_object(
+ self.library._client, 'list_snapshot_image', mock.Mock(
+ return_value=snap))
+
+ result = self.library._get_snapshot(fake_snap)
+
+ self.assertEqual(snap, result)
+ get_snap.assert_called_once_with(fake_snap['provider_id'])
+
+ def test_get_snapshot_fail(self):
+ fake_snap = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT)
+ get_snap = self.mock_object(
+ self.library._client, 'list_snapshot_image', mock.Mock(
+ side_effect=exception.NotFound))
+
+ self.assertRaises(exception.NotFound, self.library._get_snapshot,
+ fake_snap)
+
+ get_snap.assert_called_once_with(fake_snap['provider_id'])
+
+ def test_get_snapshot_group_for_snapshot(self):
+ fake_id = 'id'
+ snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ get_snap = self.mock_object(
+ self.library, '_get_snapshot',
+ mock.Mock(return_value=snap))
+ get_grp = self.mock_object(self.library._client, 'list_snapshot_group',
+ mock.Mock(return_value=grp))
+
+ result = self.library._get_snapshot_group_for_snapshot(fake_id)
+
+ self.assertEqual(grp, result)
+ get_grp.assert_called_once_with(snap['pitGroupRef'])
+ get_snap.assert_called_once_with(fake_id)
+
+ def test_get_snapshot_group_for_snapshot_fail(self):
+ fake_id = 'id'
+ snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ get_snap = self.mock_object(
+ self.library, '_get_snapshot',
+ mock.Mock(return_value=snap))
+ get_grp = self.mock_object(self.library._client, 'list_snapshot_group',
+ mock.Mock(side_effect=exception.NotFound))
+
+ self.assertRaises(exception.NotFound,
+ self.library._get_snapshot_group_for_snapshot,
+ fake_id)
+
+ get_grp.assert_called_once_with(snap['pitGroupRef'])
+ get_snap.assert_called_once_with(fake_id)
+
+ def test_get_snapshot_groups_for_volume(self):
+ """Only groups whose baseVolume matches the volume are returned."""
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ snapshot_group['baseVolume'] = vol['id']
+ # Generate some snapshot groups that will not match
+ snapshot_groups = [copy.deepcopy(snapshot_group) for i in range(
+ self.library.MAX_SNAPSHOT_GROUP_COUNT)]
+ for i, group in enumerate(snapshot_groups):
+ group['baseVolume'] = str(i)
+ snapshot_groups.append(snapshot_group)
+ get_call = self.mock_object(
+ self.library._client, 'list_snapshot_groups', mock.Mock(
+ return_value=snapshot_groups))
+
+ groups = self.library._get_snapshot_groups_for_volume(vol)
+
+ get_call.assert_called_once_with()
+ self.assertEqual([snapshot_group], groups)
+
+ def test_get_available_snapshot_group(self):
+ """Reserved (copy-suffix) and full groups are skipped when choosing."""
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ snapshot_group['baseVolume'] = vol['id']
+ snapshot_group['snapshotCount'] = 0
+ # Generate some snapshot groups that will not match
+
+ # A group labeled with the snapshot-volume-copy suffix is reserved
+ # for clone operations and must not be selected.
+ reserved_group = copy.deepcopy(snapshot_group)
+ reserved_group['label'] += self.library.SNAPSHOT_VOL_COPY_SUFFIX
+
+ # A group already at MAX_SNAPSHOT_COUNT has no free image slots.
+ full_group = copy.deepcopy(snapshot_group)
+ full_group['snapshotCount'] = self.library.MAX_SNAPSHOT_COUNT
+
+ snapshot_groups = [snapshot_group, reserved_group, full_group]
+ get_call = self.mock_object(
+ self.library, '_get_snapshot_groups_for_volume', mock.Mock(
+ return_value=snapshot_groups))
+
+ group = self.library._get_available_snapshot_group(vol)
+
+ get_call.assert_called_once_with(vol)
+ self.assertEqual(snapshot_group, group)
+
+ def test_get_snapshot_groups_for_volume_not_found(self):
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ snapshot_group['baseVolume'] = vol['id']
+ snapshot_group['snapshotCount'] = self.library.MAX_SNAPSHOT_COUNT
+ # Generate some snapshot groups that will not match
+
+ get_call = self.mock_object(
+ self.library, '_get_snapshot_groups_for_volume', mock.Mock(
+ return_value=[snapshot_group]))
+
+ group = self.library._get_available_snapshot_group(vol)
+
+ get_call.assert_called_once_with(vol)
+ self.assertIsNone(group)
+
+ def test_create_snapshot_available_snap_group(self):
+ expected_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ expected = {'provider_id': expected_snap['id']}
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ fake_label = 'fakeName'
+ self.mock_object(self.library, '_get_volume', mock.Mock(
+ return_value=vol))
+ create_call = self.mock_object(
+ self.library._client, 'create_snapshot_image', mock.Mock(
+ return_value=expected_snap))
+ self.mock_object(self.library, '_get_available_snapshot_group',
+ mock.Mock(return_value=snapshot_group))
+ self.mock_object(utils, 'convert_uuid_to_es_fmt',
+ mock.Mock(return_value=fake_label))
+ fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT)
+
+ model_update = self.library.create_snapshot(fake_snapshot)
+
+ self.assertEqual(expected, model_update)
+ create_call.assert_called_once_with(snapshot_group['id'])
+
+ @ddt.data(False, True)
+ def test_create_snapshot_failure(self, cleanup_failure):
+ """Validate the behavior for a failure during snapshot creation"""
+
+ # NOTE(review): assertRaisesRegexp below is the Python 2-era spelling;
+ # Python 3 deprecates it in favor of assertRaisesRegex — confirm the
+ # project's supported runtimes before changing it.
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ snap_vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
+ fake_label = 'fakeName'
+ create_fail_exc = exception.NetAppDriverException('fail_create')
+ cleanup_fail_exc = exception.NetAppDriverException('volume_deletion')
+ if cleanup_failure:
+ exc_msg = cleanup_fail_exc.msg
+ delete_snap_grp = self.mock_object(
+ self.library, '_delete_snapshot_group',
+ mock.Mock(side_effect=cleanup_fail_exc))
+ else:
+ exc_msg = create_fail_exc.msg
+ delete_snap_grp = self.mock_object(
+ self.library, '_delete_snapshot_group')
+ self.mock_object(self.library, '_get_volume', mock.Mock(
+ return_value=vol))
+ self.mock_object(self.library._client, 'create_snapshot_image',
+ mock.Mock(
+ side_effect=create_fail_exc))
+ self.mock_object(self.library._client, 'create_snapshot_volume',
+ mock.Mock(return_value=snap_vol))
+ self.mock_object(self.library, '_get_available_snapshot_group',
+ mock.Mock(return_value=snapshot_group))
+ self.mock_object(utils, 'convert_uuid_to_es_fmt',
+ mock.Mock(return_value=fake_label))
+ fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT)
+
+ self.assertRaisesRegexp(exception.NetAppDriverException,
+ exc_msg,
+ self.library.create_snapshot,
+ fake_snapshot)
+ self.assertTrue(delete_snap_grp.called)
+
+ def test_create_snapshot_no_snap_group(self):
+ """A new image is created when no snapshot group is available yet."""
+ self.library._client.features = mock.Mock()
+ expected_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ fake_label = 'fakeName'
+ self.mock_object(self.library, '_get_volume', mock.Mock(
+ return_value=vol))
+ create_call = self.mock_object(
+ self.library._client, 'create_snapshot_image', mock.Mock(
+ return_value=expected_snap))
+ self.mock_object(self.library, '_get_snapshot_groups_for_volume',
+ mock.Mock(return_value=[snapshot_group]))
+ self.mock_object(self.library, '_get_available_snapshot_group',
+ mock.Mock(return_value=None))
+ self.mock_object(utils, 'convert_uuid_to_es_fmt',
+ mock.Mock(return_value=fake_label))
+ fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT)
+
+ snapshot = self.library.create_snapshot(fake_snapshot)
+
+ # The model update must carry the backend image id as provider_id.
+ expected = {'provider_id': expected_snap['id']}
+ self.assertEqual(expected, snapshot)
+ create_call.assert_called_once_with(snapshot_group['id'])
+
+ def test_create_snapshot_no_snapshot_groups_remaining(self):
+ """Test the failure condition where all snap groups are allocated"""
+
+ self.library._client.features = mock.Mock()
+ expected_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ snap_vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
+ # Simulate that every non-reserved group slot is already taken.
+ grp_count = (self.library.MAX_SNAPSHOT_GROUP_COUNT -
+ self.library.RESERVED_SNAPSHOT_GROUP_COUNT)
+ fake_label = 'fakeName'
+ self.mock_object(self.library, '_get_volume', mock.Mock(
+ return_value=vol))
+ self.mock_object(self.library._client, 'create_snapshot_image',
+ mock.Mock(return_value=expected_snap))
+ self.mock_object(self.library._client, 'create_snapshot_volume',
+ mock.Mock(return_value=snap_vol))
+ self.mock_object(self.library, '_get_available_snapshot_group',
+ mock.Mock(return_value=None))
+ self.mock_object(self.library, '_get_snapshot_groups_for_volume',
+ mock.Mock(return_value=[snapshot_group] * grp_count))
+ self.mock_object(utils, 'convert_uuid_to_es_fmt',
+ mock.Mock(return_value=fake_label))
+ fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT)
+
+ # Error message should contain the maximum number of supported
+ # snapshots
+ self.assertRaisesRegexp(exception.SnapshotLimitExceeded,
+ str(self.library.MAX_SNAPSHOT_COUNT *
+ grp_count),
+ self.library.create_snapshot, fake_snapshot)
+
+ def test_delete_snapshot(self):
+ """The resolved E-Series image is handed to _delete_es_snapshot."""
+ fake_vol = cinder_utils.create_volume(self.ctxt)
+ fake_snap = cinder_utils.create_snapshot(self.ctxt, fake_vol['id'])
+ snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ self.mock_object(self.library, '_get_volume', mock.Mock(
+ return_value=vol))
+ self.mock_object(self.library, '_get_snapshot', mock.Mock(
+ return_value=snap))
+
+ del_snap = self.mock_object(self.library, '_delete_es_snapshot',
+ mock.Mock())
+
+ self.library.delete_snapshot(fake_snap)
+
+ del_snap.assert_called_once_with(snap)
+
+ def test_delete_es_snapshot(self):
+ """Soft-delete: only images in the target's group reach cleanup.
+
+ Builds a population of images across several groups/volumes and
+ verifies _cleanup_snapshot_images receives only the images sharing
+ the target snapshot's pitGroupRef, plus the soft-delete bitset.
+ """
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ snap_count = 30
+ # Ensure that it's the oldest PIT
+ snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ fake_volume_refs = ['1', '2', snap['baseVol']]
+ fake_snapshot_group_refs = ['3', '4', snapshot_group['id']]
+ snapshots = [copy.deepcopy(snap) for i in range(snap_count)]
+ bitset = na_utils.BitSet(0)
+ # Spread the fake images round-robin across volumes and groups.
+ for i, snapshot in enumerate(snapshots):
+ volume_ref = fake_volume_refs[i % len(fake_volume_refs)]
+ group_ref = fake_snapshot_group_refs[i %
+ len(fake_snapshot_group_refs)]
+ snapshot['pitGroupRef'] = group_ref
+ snapshot['baseVol'] = volume_ref
+ snapshot['pitSequenceNumber'] = str(i)
+ snapshot['id'] = i
+ bitset.set(i)
+ snapshots.append(snap)
+
+ filtered_snaps = list(filter(lambda x: x['pitGroupRef'] == snap[
+ 'pitGroupRef'], snapshots))
+
+ self.mock_object(self.library, '_get_volume', mock.Mock(
+ return_value=vol))
+ self.mock_object(self.library, '_get_snapshot', mock.Mock(
+ return_value=snap))
+ self.mock_object(self.library, '_get_soft_delete_map', mock.Mock(
+ return_value={snap['pitGroupRef']: repr(bitset)}))
+ self.mock_object(self.library._client, 'list_snapshot_images',
+ mock.Mock(return_value=snapshots))
+ delete_image = self.mock_object(
+ self.library, '_cleanup_snapshot_images',
+ mock.Mock(return_value=({snap['pitGroupRef']: repr(bitset)},
+ None)))
+
+ self.library._delete_es_snapshot(snap)
+
+ delete_image.assert_called_once_with(filtered_snaps, bitset)
+
+ def test_delete_snapshot_oldest(self):
+ """Deleting the only/oldest image marks bit 0 in a fresh bitset."""
+ vol = copy.deepcopy(eseries_fake.VOLUME)
+ snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ snapshots = [snap]
+ self.mock_object(self.library, '_get_volume', mock.Mock(
+ return_value=vol))
+ self.mock_object(self.library, '_get_snapshot', mock.Mock(
+ return_value=snap))
+ # Empty soft-delete map: no previous soft-deletions recorded.
+ self.mock_object(self.library, '_get_soft_delete_map', mock.Mock(
+ return_value={}))
+ self.mock_object(self.library._client, 'list_snapshot_images',
+ mock.Mock(return_value=snapshots))
+ delete_image = self.mock_object(
+ self.library, '_cleanup_snapshot_images',
+ mock.Mock(return_value=(None, [snap['pitGroupRef']])))
+
+ self.library._delete_es_snapshot(snap)
+
+ delete_image.assert_called_once_with(snapshots,
+ na_utils.BitSet(1))
+
+ def test_get_soft_delete_map(self):
+ """The soft-delete map is read straight from the backend store."""
+ fake_val = 'fake'
+ self.mock_object(self.library._client, 'list_backend_store', mock.Mock(
+ return_value=fake_val))
+
+ actual = self.library._get_soft_delete_map()
+
+ self.assertEqual(fake_val, actual)
+
+ def test_cleanup_snapshot_images_delete_all(self):
+ """When every image is soft-deleted, the whole group is removed."""
+ image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ images = [image] * 32
+ bitset = na_utils.BitSet()
+ for i, image in enumerate(images):
+ image['pitSequenceNumber'] = i
+ bitset.set(i)
+ delete_grp = self.mock_object(self.library._client,
+ 'delete_snapshot_group')
+
+ updt, keys = self.library._cleanup_snapshot_images(
+ images, bitset)
+
+ delete_grp.assert_called_once_with(image['pitGroupRef'])
+ # No index update; the group's key should be purged instead.
+ self.assertIsNone(updt)
+ self.assertEqual([image['pitGroupRef']], keys)
+
+ def test_cleanup_snapshot_images_delete_all_fail(self):
+ """A failed group deletion still purges the group's index key."""
+ image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ bitset = na_utils.BitSet(2 ** 32 - 1)
+ delete_grp = self.mock_object(
+ self.library._client, 'delete_snapshot_group',
+ mock.Mock(side_effect=exception.NetAppDriverException))
+
+ updt, keys = self.library._cleanup_snapshot_images(
+ [image], bitset)
+
+ delete_grp.assert_called_once_with(image['pitGroupRef'])
+ self.assertIsNone(updt)
+ self.assertEqual([image['pitGroupRef']], keys)
+
+ def test_cleanup_snapshot_images(self):
+ """Only the soft-deleted prefix is purged; the bitset is shifted."""
+ image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+ images = [image] * 32
+ del_count = 16
+ bitset = na_utils.BitSet()
+ for i, image in enumerate(images):
+ image['pitSequenceNumber'] = i
+ if i < del_count:
+ bitset.set(i)
+ # After purging the 16 oldest, remaining bits shift right by 16.
+ exp_bitset = copy.deepcopy(bitset)
+ exp_bitset >>= 16
+ delete_img = self.mock_object(
+ self.library, '_delete_snapshot_image')
+
+ updt, keys = self.library._cleanup_snapshot_images(
+ images, bitset)
+
+ self.assertEqual(del_count, delete_img.call_count)
+ self.assertIsNone(keys)
+ self.assertEqual({image['pitGroupRef']: exp_bitset}, updt)
+
+ def test_delete_snapshot_image(self):
+ # Smoke test: deleting an image from a populated group must not
+ # raise. NOTE(review): no explicit assertion is made here — consider
+ # asserting the client delete call if coverage should be tightened.
+ snap_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+
+ self.mock_object(self.library._client, 'list_snapshot_group',
+ mock.Mock(return_value=snap_group))
+
+ self.library._delete_snapshot_image(snap)
+
+ def test_delete_snapshot_image_fail_cleanup(self):
+ # Smoke test: a group reporting zero snapshots (cleanup edge case)
+ # must not cause the deletion path to raise.
+ snap_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
+ snap_group['snapshotCount'] = 0
+ snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
+
+ self.mock_object(self.library._client, 'list_snapshot_group',
+ mock.Mock(return_value=snap_group))
+
+ self.library._delete_snapshot_image(snap)
+
+ def test_delete_snapshot_not_found(self):
+ """Deleting a missing snapshot logs a warning instead of raising."""
+ fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT)
+ get_snap = self.mock_object(self.library, '_get_snapshot',
+ mock.Mock(side_effect=exception.NotFound))
+
+ with mock.patch.object(library, 'LOG', mock.Mock()):
+ self.library.delete_snapshot(fake_snapshot)
+ get_snap.assert_called_once_with(fake_snapshot)
+ self.assertTrue(library.LOG.warning.called)
+
+ @ddt.data(['key1', 'key2'], [], None)
+ def test_merge_soft_delete_changes_keys(self, keys_to_del):
+ """Keys are purged and persisted only when there is work to do."""
+ count = len(keys_to_del) if keys_to_del is not None else 0
+ save_store = self.mock_object(
+ self.library._client, 'save_backend_store')
+ index = {'key1': 'val'}
+ get_store = self.mock_object(self.library, '_get_soft_delete_map',
+ mock.Mock(return_value=index))
+
+ self.library._merge_soft_delete_changes(None, keys_to_del)
+
+ if count:
+ expected = copy.deepcopy(index)
+ for key in keys_to_del:
+ expected.pop(key, None)
+ get_store.assert_called_once_with()
+ save_store.assert_called_once_with(
+ self.library.SNAPSHOT_PERSISTENT_STORE_KEY, expected)
+ else:
+ # Empty/None key list must be a no-op (no read, no write).
+ get_store.assert_not_called()
+ save_store.assert_not_called()
+
@ddt.data(False, True)
def test_get_pool_operation_progress(self, expect_complete):
"""Validate the operation progress is interpreted correctly"""
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
+# Copyright (c) 2016 Michael Price. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
def test_get_attr_missing(self):
self.assertRaises(AttributeError, getattr, self.features, 'FEATURE_4')
+
+
+@ddt.ddt
+class BitSetTestCase(test.TestCase):
+ """Exercise every operator overload of na_utils.BitSet.
+
+ NOTE(review): setUp only delegates to the parent and could be removed.
+ """
+
+ def setUp(self):
+ super(BitSetTestCase, self).setUp()
+
+ def test_default(self):
+ self.assertEqual(na_utils.BitSet(0), na_utils.BitSet())
+
+ def test_set(self):
+ bitset = na_utils.BitSet(0)
+ bitset.set(16)
+
+ self.assertEqual(na_utils.BitSet(1 << 16), bitset)
+
+ def test_unset(self):
+ bitset = na_utils.BitSet(1 << 16)
+ bitset.unset(16)
+
+ self.assertEqual(na_utils.BitSet(0), bitset)
+
+ def test_is_set(self):
+ bitset = na_utils.BitSet(1 << 16)
+
+ self.assertTrue(bitset.is_set(16))
+
+ def test_not_equal(self):
+ set1 = na_utils.BitSet(1 << 15)
+ set2 = na_utils.BitSet(1 << 16)
+
+ self.assertNotEqual(set1, set2)
+
+ # NOTE(review): repr() is expected to be the decimal integer string,
+ # while str() is the binary representation — confirm against BitSet.
+ def test_repr(self):
+ raw_val = 1 << 16
+ actual = repr(na_utils.BitSet(raw_val))
+ expected = str(raw_val)
+
+ self.assertEqual(actual, expected)
+
+ def test_str(self):
+ raw_val = 1 << 16
+ actual = str(na_utils.BitSet(raw_val))
+ expected = bin(raw_val)
+
+ self.assertEqual(actual, expected)
+
+ def test_int(self):
+ val = 1 << 16
+ actual = int(int(na_utils.BitSet(val)))
+
+ self.assertEqual(val, actual)
+
+ def test_and(self):
+ actual = na_utils.BitSet(1 << 16 | 1 << 15)
+ actual &= 1 << 16
+
+ self.assertEqual(na_utils.BitSet(1 << 16), actual)
+
+ def test_or(self):
+ actual = na_utils.BitSet()
+ actual |= 1 << 16
+
+ self.assertEqual(na_utils.BitSet(1 << 16), actual)
+
+ def test_invert(self):
+ actual = na_utils.BitSet(1 << 16)
+ actual = ~actual
+
+ self.assertEqual(~(1 << 16), actual)
+
+ def test_xor(self):
+ actual = na_utils.BitSet(1 << 16)
+ actual ^= 1 << 16
+
+ self.assertEqual(na_utils.BitSet(), actual)
+
+ def test_lshift(self):
+ actual = na_utils.BitSet(1)
+ actual <<= 16
+
+ self.assertEqual(na_utils.BitSet(1 << 16), actual)
+
+ def test_rshift(self):
+ actual = na_utils.BitSet(1 << 16)
+ actual >>= 16
+
+ self.assertEqual(na_utils.BitSet(1), actual)
ASUP_VALID_VERSION = (1, 52, 9000, 3)
# We need to check for both the release and the pre-release versions
SSC_VALID_VERSIONS = ((1, 53, 9000, 1), (1, 53, 9010, 17))
+ REST_1_3_VERSION = (1, 53, 9000, 1)
+ REST_1_4_VERSIONS = ((1, 54, 9000, 1), (1, 54, 9090, 0))
    RESOURCE_PATHS = {
        'volumes': '/storage-systems/{system-id}/volumes',
        'thin_volume_expand':
            '/storage-systems/{system-id}/thin-volumes/{object-id}/expand',
        'ssc_volumes': '/storage-systems/{system-id}/ssc/volumes',
-        'ssc_volume': '/storage-systems/{system-id}/ssc/volumes/{object-id}'
+        'ssc_volume': '/storage-systems/{system-id}/ssc/volumes/{object-id}',
+        'snapshot_groups': '/storage-systems/{system-id}/snapshot-groups',
+        'snapshot_group':
+            '/storage-systems/{system-id}/snapshot-groups/{object-id}',
+        'snapshot_volumes': '/storage-systems/{system-id}/snapshot-volumes',
+        'snapshot_volume':
+            '/storage-systems/{system-id}/snapshot-volumes/{object-id}',
+        'snapshot_images': '/storage-systems/{system-id}/snapshot-images',
+        'snapshot_image':
+            '/storage-systems/{system-id}/snapshot-images/{object-id}',
+        # NOTE(review): these two keys use hyphens while every other key
+        # uses underscores — consider 'persistent_stores'/'persistent_store'
+        # for consistency (usage sites would need the same rename). The
+        # implicit string concatenation also splits the '{system-id}'
+        # placeholder across literals, which is hard to read.
+        'persistent-stores': '/storage-systems/{'
+                             'system-id}/persistent-records/',
+        'persistent-store': '/storage-systems/{'
+                            'system-id}/persistent-records/{key}'
    }
def __init__(self, scheme, host, port, service_path, username,
asup_api_valid_version = self._validate_version(
self.ASUP_VALID_VERSION, api_version_tuple)
+ rest_1_3_api_valid_version = self._validate_version(
+ self.REST_1_3_VERSION, api_version_tuple)
+
+ rest_1_4_api_valid_version = any(
+ self._validate_version(valid_version, api_version_tuple)
+ for valid_version in self.REST_1_4_VERSIONS)
+
ssc_api_valid_version = any(self._validate_version(valid_version,
api_version_tuple)
for valid_version
supported=ssc_api_valid_version,
min_version=self._version_tuple_to_str(
self.SSC_VALID_VERSIONS[0]))
+ self.features.add_feature(
+ 'REST_1_3_RELEASE', supported=rest_1_3_api_valid_version,
+ min_version=self._version_tuple_to_str(self.REST_1_3_VERSION))
+ self.features.add_feature(
+ 'REST_1_4_RELEASE', supported=rest_1_4_api_valid_version,
+ min_version=self._version_tuple_to_str(self.REST_1_4_VERSIONS[0]))
def _version_tuple_to_str(self, version):
return ".".join([str(part) for part in version])
try:
return self._invoke('GET', path, **{'object-id': object_id})
except es_exception.WebServiceException as e:
- if(404 == e.status_code):
+ if 404 == e.status_code:
raise exception.VolumeNotFound(volume_id=object_id)
else:
raise
def list_snapshot_groups(self):
"""Lists snapshot groups."""
- path = "/storage-systems/{system-id}/snapshot-groups"
+ path = self.RESOURCE_PATHS['snapshot_groups']
return self._invoke('GET', path)
- def create_snapshot_group(self, label, object_id, storage_pool_id,
+ def list_snapshot_group(self, object_id):
+ """Retrieve given snapshot group from the array.
+
+ :param object_id: the ref of the snapshot group to fetch
+ :return: the snapshot group returned by the web service
+ """
+ path = self.RESOURCE_PATHS['snapshot_group']
+ return self._invoke('GET', path, **{'object-id': object_id})
+
+ def create_snapshot_group(self, label, object_id, storage_pool_id=None,
repo_percent=99, warn_thres=99, auto_del_limit=0,
full_policy='failbasewrites'):
"""Creates snapshot group on array."""
- path = "/storage-systems/{system-id}/snapshot-groups"
+ path = self.RESOURCE_PATHS['snapshot_groups']
data = {'baseMappableObjectId': object_id, 'name': label,
'storagePoolId': storage_pool_id,
'repositoryPercentage': repo_percent,
'autoDeleteLimit': auto_del_limit, 'fullPolicy': full_policy}
return self._invoke('POST', path, data)
+ def update_snapshot_group(self, group_id, label):
+ """Modify a snapshot group on the array.
+
+ :param group_id: the ref of the snapshot group to update
+ :param label: the new name for the group
+ """
+ path = self.RESOURCE_PATHS['snapshot_group']
+ data = {'name': label}
+ return self._invoke('POST', path, data, **{'object-id': group_id})
+
+
def delete_snapshot_group(self, object_id):
"""Deletes given snapshot group from array."""
- path = "/storage-systems/{system-id}/snapshot-groups/{object-id}"
+ path = self.RESOURCE_PATHS['snapshot_group']
return self._invoke('DELETE', path, **{'object-id': object_id})
def create_snapshot_image(self, group_id):
"""Creates snapshot image in snapshot group."""
- path = "/storage-systems/{system-id}/snapshot-images"
+ path = self.RESOURCE_PATHS['snapshot_images']
data = {'groupId': group_id}
return self._invoke('POST', path, data)
def delete_snapshot_image(self, object_id):
"""Deletes given snapshot image in snapshot group."""
- path = "/storage-systems/{system-id}/snapshot-images/{object-id}"
+ path = self.RESOURCE_PATHS['snapshot_image']
return self._invoke('DELETE', path, **{'object-id': object_id})
+ def list_snapshot_image(self, object_id):
+ """Retrieve given snapshot image from the array.
+
+ :param object_id: the ref of the snapshot image to fetch
+ """
+ path = self.RESOURCE_PATHS['snapshot_image']
+ return self._invoke('GET', path, **{'object-id': object_id})
+
+
def list_snapshot_images(self):
"""Lists snapshot images."""
- path = "/storage-systems/{system-id}/snapshot-images"
+ path = self.RESOURCE_PATHS['snapshot_images']
return self._invoke('GET', path)
def create_snapshot_volume(self, image_id, label, base_object_id,
- storage_pool_id,
+ storage_pool_id=None,
repo_percent=99, full_thres=99,
view_mode='readOnly'):
"""Creates snapshot volume."""
- path = "/storage-systems/{system-id}/snapshot-volumes"
+ path = self.RESOURCE_PATHS['snapshot_volumes']
data = {'snapshotImageId': image_id, 'fullThreshold': full_thres,
'storagePoolId': storage_pool_id,
'name': label, 'viewMode': view_mode,
'repositoryPoolId': storage_pool_id}
return self._invoke('POST', path, data)
+ def update_snapshot_volume(self, snap_vol_id, label=None, full_thres=None):
+ """Modify an existing snapshot volume.
+
+ :param snap_vol_id: the ref of the snapshot volume to update
+ :param label: optional new name
+ :param full_thres: optional new full-warning threshold percentage
+ """
+ path = self.RESOURCE_PATHS['snapshot_volume']
+ data = {'name': label, 'fullThreshold': full_thres}
+ return self._invoke('POST', path, data, **{'object-id': snap_vol_id})
+
+
def delete_snapshot_volume(self, object_id):
"""Deletes given snapshot volume."""
- path = "/storage-systems/{system-id}/snapshot-volumes/{object-id}"
+ path = self.RESOURCE_PATHS['snapshot_volume']
return self._invoke('DELETE', path, **{'object-id': object_id})
+ def list_snapshot_volumes(self):
+ """Lists snapshot volumes/views defined on the array."""
+ path = self.RESOURCE_PATHS['snapshot_volumes']
+ return self._invoke('GET', path)
+
+
def list_ssc_storage_pools(self):
"""Lists pools and their service quality defined on the array."""
path = "/storage-systems/{system-id}/ssc/pools"
if mode_is_proxy:
api_operating_mode = 'proxy'
return api_operating_mode, about_response_dict['version']
+
+
+ def list_backend_store(self, key):
+ """Retrieve data by key from the persistent store on the backend.
+
+ Example response: {"key": "cinder-snapshots", "value": "[]"}
+
+ :param key: the persistent store to retrieve
+ :return: a json body representing the value of the store,
+ or an empty json object
+ """
+ path = self.RESOURCE_PATHS.get('persistent-store')
+ try:
+ resp = self._invoke('GET', path, **{'key': key})
+ except exception.NetAppDriverException:
+ # A missing store is not an error; treat it as empty.
+ return dict()
+ else:
+ data = resp['value']
+ if data:
+ return json.loads(data)
+ return dict()
+
+
+ def save_backend_store(self, key, store_data):
+ """Save a json value to the persistent storage on the backend.
+
+ The storage backend provides a small amount of persistent storage
+ that we can utilize for storing driver information.
+
+ :param key: The key utilized for storing/retrieving the data
+ :param store_data: a python data structure that will be stored as a
+ json value
+ """
+ path = self.RESOURCE_PATHS.get('persistent-stores')
+ # Compact separators keep the stored payload as small as possible.
+ store_data = json.dumps(store_data, separators=(',', ':'))
+
+ data = {
+ 'key': key,
+ 'value': store_data
+ }
+
+ self._invoke('POST', path, data)
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
- self.library.create_snapshot(snapshot)
+ return self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
- self.library.create_snapshot(snapshot)
+ return self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
DEFAULT_HOST_TYPE = 'linux_dm_mp'
+ # Define name marker string to use in snapshot groups that are for copying
+ # volumes. This is to differentiate them from ordinary snapshot groups.
+ SNAPSHOT_VOL_COPY_SUFFIX = 'SGCV'
+ # Define a name marker string used to identify snapshot volumes that have
+ # an underlying snapshot that is awaiting deletion.
+ SNAPSHOT_VOL_DEL_SUFFIX = '_DEL'
+ # Maximum number of snapshots per snapshot group
+ MAX_SNAPSHOT_COUNT = 32
+ # Maximum number of snapshot groups
+ MAX_SNAPSHOT_GROUP_COUNT = 4
+ RESERVED_SNAPSHOT_GROUP_COUNT = 1
+ SNAPSHOT_PERSISTENT_STORE_KEY = 'cinder-snapshots'
+ # NOTE(review): this lock name is a random UUID generated at import time,
+ # so @synchronized on it only serializes within a single process — the
+ # persistent store is not protected across multiple driver processes.
+ SNAPSHOT_PERSISTENT_STORE_LOCK = str(uuid.uuid4())
+
def __init__(self, driver_name, driver_protocol="iSCSI",
configuration=None, **kwargs):
self.configuration = configuration
self._client = self._create_rest_client(self.configuration)
self._check_mode_get_or_register_storage_system()
+ self._version_check()
if self.configuration.netapp_enable_multiattach:
self._ensure_multi_attach_host_group_exists()
username=configuration.netapp_login,
password=configuration.netapp_password)
+ def _version_check(self):
+ """Ensure that the minimum version of the REST API is available.
+
+ :raise NetAppDriverException: if the proxy does not support the
+ REST 1.4 feature set
+ """
+ if not self._client.features.REST_1_4_RELEASE:
+ min_version = (
+ self._client.features.REST_1_4_RELEASE.minimum_version)
+ # Fixed: the original message had an unbalanced '(%(cur)s' and,
+ # unlike the module's other errors, was not marked for translation.
+ raise exception.NetAppDriverException(
+ _('This version (%(cur)s) of the NetApp SANtricity '
+ 'Webservices Proxy is not supported. Install version '
+ '%(supp)s or later.') % {'cur': self._client.api_version,
+ 'supp': min_version})
+
def _start_periodic_tasks(self):
ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
self._update_ssc_info)
return self._client.list_volume(uid)
- def _get_snapshot_group_for_snapshot(self, snapshot_id):
- label = utils.convert_uuid_to_es_fmt(snapshot_id)
+ def _get_snapshot_group_for_snapshot(self, snapshot):
+ """Resolve the E-Series snapshot group that owns a Cinder snapshot.
+
+ :param snapshot: a Cinder snapshot
+ :return: the owning E-Series snapshot group
+ :raise NotFound: if the group cannot be retrieved from the array
+ """
+ snapshot = self._get_snapshot(snapshot)
+ try:
+ return self._client.list_snapshot_group(snapshot['pitGroupRef'])
+ except (exception.NetAppDriverException,
+ eseries_exc.WebServiceException):
+ msg = _("Specified snapshot group with id %s could not be found.")
+ raise exception.NotFound(msg % snapshot['pitGroupRef'])
+
+
+ def _get_snapshot_legacy(self, snapshot):
+ """Find a E-Series snapshot by the name of the snapshot group.
+
+ Snapshots were previously identified by the unique name of the
+ snapshot group. A snapshot volume is now utilized to uniquely
+ identify the snapshot, so any snapshots previously defined in this
+ way must be updated.
+
+ :param snapshot: Cinder snapshot object
+ :return: An E-Series snapshot image
+ :raise NotFound: if no group matches the snapshot's legacy label
+ """
+ label = utils.convert_uuid_to_es_fmt(snapshot['id'])
+ for group in self._client.list_snapshot_groups():
+ if group['label'] == label:
+ image = self._get_oldest_image_in_snapshot_group(group['id'])
+ group_label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
+ # Modify the group label so we don't have a name collision
+ self._client.update_snapshot_group(group['id'],
+ group_label)
+
+ # Migrate to the new identification scheme: persist the
+ # image id on the Cinder snapshot as provider_id.
+ snapshot.update({'provider_id': image['id']})
+ snapshot.save()
+
+ return image
+
+ raise exception.NotFound(_('Snapshot with id of %s could not be '
+ 'found.') % snapshot['id'])
+
+
+ def _get_snapshot(self, snapshot):
+ """Find a E-Series snapshot by its Cinder identifier
+
+ An E-Series snapshot image does not have a configuration name/label,
+ so we define a snapshot volume underneath of it that will help us to
+ identify it. We retrieve the snapshot volume with the matching name,
+ and then we find its underlying snapshot.
+
+ :param snapshot: Cinder snapshot object
+ :return: An E-Series snapshot image
+ :raise NotFound: if the snapshot cannot be located on the backend
+ """
+ try:
+ return self._client.list_snapshot_image(
+ snapshot.get('provider_id'))
+ # Fixed: the original used ``except (A or B):`` which evaluates to
+ # just A, so NetAppDriverException was never caught here.
+ except (eseries_exc.WebServiceException,
+ exception.NetAppDriverException):
+ try:
+ LOG.debug('Unable to locate snapshot by its id, falling '
+ 'back to legacy behavior.')
+ return self._get_snapshot_legacy(snapshot)
+ except exception.NetAppDriverException:
+ raise exception.NotFound(_('Snapshot with id of %s could not'
+ ' be found.') % snapshot['id'])
- def _get_latest_image_in_snapshot_group(self, snapshot_id):
- group = self._get_snapshot_group_for_snapshot(snapshot_id)
+ def _get_snapshot_group(self, snapshot_group_id):
+ """Fetch a snapshot group by ref, mapping failures to NotFound.
+
+ :param snapshot_group_id: the ref of the group to retrieve
+ :raise NotFound: if the group cannot be retrieved
+ """
+ try:
+ return self._client.list_snapshot_group(snapshot_group_id)
+ except exception.NetAppDriverException:
+ raise exception.NotFound(_('Unable to retrieve snapshot group '
+ 'with id of %s.') % snapshot_group_id)
+
+
+ def _get_ordered_images_in_snapshot_group(self, snapshot_group_id):
+ """List a group's snapshot images ordered by pitTimestamp (oldest
+ first); returns an empty list when the array has no images."""
        images = self._client.list_snapshot_images()
        if images:
            filtered_images = filter(lambda img: (img['pitGroupRef'] ==
-                                                  group['pitGroupRef']),
+                                                  snapshot_group_id),
                                     images)
            sorted_imgs = sorted(filtered_images, key=lambda x: x[
                'pitTimestamp'])
-            return sorted_imgs[0]
+            return sorted_imgs
+        return list()
+
+
+ def _get_oldest_image_in_snapshot_group(self, snapshot_group_id):
+ """Return the oldest snapshot image in the given group.
+
+ :raise NotFound: if the group contains no images
+ """
+ group = self._get_snapshot_group(snapshot_group_id)
+ images = self._get_ordered_images_in_snapshot_group(snapshot_group_id)
+ if images:
+ return images[0]
+
+ msg = _("No snapshot image found in snapshot group %s.")
+ raise exception.NotFound(msg % group['label'])
+
+
+ def _get_latest_image_in_snapshot_group(self, snapshot_group_id):
+ """Return the newest snapshot image in the given group.
+
+ :raise NotFound: if the group contains no images
+ """
+ group = self._get_snapshot_group(snapshot_group_id)
+ images = self._get_ordered_images_in_snapshot_group(snapshot_group_id)
+ if images:
+ return images[-1]
msg = _("Failure creating volume %s.")
raise exception.NetAppDriverException(msg % label)
- def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a volume from a snapshot."""
+ def _create_volume_from_snapshot(self, volume, image):
+ """Define a new volume based on an E-Series snapshot image.
+
+ This method should be synchronized on the snapshot id.
+
+ :param volume: a Cinder volume
+ :param image: an E-Series snapshot image
+ :return: the clone volume
+ """
+ label = utils.convert_uuid_to_es_fmt(volume['id'])
+ size = volume['size']
+
+ dst_vol = self._schedule_and_create_volume(label, size)
+ try:
+ src_vol = None
+ # Expose the image through a temporary snapshot volume with a
+ # random unique label, then copy from it.
+ src_vol = self._client.create_snapshot_volume(
+ image['id'], utils.convert_uuid_to_es_fmt(
+ uuid.uuid4()), image['baseVol'])
            self._copy_volume_high_prior_readonly(src_vol, dst_vol)
            LOG.info(_LI("Created volume with label %s."), label)
        except exception.NetAppDriverException:
            try:
                self._client.delete_snapshot_volume(src_vol['id'])
            except exception.NetAppDriverException as e:
-                LOG.error(_LE("Failure deleting snap vol. Error: %s."), e)
+ # NOTE(review): this log says "restarting" but the call above
+ # deletes the snapshot volume — confirm the intended wording.
+ LOG.error(_LE("Failure restarting snap vol. Error: %s."),
+ e)
        else:
            LOG.warning(_LW("Snapshot volume not found."))
-    def _create_snapshot_volume(self, snapshot_id):
-        """Creates snapshot volume for given group with snapshot_id."""
-        group = self._get_snapshot_group_for_snapshot(snapshot_id)
-        LOG.debug("Creating snap vol for group %s", group['label'])
-        image = self._get_latest_image_in_snapshot_group(snapshot_id)
-        label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
-        capacity = int(image['pitCapacity']) / units.Gi
-        storage_pools = self._get_sorted_available_storage_pools(capacity)
-        s_id = storage_pools[0]['volumeGroupRef']
-        return self._client.create_snapshot_volume(image['pitRef'], label,
-                                                   group['baseVolume'], s_id)
+ return dst_vol
+
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a volume from a snapshot."""
+ es_snapshot = self._get_snapshot(snapshot)
+ # NOTE(review): the clone returned by _create_volume_from_snapshot
+ # is discarded here — confirm that no model update is expected.
+ cinder_utils.synchronized(snapshot['id'])(
+ self._create_volume_from_snapshot)(volume, es_snapshot)
def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
"""Copies src volume to dest volume."""
"""Creates a clone of the specified volume."""
snapshot = {'id': uuid.uuid4(), 'volume_id': src_vref['id'],
'volume': src_vref}
- self.create_snapshot(snapshot)
+ group_name = (utils.convert_uuid_to_es_fmt(snapshot['id']) +
+ self.SNAPSHOT_VOL_COPY_SUFFIX)
+ es_vol = self._get_volume(src_vref['id'])
+
+ es_snapshot = self._create_es_snapshot(es_vol, group_name)
+
try:
- self.create_volume_from_snapshot(volume, snapshot)
+ self._create_volume_from_snapshot(volume, es_snapshot)
finally:
try:
- self.delete_snapshot(snapshot)
+ self._client.delete_snapshot_group(es_snapshot['pitGroupRef'])
except exception.NetAppDriverException:
LOG.warning(_LW("Failure deleting temp snapshot %s."),
snapshot['id'])
LOG.warning(_LW("Volume %s already deleted."), volume['id'])
return
- def create_snapshot(self, snapshot):
- """Creates a snapshot."""
+ def _create_snapshot_volume(self, snapshot_id, label=None):
+ """Creates snapshot volume for given group with snapshot_id.
+
+ NOTE(review): despite its name, ``snapshot_id`` is passed to
+ _get_snapshot, which reads ``.get('provider_id')`` — it appears to
+ actually be a Cinder snapshot object; confirm and consider renaming.
+
+ :param label: optional name for the new snapshot volume; a random
+ unique label is generated when omitted
+ """
+ image = self._get_snapshot(snapshot_id)
+ group = self._get_snapshot_group(image['pitGroupRef'])
+ LOG.debug("Creating snap vol for group %s", group['label'])
+ if label is None:
+ label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
+ return self._client.create_snapshot_volume(image['pitRef'], label,
+ image['baseVol'])
+
+
+ def _create_snapshot_group(self, label, volume, percentage_capacity=20.0):
+ """Define a new snapshot group for a volume
+
+ :param label: the label for the snapshot group
+ :param volume: an E-Series volume
+ :param percentage_capacity: an optional repository percentage
+ :return: a new snapshot group
+ """
+
+ # Newer versions of the REST API are capable of automatically finding
+ # the best pool candidate
+ if not self._client.features.REST_1_3_RELEASE:
+ vol_size_gb = int(volume['totalSizeInBytes']) / units.Gi
+ pools = self._get_sorted_available_storage_pools(vol_size_gb)
+ # NOTE(review): assumes ssc pool dicts expose the ref as 'id'
+ # while the volume uses 'volumeGroupRef' — confirm the schema.
+ volume_pool = next(pool for pool in pools if volume[
+ 'volumeGroupRef'] == pool['id'])
+
+ # A disk pool can only utilize a candidate from its own pool
+ if volume_pool.get('raidLevel') == 'raidDiskPool':
+ pool_id_to_use = volume_pool['volumeGroupRef']
+
+ # Otherwise, choose the best available pool
+ else:
+ pool_id_to_use = pools[0]['volumeGroupRef']
+ group = self._client.create_snapshot_group(
+ label, volume['volumeRef'], pool_id_to_use,
+ repo_percent=percentage_capacity)
+
+ else:
+ group = self._client.create_snapshot_group(
+ label, volume['volumeRef'], repo_percent=percentage_capacity)
+
+ return group
+
+
+ def _get_snapshot_groups_for_volume(self, vol):
+ """Find all snapshot groups associated with an E-Series volume
+
+ :param vol: An E-Series volume object
+ :return: A list of snapshot groups
+ :raise NetAppDriverException: if the list of snapshot groups cannot be
+ retrieved
+ """
+ return [grp for grp in self._client.list_snapshot_groups()
+ if grp['baseVolume'] == vol['id']]
+
+
+ def _get_available_snapshot_group(self, vol):
+ """Find a snapshot group that has remaining capacity for snapshots.
+
+ In order to minimize repository usage, we prioritize the snapshot
+ group with remaining snapshot capacity that has most recently had a
+ snapshot defined on it.
+
+ :param vol: An E-Series volume object
+ :return: A valid snapshot group that has available snapshot capacity,
+ or None
+ :raise NetAppDriverException: if the list of snapshot groups cannot be
+ retrieved
+ """
+ groups_for_v = self._get_snapshot_groups_for_volume(vol)
+
+ # Filter out reserved snapshot groups
+ groups = filter(lambda g: self.SNAPSHOT_VOL_COPY_SUFFIX not in g[
+ 'label'], groups_for_v)
+
+ # Find all groups with free snapshot capacity
+ # NOTE(review): .get('snapshotCount') returns None if the key is
+ # missing, and None < int raises TypeError on Python 3 — confirm the
+ # key is always present in the REST response.
+ groups = [group for group in groups if group.get('snapshotCount') <
+ self.MAX_SNAPSHOT_COUNT]
+
+ # Order by the last defined snapshot on the group
+ if len(groups) > 1:
+ group_by_id = {g['id']: g for g in groups}
+
+ snap_imgs = list()
+ for group in groups:
+ try:
+ snap_imgs.append(
+ self._get_latest_image_in_snapshot_group(group['id']))
+ except exception.NotFound:
+ pass
+
+ snap_imgs = sorted(snap_imgs, key=lambda x: x['pitSequenceNumber'])
+
+ if snap_imgs:
+ # The newest image
+ img = snap_imgs[-1]
+ return group_by_id[img['pitGroupRef']]
+ else:
+ return groups[0] if groups else None
+
+ # Skip the snapshot image checks if there is only one snapshot group
+ elif groups:
+ return groups[0]
+ else:
+ return None
+
+
+ def _create_es_snapshot(self, vol, group_name=None):
+ # Create a new snapshot image on the backend, allocating a snapshot
+ # group if none has free capacity. group_name, when it carries the
+ # SNAPSHOT_VOL_COPY_SUFFIX marker, requests a reserved (volume-copy)
+ # group instead of an ordinary one.
        snap_grp, snap_image = None, None
-        snapshot_name = utils.convert_uuid_to_es_fmt(snapshot['id'])
-        os_vol = snapshot['volume']
-        vol = self._get_volume(os_vol['name_id'])
-        vol_size_gb = int(vol['totalSizeInBytes']) / units.Gi
-        pools = self._get_sorted_available_storage_pools(vol_size_gb)
        try:
-            snap_grp = self._client.create_snapshot_group(
-                snapshot_name, vol['volumeRef'], pools[0]['volumeGroupRef'])
-            snap_image = self._client.create_snapshot_image(
-                snap_grp['pitGroupRef'])
-            LOG.info(_LI("Created snap grp with label %s."), snapshot_name)
+ snap_grp = self._get_available_snapshot_group(vol)
+ # If a snapshot group is not available, create one if possible
+ if snap_grp is None:
+ snap_groups_for_vol = self._get_snapshot_groups_for_volume(
+ vol)
+
+ # We need a reserved snapshot group
+ if (group_name is not None and
+ (self.SNAPSHOT_VOL_COPY_SUFFIX in group_name)):
+
+ # First we search for an existing reserved group
+ for grp in snap_groups_for_vol:
+ if grp['label'].endswith(
+ self.SNAPSHOT_VOL_COPY_SUFFIX):
+ snap_grp = grp
+ break
+
+ # No reserved group exists, so we create it
+ if (snap_grp is None and
+ (len(snap_groups_for_vol) <
+ self.MAX_SNAPSHOT_GROUP_COUNT)):
+ snap_grp = self._create_snapshot_group(group_name,
+ vol)
+
+ # Ensure we don't exceed the snapshot group limit
+ elif (len(snap_groups_for_vol) <
+ (self.MAX_SNAPSHOT_GROUP_COUNT -
+ self.RESERVED_SNAPSHOT_GROUP_COUNT)):
+
+ label = group_name if group_name is not None else (
+ utils.convert_uuid_to_es_fmt(uuid.uuid4()))
+
+ snap_grp = self._create_snapshot_group(label, vol)
+ LOG.info(_LI("Created snap grp with label %s."), label)
+
+ # We couldn't retrieve or create a snapshot group
+ if snap_grp is None:
+ raise exception.SnapshotLimitExceeded(
+ allowed=(self.MAX_SNAPSHOT_COUNT *
+ (self.MAX_SNAPSHOT_GROUP_COUNT -
+ self.RESERVED_SNAPSHOT_GROUP_COUNT)))
+
+ return self._client.create_snapshot_image(
+ snap_grp['id'])
+
        except exception.NetAppDriverException:
            with excutils.save_and_reraise_exception():
                if snap_image is None and snap_grp:
-                    self.delete_snapshot(snapshot)
+ # Roll back the group we just allocated.
+ self._delete_snapshot_group(snap_grp['id'])
+
+
+ def create_snapshot(self, snapshot):
+ """Creates a snapshot.
+
+ :param snapshot: The Cinder snapshot
+ :return: A model update carrying the backend image id as provider_id
+ """
+
+ os_vol = snapshot['volume']
+ vol = self._get_volume(os_vol['name_id'])
+
+ # Serialize per source volume so concurrent snapshot requests don't
+ # race on snapshot-group allocation.
+ snap_image = cinder_utils.synchronized(vol['id'])(
+ self._create_es_snapshot)(vol)
+ model_update = {
+ 'provider_id': snap_image['id']
+ }
+
+ return model_update
+
+
+ def _delete_es_snapshot(self, es_snapshot):
+ """Perform a soft-delete on an E-Series snapshot.
+
+ Mark the snapshot image as no longer needed, so that it can be
+ purged from the backend when no other snapshots are dependent upon it.
+
+ :param es_snapshot: an E-Series snapshot image
+ :return: None
+ """
+ index = self._get_soft_delete_map()
+ snapgroup_ref = es_snapshot['pitGroupRef']
+ # Restore this group's soft-delete bitmask from the persisted index.
+ if snapgroup_ref in index:
+ bitset = na_utils.BitSet(int(index[snapgroup_ref]))
+ else:
+ bitset = na_utils.BitSet(0)
+
+ images = [img for img in self._client.list_snapshot_images() if
+ img['pitGroupRef'] == snapgroup_ref]
+ # Mark this snapshot's position within the group's time-ordered
+ # image list as soft-deleted.
+ for i, image in enumerate(sorted(images, key=lambda x: x[
+ 'pitSequenceNumber'])):
+ if image['pitSequenceNumber'] == es_snapshot['pitSequenceNumber']:
+ bitset.set(i)
+ break
+
+ index_update, keys_to_del = (
+ self._cleanup_snapshot_images(images, bitset))
+
+ # Persist the updated soft-delete bookkeeping.
+ self._merge_soft_delete_changes(index_update, keys_to_del)
def delete_snapshot(self, snapshot):
- """Deletes a snapshot."""
+ """Delete a snapshot."""
try:
- snap_grp = self._get_snapshot_group_for_snapshot(snapshot['id'])
+ es_snapshot = self._get_snapshot(snapshot)
except exception.NotFound:
LOG.warning(_LW("Snapshot %s already deleted."), snapshot['id'])
- return
- self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
+ else:
+ os_vol = snapshot['volume']
+ vol = self._get_volume(os_vol['name_id'])
+
+ cinder_utils.synchronized(vol['id'])(self._delete_es_snapshot)(
+ es_snapshot)
+
+ def _get_soft_delete_map(self):
+ """Retrieve the snapshot index from the storage backend"""
+ return self._client.list_backend_store(
+ self.SNAPSHOT_PERSISTENT_STORE_KEY)
+
+ @cinder_utils.synchronized(SNAPSHOT_PERSISTENT_STORE_LOCK)
+ def _merge_soft_delete_changes(self, index_update, keys_to_del):
+ """Merge changes to the snapshot index and save it on the backend
+
+ This method merges provided changes into the index, locking, to ensure
+ that concurrent changes that don't overlap are not overwritten. No
+ update will occur if neither an update or keys to delete are provided.
+
+ :param index_update: a dict of keys/value pairs to update in the index
+ :param keys_to_del: a list of keys to purge from the index
+ """
+ if index_update or keys_to_del:
+ index = self._get_soft_delete_map()
+ if index_update:
+ index.update(index_update)
+ if keys_to_del:
+ for key in keys_to_del:
+ if key in index:
+ del index[key]
+
+ self._client.save_backend_store(
+ self.SNAPSHOT_PERSISTENT_STORE_KEY, index)
+
    def _cleanup_snapshot_images(self, images, bitset):
        """Delete snapshot images that are marked for removal from the backend.

        This method will iterate over all snapshots (beginning with the
        oldest), that are defined on the same snapshot group as the provided
        snapshot image. If the snapshot is marked for deletion, it will be
        purged from the backend. Otherwise, the method will return because
        no further snapshots can be purged.

        The bitset will be updated based on the return from this method.
        Any updates to the index will be provided as a dict, and any keys
        to be removed from the index should be returned as (dict, list).

        :param images: a list of E-Series snapshot images, all belonging to
            the same snapshot group
        :param bitset: a bitset representing the snapshot images that are
            no longer needed on the backend (and may be deleted when
            possible); bit i corresponds to the i-th oldest image by
            pitSequenceNumber
        :return (dict, list) a tuple containing a dict of updates for the
            index and a list of keys to remove from the index (each return
            path populates exactly one of the two)
        """
        snap_grp_ref = images[0]['pitGroupRef']
        # All images are marked as deleted, we can delete the snapshot group
        if bitset == 2 ** len(images) - 1:
            try:
                self._delete_snapshot_group(snap_grp_ref)
            except exception.NetAppDriverException as e:
                LOG.warning(_LW("Unable to remove snapshot group - "
                                "%s."), e.msg)
            # The group is gone (or best-effort deleted), so its index
            # entry is purged rather than updated.
            return None, [snap_grp_ref]
        else:
            # Order by their sequence number, from oldest to newest
            snapshots = sorted(images,
                               key=lambda x: x['pitSequenceNumber'])
            deleted = 0

            for i, snapshot in enumerate(snapshots):
                if bitset.is_set(i):
                    self._delete_snapshot_image(snapshot)
                    deleted += 1
                else:
                    # Snapshots must be deleted in order, so if the current
                    # snapshot is not pending deletion, we don't want to
                    # process any more
                    break

            if deleted:
                # Update the bitset based on the deleted snapshots: shift
                # out the low bits so bit 0 again refers to the oldest
                # remaining image.
                bitset >>= deleted
                LOG.debug('Deleted %(count)s snapshot images from snapshot '
                          'group: %(grp)s.', {'count': deleted,
                                              'grp': snap_grp_ref})
                if deleted >= len(images):
                    try:
                        self._delete_snapshot_group(snap_grp_ref)
                    except exception.NetAppDriverException as e:
                        LOG.warning(_LW("Unable to remove snapshot group - "
                                        "%s."), e.msg)
                    return None, [snap_grp_ref]

            # Persist the (possibly shifted) bitmask for this group.
            return {snap_grp_ref: repr(bitset)}, None
+
+ def _delete_snapshot_group(self, group_id):
+ try:
+ self._client.delete_snapshot_group(group_id)
+ except eseries_exc.WebServiceException as e:
+ raise exception.NetAppDriverException(e.msg)
+
+ def _delete_snapshot_image(self, es_snapshot):
+ """Remove a snapshot image from the storage backend
+
+ If a snapshot group has no remaining snapshot images associated with
+ it, it will be deleted as well. When the snapshot is deleted,
+ any snapshot volumes that are associated with it will be orphaned,
+ so they are also deleted.
+
+ :param es_snapshot: An E-Series snapshot image
+ :param snapshot_volumes: Snapshot volumes associated with the snapshot
+ """
+ self._client.delete_snapshot_image(es_snapshot['id'])
    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a volume."""
        # Intentional no-op: the body is empty.
        # NOTE(review): presumably exports are (re)established at attach
        # time instead -- confirm against the full driver before relying
        # on this.
def convert_uuid_to_es_fmt(uuid_str):
    """Converts uuid to e-series compatible name format.

    :param uuid_str: a UUID (or anything ``uuid.UUID`` accepts as text)
    :return: the base32 encoding of the UUID with '=' padding stripped,
        as a text string
    """
    uuid_base32 = encode_hex_to_base32(uuid.UUID(six.text_type(uuid_str)).hex)
    # The base32 value is bytes; decode it instead of calling str() on it,
    # because on Python 3 str(b'ABC') yields the literal "b'ABC'" repr and
    # would corrupt the generated label. Base32 output is pure ASCII.
    return uuid_base32.strip(b'=').decode('ascii')
def convert_es_fmt_to_uuid(es_label):
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
+# Copyright (c) 2016 Michael Price. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
:returns: True if the feature is supported, otherwise False
"""
return self.supported
+
+
class BitSet(object):
    """A mutable bitmask backed by a plain integer.

    The mutator methods and the binary operators all modify the instance
    in place and return it, so calls can be chained
    (e.g. ``bs.set(0).set(3)``) and augmented assignments behave as
    expected.
    """

    def __init__(self, value=0):
        # Integer whose binary representation holds the flags.
        self._value = value

    def set(self, bit):
        """Turn on the bit at position *bit*; returns self for chaining."""
        self._value |= (1 << bit)
        return self

    def unset(self, bit):
        """Turn off the bit at position *bit*; returns self for chaining."""
        self._value &= ~(1 << bit)
        return self

    def is_set(self, bit):
        """Return a truthy (non-zero) value when bit *bit* is on."""
        return self._value & (1 << bit)

    def __and__(self, other):
        # In-place semantics: mutates this instance, then returns it.
        self._value &= other
        return self

    def __or__(self, other):
        self._value |= other
        return self

    def __invert__(self):
        self._value = ~self._value
        return self

    def __xor__(self, other):
        self._value ^= other
        return self

    def __lshift__(self, other):
        self._value <<= other
        return self

    def __rshift__(self, other):
        self._value >>= other
        return self

    def __int__(self):
        return self._value

    def __str__(self):
        # Binary form, e.g. '0b101'.
        return bin(self._value)

    def __repr__(self):
        # Decimal form; used to persist the mask as text.
        return str(self._value)

    def __eq__(self, other):
        if isinstance(other, BitSet) and self._value == other._value:
            return True
        # Fall back to numeric comparison for ints (and anything else
        # convertible via int()).
        return self._value == int(other)

    def __ne__(self, other):
        return not self.__eq__(other)