kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs)
self._driver.zapi_client = mock.Mock()
-
config = self._driver.configuration
config.netapp_vserver = FAKE_VSERVER
mox = self.mox
drv = self._driver
- mox.StubOutWithMock(drv, '_clone_volume')
- drv._clone_volume(mox_lib.IgnoreArg(),
- mox_lib.IgnoreArg(),
- mox_lib.IgnoreArg())
+ mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
+ drv._clone_backing_file_for_volume(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
snapshot = FakeSnapshot(1)
expected_result = {'provider_location': location}
- mox.StubOutWithMock(drv, '_clone_volume')
+ mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
- drv._clone_volume(mox_lib.IgnoreArg(),
- mox_lib.IgnoreArg(),
- mox_lib.IgnoreArg())
+ drv._clone_backing_file_for_volume(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg())
drv._get_volume_location(mox_lib.IgnoreArg()).AndReturn(location)
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
mox.ReplayAll()
+ self.mock_object(drv, '_do_qos_for_volume')
+ self.mock_object(utils, 'get_volume_extra_specs')
+
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEqual(loc, expected_result)
response_el = etree.XML(res)
return api.NaElement(response_el).get_children()
- def test_clone_volume(self):
+ def test_clone_backing_file_for_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
volume_id = volume_name + six.text_type(hash(volume_name))
share = 'ip:/share'
- drv._clone_volume(volume_name, clone_name, volume_id, share)
+ drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id,
+ share)
mox.VerifyAll()
mox = self.mox
files = [('img-cache-1', 230), ('img-cache-2', 380)]
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
- mox.StubOutWithMock(drv, '_delete_file')
+ mox.StubOutWithMock(drv, '_delete_file_at_path')
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
- drv._delete_file('/mnt/img-cache-2').AndReturn(True)
- drv._delete_file('/mnt/img-cache-1').AndReturn(True)
+ drv._delete_file_at_path('/mnt/img-cache-2').AndReturn(True)
+ drv._delete_file_at_path('/mnt/img-cache-1').AndReturn(True)
mox.ReplayAll()
drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
mox.VerifyAll()
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
+ mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
mox.StubOutWithMock(drv, '_post_clone_image')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
+ utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn(
[('share', 'file_name')])
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
+ mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
+ utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share(
mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
+ mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
- mox.StubOutWithMock(drv, '_clone_volume')
+ mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
+ utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share(
mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
- drv._clone_volume(
+ drv._clone_backing_file_for_volume(
'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
+ mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
- mox.StubOutWithMock(drv, '_clone_volume')
+ mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
+ utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
+ mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
- mox.StubOutWithMock(drv, '_clone_volume')
+ mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
+ mox.StubOutWithMock(drv, '_do_qos_for_volume')
mox.StubOutWithMock(drv, 'local_path')
- mox.StubOutWithMock(os.path, 'exists')
- mox.StubOutWithMock(drv, '_delete_file')
+ utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
+ drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(False)
- drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
- os.path.exists('/mnt/vol').AndReturn(True)
- drv._delete_file('/mnt/vol')
mox.ReplayAll()
vol_dict, result = drv.clone_image(
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
+ mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
- mox.StubOutWithMock(drv, '_clone_volume')
+ mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
+ mox.StubOutWithMock(drv, '_do_qos_for_volume')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, 'local_path')
- mox.StubOutWithMock(os.path, 'exists')
- mox.StubOutWithMock(drv, '_delete_file')
+ utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
+ drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file(
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndRaise(exception.InvalidResults())
- drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
- os.path.exists('/mnt/vol').AndReturn(True)
- drv._delete_file('/mnt/vol')
mox.ReplayAll()
vol_dict, result = drv.clone_image(
self.assertEqual('446', na_server.get_port())
self.assertEqual('https', na_server.get_transport_type())
- @mock.patch.object(utils, 'get_volume_extra_specs')
- def test_check_volume_type_qos(self, get_specs):
- get_specs.return_value = {'netapp:qos_policy_group': 'qos'}
- self._driver._get_vserver_and_exp_vol = mock.Mock(
- return_value=('vs', 'vol'))
- self._driver.zapi_client.file_assign_qos = mock.Mock(
- side_effect=api.NaApiError)
- self._driver._is_share_vol_type_match = mock.Mock(return_value=True)
- self.assertRaises(exception.NetAppDriverException,
- self._driver._check_volume_type, 'vol',
- 'share', 'file')
- get_specs.assert_called_once_with('vol')
- self.assertEqual(1,
- self._driver.zapi_client.file_assign_qos.call_count)
- self.assertEqual(1, self._driver._get_vserver_and_exp_vol.call_count)
- self._driver._is_share_vol_type_match.assert_called_once_with(
- 'vol', 'share')
-
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_convert_vol_ref_share_name_to_share_ip(self, mock_hostname):
drv = self._driver
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
shutil.move = mock.Mock()
+ mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
+ mock_get_specs.return_value = {}
+ self.mock_object(drv, '_do_qos_for_volume')
location = drv.manage_existing(volume, vol_ref)
+
self.assertEqual(self.TEST_NFS_EXPORT1, location['provider_location'])
drv._check_volume_type.assert_called_once_with(
- volume, self.TEST_NFS_EXPORT1, test_file)
+ volume, self.TEST_NFS_EXPORT1, test_file, {})
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824)
def test_manage_existing_move_fails(self, get_file_size):
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
- drv._check_volume_type = mock.Mock()
+ mock_check_volume_type = drv._check_volume_type = mock.Mock()
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
drv._execute = mock.Mock(side_effect=OSError)
+ mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
+ mock_get_specs.return_value = {}
+ self.mock_object(drv, '_do_qos_for_volume')
+
self.assertRaises(exception.VolumeBackendAPIException,
drv.manage_existing, volume, vol_ref)
- drv._check_volume_type.assert_called_once_with(
- volume, self.TEST_NFS_EXPORT1, test_file)
+
+ mock_check_volume_type.assert_called_once_with(
+ volume, self.TEST_NFS_EXPORT1, test_file, {})
@mock.patch.object(nfs_base, 'LOG')
def test_unmanage(self, mock_log):
drv = self._driver
+ self.mock_object(utils, 'get_valid_qos_policy_group_info')
volume = FakeVolume()
volume['id'] = '123'
volume['provider_location'] = '/share'
+
drv.unmanage(volume)
+
self.assertEqual(1, mock_log.info.call_count)
self._driver.ssc_enabled = True
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
self._driver.zapi_client = mock.Mock()
+ self._fake_empty_qos_policy_group_info = {
+ 'legacy': None,
+ 'spec': None,
+ }
+ self._fake_legacy_qos_policy_group_info = {
+ 'legacy': {
+ 'policy_name': 'qos_policy_1'
+ },
+ 'spec': None,
+ }
- @mock.patch.object(utils, 'get_volume_extra_specs')
@mock.patch.object(utils, 'LOG', mock.Mock())
- def test_create_volume(self, mock_volume_extra_specs):
+ def test_create_volume(self):
drv = self._driver
drv.ssc_enabled = False
- extra_specs = {}
- mock_volume_extra_specs.return_value = extra_specs
+ fake_extra_specs = {}
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
- with mock.patch.object(drv, '_ensure_shares_mounted'):
- with mock.patch.object(drv, '_do_create_volume'):
- volume_info = self._driver.create_volume(FakeVolume(host, 1))
- self.assertEqual(volume_info.get('provider_location'),
- fake_share)
- self.assertEqual(0, utils.LOG.warning.call_count)
+ mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
+ mock_get_specs.return_value = fake_extra_specs
+ self.mock_object(drv, '_ensure_shares_mounted')
+ self.mock_object(drv, '_do_create_volume')
+ mock_get_qos_info =\
+ self.mock_object(utils, 'get_valid_qos_policy_group_info')
+ mock_get_qos_info.return_value = self._fake_empty_qos_policy_group_info
- @mock.patch.object(utils, 'LOG', mock.Mock())
- def test_create_volume_obsolete_extra_spec(self):
- drv = self._driver
- drv.ssc_enabled = False
- extra_specs = {'netapp:raid_type': 'raid4'}
- mock_volume_extra_specs = mock.Mock()
- self.mock_object(utils,
- 'get_volume_extra_specs',
- mock_volume_extra_specs)
- mock_volume_extra_specs.return_value = extra_specs
- fake_share = 'localhost:myshare'
- host = 'hostname@backend#' + fake_share
- with mock.patch.object(drv, '_ensure_shares_mounted'):
- with mock.patch.object(drv, '_do_create_volume'):
- self._driver.create_volume(FakeVolume(host, 1))
- warn_msg = ('Extra spec %(old)s is obsolete. Use %(new)s '
- 'instead.')
- utils.LOG.warning.assert_called_once_with(
- warn_msg, {'new': 'netapp_raid_type',
- 'old': 'netapp:raid_type'})
+ volume_info = self._driver.create_volume(FakeVolume(host, 1))
- @mock.patch.object(utils, 'LOG', mock.Mock())
- def test_create_volume_deprecated_extra_spec(self):
- drv = self._driver
- drv.ssc_enabled = False
- extra_specs = {'netapp_thick_provisioned': 'true'}
- fake_share = 'localhost:myshare'
- host = 'hostname@backend#' + fake_share
- mock_volume_extra_specs = mock.Mock()
- self.mock_object(utils,
- 'get_volume_extra_specs',
- mock_volume_extra_specs)
- mock_volume_extra_specs.return_value = extra_specs
- with mock.patch.object(drv, '_ensure_shares_mounted'):
- with mock.patch.object(drv, '_do_create_volume'):
- self._driver.create_volume(FakeVolume(host, 1))
- warn_msg = ('Extra spec %(old)s is deprecated. Use %(new)s '
- 'instead.')
- utils.LOG.warning.assert_called_once_with(
- warn_msg, {'new': 'netapp_thin_provisioned',
- 'old': 'netapp_thick_provisioned'})
+ self.assertEqual(fake_share, volume_info.get('provider_location'))
+ self.assertEqual(0, utils.LOG.warning.call_count)
def test_create_volume_no_pool_specified(self):
drv = self._driver
self.assertRaises(exception.InvalidHost,
self._driver.create_volume, FakeVolume(host, 1))
- @mock.patch.object(utils, 'get_volume_extra_specs')
- def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
+ def test_create_volume_with_legacy_qos_policy(self):
drv = self._driver
drv.ssc_enabled = False
- extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
+ fake_extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
fake_volume = FakeVolume(host, 1)
- fake_qos_policy = 'qos_policy_1'
- mock_volume_extra_specs.return_value = extra_specs
-
- with mock.patch.object(drv, '_ensure_shares_mounted'):
- with mock.patch.object(drv, '_do_create_volume'):
- with mock.patch.object(drv,
- '_set_qos_policy_group_on_volume'
- ) as mock_set_qos:
- volume_info = self._driver.create_volume(fake_volume)
- self.assertEqual(volume_info.get('provider_location'),
- 'localhost:myshare')
- mock_set_qos.assert_called_once_with(fake_volume,
- fake_share,
- fake_qos_policy)
+ mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
+ mock_get_specs.return_value = fake_extra_specs
+ mock_get_qos_info =\
+ self.mock_object(utils, 'get_valid_qos_policy_group_info')
+ mock_get_qos_info.return_value =\
+ self._fake_legacy_qos_policy_group_info
+ self.mock_object(drv, '_ensure_shares_mounted')
+ self.mock_object(drv, '_do_create_volume')
+ mock_set_qos = self.mock_object(drv, '_set_qos_policy_group_on_volume')
+
+ volume_info = self._driver.create_volume(fake_volume)
+
+ self.assertEqual('localhost:myshare',
+ volume_info.get('provider_location'))
+ mock_set_qos.assert_called_once_with(
+ fake_volume, self._fake_legacy_qos_policy_group_info)
def test_copy_img_to_vol_copyoffload_success(self):
drv = self._driver
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
- drv._delete_file = mock.Mock()
+ drv._delete_file_at_path = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
- drv._delete_file = mock.Mock()
+ drv._delete_file_at_path = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert mock_cvrt_image.call_count == 1
assert drv._execute.call_count == 1
- assert drv._delete_file.call_count == 2
+ assert drv._delete_file_at_path.call_count == 2
drv._clone_file_dst_exists.call_count == 1
drv._post_clone_image.assert_called_with(volume)
mox_lib.IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
return mox
- def test_clone_volume_clear(self):
+ def test_clone_backing_file_for_volume_clear(self):
drv = self._driver
mox = self._prepare_clone_mock('fail')
drv.zapi_client = mox.CreateMockAnything()
clone_name = 'clone_name'
volume_id = volume_name + six.text_type(hash(volume_name))
try:
- drv._clone_volume(volume_name, clone_name, volume_id)
+ drv._clone_backing_file_for_volume(volume_name, clone_name,
+ volume_id)
except Exception as e:
if isinstance(e, api.NaApiError):
pass
pool = self._driver.get_pool({'provider_location': 'fake-share'})
self.assertEqual(pool, 'fake-share')
- @mock.patch.object(utils, 'get_volume_extra_specs')
- def test_check_volume_type_qos(self, get_specs):
- get_specs.return_value = {'netapp:qos_policy_group': 'qos'}
- self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
- self._driver._check_volume_type,
- 'vol', 'share', 'file')
- get_specs.assert_called_once_with('vol')
-
def _set_config(self, configuration):
super(NetApp7modeNfsDriverTestCase, self)._set_config(
configuration)
configuration.netapp_storage_family = 'ontap_7mode'
return configuration
- def test_clone_volume(self):
+ def test_clone_backing_file_for_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
drv.zapi_client = mox.CreateMockAnything()
volume_id = volume_name + six.text_type(hash(volume_name))
share = 'ip:/share'
- drv._clone_volume(volume_name, clone_name, volume_id, share)
+ drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id,
+ share)
mox.VerifyAll()
-# Copyright (c) 2012 NetApp, Inc.
-# All Rights Reserved.
+# Copyright (c) 2012 NetApp, Inc. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
dedup=True, compression=False,
raid='raid4', ha='cfo', disk='SAS')
+ test_vols = {vol1, vol2, vol3, vol4, vol5}
+
+ ssc_map = {
+ 'mirrored': {vol1},
+ 'dedup': {vol1, vol2, vol3},
+ 'compression': {vol3, vol4},
+ 'thin': {vol5, vol2},
+ 'all': test_vols
+ }
+
def setUp(self):
super(SscUtilsTestCase, self).setUp()
self.stubs.Set(httplib, 'HTTPConnection',
def test_vols_for_optional_specs(self):
"""Test ssc for optional specs."""
- test_vols =\
- set([self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])
- ssc_map = {'mirrored': set([self.vol1]),
- 'dedup': set([self.vol1, self.vol2, self.vol3]),
- 'compression': set([self.vol3, self.vol4]),
- 'thin': set([self.vol5, self.vol2]), 'all': test_vols}
extra_specs =\
{'netapp_dedup': 'true',
'netapp:raid_type': 'raid4', 'netapp:disk_type': 'SSD'}
- res = ssc_cmode.get_volumes_for_specs(ssc_map, extra_specs)
+ res = ssc_cmode.get_volumes_for_specs(self.ssc_map, extra_specs)
self.assertEqual(len(res), 1)
+ def test_get_volumes_for_specs_none_specs(self):
+ none_specs = None
+ expected = self.ssc_map['all']
+
+ result = ssc_cmode.get_volumes_for_specs(self.ssc_map, none_specs)
+
+ self.assertEqual(expected, result)
+
+ def test_get_volumes_for_specs_empty_dict(self):
+ empty_dict = {}
+ expected = self.ssc_map['all']
+
+ result = ssc_cmode.get_volumes_for_specs(
+ self.ssc_map, empty_dict)
+
+ self.assertEqual(expected, result)
+
+ def test_get_volumes_for_specs_not_a_dict(self):
+ not_a_dict = False
+ expected = self.ssc_map['all']
+
+ result = ssc_cmode.get_volumes_for_specs(
+ self.ssc_map, not_a_dict)
+
+ self.assertEqual(expected, result)
+
def test_query_cl_vols_for_ssc(self):
na_server = api.NaServer('127.0.0.1')
na_server.set_api_version(1, 15)
# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
- def test_create_lun_with_qos_policy_group(self):
+ def test_create_lun_with_qos_policy_group_name(self):
expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
- expected_qos_group = 'qos_1'
+ expected_qos_group_name = 'qos_1'
mock_request = mock.Mock()
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
return_value=mock_request
) as mock_create_node:
- self.client.create_lun(self.fake_volume,
- self.fake_lun,
- self.fake_size,
- self.fake_metadata,
- qos_policy_group=expected_qos_group)
+ self.client.create_lun(
+ self.fake_volume,
+ self.fake_lun,
+ self.fake_size,
+ self.fake_metadata,
+ qos_policy_group_name=expected_qos_group_name)
mock_create_node.assert_called_once_with(
'lun-create-by-size',
'space-reservation-enabled':
self.fake_metadata['SpaceReserved']})
mock_request.add_new_child.assert_called_once_with(
- 'qos-policy-group', expected_qos_group)
+ 'qos-policy-group', expected_qos_group_name)
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
-# Copyright (c) 2014 Alex Meade.
-# Copyright (c) 2015 Dustin Schoenbrun.
-# All rights reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2015 Dustin Schoenbrun. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
from cinder import exception
from cinder import test
+
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
- fakes as fake)
+ fakes as fake_client)
+from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import (
api as netapp_api)
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
self.vserver = CONNECTION_INFO['vserver']
self.fake_volume = six.text_type(uuid.uuid4())
self.fake_lun = six.text_type(uuid.uuid4())
+ self.mock_send_request = self.mock_object(self.client, 'send_request')
def tearDown(self):
super(NetAppCmodeClientTestCase, self).tearDown()
self.assertSetEqual(igroups, expected)
def test_clone_lun(self):
- self.client.clone_lun('volume', 'fakeLUN', 'newFakeLUN')
+ self.client.clone_lun(
+ 'volume', 'fakeLUN', 'newFakeLUN',
+ qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
+
self.assertEqual(1, self.connection.invoke_successfully.call_count)
def test_clone_lun_multiple_zapi_calls(self):
self.assertEqual(1, len(lun))
def test_file_assign_qos(self):
- expected_flex_vol = "fake_flex_vol"
- expected_policy_group = "fake_policy_group"
- expected_file_path = "fake_file_path"
- self.client.file_assign_qos(expected_flex_vol, expected_policy_group,
- expected_file_path)
+ api_args = {
+ 'volume': fake.FLEXVOL,
+ 'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME,
+ 'file': fake.NFS_FILE_PATH,
+ 'vserver': self.vserver
+ }
- __, _args, __ = self.connection.invoke_successfully.mock_calls[0]
- actual_request = _args[0]
- actual_flex_vol = actual_request.get_child_by_name('volume') \
- .get_content()
- actual_policy_group = actual_request \
- .get_child_by_name('qos-policy-group-name').get_content()
- actual_file_path = actual_request.get_child_by_name('file') \
- .get_content()
- actual_vserver = actual_request.get_child_by_name('vserver') \
- .get_content()
+ self.client.file_assign_qos(
+ fake.FLEXVOL, fake.QOS_POLICY_GROUP_NAME, fake.NFS_FILE_PATH)
- self.assertEqual(expected_flex_vol, actual_flex_vol)
- self.assertEqual(expected_policy_group, actual_policy_group)
- self.assertEqual(expected_file_path, actual_file_path)
- self.assertEqual(self.vserver, actual_vserver)
+ self.mock_send_request.assert_has_calls([
+ mock.call('file-assign-qos', api_args, False)])
+
+ def test_set_lun_qos_policy_group(self):
+
+ api_args = {
+ 'path': fake.LUN_PATH,
+ 'qos-policy-group': fake.QOS_POLICY_GROUP_NAME,
+ }
+
+ self.client.set_lun_qos_policy_group(
+ fake.LUN_PATH, fake.QOS_POLICY_GROUP_NAME)
+
+ self.mock_send_request.assert_has_calls([
+ mock.call('lun-set-qos-policy-group', api_args)])
+
+ def test_provision_qos_policy_group_no_qos_policy_group_info(self):
+
+ self.client.provision_qos_policy_group(qos_policy_group_info=None)
+
+ self.assertEqual(0, self.connection.qos_policy_group_create.call_count)
+
+ def test_provision_qos_policy_group_legacy_qos_policy_group_info(self):
+
+ self.client.provision_qos_policy_group(
+ qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY)
+
+ self.assertEqual(0, self.connection.qos_policy_group_create.call_count)
+
+ def test_provision_qos_policy_group_with_qos_spec(self):
+
+ self.mock_object(self.client, 'qos_policy_group_create')
+
+ self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO)
+
+ self.client.qos_policy_group_create.assert_has_calls([
+ mock.call(fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)])
+
+ def test_qos_policy_group_create(self):
+
+ api_args = {
+ 'policy-group': fake.QOS_POLICY_GROUP_NAME,
+ 'max-throughput': fake.MAX_THROUGHPUT,
+ 'vserver': self.vserver,
+ }
+
+ self.client.qos_policy_group_create(
+ fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)
+
+ self.mock_send_request.assert_has_calls([
+ mock.call('qos-policy-group-create', api_args, False)])
+
+ def test_qos_policy_group_delete(self):
+
+ api_args = {
+ 'policy-group': fake.QOS_POLICY_GROUP_NAME
+ }
+
+ self.client.qos_policy_group_delete(
+ fake.QOS_POLICY_GROUP_NAME)
+
+ self.mock_send_request.assert_has_calls([
+ mock.call('qos-policy-group-delete', api_args, False)])
+
+ def test_qos_policy_group_rename(self):
+
+ new_name = 'new-' + fake.QOS_POLICY_GROUP_NAME
+ api_args = {
+ 'policy-group-name': fake.QOS_POLICY_GROUP_NAME,
+ 'new-name': new_name,
+ }
+
+ self.client.qos_policy_group_rename(
+ fake.QOS_POLICY_GROUP_NAME, new_name)
+
+ self.mock_send_request.assert_has_calls([
+ mock.call('qos-policy-group-rename', api_args, False)])
+
+ def test_mark_qos_policy_group_for_deletion_no_qos_policy_group_info(self):
+
+ mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
+ mock_remove = self.mock_object(self.client,
+ 'remove_unused_qos_policy_groups')
+
+ self.client.mark_qos_policy_group_for_deletion(
+ qos_policy_group_info=None)
+
+ self.assertEqual(0, mock_rename.call_count)
+ self.assertEqual(0, mock_remove.call_count)
+
+ def test_mark_qos_policy_group_for_deletion_legacy_qos_policy(self):
+
+ mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
+ mock_remove = self.mock_object(self.client,
+ 'remove_unused_qos_policy_groups')
+
+ self.client.mark_qos_policy_group_for_deletion(
+ qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY)
+
+ self.assertEqual(0, mock_rename.call_count)
+ self.assertEqual(1, mock_remove.call_count)
+
+ def test_mark_qos_policy_group_for_deletion_w_qos_spec(self):
+
+ mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
+ mock_remove = self.mock_object(self.client,
+ 'remove_unused_qos_policy_groups')
+ mock_log = self.mock_object(client_cmode.LOG, 'warning')
+ new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME
+
+ self.client.mark_qos_policy_group_for_deletion(
+ qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO)
+
+ mock_rename.assert_has_calls([
+ mock.call(fake.QOS_POLICY_GROUP_NAME, new_name)])
+ self.assertEqual(0, mock_log.call_count)
+ self.assertEqual(1, mock_remove.call_count)
+
+ def test_mark_qos_policy_group_for_deletion_exception_path(self):
+
+ mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
+ mock_rename.side_effect = netapp_api.NaApiError
+ mock_remove = self.mock_object(self.client,
+ 'remove_unused_qos_policy_groups')
+ mock_log = self.mock_object(client_cmode.LOG, 'warning')
+ new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME
+
+ self.client.mark_qos_policy_group_for_deletion(
+ qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO)
+
+ mock_rename.assert_has_calls([
+ mock.call(fake.QOS_POLICY_GROUP_NAME, new_name)])
+ self.assertEqual(1, mock_log.call_count)
+ self.assertEqual(1, mock_remove.call_count)
+
+ def test_remove_unused_qos_policy_groups(self):
+
+ mock_log = self.mock_object(client_cmode.LOG, 'debug')
+ api_args = {
+ 'query': {
+ 'qos-policy-group-info': {
+ 'policy-group': 'deleted_cinder_*',
+ 'vserver': self.vserver,
+ }
+ },
+ 'max-records': 3500,
+ 'continue-on-failure': 'true',
+ 'return-success-list': 'false',
+ 'return-failure-list': 'false',
+ }
+
+ self.client.remove_unused_qos_policy_groups()
+
+ self.mock_send_request.assert_has_calls([
+ mock.call('qos-policy-group-delete-iter', api_args, False)])
+ self.assertEqual(0, mock_log.call_count)
+
+ def test_remove_unused_qos_policy_groups_api_error(self):
+
+ mock_log = self.mock_object(client_cmode.LOG, 'debug')
+ api_args = {
+ 'query': {
+ 'qos-policy-group-info': {
+ 'policy-group': 'deleted_cinder_*',
+ 'vserver': self.vserver,
+ }
+ },
+ 'max-records': 3500,
+ 'continue-on-failure': 'true',
+ 'return-success-list': 'false',
+ 'return-failure-list': 'false',
+ }
+ self.mock_send_request.side_effect = netapp_api.NaApiError
+
+ self.client.remove_unused_qos_policy_groups()
+
+ self.mock_send_request.assert_has_calls([
+ mock.call('qos-policy-group-delete-iter', api_args, False)])
+ self.assertEqual(1, mock_log.call_count)
@mock.patch('cinder.volume.drivers.netapp.utils.resolve_hostname',
return_value='192.168.1.101')
def test_get_operational_network_interface_addresses(self):
expected_result = ['1.2.3.4', '99.98.97.96']
api_response = netapp_api.NaElement(
- fake.GET_OPERATIONAL_NETWORK_INTERFACE_ADDRESSES_RESPONSE)
- self.connection.invoke_successfully.return_value = api_response
+ fake_client.GET_OPERATIONAL_NETWORK_INTERFACE_ADDRESSES_RESPONSE)
+ self.mock_send_request.return_value = api_response
address_list = (
self.client.get_operational_network_interface_addresses())
expected_total_size = 1000
expected_available_size = 750
fake_flexvol_path = '/fake/vol'
- response = netapp_api.NaElement(
+ api_response = netapp_api.NaElement(
etree.XML("""
<results status="passed">
<attributes-list>
</attributes-list>
</results>""" % {'available_size': expected_available_size,
'total_size': expected_total_size}))
- self.connection.invoke_successfully.return_value = response
+
+ self.mock_send_request.return_value = api_response
total_size, available_size = (
self.client.get_flexvol_capacity(fake_flexvol_path))
# Copyright (c) - 2014, Clinton Knight. All rights reserved.
+# Copyright (c) - 2015, Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# License for the specific language governing permissions and limitations
# under the License.
+# Shared fake identifiers and fixture dictionaries for the NetApp
+# Data ONTAP driver unit tests.
+VOLUME_ID = 'f10d1a84-9b7b-427e-8fec-63c48b509a56'
+LUN_ID = 'ee6b4cc7-477b-4016-aa0c-7127b4e3af86'
+LUN_HANDLE = 'fake_lun_handle'
+LUN_NAME = 'lun1'
+LUN_SIZE = 3
+LUN_TABLE = {LUN_NAME: None}
+SIZE = 1024
+HOST_NAME = 'fake.host.name'
+BACKEND_NAME = 'fake_backend_name'
+POOL_NAME = 'aggr1'
+EXPORT_PATH = '/fake/export/path'
+NFS_SHARE = '192.168.99.24:%s' % EXPORT_PATH
+# Cinder host strings of the form host@backend#pool.
+HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, POOL_NAME)
+NFS_HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, NFS_SHARE)
+FLEXVOL = 'openstack-flexvol'
+NFS_FILE_PATH = 'nfsvol'
+PATH = '/vol/%s/%s' % (POOL_NAME, LUN_NAME)
+# Fake LUN metadata dictionary.
+LUN_METADATA = {
+ 'OsType': None,
+ 'SpaceReserved': 'true',
+ 'Path': PATH,
+ 'Qtree': None,
+ 'Volume': POOL_NAME,
+}
+VOLUME = {
+ 'name': LUN_NAME,
+ 'size': SIZE,
+ 'id': VOLUME_ID,
+ 'host': HOST_STRING,
+}
+NFS_VOLUME = {
+ 'name': NFS_FILE_PATH,
+ 'size': SIZE,
+ 'id': VOLUME_ID,
+ 'host': NFS_HOST_STRING,
+}
-VOLUME = 'f10d1a84-9b7b-427e-8fec-63c48b509a56'
-LUN = 'ee6b4cc7-477b-4016-aa0c-7127b4e3af86'
-SIZE = '1024'
-METADATA = {'OsType': 'linux', 'SpaceReserved': 'true'}
+NETAPP_VOLUME = 'fake_netapp_volume'
UUID1 = '12345678-1234-5678-1234-567812345678'
-LUN1 = '/vol/vol0/lun1'
-VSERVER1_NAME = 'openstack-vserver'
+LUN_PATH = '/vol/vol0/%s' % LUN_NAME
+
+VSERVER_NAME = 'openstack-vserver'
FC_VOLUME = {'name': 'fake_volume'}
}
ISCSI_VOLUME = {
- 'name': 'fake_volume', 'id': 'fake_id',
+ 'name': 'fake_volume',
+ 'id': 'fake_id',
'provider_auth': 'fake provider auth',
}
{'address': '99.98.97.96', 'port': '3260'},
]
-HOSTNAME = 'fake.host.com'
IPV4_ADDRESS = '192.168.14.2'
IPV6_ADDRESS = 'fe80::6e40:8ff:fe8a:130'
-EXPORT_PATH = '/fake/export/path'
-NFS_SHARE = HOSTNAME + ':' + EXPORT_PATH
NFS_SHARE_IPV4 = IPV4_ADDRESS + ':' + EXPORT_PATH
NFS_SHARE_IPV6 = IPV6_ADDRESS + ':' + EXPORT_PATH
TOTAL_BYTES = 4797892092432
AVAILABLE_BYTES = 13479932478
CAPACITY_VALUES = (TOTAL_BYTES, AVAILABLE_BYTES)
+
+IGROUP1 = {'initiator-group-os-type': 'linux',
+ 'initiator-group-type': 'fcp',
+ 'initiator-group-name': IGROUP1_NAME}
+
+# QoS fixtures: 'legacy' carries a pre-existing policy group name,
+# 'spec' carries a Cinder-managed throughput spec; exactly one of the
+# two is set in each *_INFO dictionary.
+QOS_SPECS = {}
+EXTRA_SPECS = {}
+MAX_THROUGHPUT = '21734278B/s'
+QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name'
+
+QOS_POLICY_GROUP_INFO_LEGACY = {
+ 'legacy': 'legacy-' + QOS_POLICY_GROUP_NAME,
+ 'spec': None,
+}
+
+QOS_POLICY_GROUP_SPEC = {
+ 'max_throughput': MAX_THROUGHPUT,
+ 'policy_name': QOS_POLICY_GROUP_NAME,
+}
+
+QOS_POLICY_GROUP_INFO = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC}
+
+# Clone fixtures; note the destination is deliberately larger than the
+# source (1041 vs 1024).
+CLONE_SOURCE_NAME = 'fake_clone_source_name'
+CLONE_SOURCE_ID = 'fake_clone_source_id'
+CLONE_SOURCE_SIZE = 1024
+
+CLONE_SOURCE = {
+ 'size': CLONE_SOURCE_SIZE,
+ 'name': CLONE_SOURCE_NAME,
+ 'id': CLONE_SOURCE_ID,
+}
+
+CLONE_DESTINATION_NAME = 'fake_clone_destination_name'
+CLONE_DESTINATION_SIZE = 1041
+CLONE_DESTINATION_ID = 'fake_clone_destination_id'
+
+CLONE_DESTINATION = {
+ 'size': CLONE_DESTINATION_SIZE,
+ 'name': CLONE_DESTINATION_NAME,
+ 'id': CLONE_DESTINATION_ID,
+}
+
+SNAPSHOT = {
+ 'name': 'fake_snapshot_name',
+ 'volume_size': SIZE,
+ 'volume_id': 'fake_volume_id',
+}
+
+VOLUME_REF = {'name': 'fake_vref_name', 'size': 42}
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
'/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
'newFakeLUN', 'true', block_count=0, dest_block=0, src_block=0)
+ def test_clone_lun_qos_supplied(self):
+ """Passing a QoS policy group name to _clone_lun must raise."""
+ self.assertRaises(exception.VolumeDriverException,
+ self.library._clone_lun,
+ 'fakeLUN',
+ 'newFakeLUN',
+ qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
+
def test_get_fc_target_wwpns(self):
ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0],
fake.FC_FORMATTED_TARGET_WWPNS[1]]
def test_create_lun(self):
self.library.vol_refresh_voluntary = False
- self.library._create_lun(fake.VOLUME, fake.LUN,
- fake.SIZE, fake.METADATA)
+ self.library._create_lun(fake.VOLUME_ID, fake.LUN_ID,
+ fake.LUN_SIZE, fake.LUN_METADATA)
self.library.zapi_client.create_lun.assert_called_once_with(
- fake.VOLUME, fake.LUN, fake.SIZE, fake.METADATA, None)
+ fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
+ None)
self.assertTrue(self.library.vol_refresh_voluntary)
- @mock.patch.object(na_utils, 'get_volume_extra_specs')
- def test_check_volume_type_for_lun_qos_not_supported(self, get_specs):
- get_specs.return_value = {'specs': 's',
- 'netapp:qos_policy_group': 'qos'}
- mock_lun = block_base.NetAppLun('handle', 'name', '1',
- {'Volume': 'name', 'Path': '/vol/lun'})
+ def test_create_lun_with_qos_policy_group(self):
+ # Supplying a QoS policy group name makes _create_lun raise.
+ self.assertRaises(exception.VolumeDriverException,
+ self.library._create_lun, fake.VOLUME_ID,
+ fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
+ qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
+
+ def test_check_volume_type_for_lun_legacy_qos_not_supported(self):
+ mock_get_volume_type = self.mock_object(na_utils,
+ 'get_volume_type_from_volume')
+
+ # Legacy QoS extra specs cause a type mismatch before the volume
+ # type is ever looked up.
+ self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
+ self.library._check_volume_type_for_lun,
+ na_fakes.VOLUME, {}, {}, na_fakes.LEGACY_EXTRA_SPECS)
+
+ self.assertEqual(0, mock_get_volume_type.call_count)
+
+ def test_check_volume_type_for_lun_no_volume_type(self):
+ mock_get_volume_type = self.mock_object(na_utils,
+ 'get_volume_type_from_volume')
+ mock_get_volume_type.return_value = None
+ mock_get_backend_spec = self.mock_object(
+ na_utils, 'get_backend_qos_spec_from_volume_type')
+
+ # No volume type on the volume: the check passes without ever
+ # consulting the backend QoS spec.
+ self.library._check_volume_type_for_lun(na_fakes.VOLUME, {}, {}, None)
+
+ self.assertEqual(0, mock_get_backend_spec.call_count)
+
+ def test_check_volume_type_for_lun_qos_spec_not_supported(self):
+ mock_get_volume_type = self.mock_object(na_utils,
+ 'get_volume_type_from_volume')
+ mock_get_volume_type.return_value = na_fakes.VOLUME_TYPE
+ mock_get_backend_spec = self.mock_object(
+ na_utils, 'get_backend_qos_spec_from_volume_type')
+ mock_get_backend_spec.return_value = na_fakes.QOS_SPEC
+
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.library._check_volume_type_for_lun,
- {'vol': 'vol'}, mock_lun, {'ref': 'ref'})
- get_specs.assert_called_once_with({'vol': 'vol'})
+ na_fakes.VOLUME, {}, {}, na_fakes.EXTRA_SPECS)
def test_get_preferred_target_from_list(self):
fake.ISCSI_TARGET_DETAILS_LIST)
self.assertEqual(fake.ISCSI_TARGET_DETAILS_LIST[0], result)
+
+ def test_mark_qos_policy_group_for_deletion(self):
+ result = self.library._mark_qos_policy_group_for_deletion(
+ fake.QOS_POLICY_GROUP_INFO)
+
+ # assertIsNone (hacking H203) instead of assertEqual(None, ...):
+ # same check, clearer failure message.
+ self.assertIsNone(result)
+
+ def test_setup_qos_for_volume(self):
+ result = self.library._setup_qos_for_volume(fake.VOLUME,
+ fake.EXTRA_SPECS)
+
+ # assertIsNone (hacking H203) instead of assertEqual(None, ...):
+ # same check, clearer failure message.
+ self.assertIsNone(result)
+
+ def test_manage_existing_lun_same_name(self):
+ # The managed volume name equals the existing LUN's name, so no
+ # rename (move_lun) should happen.
+ mock_lun = block_base.NetAppLun('handle', 'name', '1',
+ {'Path': '/vol/vol1/name'})
+ self.library._get_existing_vol_with_manage_ref = mock.Mock(
+ return_value=mock_lun)
+ self.mock_object(na_utils, 'get_volume_extra_specs')
+ self.mock_object(na_utils, 'log_extra_spec_warnings')
+ self.library._check_volume_type_for_lun = mock.Mock()
+ self.library._add_lun_to_table = mock.Mock()
+ self.zapi_client.move_lun = mock.Mock()
+
+ self.library.manage_existing({'name': 'name'}, {'ref': 'ref'})
+
+ self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
+ {'ref': 'ref'})
+ self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
+ self.assertEqual(1, self.library._add_lun_to_table.call_count)
+ self.assertEqual(0, self.zapi_client.move_lun.call_count)
+
+ def test_manage_existing_lun_new_path(self):
+ # The managed volume name ('volume') differs from the existing
+ # LUN's name ('name'), so the LUN is moved to the new path and the
+ # ref is looked up again afterwards.
+ mock_lun = block_base.NetAppLun(
+ 'handle', 'name', '1', {'Path': '/vol/vol1/name'})
+ self.library._get_existing_vol_with_manage_ref = mock.Mock(
+ return_value=mock_lun)
+ self.mock_object(na_utils, 'get_volume_extra_specs')
+ self.mock_object(na_utils, 'log_extra_spec_warnings')
+ self.library._check_volume_type_for_lun = mock.Mock()
+ self.library._add_lun_to_table = mock.Mock()
+ self.zapi_client.move_lun = mock.Mock()
+
+ self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'})
+
+ self.assertEqual(
+ 2, self.library._get_existing_vol_with_manage_ref.call_count)
+ self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
+ self.assertEqual(1, self.library._add_lun_to_table.call_count)
+ self.zapi_client.move_lun.assert_called_once_with(
+ '/vol/vol1/name', '/vol/vol1/volume')
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
Mock unit tests for the NetApp block storage library
"""
-
import copy
import uuid
import mock
+from oslo_utils import units
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
+from cinder.volume import utils as volume_utils
class NetAppBlockStorageLibraryTestCase(test.TestCase):
pool = self.library.get_pool({'name': 'volume-fake-uuid'})
self.assertEqual(pool, None)
- @mock.patch.object(block_base.NetAppBlockStorageLibrary,
- '_create_lun', mock.Mock())
- @mock.patch.object(block_base.NetAppBlockStorageLibrary,
- '_create_lun_handle', mock.Mock())
- @mock.patch.object(block_base.NetAppBlockStorageLibrary,
- '_add_lun_to_table', mock.Mock())
- @mock.patch.object(na_utils, 'get_volume_extra_specs',
- mock.Mock(return_value=None))
- @mock.patch.object(block_base, 'LOG', mock.Mock())
def test_create_volume(self):
- self.library.zapi_client.get_lun_by_args.return_value = ['lun']
- self.library.create_volume({'name': 'lun1', 'size': 100,
- 'id': uuid.uuid4(),
- 'host': 'hostname@backend#vol1'})
+ volume_size_in_bytes = int(fake.SIZE) * units.Gi
+ self.mock_object(na_utils, 'get_volume_extra_specs')
+ self.mock_object(na_utils, 'log_extra_spec_warnings')
+ self.mock_object(block_base, 'LOG')
+ self.mock_object(volume_utils, 'extract_host', mock.Mock(
+ return_value=fake.POOL_NAME))
+ self.mock_object(self.library, '_setup_qos_for_volume',
+ mock.Mock(return_value=None))
+ self.mock_object(self.library, '_create_lun')
+ self.mock_object(self.library, '_create_lun_handle')
+ self.mock_object(self.library, '_add_lun_to_table')
+ self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
+
+ self.library.create_volume(fake.VOLUME)
+
self.library._create_lun.assert_called_once_with(
- 'vol1', 'lun1', 107374182400, mock.ANY, None)
- self.assertEqual(0, block_base.LOG.warning.call_count)
+ fake.POOL_NAME, fake.LUN_NAME, volume_size_in_bytes,
+ fake.LUN_METADATA, None)
+ self.assertEqual(0, self.library.
+ _mark_qos_policy_group_for_deletion.call_count)
+ self.assertEqual(0, block_base.LOG.error.call_count)
+
+ def test_create_volume_no_pool(self):
+ # extract_host yields no pool -> create_volume raises InvalidHost.
+ self.mock_object(volume_utils, 'extract_host', mock.Mock(
+ return_value=None))
+
+ self.assertRaises(exception.InvalidHost, self.library.create_volume,
+ fake.VOLUME)
+
+ def test_create_volume_exception_path(self):
+ # _create_lun failing must mark the QoS policy group for deletion,
+ # log the exception, and surface a VolumeBackendAPIException.
+ self.mock_object(block_base, 'LOG')
+ self.mock_object(na_utils, 'get_volume_extra_specs')
+ self.mock_object(self.library, '_setup_qos_for_volume',
+ mock.Mock(return_value=None))
+ self.mock_object(self.library, '_create_lun', mock.Mock(
+ side_effect=Exception))
+ self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
+
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.library.create_volume, fake.VOLUME)
+
+ self.assertEqual(1, self.library.
+ _mark_qos_policy_group_for_deletion.call_count)
+ self.assertEqual(1, block_base.LOG.exception.call_count)
def test_create_volume_no_pool_provided_by_scheduler(self):
+ fake_volume = copy.deepcopy(fake.VOLUME)
+ # Set up fake volume whose 'host' field is missing pool information.
+ fake_volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME)
+
self.assertRaises(exception.InvalidHost, self.library.create_volume,
- {'name': 'lun1', 'size': 100,
- 'id': uuid.uuid4(),
- 'host': 'hostname@backend'}) # missing pool
+ fake_volume)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr')
os = 'linux'
protocol = 'fcp'
self.library.host_type = 'linux'
- mock_get_lun_attr.return_value = {'Path': fake.LUN1, 'OsType': os}
+ mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
'iscsi')
self.zapi_client.map_lun.return_value = '1'
mock_get_or_create_igroup.assert_called_once_with(
fake.FC_FORMATTED_INITIATORS, protocol, os)
self.zapi_client.map_lun.assert_called_once_with(
- fake.LUN1, fake.IGROUP1_NAME, lun_id=None)
+ fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None)
@mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
os = 'windows'
protocol = 'fcp'
self.library.host_type = 'linux'
- mock_get_lun_attr.return_value = {'Path': fake.LUN1, 'OsType': os}
+ mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
'iscsi')
self.library._map_lun('fake_volume',
fake.FC_FORMATTED_INITIATORS, protocol,
self.library.host_type)
self.zapi_client.map_lun.assert_called_once_with(
- fake.LUN1, fake.IGROUP1_NAME, lun_id=None)
+ fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None)
self.assertEqual(1, block_base.LOG.warning.call_count)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
mock_get_or_create_igroup, mock_get_lun_attr):
os = 'linux'
protocol = 'fcp'
- mock_get_lun_attr.return_value = {'Path': fake.LUN1, 'OsType': os}
+ mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
'iscsi')
mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, '2')
self.assertEqual(lun_id, '2')
mock_find_mapped_lun_igroup.assert_called_once_with(
- fake.LUN1, fake.FC_FORMATTED_INITIATORS)
+ fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr')
mock_get_or_create_igroup, mock_get_lun_attr):
os = 'linux'
protocol = 'fcp'
- mock_get_lun_attr.return_value = {'Path': fake.LUN1, 'OsType': os}
+ mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
'iscsi')
mock_find_mapped_lun_igroup.return_value = (None, None)
def test_unmap_lun(self, mock_find_mapped_lun_igroup):
mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, 1)
- self.library._unmap_lun(fake.LUN1, fake.FC_FORMATTED_INITIATORS)
+ self.library._unmap_lun(fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
- self.zapi_client.unmap_lun.assert_called_once_with(fake.LUN1,
+ self.zapi_client.unmap_lun.assert_called_once_with(fake.LUN_PATH,
fake.IGROUP1_NAME)
def test_find_mapped_lun_igroup(self):
self.assertRaises(NotImplementedError,
self.library._find_mapped_lun_igroup,
- fake.LUN1,
+ fake.LUN_PATH,
fake.FC_FORMATTED_INITIATORS)
def test_has_luns_mapped_to_initiators(self):
def test_terminate_connection_fc(self, mock_get_lun_attr, mock_unmap_lun,
mock_has_luns_mapped_to_initiators):
- mock_get_lun_attr.return_value = {'Path': fake.LUN1}
+ mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH}
mock_unmap_lun.return_value = None
mock_has_luns_mapped_to_initiators.return_value = True
fake.FC_CONNECTOR)
self.assertDictEqual(target_info, fake.FC_TARGET_INFO_EMPTY)
- mock_unmap_lun.assert_called_once_with(fake.LUN1,
+ mock_unmap_lun.assert_called_once_with(fake.LUN_PATH,
fake.FC_FORMATTED_INITIATORS)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
mock_has_luns_mapped_to_initiators,
mock_build_initiator_target_map):
- mock_get_lun_attr.return_value = {'Path': fake.LUN1}
+ mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH}
mock_unmap_lun.return_value = None
mock_has_luns_mapped_to_initiators.return_value = False
mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS,
self.assertDictEqual(fake.FC_I_T_MAP, init_targ_map)
self.assertEqual(4, num_paths)
- @mock.patch.object(block_base.NetAppBlockStorageLibrary,
- '_create_lun', mock.Mock())
- @mock.patch.object(block_base.NetAppBlockStorageLibrary,
- '_create_lun_handle', mock.Mock())
- @mock.patch.object(block_base.NetAppBlockStorageLibrary,
- '_add_lun_to_table', mock.Mock())
- @mock.patch.object(na_utils, 'LOG', mock.Mock())
- @mock.patch.object(na_utils, 'get_volume_extra_specs',
- mock.Mock(return_value={'netapp:raid_type': 'raid4'}))
- def test_create_volume_obsolete_extra_spec(self):
- self.library.zapi_client.get_lun_by_args.return_value = ['lun']
-
- self.library.create_volume({'name': 'lun1', 'size': 100,
- 'id': uuid.uuid4(),
- 'host': 'hostname@backend#vol1'})
-
- warn_msg = 'Extra spec %(old)s is obsolete. Use %(new)s instead.'
- na_utils.LOG.warning.assert_called_once_with(
- warn_msg, {'new': 'netapp_raid_type', 'old': 'netapp:raid_type'})
-
- @mock.patch.object(block_base.NetAppBlockStorageLibrary,
- '_create_lun', mock.Mock())
- @mock.patch.object(block_base.NetAppBlockStorageLibrary,
- '_create_lun_handle', mock.Mock())
- @mock.patch.object(block_base.NetAppBlockStorageLibrary,
- '_add_lun_to_table', mock.Mock())
- @mock.patch.object(na_utils, 'LOG', mock.Mock())
- @mock.patch.object(na_utils, 'get_volume_extra_specs',
- mock.Mock(return_value={'netapp_thick_provisioned':
- 'true'}))
- def test_create_volume_deprecated_extra_spec(self):
- self.library.zapi_client.get_lun_by_args.return_value = ['lun']
-
- self.library.create_volume({'name': 'lun1', 'size': 100,
- 'id': uuid.uuid4(),
- 'host': 'hostname@backend#vol1'})
-
- warn_msg = "Extra spec %(old)s is deprecated. Use %(new)s instead."
- na_utils.LOG.warning.assert_called_once_with(
- warn_msg, {'new': 'netapp_thin_provisioned',
- 'old': 'netapp_thick_provisioned'})
-
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup_san_configured(self, mock_check_flags):
self.library.configuration.netapp_lun_ostype = 'windows'
self.library._get_lun_from_table.assert_called_once_with('vol')
self.assertEqual(1, log.call_count)
- def test_manage_existing_lun_same_name(self):
- mock_lun = block_base.NetAppLun('handle', 'name', '1',
- {'Path': '/vol/vol1/name'})
- self.library._get_existing_vol_with_manage_ref = mock.Mock(
- return_value=mock_lun)
- self.library._check_volume_type_for_lun = mock.Mock()
- self.library._add_lun_to_table = mock.Mock()
- self.zapi_client.move_lun = mock.Mock()
- self.library.manage_existing({'name': 'name'}, {'ref': 'ref'})
- self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
- {'ref': 'ref'})
- self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
- self.assertEqual(1, self.library._add_lun_to_table.call_count)
- self.assertEqual(0, self.zapi_client.move_lun.call_count)
-
- def test_manage_existing_lun_new_path(self):
- mock_lun = block_base.NetAppLun(
- 'handle', 'name', '1', {'Path': '/vol/vol1/name'})
- self.library._get_existing_vol_with_manage_ref = mock.Mock(
- return_value=mock_lun)
- self.library._check_volume_type_for_lun = mock.Mock()
- self.library._add_lun_to_table = mock.Mock()
- self.zapi_client.move_lun = mock.Mock()
- self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'})
- self.assertEqual(
- 2, self.library._get_existing_vol_with_manage_ref.call_count)
- self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
- self.assertEqual(1, self.library._add_lun_to_table.call_count)
- self.zapi_client.move_lun.assert_called_once_with(
- '/vol/vol1/name', '/vol/vol1/volume')
-
def test_check_vol_type_for_lun(self):
self.assertRaises(NotImplementedError,
self.library._check_volume_type_for_lun,
- 'vol', 'lun', 'existing_ref')
+ 'vol', 'lun', 'existing_ref', {})
def test_is_lun_valid_on_storage(self):
self.assertTrue(self.library._is_lun_valid_on_storage('lun'))
self.library.check_for_setup_error()
self.library._extract_and_populate_luns.assert_called_once_with(
['lun1'])
+
+ def test_delete_volume(self):
+ mock_get_lun_attr = self.mock_object(self.library, '_get_lun_attr')
+ mock_get_lun_attr.return_value = fake.LUN_METADATA
+ self.library.zapi_client = mock.Mock()
+ self.library.lun_table = fake.LUN_TABLE
+
+ self.library.delete_volume(fake.VOLUME)
+
+ mock_get_lun_attr.assert_called_once_with(
+ fake.LUN_NAME, 'metadata')
+ # The LUN is destroyed at the Path stored in its metadata.
+ self.library.zapi_client.destroy_lun.assert_called_once_with(fake.PATH)
+
+ def test_delete_volume_no_metadata(self):
+ self.mock_object(self.library, '_get_lun_attr', mock.Mock(
+ return_value=None))
+ # mock_object patches zapi_client and restores it at teardown; a
+ # bare assignment of mock.Mock() would be redundant and would leak
+ # past this test.
+ self.mock_object(self.library, 'zapi_client')
+
+ self.library.delete_volume(fake.VOLUME)
+
+ # With no metadata for the LUN, nothing is destroyed and no QoS
+ # cleanup is requested.
+ self.library._get_lun_attr.assert_called_once_with(
+ fake.LUN_NAME, 'metadata')
+ self.assertEqual(0, self.library.zapi_client.destroy_lun.call_count)
+ self.assertEqual(0,
+ self.zapi_client.
+ mark_qos_policy_group_for_deletion.call_count)
+
+ def test_clone_source_to_destination(self):
+ # Happy path: QoS is set up for the destination, the LUN is cloned
+ # and extended to the destination size; no rollback occurs.
+ self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
+ return_value=fake.EXTRA_SPECS))
+ self.mock_object(self.library, '_setup_qos_for_volume', mock.Mock(
+ return_value=fake.QOS_POLICY_GROUP_INFO))
+ self.mock_object(self.library, '_clone_lun')
+ self.mock_object(self.library, 'extend_volume')
+ self.mock_object(self.library, 'delete_volume')
+ self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
+
+ self.library._clone_source_to_destination(fake.CLONE_SOURCE,
+ fake.CLONE_DESTINATION)
+
+ na_utils.get_volume_extra_specs.assert_called_once_with(
+ fake.CLONE_DESTINATION)
+ self.library._setup_qos_for_volume.assert_called_once_with(
+ fake.CLONE_DESTINATION, fake.EXTRA_SPECS)
+ self.library._clone_lun.assert_called_once_with(
+ fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME,
+ space_reserved='true',
+ qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
+ self.library.extend_volume.assert_called_once_with(
+ fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE,
+ qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
+ self.assertEqual(0, self.library.delete_volume.call_count)
+ self.assertEqual(0, self.library.
+ _mark_qos_policy_group_for_deletion.call_count)
+
+ def test_clone_source_to_destination_exception_path(self):
+ # extend_volume fails after the clone: the new volume is deleted,
+ # the QoS policy group is marked for deletion, and a
+ # VolumeBackendAPIException is raised.
+ self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
+ return_value=fake.EXTRA_SPECS))
+ self.mock_object(self.library, '_setup_qos_for_volume', mock.Mock(
+ return_value=fake.QOS_POLICY_GROUP_INFO))
+ self.mock_object(self.library, '_clone_lun')
+ self.mock_object(self.library, 'extend_volume', mock.Mock(
+ side_effect=Exception))
+ self.mock_object(self.library, 'delete_volume')
+ self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
+
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.library._clone_source_to_destination,
+ fake.CLONE_SOURCE, fake.CLONE_DESTINATION)
+
+ na_utils.get_volume_extra_specs.assert_called_once_with(
+ fake.CLONE_DESTINATION)
+ self.library._setup_qos_for_volume.assert_called_once_with(
+ fake.CLONE_DESTINATION, fake.EXTRA_SPECS)
+ self.library._clone_lun.assert_called_once_with(
+ fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME,
+ space_reserved='true',
+ qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
+ self.library.extend_volume.assert_called_once_with(
+ fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE,
+ qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
+ self.assertEqual(1, self.library.delete_volume.call_count)
+ self.assertEqual(1, self.library.
+ _mark_qos_policy_group_for_deletion.call_count)
+
+ def test_create_lun(self):
+ # The base library's _create_lun raises NotImplementedError.
+ self.assertRaises(NotImplementedError, self.library._create_lun,
+ fake.VOLUME_ID, fake.LUN_ID, fake.SIZE,
+ fake.LUN_METADATA)
+
+ def test_clone_lun(self):
+ # The base library's _clone_lun raises NotImplementedError.
+ self.assertRaises(NotImplementedError, self.library._clone_lun,
+ fake.VOLUME_ID, 'new-' + fake.VOLUME_ID)
+
+ def test_create_volume_from_snapshot(self):
+ mock_do_clone = self.mock_object(self.library,
+ '_clone_source_to_destination')
+ # The snapshot is adapted into a {name, size} source dict before
+ # delegation to _clone_source_to_destination.
+ source = {
+ 'name': fake.SNAPSHOT['name'],
+ 'size': fake.SNAPSHOT['volume_size']
+ }
+
+ self.library.create_volume_from_snapshot(fake.VOLUME, fake.SNAPSHOT)
+
+ mock_do_clone.assert_has_calls([
+ mock.call(source, fake.VOLUME)])
+
+ def test_create_cloned_volume(self):
+ fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID,
+ fake.LUN_SIZE, fake.LUN_METADATA)
+ mock_get_lun_from_table = self.mock_object(self.library,
+ '_get_lun_from_table')
+ mock_get_lun_from_table.return_value = fake_lun
+ mock_do_clone = self.mock_object(self.library,
+ '_clone_source_to_destination')
+ # Source name comes from the looked-up LUN; size comes from the
+ # reference volume being cloned.
+ source = {
+ 'name': fake_lun.name,
+ 'size': fake.VOLUME_REF['size']
+ }
+
+ self.library.create_cloned_volume(fake.VOLUME, fake.VOLUME_REF)
+
+ mock_do_clone.assert_has_calls([
+ mock.call(source, fake.VOLUME)])
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
Mock unit tests for the NetApp block storage C-mode library
"""
-
import mock
from cinder import exception
+from cinder.openstack.common import loopingcall
from cinder import test
import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
self.zapi_client = self.library.zapi_client
self.library.vserver = mock.Mock()
self.library.ssc_vols = None
+ self.fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_NAME,
+ fake.SIZE, None)
+ self.mock_object(self.library, 'lun_table')
+ self.library.lun_table = {fake.LUN_NAME: self.fake_lun}
+ self.mock_object(block_base.NetAppBlockStorageLibrary, 'delete_volume')
def tearDown(self):
super(NetAppBlockStorageCmodeLibraryTestCase, self).tearDown()
super_do_setup.assert_called_once_with(context)
self.assertEqual(1, mock_check_flags.call_count)
- @mock.patch.object(block_base.NetAppBlockStorageLibrary,
- 'check_for_setup_error')
- @mock.patch.object(ssc_cmode, 'check_ssc_api_permissions')
- def test_check_for_setup_error(self, mock_check_ssc_api_permissions,
- super_check_for_setup_error):
+ def test_check_for_setup_error(self):
+ super_check_for_setup_error = self.mock_object(
+ block_base.NetAppBlockStorageLibrary, 'check_for_setup_error')
+ mock_check_ssc_api_permissions = self.mock_object(
+ ssc_cmode, 'check_ssc_api_permissions')
+ mock_start_periodic_tasks = self.mock_object(
+ self.library, '_start_periodic_tasks')
self.library.check_for_setup_error()
- super_check_for_setup_error.assert_called_once_with()
+ self.assertEqual(1, super_check_for_setup_error.call_count)
mock_check_ssc_api_permissions.assert_called_once_with(
self.library.zapi_client)
+ self.assertEqual(1, mock_start_periodic_tasks.call_count)
def test_find_mapped_lun_igroup(self):
igroups = [fake.IGROUP1]
lun_maps = [{'initiator-group': fake.IGROUP1_NAME,
'lun-id': '1',
- 'vserver': fake.VSERVER1_NAME}]
+ 'vserver': fake.VSERVER_NAME}]
self.zapi_client.get_lun_map.return_value = lun_maps
(igroup, lun_id) = self.library._find_mapped_lun_igroup(
- fake.LUN1, fake.FC_FORMATTED_INITIATORS)
+ fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
self.assertEqual(fake.IGROUP1_NAME, igroup)
self.assertEqual('1', lun_id)
lun_maps = [{'initiator-group': fake.IGROUP1_NAME,
'lun-id': '1',
- 'vserver': fake.VSERVER1_NAME}]
+ 'vserver': fake.VSERVER_NAME}]
self.zapi_client.get_lun_map.return_value = lun_maps
(igroup, lun_id) = self.library._find_mapped_lun_igroup(
- fake.LUN1, fake.FC_FORMATTED_INITIATORS)
+ fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
self.assertIsNone(igroup)
self.assertIsNone(lun_id)
lun_maps = [{'initiator-group': fake.IGROUP1_NAME,
'lun-id': '1',
- 'vserver': fake.VSERVER1_NAME}]
+ 'vserver': fake.VSERVER_NAME}]
self.zapi_client.get_lun_map.return_value = lun_maps
(igroup, lun_id) = self.library._find_mapped_lun_igroup(
- fake.LUN1, fake.FC_FORMATTED_INITIATORS)
+ fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
self.assertIsNone(igroup)
self.assertIsNone(lun_id)
lun_maps = [{'initiator-group': 'igroup2',
'lun-id': '1',
- 'vserver': fake.VSERVER1_NAME}]
+ 'vserver': fake.VSERVER_NAME}]
self.zapi_client.get_lun_map.return_value = lun_maps
(igroup, lun_id) = self.library._find_mapped_lun_igroup(
- fake.LUN1, fake.FC_FORMATTED_INITIATORS)
+ fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
self.assertIsNone(igroup)
self.assertIsNone(lun_id)
self.library.zapi_client.clone_lun.assert_called_once_with(
'fakeLUN', 'fakeLUN', 'newFakeLUN', 'true', block_count=0,
- dest_block=0, src_block=0)
+ dest_block=0, src_block=0, qos_policy_group_name=None)
def test_get_fc_target_wwpns(self):
ports = [fake.FC_FORMATTED_TARGET_WWPNS[0],
def test_create_lun(self):
self.library._update_stale_vols = mock.Mock()
- self.library._create_lun(fake.VOLUME, fake.LUN,
- fake.SIZE, fake.METADATA)
+ self.library._create_lun(fake.VOLUME_ID, fake.LUN_ID,
+ fake.LUN_SIZE, fake.LUN_METADATA)
self.library.zapi_client.create_lun.assert_called_once_with(
- fake.VOLUME, fake.LUN, fake.SIZE, fake.METADATA, None)
+ fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
+ None)
self.assertEqual(1, self.library._update_stale_vols.call_count)
@mock.patch.object(ssc_cmode, 'get_volumes_for_specs')
@mock.patch.object(ssc_cmode, 'get_cluster_latest_ssc')
- @mock.patch.object(na_utils, 'get_volume_extra_specs')
- def test_check_volume_type_for_lun_fail(
- self, get_specs, get_ssc, get_vols):
+ def test_check_volume_type_for_lun_fail(self, get_ssc, get_vols):
self.library.ssc_vols = ['vol']
- get_specs.return_value = {'specs': 's'}
+ fake_extra_specs = {'specs': 's'}
get_vols.return_value = [ssc_cmode.NetAppVolume(name='name',
vserver='vs')]
mock_lun = block_base.NetAppLun('handle', 'name', '1',
{'Volume': 'fake', 'Path': '/vol/lun'})
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.library._check_volume_type_for_lun,
- {'vol': 'vol'}, mock_lun, {'ref': 'ref'})
- get_specs.assert_called_once_with({'vol': 'vol'})
+ {'vol': 'vol'}, mock_lun, {'ref': 'ref'},
+ fake_extra_specs)
get_vols.assert_called_with(['vol'], {'specs': 's'})
self.assertEqual(1, get_ssc.call_count)
- @mock.patch.object(block_cmode.LOG, 'error')
- @mock.patch.object(ssc_cmode, 'get_volumes_for_specs')
- @mock.patch.object(ssc_cmode, 'get_cluster_latest_ssc')
- @mock.patch.object(na_utils, 'get_volume_extra_specs')
- def test_check_volume_type_for_lun_qos_fail(
- self, get_specs, get_ssc, get_vols, driver_log):
- self.zapi_client.connection.set_api_version(1, 20)
- self.library.ssc_vols = ['vol']
- get_specs.return_value = {'specs': 's',
- 'netapp:qos_policy_group': 'qos'}
- get_vols.return_value = [ssc_cmode.NetAppVolume(name='name',
- vserver='vs')]
- mock_lun = block_base.NetAppLun('handle', 'name', '1',
- {'Volume': 'name', 'Path': '/vol/lun'})
- self.zapi_client.set_lun_qos_policy_group = mock.Mock(
- side_effect=netapp_api.NaApiError)
- self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
- self.library._check_volume_type_for_lun,
- {'vol': 'vol'}, mock_lun, {'ref': 'ref'})
- get_specs.assert_called_once_with({'vol': 'vol'})
- get_vols.assert_called_with(['vol'], {'specs': 's'})
- self.assertEqual(0, get_ssc.call_count)
- self.zapi_client.set_lun_qos_policy_group.assert_called_once_with(
- '/vol/lun', 'qos')
- self.assertEqual(1, driver_log.call_count)
-
def test_get_preferred_target_from_list(self):
target_details_list = fake.ISCSI_TARGET_DETAILS_LIST
operational_addresses = [
target_details_list)
self.assertEqual(target_details_list[2], result)
+
    def test_delete_volume(self):
        """delete_volume tears down the QoS group and marks the vol stale."""
        self.mock_object(block_base.NetAppLun, 'get_metadata_property',
                         mock.Mock(return_value=fake.POOL_NAME))
        self.mock_object(self.library, '_update_stale_vols')
        self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
                         mock.Mock(
                             return_value=fake.QOS_POLICY_GROUP_INFO))
        self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')

        self.library.delete_volume(fake.VOLUME)

        self.assertEqual(1,
                         block_base.NetAppLun.get_metadata_property.call_count)
        # NOTE(review): the base-class delete_volume is asserted on but not
        # mocked in this test body; presumably it is patched in setUp --
        # confirm, otherwise this assertion would fail on a real method.
        block_base.NetAppBlockStorageLibrary.delete_volume\
            .assert_called_once_with(fake.VOLUME)
        na_utils.get_valid_qos_policy_group_info.assert_called_once_with(
            fake.VOLUME)
        self.library._mark_qos_policy_group_for_deletion\
            .assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)
        # A known backing flexvol means one stale-vol update.
        self.assertEqual(1, self.library._update_stale_vols.call_count)
+
    def test_delete_volume_no_netapp_vol(self):
        """No stale-vol update happens when the LUN has no backing volume."""
        self.mock_object(block_base.NetAppLun, 'get_metadata_property',
                         mock.Mock(return_value=None))
        self.mock_object(self.library, '_update_stale_vols')
        self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
                         mock.Mock(
                             return_value=fake.QOS_POLICY_GROUP_INFO))
        self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')

        self.library.delete_volume(fake.VOLUME)

        block_base.NetAppLun.get_metadata_property.assert_called_once_with(
            'Volume')
        # NOTE(review): base-class delete_volume asserted but not mocked
        # here; presumably patched in setUp -- confirm.
        block_base.NetAppBlockStorageLibrary.delete_volume\
            .assert_called_once_with(fake.VOLUME)
        self.library._mark_qos_policy_group_for_deletion\
            .assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)
        # With no 'Volume' metadata there is nothing to mark stale.
        self.assertEqual(0, self.library._update_stale_vols.call_count)
+
    def test_delete_volume_get_valid_qos_policy_group_info_exception(self):
        """A QoS lookup failure still deletes the LUN; no group is marked."""
        self.mock_object(block_base.NetAppLun, 'get_metadata_property',
                         mock.Mock(return_value=fake.NETAPP_VOLUME))
        self.mock_object(self.library, '_update_stale_vols')
        self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
                         mock.Mock(side_effect=exception.Invalid))
        self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')

        self.library.delete_volume(fake.VOLUME)

        block_base.NetAppLun.get_metadata_property.assert_called_once_with(
            'Volume')
        block_base.NetAppBlockStorageLibrary.delete_volume\
            .assert_called_once_with(fake.VOLUME)
        # The driver swallows the Invalid error and marks nothing (None).
        self.library._mark_qos_policy_group_for_deletion\
            .assert_called_once_with(None)
        self.assertEqual(1, self.library._update_stale_vols.call_count)
+
+ def test_setup_qos_for_volume(self):
+ self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
+ mock.Mock(
+ return_value=fake.QOS_POLICY_GROUP_INFO))
+ self.mock_object(self.zapi_client, 'provision_qos_policy_group')
+
+ result = self.library._setup_qos_for_volume(fake.VOLUME,
+ fake.EXTRA_SPECS)
+
+ self.assertEqual(fake.QOS_POLICY_GROUP_INFO, result)
+ self.zapi_client.provision_qos_policy_group.\
+ assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)
+
+ def test_setup_qos_for_volume_exception_path(self):
+ self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
+ mock.Mock(
+ side_effect=exception.Invalid))
+ self.mock_object(self.zapi_client, 'provision_qos_policy_group')
+
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.library._setup_qos_for_volume, fake.VOLUME,
+ fake.EXTRA_SPECS)
+
+ self.assertEqual(0,
+ self.zapi_client.
+ provision_qos_policy_group.call_count)
+
+ def test_mark_qos_policy_group_for_deletion(self):
+ self.mock_object(self.zapi_client,
+ 'mark_qos_policy_group_for_deletion')
+
+ self.library._mark_qos_policy_group_for_deletion(
+ fake.QOS_POLICY_GROUP_INFO)
+
+ self.zapi_client.mark_qos_policy_group_for_deletion\
+ .assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)
+
    def test_unmanage(self):
        """unmanage marks the QoS group for deletion, then defers to base."""
        self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
                         mock.Mock(return_value=fake.QOS_POLICY_GROUP_INFO))
        self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
        self.mock_object(block_base.NetAppBlockStorageLibrary, 'unmanage')

        self.library.unmanage(fake.VOLUME)

        na_utils.get_valid_qos_policy_group_info.assert_called_once_with(
            fake.VOLUME)
        self.library._mark_qos_policy_group_for_deletion\
            .assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)
        block_base.NetAppBlockStorageLibrary.unmanage.assert_called_once_with(
            fake.VOLUME)
+
    def test_unmanage_w_invalid_qos_policy(self):
        """Invalid QoS specs are tolerated; unmanage still proceeds."""
        self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
                         mock.Mock(side_effect=exception.Invalid))
        self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
        self.mock_object(block_base.NetAppBlockStorageLibrary, 'unmanage')

        self.library.unmanage(fake.VOLUME)

        na_utils.get_valid_qos_policy_group_info.assert_called_once_with(
            fake.VOLUME)
        # On Invalid the library falls back to marking nothing (None).
        self.library._mark_qos_policy_group_for_deletion\
            .assert_called_once_with(None)
        block_base.NetAppBlockStorageLibrary.unmanage.assert_called_once_with(
            fake.VOLUME)
+
    def test_manage_existing_lun_same_name(self):
        """Managing a LUN that already bears the target name needs no move."""
        mock_lun = block_base.NetAppLun('handle', 'name', '1',
                                        {'Path': '/vol/vol1/name'})
        self.library._get_existing_vol_with_manage_ref = mock.Mock(
            return_value=mock_lun)
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(na_utils, 'log_extra_spec_warnings')
        self.library._check_volume_type_for_lun = mock.Mock()
        self.library._setup_qos_for_volume = mock.Mock()
        self.mock_object(na_utils, 'get_qos_policy_group_name_from_info',
                         mock.Mock(return_value=fake.QOS_POLICY_GROUP_NAME))
        self.library._add_lun_to_table = mock.Mock()
        self.zapi_client.move_lun = mock.Mock()
        mock_set_lun_qos_policy_group = self.mock_object(
            self.zapi_client, 'set_lun_qos_policy_group')

        # Requested name 'name' matches the existing LUN name.
        self.library.manage_existing({'name': 'name'}, {'ref': 'ref'})

        self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
            {'ref': 'ref'})
        self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
        self.assertEqual(1, self.library._add_lun_to_table.call_count)
        # Names match, so no rename (move_lun) happens; QoS is still applied.
        self.assertEqual(0, self.zapi_client.move_lun.call_count)
        self.assertEqual(1, mock_set_lun_qos_policy_group.call_count)
+
    def test_manage_existing_lun_new_path(self):
        """Managing a LUN under a different name moves it into place."""
        mock_lun = block_base.NetAppLun(
            'handle', 'name', '1', {'Path': '/vol/vol1/name'})
        self.library._get_existing_vol_with_manage_ref = mock.Mock(
            return_value=mock_lun)
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(na_utils, 'log_extra_spec_warnings')
        self.library._check_volume_type_for_lun = mock.Mock()
        self.library._add_lun_to_table = mock.Mock()
        self.zapi_client.move_lun = mock.Mock()

        # Requested name 'volume' differs from the LUN's current 'name'.
        self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'})

        # The reference is resolved again after the move.
        self.assertEqual(
            2, self.library._get_existing_vol_with_manage_ref.call_count)
        self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
        self.assertEqual(1, self.library._add_lun_to_table.call_count)
        self.zapi_client.move_lun.assert_called_once_with(
            '/vol/vol1/name', '/vol/vol1/volume')
+
    def test_start_periodic_tasks(self):
        """_start_periodic_tasks schedules the unused-QoS-group reaper."""

        mock_remove_unused_qos_policy_groups = self.mock_object(
            self.zapi_client,
            'remove_unused_qos_policy_groups')

        harvest_qos_periodic_task = mock.Mock()
        # Only one looping call is expected, hence the single-item
        # side_effect list.
        mock_loopingcall = self.mock_object(
            loopingcall,
            'FixedIntervalLoopingCall',
            mock.Mock(side_effect=[harvest_qos_periodic_task]))

        self.library._start_periodic_tasks()

        # The loop must be built around the reaper and actually started.
        mock_loopingcall.assert_has_calls([
            mock.call(mock_remove_unused_qos_policy_groups)])
        self.assertTrue(harvest_qos_periodic_task.start.called)
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
Mock unit tests for the NetApp nfs storage driver
"""
+import os
+
+import copy
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_utils import units
+from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder import utils
with mock.patch.object(remotefs_brick, 'RemoteFsClient',
return_value=mock.Mock()):
self.driver = nfs_base.NetAppNfsDriver(**kwargs)
+ self.driver.ssc_enabled = False
@mock.patch.object(nfs.NfsDriver, 'do_setup')
@mock.patch.object(na_utils, 'check_flags')
self.assertEqual(expected, result)
get_capacity.assert_has_calls([
mock.call(fake.EXPORT_PATH)])
+
    def test_create_volume(self):
        """create_volume provisions, applies QoS, and reports the share."""
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(self.driver, '_do_create_volume')
        self.mock_object(self.driver, '_do_qos_for_volume')
        update_ssc = self.mock_object(self.driver, '_update_stale_vols')
        expected = {'provider_location': fake.NFS_SHARE}

        result = self.driver.create_volume(fake.NFS_VOLUME)

        self.assertEqual(expected, result)
        # ssc_enabled is False in setUp, so no SSC refresh is expected.
        self.assertEqual(0, update_ssc.call_count)
+
+ def test_create_volume_no_pool(self):
+ volume = copy.deepcopy(fake.NFS_VOLUME)
+ volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME)
+ self.mock_object(self.driver, '_ensure_shares_mounted')
+
+ self.assertRaises(exception.InvalidHost,
+ self.driver.create_volume,
+ volume)
+
    def test_create_volume_exception(self):
        """Provisioning failures become VolumeBackendAPIException."""
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(na_utils, 'get_volume_extra_specs')
        mock_create = self.mock_object(self.driver, '_do_create_volume')
        mock_create.side_effect = Exception
        update_ssc = self.mock_object(self.driver, '_update_stale_vols')

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume,
                          fake.NFS_VOLUME)

        # ssc_enabled is False in setUp; no SSC refresh on failure either.
        self.assertEqual(0, update_ssc.call_count)
+
+ def test_create_volume_from_snapshot(self):
+ provider_location = fake.POOL_NAME
+ snapshot = fake.CLONE_SOURCE
+ self.mock_object(self.driver, '_clone_source_to_destination_volume',
+ mock.Mock(return_value=provider_location))
+
+ result = self.driver.create_cloned_volume(fake.NFS_VOLUME,
+ snapshot)
+
+ self.assertEqual(provider_location, result)
+
+ def test_clone_source_to_destination_volume(self):
+ self.mock_object(self.driver, '_get_volume_location', mock.Mock(
+ return_value=fake.POOL_NAME))
+ self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
+ return_value=fake.EXTRA_SPECS))
+ self.mock_object(
+ self.driver,
+ '_clone_with_extension_check')
+ self.mock_object(self.driver, '_do_qos_for_volume')
+ expected = {'provider_location': fake.POOL_NAME}
+
+ result = self.driver._clone_source_to_destination_volume(
+ fake.CLONE_SOURCE, fake.CLONE_DESTINATION)
+
+ self.assertEqual(expected, result)
+
+ def test_clone_source_to_destination_volume_with_do_qos_exception(self):
+ self.mock_object(self.driver, '_get_volume_location', mock.Mock(
+ return_value=fake.POOL_NAME))
+ self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
+ return_value=fake.EXTRA_SPECS))
+ self.mock_object(
+ self.driver,
+ '_clone_with_extension_check')
+ self.mock_object(self.driver, '_do_qos_for_volume', mock.Mock(
+ side_effect=Exception))
+
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.driver._clone_source_to_destination_volume,
+ fake.CLONE_SOURCE,
+ fake.CLONE_DESTINATION)
+
    def test_clone_with_extension_check_equal_sizes(self):
        """No extend is performed when source and destination sizes match."""
        clone_source = copy.deepcopy(fake.CLONE_SOURCE)
        # assumes fake.NFS_VOLUME['size'] == fake.VOLUME['size'] -- confirm
        # in the fakes module; otherwise the sizes would not be "equal".
        clone_source['size'] = fake.VOLUME['size']
        self.mock_object(self.driver, '_clone_backing_file_for_volume')
        self.mock_object(self.driver, 'local_path')
        mock_discover = self.mock_object(self.driver,
                                         '_discover_file_till_timeout')
        mock_discover.return_value = True
        self.mock_object(self.driver, '_set_rw_permissions')
        mock_extend_volume = self.mock_object(self.driver, 'extend_volume')

        self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME)

        self.assertEqual(0, mock_extend_volume.call_count)
+
    def test_clone_with_extension_check_unequal_sizes(self):
        """A smaller clone source triggers exactly one extend_volume call."""
        clone_source = copy.deepcopy(fake.CLONE_SOURCE)
        clone_source['size'] = fake.VOLUME['size'] + 1
        self.mock_object(self.driver, '_clone_backing_file_for_volume')
        self.mock_object(self.driver, 'local_path')
        mock_discover = self.mock_object(self.driver,
                                         '_discover_file_till_timeout')
        mock_discover.return_value = True
        self.mock_object(self.driver, '_set_rw_permissions')
        mock_extend_volume = self.mock_object(self.driver, 'extend_volume')

        self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME)

        self.assertEqual(1, mock_extend_volume.call_count)
+
    def test_clone_with_extension_check_extend_exception(self):
        """A failed extend cleans up the clone and raises CinderException."""
        clone_source = copy.deepcopy(fake.CLONE_SOURCE)
        clone_source['size'] = fake.VOLUME['size'] + 1
        self.mock_object(self.driver, '_clone_backing_file_for_volume')
        self.mock_object(self.driver, 'local_path')
        mock_discover = self.mock_object(self.driver,
                                         '_discover_file_till_timeout')
        mock_discover.return_value = True
        self.mock_object(self.driver, '_set_rw_permissions')
        mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
        mock_extend_volume.side_effect = Exception
        mock_cleanup = self.mock_object(self.driver,
                                        '_cleanup_volume_on_failure')

        self.assertRaises(exception.CinderException,
                          self.driver._clone_with_extension_check,
                          clone_source,
                          fake.NFS_VOLUME)

        # The partially-created clone must be removed after the failure.
        self.assertEqual(1, mock_cleanup.call_count)
+
+ def test_clone_with_extension_check_no_discovery(self):
+ self.mock_object(self.driver, '_clone_backing_file_for_volume')
+ self.mock_object(self.driver, 'local_path')
+ self.mock_object(self.driver, '_set_rw_permissions')
+ mock_discover = self.mock_object(self.driver,
+ '_discover_file_till_timeout')
+ mock_discover.return_value = False
+
+ self.assertRaises(exception.CinderException,
+ self.driver._clone_with_extension_check,
+ fake.CLONE_SOURCE,
+ fake.NFS_VOLUME)
+
+ def test_create_cloned_volume(self):
+ provider_location = fake.POOL_NAME
+ src_vref = fake.CLONE_SOURCE
+ self.mock_object(self.driver, '_clone_source_to_destination_volume',
+ mock.Mock(return_value=provider_location))
+
+ result = self.driver.create_cloned_volume(fake.NFS_VOLUME,
+ src_vref)
+ self.assertEqual(provider_location, result)
+
+ def test_do_qos_for_volume(self):
+ self.assertRaises(NotImplementedError,
+ self.driver._do_qos_for_volume,
+ fake.NFS_VOLUME,
+ fake.EXTRA_SPECS)
+
+ def test_cleanup_volume_on_failure(self):
+ path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
+ mock_local_path = self.mock_object(self.driver, 'local_path')
+ mock_local_path.return_value = path
+ mock_exists_check = self.mock_object(os.path, 'exists')
+ mock_exists_check.return_value = True
+ mock_delete = self.mock_object(self.driver, '_delete_file_at_path')
+
+ self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME)
+
+ mock_delete.assert_has_calls([mock.call(path)])
+
+ def test_cleanup_volume_on_failure_no_path(self):
+ self.mock_object(self.driver, 'local_path')
+ mock_exists_check = self.mock_object(os.path, 'exists')
+ mock_exists_check.return_value = False
+ mock_delete = self.mock_object(self.driver, '_delete_file_at_path')
+
+ self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME)
+
+ self.assertEqual(0, mock_delete.call_count)
+
+ def test_get_vol_for_share(self):
+ self.assertRaises(NotImplementedError,
+ self.driver._get_vol_for_share,
+ fake.NFS_SHARE)
import mock
from os_brick.remotefs import remotefs as remotefs_brick
+from oslo_log import log as logging
from oslo_utils import units
+from cinder import exception
+from cinder.openstack.common import loopingcall
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes
from cinder import utils
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
+from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import nfs_cmode
+from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
+from cinder.volume import utils as volume_utils
+
+
+LOG = logging.getLogger(__name__)
class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.driver = nfs_cmode.NetAppCmodeNfsDriver(**kwargs)
self.driver._mounted_shares = [fake.NFS_SHARE]
self.driver.ssc_vols = True
+ self.driver.vserver = fake.VSERVER_NAME
+ self.driver.ssc_enabled = True
def get_config_cmode(self):
config = na_fakes.create_configuration_cmode()
config.netapp_server_hostname = '127.0.0.1'
config.netapp_transport_type = 'http'
config.netapp_server_port = '80'
- config.netapp_vserver = 'openstack'
+ config.netapp_vserver = fake.VSERVER_NAME
return config
@mock.patch.object(client_cmode, 'Client', mock.Mock())
result[0]['reserved_percentage'])
self.assertEqual(total_capacity_gb, result[0]['total_capacity_gb'])
self.assertEqual(free_capacity_gb, result[0]['free_capacity_gb'])
+
    def test_check_for_setup_error(self):
        """Setup check defers to base, verifies SSC APIs, starts tasks."""
        super_check_for_setup_error = self.mock_object(
            nfs_base.NetAppNfsDriver, 'check_for_setup_error')
        mock_check_ssc_api_permissions = self.mock_object(
            ssc_cmode, 'check_ssc_api_permissions')
        mock_start_periodic_tasks = self.mock_object(
            self.driver, '_start_periodic_tasks')
        self.driver.zapi_client = mock.Mock()

        self.driver.check_for_setup_error()

        self.assertEqual(1, super_check_for_setup_error.call_count)
        mock_check_ssc_api_permissions.assert_called_once_with(
            self.driver.zapi_client)
        self.assertEqual(1, mock_start_periodic_tasks.call_count)
+
+ def test_delete_volume(self):
+ fake_provider_location = 'fake_provider_location'
+ fake_volume = {'name': 'fake_name',
+ 'provider_location': 'fake_provider_location'}
+ fake_qos_policy_group_info = {'legacy': None, 'spec': None}
+ self.mock_object(nfs_base.NetAppNfsDriver, 'delete_volume')
+ self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
+ mock.Mock(return_value=fake_qos_policy_group_info))
+ self.mock_object(self.driver, '_post_prov_deprov_in_ssc')
+ self.driver.zapi_client = mock.Mock()
+
+ self.driver.delete_volume(fake_volume)
+
+ nfs_base.NetAppNfsDriver.delete_volume.assert_called_once_with(
+ fake_volume)
+ self.driver.zapi_client.mark_qos_policy_group_for_deletion\
+ .assert_called_once_with(fake_qos_policy_group_info)
+ self.driver._post_prov_deprov_in_ssc.assert_called_once_with(
+ fake_provider_location)
+
    def test_delete_volume_get_qos_info_exception(self):
        """A QoS lookup failure must not block volume deletion."""
        fake_provider_location = 'fake_provider_location'
        fake_volume = {'name': 'fake_name',
                       'provider_location': 'fake_provider_location'}
        self.mock_object(nfs_base.NetAppNfsDriver, 'delete_volume')
        self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
                         mock.Mock(side_effect=exception.Invalid))
        self.mock_object(self.driver, '_post_prov_deprov_in_ssc')

        self.driver.delete_volume(fake_volume)

        # Deletion and SSC bookkeeping still complete; presumably the
        # QoS marking path is skipped on Invalid (zapi_client is not
        # mocked here) -- confirm against the driver implementation.
        nfs_base.NetAppNfsDriver.delete_volume.assert_called_once_with(
            fake_volume)
        self.driver._post_prov_deprov_in_ssc.assert_called_once_with(
            fake_provider_location)
+
    def test_do_qos_for_volume_no_exception(self):
        """The happy path provisions and assigns QoS with no logging."""

        mock_get_info = self.mock_object(na_utils,
                                         'get_valid_qos_policy_group_info')
        mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO
        self.driver.zapi_client = mock.Mock()
        mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group
        mock_set_policy = self.mock_object(self.driver,
                                           '_set_qos_policy_group_on_volume')
        mock_error_log = self.mock_object(nfs_cmode.LOG, 'error')
        mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug')
        mock_cleanup = self.mock_object(self.driver,
                                        '_cleanup_volume_on_failure')

        self.driver._do_qos_for_volume(fake.NFS_VOLUME, fake.EXTRA_SPECS)

        mock_get_info.assert_has_calls([
            mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)])
        mock_provision_qos.assert_has_calls([
            mock.call(fake.QOS_POLICY_GROUP_INFO)])
        mock_set_policy.assert_has_calls([
            mock.call(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO)])
        # No errors, no debug chatter, no cleanup on success.
        self.assertEqual(0, mock_error_log.call_count)
        self.assertEqual(0, mock_debug_log.call_count)
        self.assertEqual(0, mock_cleanup.call_count)
+
    def test_do_qos_for_volume_exception_w_cleanup(self):
        """A ZAPI failure after provisioning logs and cleans up the volume."""
        mock_get_info = self.mock_object(na_utils,
                                         'get_valid_qos_policy_group_info')
        mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO
        self.driver.zapi_client = mock.Mock()
        mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group
        mock_set_policy = self.mock_object(self.driver,
                                           '_set_qos_policy_group_on_volume')
        mock_set_policy.side_effect = netapp_api.NaApiError
        mock_error_log = self.mock_object(nfs_cmode.LOG, 'error')
        mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug')
        mock_cleanup = self.mock_object(self.driver,
                                        '_cleanup_volume_on_failure')

        self.assertRaises(netapp_api.NaApiError,
                          self.driver._do_qos_for_volume,
                          fake.NFS_VOLUME,
                          fake.EXTRA_SPECS)

        mock_get_info.assert_has_calls([
            mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)])
        mock_provision_qos.assert_has_calls([
            mock.call(fake.QOS_POLICY_GROUP_INFO)])
        mock_set_policy.assert_has_calls([
            mock.call(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO)])
        # The default cleanup=True path logs and removes the new volume.
        self.assertEqual(1, mock_error_log.call_count)
        self.assertEqual(1, mock_debug_log.call_count)
        mock_cleanup.assert_has_calls([
            mock.call(fake.NFS_VOLUME)])
+
    def test_do_qos_for_volume_exception_no_cleanup(self):
        """With cleanup=False a validation failure logs but keeps the file."""

        mock_get_info = self.mock_object(na_utils,
                                         'get_valid_qos_policy_group_info')
        mock_get_info.side_effect = exception.Invalid
        self.driver.zapi_client = mock.Mock()
        mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group
        mock_set_policy = self.mock_object(self.driver,
                                           '_set_qos_policy_group_on_volume')
        mock_error_log = self.mock_object(nfs_cmode.LOG, 'error')
        mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug')
        mock_cleanup = self.mock_object(self.driver,
                                        '_cleanup_volume_on_failure')

        self.assertRaises(exception.Invalid, self.driver._do_qos_for_volume,
                          fake.NFS_VOLUME, fake.EXTRA_SPECS, cleanup=False)

        mock_get_info.assert_has_calls([
            mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)])
        # Validation failed first, so nothing was provisioned or assigned.
        self.assertEqual(0, mock_provision_qos.call_count)
        self.assertEqual(0, mock_set_policy.call_count)
        self.assertEqual(1, mock_error_log.call_count)
        self.assertEqual(0, mock_debug_log.call_count)
        self.assertEqual(0, mock_cleanup.call_count)
+
    def test_set_qos_policy_group_on_volume(self):
        """The QoS group is assigned to the file in its backing flexvol."""

        mock_get_name_from_info = self.mock_object(
            na_utils, 'get_qos_policy_group_name_from_info')
        mock_get_name_from_info.return_value = fake.QOS_POLICY_GROUP_NAME

        mock_extract_host = self.mock_object(volume_utils, 'extract_host')
        mock_extract_host.return_value = fake.NFS_SHARE

        self.driver.zapi_client = mock.Mock()
        mock_get_flex_vol_name =\
            self.driver.zapi_client.get_vol_by_junc_vserver
        mock_get_flex_vol_name.return_value = fake.FLEXVOL

        mock_file_assign_qos = self.driver.zapi_client.file_assign_qos

        self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME,
                                                    fake.QOS_POLICY_GROUP_INFO)

        mock_get_name_from_info.assert_has_calls([
            mock.call(fake.QOS_POLICY_GROUP_INFO)])
        # assumes fake.NFS_HOST_STRING matches fake.NFS_VOLUME['host'] --
        # confirm in the fakes module.
        mock_extract_host.assert_has_calls([
            mock.call(fake.NFS_HOST_STRING, level='pool')])
        mock_get_flex_vol_name.assert_has_calls([
            mock.call(fake.VSERVER_NAME, fake.EXPORT_PATH)])
        mock_file_assign_qos.assert_has_calls([
            mock.call(fake.FLEXVOL, fake.QOS_POLICY_GROUP_NAME,
                      fake.NFS_VOLUME['name'])])
+
+ def test_set_qos_policy_group_on_volume_no_info(self):
+
+ mock_get_name_from_info = self.mock_object(
+ na_utils, 'get_qos_policy_group_name_from_info')
+
+ mock_extract_host = self.mock_object(volume_utils, 'extract_host')
+
+ self.driver.zapi_client = mock.Mock()
+ mock_get_flex_vol_name =\
+ self.driver.zapi_client.get_vol_by_junc_vserver
+
+ mock_file_assign_qos = self.driver.zapi_client.file_assign_qos
+
+ self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME,
+ None)
+
+ self.assertEqual(0, mock_get_name_from_info.call_count)
+ self.assertEqual(0, mock_extract_host.call_count)
+ self.assertEqual(0, mock_get_flex_vol_name.call_count)
+ self.assertEqual(0, mock_file_assign_qos.call_count)
+
+ def test_set_qos_policy_group_on_volume_no_name(self):
+
+ mock_get_name_from_info = self.mock_object(
+ na_utils, 'get_qos_policy_group_name_from_info')
+ mock_get_name_from_info.return_value = None
+
+ mock_extract_host = self.mock_object(volume_utils, 'extract_host')
+
+ self.driver.zapi_client = mock.Mock()
+ mock_get_flex_vol_name =\
+ self.driver.zapi_client.get_vol_by_junc_vserver
+
+ mock_file_assign_qos = self.driver.zapi_client.file_assign_qos
+
+ self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME,
+ fake.QOS_POLICY_GROUP_INFO)
+
+ mock_get_name_from_info.assert_has_calls([
+ mock.call(fake.QOS_POLICY_GROUP_INFO)])
+ self.assertEqual(0, mock_extract_host.call_count)
+ self.assertEqual(0, mock_get_flex_vol_name.call_count)
+ self.assertEqual(0, mock_file_assign_qos.call_count)
+
    def test_unmanage(self):
        """unmanage marks the QoS group for deletion, then defers to base."""
        mock_get_info = self.mock_object(na_utils,
                                         'get_valid_qos_policy_group_info')
        mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO

        self.driver.zapi_client = mock.Mock()
        mock_mark_for_deletion =\
            self.driver.zapi_client.mark_qos_policy_group_for_deletion

        super_unmanage = self.mock_object(nfs_base.NetAppNfsDriver, 'unmanage')

        self.driver.unmanage(fake.NFS_VOLUME)

        mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)])
        mock_mark_for_deletion.assert_has_calls([
            mock.call(fake.QOS_POLICY_GROUP_INFO)])
        super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)])
+
+ def test_unmanage_invalid_qos(self):
+ mock_get_info = self.mock_object(na_utils,
+ 'get_valid_qos_policy_group_info')
+ mock_get_info.side_effect = exception.Invalid
+
+ super_unmanage = self.mock_object(nfs_base.NetAppNfsDriver, 'unmanage')
+
+ self.driver.unmanage(fake.NFS_VOLUME)
+
+ mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)])
+ super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)])
+
    def test_create_volume(self):
        """cmode create_volume provisions and refreshes stale SSC vols."""
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(self.driver, '_do_create_volume')
        self.mock_object(self.driver, '_do_qos_for_volume')
        update_ssc = self.mock_object(self.driver, '_update_stale_vols')
        self.mock_object(self.driver, '_get_vol_for_share')
        expected = {'provider_location': fake.NFS_SHARE}

        result = self.driver.create_volume(fake.NFS_VOLUME)

        self.assertEqual(expected, result)
        # ssc_enabled is True in setUp, so one SSC refresh is expected.
        self.assertEqual(1, update_ssc.call_count)
+
    def test_create_volume_exception(self):
        """A failed create still refreshes SSC and raises a backend error."""
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(na_utils, 'get_volume_extra_specs')
        mock_create = self.mock_object(self.driver, '_do_create_volume')
        mock_create.side_effect = Exception
        update_ssc = self.mock_object(self.driver, '_update_stale_vols')
        self.mock_object(self.driver, '_get_vol_for_share')

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume,
                          fake.NFS_VOLUME)

        # ssc_enabled is True in setUp; the refresh happens even on failure.
        self.assertEqual(1, update_ssc.call_count)
+
    def test_start_periodic_tasks(self):
        """_start_periodic_tasks schedules the unused-QoS-group reaper."""

        self.driver.zapi_client = mock.Mock()
        mock_remove_unused_qos_policy_groups = self.mock_object(
            self.driver.zapi_client,
            'remove_unused_qos_policy_groups')

        harvest_qos_periodic_task = mock.Mock()
        # Only one looping call is expected, hence the single-item
        # side_effect list.
        mock_loopingcall = self.mock_object(
            loopingcall,
            'FixedIntervalLoopingCall',
            mock.Mock(side_effect=[harvest_qos_periodic_task]))

        self.driver._start_periodic_tasks()

        # The loop must be built around the reaper and actually started.
        mock_loopingcall.assert_has_calls([
            mock.call(mock_remove_unused_qos_policy_groups)])
        self.assertTrue(harvest_qos_periodic_task.start.called)
# Copyright (c) - 2014, Clinton Knight All rights reserved.
# Copyright (c) - 2015, Alex Meade. All Rights Reserved.
# Copyright (c) - 2015, Rushil Chugh. All Rights Reserved.
+# Copyright (c) - 2015, Tom Barron. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
'auth_method': 'None', 'auth_username': 'stack',
'auth_password': 'password'}
# Identifiers for the canonical fake volume used across the NetApp tests.
VOLUME_NAME = 'fake_volume_name'
VOLUME_ID = 'fake_volume_id'
VOLUME_TYPE_ID = 'fake_volume_type_id'

# Minimal Cinder volume dict (name/size/id/host/type) for driver calls.
VOLUME = {
    'name': VOLUME_NAME,
    'size': 42,
    'id': VOLUME_ID,
    'host': 'fake_host@fake_backend#fake_pool',
    'volume_type_id': VOLUME_TYPE_ID,
}


QOS_SPECS = {}

EXTRA_SPECS = {}

# QoS policy group fixtures: legacy (netapp:qos_policy_group extra spec)
# versus the newer spec-based form with an explicit throughput limit.
MAX_THROUGHPUT = '21734278B/s'
QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name'
LEGACY_EXTRA_SPECS = {'netapp:qos_policy_group': QOS_POLICY_GROUP_NAME}

LEGACY_QOS = {
    'policy_name': QOS_POLICY_GROUP_NAME,
}

QOS_POLICY_GROUP_SPEC = {
    'max_throughput': MAX_THROUGHPUT,
    'policy_name': 'openstack-%s' % VOLUME_ID,
}

# Info dicts as returned by na_utils.get_valid_qos_policy_group_info:
# exactly one of 'legacy'/'spec' may be set; both set is invalid.
QOS_POLICY_GROUP_INFO_NONE = {'legacy': None, 'spec': None}

QOS_POLICY_GROUP_INFO = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC}

LEGACY_QOS_POLICY_GROUP_INFO = {
    'legacy': LEGACY_QOS,
    'spec': None,
}

INVALID_QOS_POLICY_GROUP_INFO = {
    'legacy': LEGACY_QOS,
    'spec': QOS_POLICY_GROUP_SPEC,
}

# Cinder qos_specs objects differing only in the 'consumer' field.
QOS_SPECS_ID = 'fake_qos_specs_id'
QOS_SPEC = {'maxBPS': 21734278}
OUTER_BACKEND_QOS_SPEC = {
    'id': QOS_SPECS_ID,
    'specs': QOS_SPEC,
    'consumer': 'back-end',
}
OUTER_FRONTEND_QOS_SPEC = {
    'id': QOS_SPECS_ID,
    'specs': QOS_SPEC,
    'consumer': 'front-end',
}
OUTER_BOTH_QOS_SPEC = {
    'id': QOS_SPECS_ID,
    'specs': QOS_SPEC,
    'consumer': 'both',
}
VOLUME_TYPE = {'id': VOLUME_TYPE_ID, 'qos_specs_id': QOS_SPECS_ID}
+
def create_configuration():
config = conf.Configuration(None)
# Copyright (c) 2014 Clinton Knight. All rights reserved.
-# Copyright (c) 2014 Tom Barron. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
Mock unit tests for the NetApp driver utility module
"""
+import copy
import platform
import mock
from oslo_concurrency import processutils as putils
+from cinder import context
from cinder import exception
from cinder import test
import cinder.tests.unit.volume.drivers.netapp.fakes as fake
from cinder import version
-import cinder.volume.drivers.netapp.utils as na_utils
+from cinder.volume.drivers.netapp import utils as na_utils
+from cinder.volume import qos_specs
+from cinder.volume import volume_types
class NetAppDriverUtilsTestCase(test.TestCase):
self.assertAlmostEqual(na_utils.round_down(-5.567, '0.0'), -5.5)
self.assertAlmostEqual(na_utils.round_down(-5.567, '0'), -5)
+ def test_iscsi_connection_properties(self):
+
+ actual_properties = na_utils.get_iscsi_connection_properties(
+ fake.ISCSI_FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME,
+ fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_ADDRESS,
+ fake.ISCSI_FAKE_PORT)
+
+ actual_properties_mapped = actual_properties['data']
+
+ self.assertDictEqual(actual_properties_mapped,
+ fake.FC_ISCSI_TARGET_INFO_DICT)
+
+ def test_iscsi_connection_lun_id_type_str(self):
+ FAKE_LUN_ID = '1'
+
+ actual_properties = na_utils.get_iscsi_connection_properties(
+ FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, fake.ISCSI_FAKE_IQN,
+ fake.ISCSI_FAKE_ADDRESS, fake.ISCSI_FAKE_PORT)
+
+ actual_properties_mapped = actual_properties['data']
+
+ self.assertIs(type(actual_properties_mapped['target_lun']), int)
+
+ def test_iscsi_connection_lun_id_type_dict(self):
+ FAKE_LUN_ID = {'id': 'fake_id'}
+
+ self.assertRaises(TypeError, na_utils.get_iscsi_connection_properties,
+ FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME,
+ fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_ADDRESS,
+ fake.ISCSI_FAKE_PORT)
+
+ def test_get_volume_extra_specs(self):
+ fake_extra_specs = {'fake_key': 'fake_value'}
+ fake_volume_type = {'extra_specs': fake_extra_specs}
+ fake_volume = {'volume_type_id': 'fake_volume_type_id'}
+ self.mock_object(context, 'get_admin_context')
+ self.mock_object(volume_types, 'get_volume_type', mock.Mock(
+ return_value=fake_volume_type))
+ self.mock_object(na_utils, 'log_extra_spec_warnings')
+
+ result = na_utils.get_volume_extra_specs(fake_volume)
+
+ self.assertEqual(fake_extra_specs, result)
+
+ def test_get_volume_extra_specs_no_type_id(self):
+ fake_volume = {}
+ self.mock_object(context, 'get_admin_context')
+ self.mock_object(volume_types, 'get_volume_type')
+ self.mock_object(na_utils, 'log_extra_spec_warnings')
+
+ result = na_utils.get_volume_extra_specs(fake_volume)
+
+ self.assertEqual({}, result)
+
+ def test_get_volume_extra_specs_no_volume_type(self):
+ fake_volume = {'volume_type_id': 'fake_volume_type_id'}
+ self.mock_object(context, 'get_admin_context')
+ self.mock_object(volume_types, 'get_volume_type', mock.Mock(
+ return_value=None))
+ self.mock_object(na_utils, 'log_extra_spec_warnings')
+
+ result = na_utils.get_volume_extra_specs(fake_volume)
+
+ self.assertEqual({}, result)
+
+ def test_log_extra_spec_warnings_obsolete_specs(self):
+
+ mock_log = self.mock_object(na_utils.LOG, 'warning')
+
+ na_utils.log_extra_spec_warnings({'netapp:raid_type': 'raid4'})
+
+ self.assertEqual(1, mock_log.call_count)
+
+ def test_log_extra_spec_warnings_deprecated_specs(self):
+
+ mock_log = self.mock_object(na_utils.LOG, 'warning')
+
+ na_utils.log_extra_spec_warnings({'netapp_thick_provisioned': 'true'})
+
+ self.assertEqual(1, mock_log.call_count)
+
+ def test_validate_qos_spec_none(self):
+ qos_spec = None
+
+ # Just return without raising an exception.
+ na_utils.validate_qos_spec(qos_spec)
+
+ def test_validate_qos_spec_keys_weirdly_cased(self):
+ qos_spec = {'mAxIopS': 33000}
+
+ # Just return without raising an exception.
+ na_utils.validate_qos_spec(qos_spec)
+
+ def test_validate_qos_spec_bad_key(self):
+ qos_spec = {'maxFlops': 33000}
+
+ self.assertRaises(exception.Invalid,
+ na_utils.validate_qos_spec,
+ qos_spec)
+
+ def test_validate_qos_spec_bad_key_combination(self):
+ qos_spec = {'maxIOPS': 33000, 'maxBPS': 10000000}
+
+ self.assertRaises(exception.Invalid,
+ na_utils.validate_qos_spec,
+ qos_spec)
+
+ def test_map_qos_spec_none(self):
+ qos_spec = None
+
+ result = na_utils.map_qos_spec(qos_spec, fake.VOLUME)
+
+ self.assertEqual(None, result)
+
+ def test_map_qos_spec_maxiops(self):
+ qos_spec = {'maxIOPs': 33000}
+ mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name')
+ mock_get_name.return_value = 'fake_qos_policy'
+ expected = {
+ 'policy_name': 'fake_qos_policy',
+ 'max_throughput': '33000iops',
+ }
+
+ result = na_utils.map_qos_spec(qos_spec, fake.VOLUME)
+
+ self.assertEqual(expected, result)
+
+ def test_map_qos_spec_maxbps(self):
+ qos_spec = {'maxBPS': 1000000}
+ mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name')
+ mock_get_name.return_value = 'fake_qos_policy'
+ expected = {
+ 'policy_name': 'fake_qos_policy',
+ 'max_throughput': '1000000B/s',
+ }
+
+ result = na_utils.map_qos_spec(qos_spec, fake.VOLUME)
+
+ self.assertEqual(expected, result)
+
+ def test_map_qos_spec_no_key_present(self):
+ qos_spec = {}
+ mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name')
+ mock_get_name.return_value = 'fake_qos_policy'
+ expected = {
+ 'policy_name': 'fake_qos_policy',
+ 'max_throughput': None,
+ }
+
+ result = na_utils.map_qos_spec(qos_spec, fake.VOLUME)
+
+ self.assertEqual(expected, result)
+
+ def test_map_dict_to_lower(self):
+ original = {'UPperKey': 'Value'}
+ expected = {'upperkey': 'Value'}
+
+ result = na_utils.map_dict_to_lower(original)
+
+ self.assertEqual(expected, result)
+
+ def test_get_qos_policy_group_name(self):
+ expected = 'openstack-%s' % fake.VOLUME_ID
+
+ result = na_utils.get_qos_policy_group_name(fake.VOLUME)
+
+ self.assertEqual(expected, result)
+
+ def test_get_qos_policy_group_name_no_id(self):
+ volume = copy.deepcopy(fake.VOLUME)
+ del(volume['id'])
+
+ result = na_utils.get_qos_policy_group_name(volume)
+
+ self.assertEqual(None, result)
+
+ def test_get_qos_policy_group_name_from_info(self):
+ expected = 'openstack-%s' % fake.VOLUME_ID
+ result = na_utils.get_qos_policy_group_name_from_info(
+ fake.QOS_POLICY_GROUP_INFO)
+
+ self.assertEqual(expected, result)
+
+ def test_get_qos_policy_group_name_from_info_no_info(self):
+
+ result = na_utils.get_qos_policy_group_name_from_info(None)
+
+ self.assertEqual(None, result)
+
+ def test_get_qos_policy_group_name_from_legacy_info(self):
+ expected = fake.QOS_POLICY_GROUP_NAME
+
+ result = na_utils.get_qos_policy_group_name_from_info(
+ fake.LEGACY_QOS_POLICY_GROUP_INFO)
+
+ self.assertEqual(expected, result)
+
+ def test_get_qos_policy_group_name_from_spec_info(self):
+ expected = 'openstack-%s' % fake.VOLUME_ID
+
+ result = na_utils.get_qos_policy_group_name_from_info(
+ fake.QOS_POLICY_GROUP_INFO)
+
+ self.assertEqual(expected, result)
+
+ def test_get_qos_policy_group_name_from_none_qos_info(self):
+ expected = None
+
+ result = na_utils.get_qos_policy_group_name_from_info(
+ fake.QOS_POLICY_GROUP_INFO_NONE)
+
+ self.assertEqual(expected, result)
+
+ def test_get_valid_qos_policy_group_info_exception_path(self):
+ mock_get_volume_type = self.mock_object(na_utils,
+ 'get_volume_type_from_volume')
+ mock_get_volume_type.side_effect = exception.VolumeTypeNotFound
+ expected = fake.QOS_POLICY_GROUP_INFO_NONE
+
+ result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME)
+
+ self.assertEqual(expected, result)
+
+ def test_get_valid_qos_policy_group_info_volume_type_none(self):
+ mock_get_volume_type = self.mock_object(na_utils,
+ 'get_volume_type_from_volume')
+ mock_get_volume_type.return_value = None
+ expected = fake.QOS_POLICY_GROUP_INFO_NONE
+
+ result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME)
+
+ self.assertEqual(expected, result)
+
+ def test_get_valid_qos_policy_group_info_no_info(self):
+ mock_get_volume_type = self.mock_object(na_utils,
+ 'get_volume_type_from_volume')
+ mock_get_volume_type.return_value = fake.VOLUME_TYPE
+ mock_get_legacy_qos_policy = self.mock_object(na_utils,
+ 'get_legacy_qos_policy')
+ mock_get_legacy_qos_policy.return_value = None
+ mock_get_valid_qos_spec_from_volume_type = self.mock_object(
+ na_utils, 'get_valid_backend_qos_spec_from_volume_type')
+ mock_get_valid_qos_spec_from_volume_type.return_value = None
+ self.mock_object(na_utils, 'check_for_invalid_qos_spec_combination')
+ expected = fake.QOS_POLICY_GROUP_INFO_NONE
+
+ result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME)
+
+ self.assertEqual(expected, result)
+
+ def test_get_valid_legacy_qos_policy_group_info(self):
+ mock_get_volume_type = self.mock_object(na_utils,
+ 'get_volume_type_from_volume')
+ mock_get_volume_type.return_value = fake.VOLUME_TYPE
+ mock_get_legacy_qos_policy = self.mock_object(na_utils,
+ 'get_legacy_qos_policy')
+
+ mock_get_legacy_qos_policy.return_value = fake.LEGACY_QOS
+ mock_get_valid_qos_spec_from_volume_type = self.mock_object(
+ na_utils, 'get_valid_backend_qos_spec_from_volume_type')
+ mock_get_valid_qos_spec_from_volume_type.return_value = None
+ self.mock_object(na_utils, 'check_for_invalid_qos_spec_combination')
+
+ result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME)
+
+ self.assertEqual(fake.LEGACY_QOS_POLICY_GROUP_INFO, result)
+
+ def test_get_valid_spec_qos_policy_group_info(self):
+ mock_get_volume_type = self.mock_object(na_utils,
+ 'get_volume_type_from_volume')
+ mock_get_volume_type.return_value = fake.VOLUME_TYPE
+ mock_get_legacy_qos_policy = self.mock_object(na_utils,
+ 'get_legacy_qos_policy')
+ mock_get_legacy_qos_policy.return_value = None
+ mock_get_valid_qos_spec_from_volume_type = self.mock_object(
+ na_utils, 'get_valid_backend_qos_spec_from_volume_type')
+ mock_get_valid_qos_spec_from_volume_type.return_value =\
+ fake.QOS_POLICY_GROUP_SPEC
+ self.mock_object(na_utils, 'check_for_invalid_qos_spec_combination')
+
+ result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME)
+
+ self.assertEqual(fake.QOS_POLICY_GROUP_INFO, result)
+
+ def test_get_valid_backend_qos_spec_from_volume_type_no_spec(self):
+ mock_get_spec = self.mock_object(
+ na_utils, 'get_backend_qos_spec_from_volume_type')
+ mock_get_spec.return_value = None
+ mock_validate = self.mock_object(na_utils, 'validate_qos_spec')
+
+ result = na_utils.get_valid_backend_qos_spec_from_volume_type(
+ fake.VOLUME, fake.VOLUME_TYPE)
+
+ self.assertEqual(None, result)
+ self.assertEqual(0, mock_validate.call_count)
+
+ def test_get_valid_backend_qos_spec_from_volume_type(self):
+ mock_get_spec = self.mock_object(
+ na_utils, 'get_backend_qos_spec_from_volume_type')
+ mock_get_spec.return_value = fake.QOS_SPEC
+ mock_validate = self.mock_object(na_utils, 'validate_qos_spec')
+
+ result = na_utils.get_valid_backend_qos_spec_from_volume_type(
+ fake.VOLUME, fake.VOLUME_TYPE)
+
+ self.assertEqual(fake.QOS_POLICY_GROUP_SPEC, result)
+ self.assertEqual(1, mock_validate.call_count)
+
+ def test_get_backend_qos_spec_from_volume_type_no_qos_specs_id(self):
+ volume_type = copy.deepcopy(fake.VOLUME_TYPE)
+ del(volume_type['qos_specs_id'])
+ mock_get_context = self.mock_object(context, 'get_admin_context')
+
+ result = na_utils.get_backend_qos_spec_from_volume_type(volume_type)
+
+ self.assertEqual(None, result)
+ self.assertEqual(0, mock_get_context.call_count)
+
+ def test_get_backend_qos_spec_from_volume_type_no_qos_spec(self):
+ volume_type = fake.VOLUME_TYPE
+ self.mock_object(context, 'get_admin_context')
+ mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs')
+ mock_get_specs.return_value = None
+
+ result = na_utils.get_backend_qos_spec_from_volume_type(volume_type)
+
+ self.assertEqual(None, result)
+
+ def test_get_backend_qos_spec_from_volume_type_with_frontend_spec(self):
+ volume_type = fake.VOLUME_TYPE
+ self.mock_object(context, 'get_admin_context')
+ mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs')
+ mock_get_specs.return_value = fake.OUTER_FRONTEND_QOS_SPEC
+
+ result = na_utils.get_backend_qos_spec_from_volume_type(volume_type)
+
+ self.assertEqual(None, result)
+
+ def test_get_backend_qos_spec_from_volume_type_with_backend_spec(self):
+ volume_type = fake.VOLUME_TYPE
+ self.mock_object(context, 'get_admin_context')
+ mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs')
+ mock_get_specs.return_value = fake.OUTER_BACKEND_QOS_SPEC
+
+ result = na_utils.get_backend_qos_spec_from_volume_type(volume_type)
+
+ self.assertEqual(fake.QOS_SPEC, result)
+
+ def test_get_backend_qos_spec_from_volume_type_with_both_spec(self):
+ volume_type = fake.VOLUME_TYPE
+ self.mock_object(context, 'get_admin_context')
+ mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs')
+ mock_get_specs.return_value = fake.OUTER_BOTH_QOS_SPEC
+
+ result = na_utils.get_backend_qos_spec_from_volume_type(volume_type)
+
+ self.assertEqual(fake.QOS_SPEC, result)
+
+ def test_check_for_invalid_qos_spec_combination(self):
+
+ self.assertRaises(exception.Invalid,
+ na_utils.check_for_invalid_qos_spec_combination,
+ fake.INVALID_QOS_POLICY_GROUP_INFO,
+ fake.VOLUME_TYPE)
+
+ def test_get_legacy_qos_policy(self):
+ extra_specs = fake.LEGACY_EXTRA_SPECS
+ expected = {'policy_name': fake.QOS_POLICY_GROUP_NAME}
+
+ result = na_utils.get_legacy_qos_policy(extra_specs)
+
+ self.assertEqual(expected, result)
+
+ def test_get_legacy_qos_policy_no_policy_name(self):
+ extra_specs = fake.EXTRA_SPECS
+
+ result = na_utils.get_legacy_qos_policy(extra_specs)
+
+ self.assertEqual(None, result)
+
class OpenStackInfoTestCase(test.TestCase):
info._update_openstack_info()
self.assertTrue(mock_updt_from_dpkg.called)
-
- def test_iscsi_connection_properties(self):
-
- actual_properties = na_utils.get_iscsi_connection_properties(
- fake.ISCSI_FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME,
- fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_ADDRESS,
- fake.ISCSI_FAKE_PORT)
-
- actual_properties_mapped = actual_properties['data']
-
- self.assertDictEqual(actual_properties_mapped,
- fake.FC_ISCSI_TARGET_INFO_DICT)
-
- def test_iscsi_connection_lun_id_type_str(self):
- FAKE_LUN_ID = '1'
-
- actual_properties = na_utils.get_iscsi_connection_properties(
- FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, fake.ISCSI_FAKE_IQN,
- fake.ISCSI_FAKE_ADDRESS, fake.ISCSI_FAKE_PORT)
-
- actual_properties_mapped = actual_properties['data']
-
- self.assertIs(type(actual_properties_mapped['target_lun']), int)
-
- def test_iscsi_connection_lun_id_type_dict(self):
- FAKE_LUN_ID = {'id': 'fake_id'}
-
- self.assertRaises(TypeError, na_utils.get_iscsi_connection_properties,
- FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME,
- fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_ADDRESS,
- fake.ISCSI_FAKE_PORT)
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
super(NetAppBlockStorage7modeLibrary, self).check_for_setup_error()
def _create_lun(self, volume_name, lun_name, size,
- metadata, qos_policy_group=None):
+ metadata, qos_policy_group_name=None):
"""Creates a LUN, handling Data ONTAP differences as needed."""
-
+ if qos_policy_group_name is not None:
+ msg = _('Data ONTAP operating in 7-Mode does not support QoS '
+ 'policy groups.')
+ raise exception.VolumeDriverException(msg)
self.zapi_client.create_lun(
- volume_name, lun_name, size, metadata, qos_policy_group)
+ volume_name, lun_name, size, metadata, qos_policy_group_name)
self.vol_refresh_voluntary = True
return False
def _clone_lun(self, name, new_name, space_reserved='true',
- src_block=0, dest_block=0, block_count=0):
+ qos_policy_group_name=None, src_block=0, dest_block=0,
+ block_count=0):
"""Clone LUN with the given handle to the new name."""
+ if qos_policy_group_name is not None:
+ msg = _('Data ONTAP operating in 7-Mode does not support QoS '
+ 'policy groups.')
+ raise exception.VolumeDriverException(msg)
+
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
(parent, _splitter, name) = path.rpartition('/')
"""Driver entry point for destroying existing volumes."""
super(NetAppBlockStorage7modeLibrary, self).delete_volume(volume)
self.vol_refresh_voluntary = True
+ LOG.debug('Deleted LUN with name %s', volume['name'])
def _is_lun_valid_on_storage(self, lun):
"""Validate LUN specific to storage system."""
return False
return True
- def _check_volume_type_for_lun(self, volume, lun, existing_ref):
- """Check if lun satisfies volume type."""
- extra_specs = na_utils.get_volume_extra_specs(volume)
- if extra_specs and extra_specs.pop('netapp:qos_policy_group', None):
+ def _check_volume_type_for_lun(self, volume, lun, existing_ref,
+ extra_specs):
+ """Check if LUN satisfies volume type."""
+ if extra_specs:
+ legacy_policy = extra_specs.get('netapp:qos_policy_group')
+ if legacy_policy is not None:
+ raise exception.ManageExistingVolumeTypeMismatch(
+ reason=_("Setting LUN QoS policy group is not supported "
+ "on this storage family and ONTAP version."))
+ volume_type = na_utils.get_volume_type_from_volume(volume)
+ if volume_type is None:
+ return
+ spec = na_utils.get_backend_qos_spec_from_volume_type(volume_type)
+ if spec is not None:
raise exception.ManageExistingVolumeTypeMismatch(
- reason=_("Setting LUN QoS policy group is not supported"
- " on this storage family and ONTAP version."))
+ reason=_("Back-end QoS specs are not supported on this "
+ "storage family and ONTAP version."))
- def _get_preferred_target_from_list(self, target_details_list):
+ def _get_preferred_target_from_list(self, target_details_list,
+ filter=None):
# 7-mode iSCSI LIFs migrate from controller to controller
# in failover and flap operational state in transit, so
# we don't filter these on operational state.
return (super(NetAppBlockStorage7modeLibrary, self)
- ._get_preferred_target_from_list(target_details_list,
- filter=None))
+ ._get_preferred_target_from_list(target_details_list))
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
{'prop': prop, 'name': name})
def __str__(self, *args, **kwargs):
- return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\
- % (self.handle, self.name, self.size, self.metadata)
+ return 'NetApp LUN [handle:%s, name:%s, size:%s, metadata:%s]' % (
+ self.handle, self.name, self.size, self.metadata)
class NetAppBlockStorageLibrary(object):
# do not increment this as it may be used in volume type definitions
VERSION = "1.0.0"
- IGROUP_PREFIX = 'openstack-'
REQUIRED_FLAGS = ['netapp_login', 'netapp_password',
'netapp_server_hostname']
ALLOWED_LUN_OS_TYPES = ['linux', 'aix', 'hpux', 'image', 'windows',
self.host_type = None
self.lookup_service = fczm_utils.create_lookup_service()
self.app_version = kwargs.get("app_version", "unknown")
- self.db = kwargs.get('db')
self.configuration = kwargs['configuration']
self.configuration.append_config_values(na_opts.netapp_connection_opts)
LOG.debug('create_volume on %s', volume['host'])
# get Data ONTAP volume name as pool name
- ontap_volume_name = volume_utils.extract_host(volume['host'],
- level='pool')
+ pool_name = volume_utils.extract_host(volume['host'], level='pool')
- if ontap_volume_name is None:
+ if pool_name is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
+ extra_specs = na_utils.get_volume_extra_specs(volume)
+
lun_name = volume['name']
- # start with default size, get requested size
- default_size = units.Mi * 100 # 100 MB
- size = default_size if not int(volume['size'])\
- else int(volume['size']) * units.Gi
+ size = int(volume['size']) * units.Gi
metadata = {'OsType': self.lun_ostype,
'SpaceReserved': 'true',
- 'Path': '/vol/%s/%s' % (ontap_volume_name, lun_name)}
-
- extra_specs = na_utils.get_volume_extra_specs(volume)
- qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
- if extra_specs else None
+ 'Path': '/vol/%s/%s' % (pool_name, lun_name)}
- # warn on obsolete extra specs
- na_utils.log_extra_spec_warnings(extra_specs)
+ qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs)
+ qos_policy_group_name = (
+ na_utils.get_qos_policy_group_name_from_info(
+ qos_policy_group_info))
- self._create_lun(ontap_volume_name, lun_name, size,
- metadata, qos_policy_group)
- LOG.debug('Created LUN with name %s', lun_name)
-
- metadata['Path'] = '/vol/%s/%s' % (ontap_volume_name, lun_name)
- metadata['Volume'] = ontap_volume_name
+ try:
+ self._create_lun(pool_name, lun_name, size, metadata,
+ qos_policy_group_name)
+ except Exception:
+ LOG.exception(_LE("Exception creating LUN %(name)s in pool "
+ "%(pool)s."),
+ {'name': lun_name, 'pool': pool_name})
+ self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
+ msg = _("Volume %s could not be created.")
+ raise exception.VolumeBackendAPIException(data=msg % (
+ volume['name']))
+ LOG.debug('Created LUN with name %(name)s and QoS info %(qos)s',
+ {'name': lun_name, 'qos': qos_policy_group_info})
+
+ metadata['Path'] = '/vol/%s/%s' % (pool_name, lun_name)
+ metadata['Volume'] = pool_name
metadata['Qtree'] = None
handle = self._create_lun_handle(metadata)
self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
+ def _setup_qos_for_volume(self, volume, extra_specs):
+ return None
+
+ def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info):
+ return
+
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
name = volume['name']
vol_name = snapshot['volume_name']
snapshot_name = snapshot['name']
lun = self._get_lun_from_table(vol_name)
- self._clone_lun(lun.name, snapshot_name, 'false')
+ self._clone_lun(lun.name, snapshot_name, space_reserved='false')
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot):
- """Driver entry point for creating a new volume from a snapshot.
+ source = {'name': snapshot['name'], 'size': snapshot['volume_size']}
+ return self._clone_source_to_destination(source, volume)
- Many would call this "cloning" and in fact we use cloning to implement
- this feature.
- """
+ def create_cloned_volume(self, volume, src_vref):
+ src_lun = self._get_lun_from_table(src_vref['name'])
+ source = {'name': src_lun.name, 'size': src_vref['size']}
+ return self._clone_source_to_destination(source, volume)
- vol_size = volume['size']
- snap_size = snapshot['volume_size']
- snapshot_name = snapshot['name']
- new_name = volume['name']
- self._clone_lun(snapshot_name, new_name, 'true')
- if vol_size != snap_size:
- try:
- self.extend_volume(volume, volume['size'])
- except Exception:
- with excutils.save_and_reraise_exception():
- LOG.error(
- _LE("Resizing %s failed. Cleaning volume."), new_name)
- self.delete_volume(volume)
+ def _clone_source_to_destination(self, source, destination_volume):
+ source_size = source['size']
+ destination_size = destination_volume['size']
+
+ source_name = source['name']
+ destination_name = destination_volume['name']
+
+ extra_specs = na_utils.get_volume_extra_specs(destination_volume)
+
+ qos_policy_group_info = self._setup_qos_for_volume(
+ destination_volume, extra_specs)
+ qos_policy_group_name = (
+ na_utils.get_qos_policy_group_name_from_info(
+ qos_policy_group_info))
+
+ try:
+ self._clone_lun(source_name, destination_name,
+ space_reserved='true',
+ qos_policy_group_name=qos_policy_group_name)
+
+ if destination_size != source_size:
+
+ try:
+ self.extend_volume(
+ destination_volume, destination_size,
+ qos_policy_group_name=qos_policy_group_name)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error(
+ _LE("Resizing %s failed. Cleaning volume."),
+ destination_volume['id'])
+ self.delete_volume(destination_volume)
+
+ except Exception:
+ LOG.exception(_LE("Exception cloning volume %(name)s from source "
+ "volume %(source)s."),
+ {'name': destination_name, 'source': source_name})
+
+ self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
+
+ msg = _("Volume %s could not be created from source volume.")
+ raise exception.VolumeBackendAPIException(
+ data=msg % destination_name)
def _create_lun(self, volume_name, lun_name, size,
- metadata, qos_policy_group=None):
+ metadata, qos_policy_group_name=None):
"""Creates a LUN, handling Data ONTAP differences as needed."""
raise NotImplementedError()
def _create_igroup_add_initiators(self, initiator_group_type,
host_os_type, initiator_list):
"""Creates igroup and adds initiators."""
- igroup_name = self.IGROUP_PREFIX + six.text_type(uuid.uuid4())
+ igroup_name = na_utils.OPENSTACK_PREFIX + six.text_type(uuid.uuid4())
self.zapi_client.create_igroup(igroup_name, initiator_group_type,
host_os_type)
for initiator in initiator_list:
return lun
def _clone_lun(self, name, new_name, space_reserved='true',
- src_block=0, dest_block=0, block_count=0):
+ qos_policy_group_name=None, src_block=0, dest_block=0,
+ block_count=0):
"""Clone LUN with the given name to the new name."""
raise NotImplementedError()
def _get_fc_target_wwpns(self, include_partner=True):
raise NotImplementedError()
- def create_cloned_volume(self, volume, src_vref):
- """Creates a clone of the specified volume."""
- vol_size = volume['size']
- src_vol = self._get_lun_from_table(src_vref['name'])
- src_vol_size = src_vref['size']
- new_name = volume['name']
- self._clone_lun(src_vol.name, new_name, 'true')
- if vol_size != src_vol_size:
- try:
- self.extend_volume(volume, volume['size'])
- except Exception:
- with excutils.save_and_reraise_exception():
- LOG.error(
- _LE("Resizing %s failed. Cleaning volume."), new_name)
- self.delete_volume(volume)
-
def get_volume_stats(self, refresh=False):
"""Get volume stats.
def _update_volume_stats(self):
raise NotImplementedError()
- def extend_volume(self, volume, new_size):
+ def extend_volume(self, volume, new_size, qos_policy_group_name=None):
"""Extend an existing volume to the new size."""
name = volume['name']
lun = self._get_lun_from_table(name)
int(new_size_bytes)):
self.zapi_client.do_direct_resize(path, new_size_bytes)
else:
- self._do_sub_clone_resize(path, new_size_bytes)
+ self._do_sub_clone_resize(
+ path, new_size_bytes,
+ qos_policy_group_name=qos_policy_group_name)
self.lun_table[name].size = new_size_bytes
else:
LOG.info(_LI("No need to extend volume %s"
break
return value
- def _do_sub_clone_resize(self, path, new_size_bytes):
+ def _do_sub_clone_resize(self, path, new_size_bytes,
+ qos_policy_group_name=None):
"""Does sub LUN clone after verification.
Clones the block ranges and swaps
' as it contains no blocks.')
raise exception.VolumeBackendAPIException(data=msg % name)
new_lun = 'new-%s' % name
- self.zapi_client.create_lun(vol_name, new_lun, new_size_bytes,
- metadata)
+ self.zapi_client.create_lun(
+ vol_name, new_lun, new_size_bytes, metadata,
+ qos_policy_group_name=qos_policy_group_name)
try:
- self._clone_lun(name, new_lun, block_count=block_count)
+ self._clone_lun(name, new_lun, block_count=block_count,
+ qos_policy_group_name=qos_policy_group_name)
+
self._post_sub_clone_resize(path)
except Exception:
with excutils.save_and_reraise_exception():
block_count = ls / bs
return block_count
- def _check_volume_type_for_lun(self, volume, lun, existing_ref):
+ def _check_volume_type_for_lun(self, volume, lun, existing_ref,
+ extra_specs):
"""Checks if lun satifies the volume type."""
raise NotImplementedError()
source-name: complete lun path eg. /vol/vol0/lun.
"""
lun = self._get_existing_vol_with_manage_ref(existing_ref)
- self._check_volume_type_for_lun(volume, lun, existing_ref)
+
+ extra_specs = na_utils.get_volume_extra_specs(volume)
+
+ self._check_volume_type_for_lun(volume, lun, existing_ref, extra_specs)
+
+ qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs)
+ qos_policy_group_name = (
+ na_utils.get_qos_policy_group_name_from_info(
+ qos_policy_group_info))
+
path = lun.get_metadata_property('Path')
if lun.name == volume['name']:
+ new_path = path
LOG.info(_LI("LUN with given ref %s need not be renamed "
"during manage operation."), existing_ref)
else:
self.zapi_client.move_lun(path, new_path)
lun = self._get_existing_vol_with_manage_ref(
{'source-name': new_path})
+ if qos_policy_group_name is not None:
+ self.zapi_client.set_lun_qos_policy_group(new_path,
+ qos_policy_group_name)
self._add_lun_to_table(lun)
LOG.info(_LI("Manage operation completed for LUN with new path"
" %(path)s and uuid %(uuid)s."),
LOG.debug("Mapped LUN %(name)s to the initiator(s) %(initiators)s",
{'name': volume_name, 'initiators': initiators})
- target_wwpns, initiator_target_map, num_paths = \
- self._build_initiator_target_map(connector)
+ target_wwpns, initiator_target_map, num_paths = (
+ self._build_initiator_target_map(connector))
if target_wwpns:
LOG.debug("Successfully fetched target details for LUN %(name)s "
LOG.info(_LI("Need to remove FC Zone, building initiator "
"target map"))
- target_wwpns, initiator_target_map, num_paths = \
- self._build_initiator_target_map(connector)
+ target_wwpns, initiator_target_map, num_paths = (
+ self._build_initiator_target_map(connector))
info['data'] = {'target_wwn': target_wwpns,
'initiator_target_map': initiator_target_map}
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
import six
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _
+from cinder.openstack.common import loopingcall
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import block_base
-from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import options as na_opts
LOG = logging.getLogger(__name__)
+QOS_CLEANUP_INTERVAL_SECONDS = 60
class NetAppBlockStorageCmodeLibrary(block_base.
"""Check that the driver is working and can communicate."""
ssc_cmode.check_ssc_api_permissions(self.zapi_client)
super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error()
+ self._start_periodic_tasks()
+
+ def _start_periodic_tasks(self):
+ # Start the task that harvests soft-deleted QoS policy groups.
+ harvest_qos_periodic_task = loopingcall.FixedIntervalLoopingCall(
+ self.zapi_client.remove_unused_qos_policy_groups)
+ harvest_qos_periodic_task.start(
+ interval=QOS_CLEANUP_INTERVAL_SECONDS,
+ initial_delay=QOS_CLEANUP_INTERVAL_SECONDS)
def _create_lun(self, volume_name, lun_name, size,
- metadata, qos_policy_group=None):
+ metadata, qos_policy_group_name=None):
"""Creates a LUN, handling Data ONTAP differences as needed."""
self.zapi_client.create_lun(
- volume_name, lun_name, size, metadata, qos_policy_group)
+ volume_name, lun_name, size, metadata, qos_policy_group_name)
self._update_stale_vols(
volume=ssc_cmode.NetAppVolume(volume_name, self.vserver))
if initiator_igroups and lun_maps:
for igroup in initiator_igroups:
igroup_name = igroup['initiator-group-name']
- if igroup_name.startswith(self.IGROUP_PREFIX):
+ if igroup_name.startswith(na_utils.OPENSTACK_PREFIX):
for lun_map in lun_maps:
if lun_map['initiator-group'] == igroup_name:
return igroup_name, lun_map['lun-id']
return None, None
def _clone_lun(self, name, new_name, space_reserved='true',
- src_block=0, dest_block=0, block_count=0):
+ qos_policy_group_name=None, src_block=0, dest_block=0,
+ block_count=0):
"""Clone LUN with the given handle to the new name."""
metadata = self._get_lun_attr(name, 'metadata')
volume = metadata['Volume']
self.zapi_client.clone_lun(volume, name, new_name, space_reserved,
- src_block=0, dest_block=0, block_count=0)
+ qos_policy_group_name=qos_policy_group_name,
+ src_block=0, dest_block=0,
+ block_count=0)
LOG.debug("Cloned LUN with new name %s", new_name)
lun = self.zapi_client.get_lun_by_args(vserver=self.vserver,
path='/vol/%s/%s'
for vol in self.ssc_vols['all']:
pool = dict()
pool['pool_name'] = vol.id['name']
- pool['QoS_support'] = False
+ pool['QoS_support'] = True
pool['reserved_percentage'] = 0
# convert sizes to GB and de-rate by NetApp multiplier
if lun:
netapp_vol = lun.get_metadata_property('Volume')
super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume)
+ try:
+ qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
+ volume)
+ except exception.Invalid:
+ # Delete even if there was invalid qos policy specified for the
+ # volume.
+ qos_policy_group_info = None
+ self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
if netapp_vol:
self._update_stale_vols(
volume=ssc_cmode.NetAppVolume(netapp_vol, self.vserver))
+ msg = 'Deleted LUN with name %(name)s and QoS info %(qos)s'
+ LOG.debug(msg, {'name': volume['name'], 'qos': qos_policy_group_info})
- def _check_volume_type_for_lun(self, volume, lun, existing_ref):
+ def _check_volume_type_for_lun(self, volume, lun, existing_ref,
+ extra_specs):
"""Check if LUN satisfies volume type."""
- extra_specs = na_utils.get_volume_extra_specs(volume)
- match_write = False
-
def scan_ssc_data():
volumes = ssc_cmode.get_volumes_for_specs(self.ssc_vols,
extra_specs)
self, self.zapi_client.get_connection(), self.vserver)
match_read = scan_ssc_data()
- qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
- if extra_specs else None
- if qos_policy_group:
- if match_read:
- try:
- path = lun.get_metadata_property('Path')
- self.zapi_client.set_lun_qos_policy_group(path,
- qos_policy_group)
- match_write = True
- except netapp_api.NaApiError as nae:
- LOG.error(_LE("Failure setting QoS policy group. %s"), nae)
- else:
- match_write = True
- if not (match_read and match_write):
+ if not match_read:
raise exception.ManageExistingVolumeTypeMismatch(
reason=(_("LUN with given ref %(ref)s does not satisfy volume"
" type. Ensure LUN volume with ssc features is"
" present on vserver %(vs)s.")
% {'ref': existing_ref, 'vs': self.vserver}))
- def _get_preferred_target_from_list(self, target_details_list):
+ def _get_preferred_target_from_list(self, target_details_list,
+ filter=None):
# cDOT iSCSI LIFs do not migrate from controller to controller
# in failover. Rather, an iSCSI LIF must be configured on each
# controller and the initiator has to take responsibility for
return (super(NetAppBlockStorageCmodeLibrary, self)
._get_preferred_target_from_list(target_details_list,
filter=operational_addresses))
+
+    def _setup_qos_for_volume(self, volume, extra_specs):
+        """Provision a backend QoS policy group for a volume, if requested.
+
+        Derives QoS policy group info from the volume and its extra specs,
+        asks the client to create the policy group on the backend (a no-op
+        for None/legacy info), and returns the info for later use.
+
+        :raises VolumeBackendAPIException: if the QoS spec is invalid.
+        """
+        try:
+            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
+                volume, extra_specs)
+        except exception.Invalid:
+            msg = _('Invalid QoS specification detected while getting QoS '
+                    'policy for volume %s') % volume['id']
+            raise exception.VolumeBackendAPIException(data=msg)
+        self.zapi_client.provision_qos_policy_group(qos_policy_group_info)
+        return qos_policy_group_info
+
+    def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info):
+        """Soft-delete the volume's backing QoS policy group, if any."""
+        self.zapi_client.mark_qos_policy_group_for_deletion(
+            qos_policy_group_info)
+
+    def unmanage(self, volume):
+        """Removes the specified volume from Cinder management.
+
+        Does not delete the underlying backend storage object.
+
+        :param volume: Cinder volume to unmanage
+        """
+        try:
+            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
+                volume)
+        except exception.Invalid:
+            # Unmanage even if there was invalid qos policy specified for the
+            # volume.
+            qos_policy_group_info = None
+        self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
+        super(NetAppBlockStorageCmodeLibrary, self).unmanage(volume)
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
return self.connection.invoke_successfully(request, enable_tunneling)
def create_lun(self, volume_name, lun_name, size, metadata,
- qos_policy_group=None):
+ qos_policy_group_name=None):
"""Issues API request for creating LUN on volume."""
path = '/vol/%s/%s' % (volume_name, lun_name)
**{'path': path, 'size': six.text_type(size),
'ostype': metadata['OsType'],
'space-reservation-enabled': metadata['SpaceReserved']})
- if qos_policy_group:
- lun_create.add_new_child('qos-policy-group', qos_policy_group)
+ if qos_policy_group_name:
+ lun_create.add_new_child('qos-policy-group', qos_policy_group_name)
try:
self.connection.invoke_successfully(lun_create, True)
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
+DELETED_PREFIX = 'deleted_cinder_'
class Client(client_base.Client):
return igroup_list
def clone_lun(self, volume, name, new_name, space_reserved='true',
- src_block=0, dest_block=0, block_count=0):
+ qos_policy_group_name=None, src_block=0, dest_block=0,
+ block_count=0):
# zAPI can only handle 2^24 blocks per range
bc_limit = 2 ** 24 # 8GB
# zAPI can only handle 32 block ranges per call
**{'volume': volume, 'source-path': name,
'destination-path': new_name,
'space-reserve': space_reserved})
+ if qos_policy_group_name is not None:
+ clone_create.add_new_child('qos-policy-group-name',
+ qos_policy_group_name)
if block_count > 0:
block_ranges = netapp_api.NaElement("block-ranges")
segments = int(math.ceil(block_count / float(bc_limit)))
return []
return attr_list.get_children()
- def file_assign_qos(self, flex_vol, qos_policy_group, file_path):
- """Retrieves LUN with specified args."""
- file_assign_qos = netapp_api.NaElement.create_node_with_children(
- 'file-assign-qos',
- **{'volume': flex_vol,
- 'qos-policy-group-name': qos_policy_group,
- 'file': file_path,
- 'vserver': self.vserver})
- self.connection.invoke_successfully(file_assign_qos, True)
+    def file_assign_qos(self, flex_vol, qos_policy_group_name, file_path):
+        """Assigns the named QoS policy-group to a file.
+
+        :param flex_vol: name of the FlexVol containing the file
+        :param qos_policy_group_name: existing QoS policy group to assign
+        :param file_path: path of the file within the FlexVol
+        """
+        api_args = {
+            'volume': flex_vol,
+            'qos-policy-group-name': qos_policy_group_name,
+            'file': file_path,
+            'vserver': self.vserver,
+        }
+        # NOTE(review): the trailing False presumably disables vserver
+        # tunneling for this request -- confirm against send_request().
+        return self.send_request('file-assign-qos', api_args, False)
+
+    def provision_qos_policy_group(self, qos_policy_group_info):
+        """Create QOS policy group on the backend if appropriate.
+
+        No-op when qos_policy_group_info is None (no QoS requested) or
+        when it carries a 'legacy' entry (policy group provisioned
+        outside Cinder).
+        """
+        if qos_policy_group_info is None:
+            return
+
+        # Legacy QOS uses externally provisioned QOS policy group,
+        # so we don't need to create one on the backend.
+        legacy = qos_policy_group_info.get('legacy')
+        if legacy is not None:
+            return
+
+        spec = qos_policy_group_info.get('spec')
+        if spec is not None:
+            self.qos_policy_group_create(spec['policy_name'],
+                                         spec['max_throughput'])
+
+    def qos_policy_group_create(self, qos_policy_group_name, max_throughput):
+        """Creates a QOS policy group.
+
+        :param qos_policy_group_name: name for the new policy group
+        :param max_throughput: value for the group's 'max-throughput' field
+        """
+        api_args = {
+            'policy-group': qos_policy_group_name,
+            'max-throughput': max_throughput,
+            'vserver': self.vserver,
+        }
+        return self.send_request('qos-policy-group-create', api_args, False)
+
+    def qos_policy_group_delete(self, qos_policy_group_name):
+        """Attempts to delete a QOS policy group."""
+        api_args = {
+            'policy-group': qos_policy_group_name,
+        }
+        # NOTE(review): unlike the create call, no 'vserver' key is sent
+        # here -- confirm the API scopes the delete correctly without it.
+        return self.send_request('qos-policy-group-delete', api_args, False)
+
+    def qos_policy_group_rename(self, qos_policy_group_name, new_name):
+        """Renames a QOS policy group.
+
+        Used here to soft-delete policy groups by renaming them with
+        DELETED_PREFIX so they can be harvested later.
+        """
+        api_args = {
+            'policy-group-name': qos_policy_group_name,
+            'new-name': new_name,
+        }
+        return self.send_request('qos-policy-group-rename', api_args, False)
+
+    def mark_qos_policy_group_for_deletion(self, qos_policy_group_info):
+        """Do (soft) delete of backing QOS policy group for a cinder volume."""
+        if qos_policy_group_info is None:
+            return
+
+        spec = qos_policy_group_info.get('spec')
+
+        # For cDOT we want to delete the QoS policy group that we created for
+        # this cinder volume. Because the QoS policy may still be "in use"
+        # after the zapi call to delete the volume itself returns successfully,
+        # we instead rename the QoS policy group using a specific pattern and
+        # later attempt on a best effort basis to delete any QoS policy groups
+        # matching that pattern.
+        if spec is not None:
+            current_name = spec['policy_name']
+            new_name = DELETED_PREFIX + current_name
+            try:
+                self.qos_policy_group_rename(current_name, new_name)
+            except netapp_api.NaApiError as ex:
+                msg = _LW('Rename failure in cleanup of cDOT QOS policy group '
+                          '%(name)s: %(ex)s')
+                LOG.warning(msg, {'name': current_name, 'ex': ex})
+
+        # Attempt to delete any QoS policy groups whose names carry
+        # DELETED_PREFIX (i.e. "deleted_cinder_*").
+        self.remove_unused_qos_policy_groups()
+
+    def remove_unused_qos_policy_groups(self):
+        """Deletes all QOS policy groups that are marked for deletion.
+
+        Issues a single qos-policy-group-delete-iter call matching every
+        policy group on this vserver whose name carries DELETED_PREFIX.
+        Failures are logged at debug level and swallowed: this is a
+        best-effort harvester run periodically and after soft deletes.
+        """
+        api_args = {
+            'query': {
+                'qos-policy-group-info': {
+                    'policy-group': '%s*' % DELETED_PREFIX,
+                    'vserver': self.vserver,
+                }
+            },
+            'max-records': 3500,
+            'continue-on-failure': 'true',
+            'return-success-list': 'false',
+            'return-failure-list': 'false',
+        }
+
+        try:
+            self.send_request('qos-policy-group-delete-iter', api_args, False)
+        except netapp_api.NaApiError as ex:
+            # Lazy %-style logging args: the message is only rendered when
+            # debug logging is enabled (was eagerly formatted with %).
+            msg = 'Could not delete QOS policy groups. Details: %(ex)s'
+            msg_args = {'ex': ex}
+            LOG.debug(msg, msg_args)
def set_lun_qos_policy_group(self, path, qos_policy_group):
"""Sets qos_policy_group on a LUN."""
- set_qos_group = netapp_api.NaElement.create_node_with_children(
- 'lun-set-qos-policy-group',
- **{'path': path, 'qos-policy-group': qos_policy_group})
- self.connection.invoke_successfully(set_qos_group, True)
+ api_args = {
+ 'path': path,
+ 'qos-policy-group': qos_policy_group,
+ }
+ return self.send_request('lun-set-qos-policy-group', api_args)
def get_if_info_by_ip(self, ip):
"""Gets the network interface info by ip."""
attr_list = result.get_child_by_name('attributes-list')
return attr_list.get_children()
raise exception.NotFound(
- _('No interface found on cluster for ip %s') % (ip))
+ _('No interface found on cluster for ip %s') % ip)
def get_vol_by_junc_vserver(self, vserver, junction):
"""Gets the volume by junction path and vserver."""
from oslo_log import log as logging
from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
-from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
port=self.configuration.netapp_server_port,
vfiler=self.configuration.netapp_vfiler)
+ self.ssc_enabled = False
+
def check_for_setup_error(self):
"""Checks if setup occurred properly."""
api_version = self.zapi_client.get_ontapi_version()
raise exception.VolumeBackendAPIException(data=msg)
super(NetApp7modeNfsDriver, self).check_for_setup_error()
- def create_volume(self, volume):
- """Creates a volume.
-
- :param volume: volume reference
- """
- LOG.debug('create_volume on %s', volume['host'])
- self._ensure_shares_mounted()
-
- # get share as pool name
- share = volume_utils.extract_host(volume['host'], level='pool')
-
- if share is None:
- msg = _("Pool is not available in the volume host field.")
- raise exception.InvalidHost(reason=msg)
+ def _clone_backing_file_for_volume(self, volume_name, clone_name,
+ volume_id, share=None):
+ """Clone backing file for Cinder volume."""
- volume['provider_location'] = share
- LOG.info(_LI('Creating volume at location %s'),
- volume['provider_location'])
-
- try:
- self._do_create_volume(volume)
- except Exception as ex:
- LOG.error(_LE("Exception creating vol %(name)s on "
- "share %(share)s. Details: %(ex)s"),
- {'name': volume['name'],
- 'share': volume['provider_location'],
- 'ex': ex})
- msg = _("Volume %s could not be created on shares.")
- raise exception.VolumeBackendAPIException(
- data=msg % (volume['name']))
-
- return {'provider_location': volume['provider_location']}
-
- def _clone_volume(self, volume_name, clone_name,
- volume_id, share=None):
- """Clones mounted volume with NetApp filer."""
(_host_ip, export_path) = self._get_export_ip_path(volume_id, share)
storage_path = self.zapi_client.get_actual_path_for_export(export_path)
target_path = '%s/%s' % (storage_path, clone_name)
"""Checks if share is compatible with volume to host it."""
return self._is_share_eligible(share, volume['size'])
- def _check_volume_type(self, volume, share, file_name):
+ def _check_volume_type(self, volume, share, file_name, extra_specs):
"""Matches a volume type for share file."""
- extra_specs = na_utils.get_volume_extra_specs(volume)
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
if qos_policy_group:
raise exception.ManageExistingVolumeTypeMismatch(
reason=(_("Setting file qos policy group is not supported"
" on this storage family and ontap version.")))
+ volume_type = na_utils.get_volume_type_from_volume(volume)
+ if volume_type and 'qos_spec_id' in volume_type:
+ raise exception.ManageExistingVolumeTypeMismatch(
+ reason=_("QoS specs are not supported"
+ " on this storage family and ONTAP version."))
+
+    def _do_qos_for_volume(self, volume, extra_specs, cleanup=False):
+        """Set QoS policy on backend from volume type information.
+
+        7-mode Data ONTAP does not support QoS, so this intentionally
+        does nothing; it only satisfies the base NFS driver's interface.
+        """
+        return
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
-from oslo_utils import excutils
from oslo_utils import units
+import six
import six.moves.urllib.parse as urlparse
from cinder import exception
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
+from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
class NetAppNfsDriver(nfs.NfsDriver):
self._context = context
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
self.zapi_client = None
+ self.ssc_enabled = False
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
"""
return volume['provider_location']
+    def create_volume(self, volume):
+        """Creates a volume.
+
+        :param volume: volume reference
+        :returns: model update dict with 'provider_location' set to the
+            pool (NFS share) extracted from the volume's host field
+        :raises InvalidHost: if no pool is present in the host field
+        :raises VolumeBackendAPIException: if creation or QoS setup fails
+        """
+        LOG.debug('create_volume on %s', volume['host'])
+        self._ensure_shares_mounted()
+
+        # get share as pool name
+        pool_name = volume_utils.extract_host(volume['host'], level='pool')
+
+        if pool_name is None:
+            msg = _("Pool is not available in the volume host field.")
+            raise exception.InvalidHost(reason=msg)
+
+        extra_specs = na_utils.get_volume_extra_specs(volume)
+
+        try:
+            volume['provider_location'] = pool_name
+            LOG.debug('Using pool %s.', pool_name)
+            self._do_create_volume(volume)
+            self._do_qos_for_volume(volume, extra_specs)
+            return {'provider_location': volume['provider_location']}
+        except Exception:
+            LOG.exception(_LE("Exception creating vol %(name)s on "
+                              "pool %(pool)s."),
+                          {'name': volume['name'],
+                           'pool': volume['provider_location']})
+            # We need to set this for the model update in order for the
+            # manager to behave correctly.
+            volume['provider_location'] = None
+        finally:
+            # Runs on success and failure: refresh stale SSC data for the
+            # pool when SSC is enabled.
+            if self.ssc_enabled:
+                self._update_stale_vols(self._get_vol_for_share(pool_name))
+
+        # Only reached on the failure path (the success path returned above).
+        msg = _("Volume %(vol)s could not be created in pool %(pool)s.")
+        raise exception.VolumeBackendAPIException(data=msg % {
+            'vol': volume['name'], 'pool': pool_name})
+
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
- vol_size = volume.size
- snap_size = snapshot.volume_size
+ source = {
+ 'name': snapshot['name'],
+ 'size': snapshot['volume_size'],
+ 'id': snapshot['volume_id'],
+ }
+ return self._clone_source_to_destination_volume(source, volume)
- self._clone_volume(snapshot.name, volume.name, snapshot.volume_id)
- share = self._get_volume_location(snapshot.volume_id)
- volume['provider_location'] = share
- path = self.local_path(volume)
- run_as_root = self._execute_as_root
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a clone of the specified volume.
+
+        :param volume: new Cinder volume to create
+        :param src_vref: existing source volume to clone from
+        """
+        source = {'name': src_vref['name'],
+                  'size': src_vref['size'],
+                  'id': src_vref['id']}
+
+        return self._clone_source_to_destination_volume(source, volume)
+    def _clone_source_to_destination_volume(self, source, destination_volume):
+        """Clone a source (volume or snapshot) into a new Cinder volume.
+
+        :param source: dict with 'name', 'size' and 'id' of the backing
+            file being cloned
+        :param destination_volume: the new Cinder volume
+        :returns: model update dict with 'provider_location'
+        :raises VolumeBackendAPIException: if cloning or QoS setup fails
+        """
+        share = self._get_volume_location(source['id'])
+
+        extra_specs = na_utils.get_volume_extra_specs(destination_volume)
+
+        try:
+            destination_volume['provider_location'] = share
+            self._clone_with_extension_check(
+                source, destination_volume)
+            self._do_qos_for_volume(destination_volume, extra_specs)
+            return {'provider_location': destination_volume[
+                'provider_location']}
+        except Exception:
+            LOG.exception(_LE("Exception creating volume %(name)s from source "
+                              "%(source)s on share %(share)s."),
+                          {'name': destination_volume['id'],
+                           'source': source['name'],
+                           'share': destination_volume['provider_location']})
+            msg = _("Volume %s could not be created on shares.")
+            raise exception.VolumeBackendAPIException(data=msg % (
+                destination_volume['id']))
+
+ def _clone_with_extension_check(self, source, destination_volume):
+ source_size = source['size']
+ source_id = source['id']
+ source_name = source['name']
+ destination_volume_size = destination_volume['size']
+ self._clone_backing_file_for_volume(source_name,
+ destination_volume['name'],
+ source_id)
+ path = self.local_path(destination_volume)
if self._discover_file_till_timeout(path):
self._set_rw_permissions(path)
- if vol_size != snap_size:
+ if destination_volume_size != source_size:
try:
- self.extend_volume(volume, vol_size)
+ self.extend_volume(destination_volume,
+ destination_volume_size)
except Exception:
- with excutils.save_and_reraise_exception():
- LOG.error(
- _LE("Resizing %s failed. Cleaning volume."),
- volume.name)
- self._execute('rm', path, run_as_root=run_as_root)
+ LOG.error(_LE("Resizing %s failed. Cleaning "
+ "volume."), destination_volume['name'])
+ self._cleanup_volume_on_failure(destination_volume)
+ raise exception.CinderException(
+ _("Resizing clone %s failed.")
+ % destination_volume['name'])
+ else:
+ raise exception.CinderException(_("NFS file %s not discovered.")
+ % destination_volume['name'])
+
+ def _cleanup_volume_on_failure(self, volume):
+ LOG.debug('Cleaning up, failed operation on %s', volume['name'])
+ vol_path = self.local_path(volume)
+ if os.path.exists(vol_path):
+ LOG.debug('Found %s, deleting ...', vol_path)
+ self._delete_file_at_path(vol_path)
else:
- raise exception.CinderException(
- _("NFS file %s not discovered.") % volume['name'])
+ LOG.debug('Could not find %s, continuing ...', vol_path)
- return {'provider_location': volume['provider_location']}
+    def _do_qos_for_volume(self, volume, extra_specs, cleanup=False):
+        """Set QoS policy on backend from volume type information.
+
+        Abstract hook; platform subclasses must implement.
+
+        :param cleanup: when True, implementations may remove the volume's
+            backing file if applying QoS fails (see the cDOT subclass)
+        """
+        raise NotImplementedError()
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
- self._clone_volume(snapshot['volume_name'],
- snapshot['name'],
- snapshot['volume_id'])
+ self._clone_backing_file_for_volume(snapshot['volume_name'],
+ snapshot['name'],
+ snapshot['volume_id'])
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
export_path = self._get_export_path(volume_id)
return nfs_server_ip + ':' + export_path
- def _clone_volume(self, volume_name, clone_name, volume_id, share=None):
- """Clones mounted volume using NetApp API."""
+ def _clone_backing_file_for_volume(self, volume_name, clone_name,
+ volume_id, share=None):
+ """Clone backing file for Cinder volume."""
raise NotImplementedError()
def _get_provider_location(self, volume_id):
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume_name)
- def create_cloned_volume(self, volume, src_vref):
- """Creates a clone of the specified volume."""
- vol_size = volume.size
- src_vol_size = src_vref.size
- self._clone_volume(src_vref.name, volume.name, src_vref.id)
- share = self._get_volume_location(src_vref.id)
- volume['provider_location'] = share
- path = self.local_path(volume)
-
- if self._discover_file_till_timeout(path):
- self._set_rw_permissions(path)
- if vol_size != src_vol_size:
- try:
- self.extend_volume(volume, vol_size)
- except Exception:
- LOG.error(
- _LE("Resizing %s failed. Cleaning volume."),
- volume.name)
- self._execute('rm', path,
- run_as_root=self._execute_as_root)
- raise
- else:
- raise exception.CinderException(
- _("NFS file %s not discovered.") % volume['name'])
-
- return {'provider_location': volume['provider_location']}
-
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
raise NotImplementedError()
super(NetAppNfsDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
LOG.info(_LI('Copied image to volume %s using regular download.'),
- volume['name'])
+ volume['id'])
self._register_image_in_cache(volume, image_id)
def _register_image_in_cache(self, volume, image_id):
file_path = '%s/%s' % (dir, dst)
if not os.path.exists(file_path):
LOG.info(_LI('Cloning from cache to destination %s'), dst)
- self._clone_volume(src, dst, volume_id=None, share=share)
+ self._clone_backing_file_for_volume(src, dst, volume_id=None,
+ share=share)
_do_clone()
@utils.synchronized('clean_cache')
@utils.synchronized(f[0], external=True)
def _do_delete():
- if self._delete_file(file_path):
+ if self._delete_file_at_path(file_path):
return True
return False
if bytes_to_free <= 0:
return
- def _delete_file(self, path):
+ def _delete_file_at_path(self, path):
"""Delete file from disk and return result as boolean."""
try:
LOG.debug('Deleting file at path %s', path)
image_id = image_meta['id']
cloned = False
post_clone = False
+
+ extra_specs = na_utils.get_volume_extra_specs(volume)
+
try:
cache_result = self._find_image_in_cache(image_id)
if cache_result:
cloned = self._direct_nfs_clone(volume, image_location,
image_id)
if cloned:
+ self._do_qos_for_volume(volume, extra_specs)
post_clone = self._post_clone_image(volume)
except Exception as e:
msg = e.msg if getattr(e, 'msg', None) else e
LOG.info(_LI('Image cloning unsuccessful for image'
' %(image_id)s. Message: %(msg)s'),
{'image_id': image_id, 'msg': msg})
- vol_path = self.local_path(volume)
- volume['provider_location'] = None
- if os.path.exists(vol_path):
- self._delete_file(vol_path)
finally:
cloned = cloned and post_clone
share = volume['provider_location'] if cloned else None
image_location = self._construct_image_nfs_url(image_location)
share = self._is_cloneable_share(image_location)
run_as_root = self._execute_as_root
-
if share and self._is_share_vol_compatible(volume, share):
LOG.debug('Share is cloneable %s', share)
volume['provider_location'] = share
run_as_root=run_as_root)
if img_info.file_format == 'raw':
LOG.debug('Image is raw %s', image_id)
- self._clone_volume(
+ self._clone_backing_file_for_volume(
img_file, volume['name'],
volume_id=None, share=share)
cloned = True
export_path = nfs_share.rsplit(':', 1)[1]
return self.zapi_client.get_flexvol_capacity(export_path)
- def _check_volume_type(self, volume, share, file_name):
+ def _check_volume_type(self, volume, share, file_name, extra_specs):
"""Match volume type for share file."""
raise NotImplementedError()
LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s",
{'vol': volume['id'],
'ref': existing_vol_ref['source-name']})
- self._check_volume_type(volume, nfs_share, vol_path)
+
+ extra_specs = na_utils.get_volume_extra_specs(volume)
+
+ self._check_volume_type(volume, nfs_share, vol_path, extra_specs)
+
if vol_path == volume['name']:
LOG.debug("New Cinder volume %s name matches reference name: "
"no need to rename.", volume['name'])
{'name': existing_vol_ref['source-name'],
'msg': err})
raise exception.VolumeBackendAPIException(data=exception_msg)
+ try:
+ self._do_qos_for_volume(volume, extra_specs, cleanup=False)
+ except Exception as err:
+ exception_msg = (_("Failed to set QoS for existing volume "
+ "%(name)s, Error msg: %(msg)s.") %
+ {'name': existing_vol_ref['source-name'],
+ 'msg': six.text_type(err)})
+ raise exception.VolumeBackendAPIException(data=exception_msg)
return {'provider_location': nfs_share}
def manage_existing_get_size(self, volume, existing_vol_ref):
:param volume: Cinder volume to unmanage
"""
- CONF = cfg.CONF
vol_str = CONF.volume_name_template % volume['id']
vol_path = os.path.join(volume['provider_location'], vol_str)
LOG.info(_LI("Cinder NFS volume with current path \"%(cr)s\" is "
"no longer being managed."), {'cr': vol_path})
+
+    @utils.synchronized('update_stale')
+    def _update_stale_vols(self, volume=None, reset=False):
+        """Populates stale vols with vol and returns set copy.
+
+        Abstract hook; SSC-capable subclasses must override.
+        """
+        raise NotImplementedError
+
+    def _get_vol_for_share(self, nfs_share):
+        """Gets the ssc vol with given share.
+
+        Abstract hook; SSC-capable subclasses must override.
+        """
+        raise NotImplementedError
import uuid
from oslo_log import log as logging
+from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
+from cinder.openstack.common import loopingcall
from cinder import utils
-from cinder.volume.drivers.netapp.dataontap.client import api as na_api
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
LOG = logging.getLogger(__name__)
+QOS_CLEANUP_INTERVAL_SECONDS = 60
class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
"""Check that the driver is working and can communicate."""
super(NetAppCmodeNfsDriver, self).check_for_setup_error()
ssc_cmode.check_ssc_api_permissions(self.zapi_client)
+ self._start_periodic_tasks()
- def create_volume(self, volume):
- """Creates a volume.
-
- :param volume: volume reference
- """
- LOG.debug('create_volume on %s', volume['host'])
- self._ensure_shares_mounted()
-
- # get share as pool name
- share = volume_utils.extract_host(volume['host'], level='pool')
-
- if share is None:
- msg = _("Pool is not available in the volume host field.")
- raise exception.InvalidHost(reason=msg)
-
- extra_specs = na_utils.get_volume_extra_specs(volume)
- qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
- if extra_specs else None
-
- # warn on obsolete extra specs
- na_utils.log_extra_spec_warnings(extra_specs)
-
+ def _do_qos_for_volume(self, volume, extra_specs, cleanup=True):
try:
- volume['provider_location'] = share
- LOG.info(_LI('casted to %s'), volume['provider_location'])
- self._do_create_volume(volume)
- if qos_policy_group:
- self._set_qos_policy_group_on_volume(volume, share,
- qos_policy_group)
- return {'provider_location': volume['provider_location']}
- except Exception as ex:
- LOG.error(_LE("Exception creating vol %(name)s on "
- "share %(share)s. Details: %(ex)s"),
- {'name': volume['name'],
- 'share': volume['provider_location'],
- 'ex': ex})
- volume['provider_location'] = None
- finally:
- if self.ssc_enabled:
- self._update_stale_vols(self._get_vol_for_share(share))
-
- msg = _("Volume %s could not be created on shares.")
- raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
-
- def _set_qos_policy_group_on_volume(self, volume, share, qos_policy_group):
+ qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
+ volume, extra_specs)
+ self.zapi_client.provision_qos_policy_group(qos_policy_group_info)
+ self._set_qos_policy_group_on_volume(volume, qos_policy_group_info)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_LE("Setting QoS for %s failed"), volume['id'])
+ if cleanup:
+ LOG.debug("Cleaning volume %s", volume['id'])
+ self._cleanup_volume_on_failure(volume)
+
+    def _start_periodic_tasks(self):
+        """Kick off recurring driver maintenance tasks."""
+        # Start the task that harvests soft-deleted QoS policy groups.
+        # Fires every QOS_CLEANUP_INTERVAL_SECONDS after an equal initial
+        # delay; the task handle is not retained.
+        harvest_qos_periodic_task = loopingcall.FixedIntervalLoopingCall(
+            self.zapi_client.remove_unused_qos_policy_groups)
+        harvest_qos_periodic_task.start(
+            interval=QOS_CLEANUP_INTERVAL_SECONDS,
+            initial_delay=QOS_CLEANUP_INTERVAL_SECONDS)
+
+ def _set_qos_policy_group_on_volume(self, volume, qos_policy_group_info):
+ if qos_policy_group_info is None:
+ return
+ qos_policy_group_name = na_utils.get_qos_policy_group_name_from_info(
+ qos_policy_group_info)
+ if qos_policy_group_name is None:
+ return
target_path = '%s' % (volume['name'])
+ share = volume_utils.extract_host(volume['host'], level='pool')
export_path = share.split(':')[1]
flex_vol_name = self.zapi_client.get_vol_by_junc_vserver(self.vserver,
export_path)
self.zapi_client.file_assign_qos(flex_vol_name,
- qos_policy_group,
+ qos_policy_group_name,
target_path)
- def _check_volume_type(self, volume, share, file_name):
+ def _check_volume_type(self, volume, share, file_name, extra_specs):
"""Match volume type for share file."""
- extra_specs = na_utils.get_volume_extra_specs(volume)
- qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
- if extra_specs else None
if not self._is_share_vol_type_match(volume, share):
raise exception.ManageExistingVolumeTypeMismatch(
reason=(_("Volume type does not match for share %s."),
share))
- if qos_policy_group:
- try:
- vserver, flex_vol_name = self._get_vserver_and_exp_vol(
- share=share)
- self.zapi_client.file_assign_qos(flex_vol_name,
- qos_policy_group,
- file_name)
- except na_api.NaApiError as ex:
- LOG.exception(_LE('Setting file QoS policy group failed. %s'),
- ex)
- raise exception.NetAppDriverException(
- reason=(_('Setting file QoS policy group failed. %s'), ex))
-
- def _clone_volume(self, volume_name, clone_name,
- volume_id, share=None):
- """Clones mounted volume on NetApp Cluster."""
+
+ def _clone_backing_file_for_volume(self, volume_name, clone_name,
+ volume_id, share=None):
+ """Clone backing file for Cinder volume."""
(vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share)
self.zapi_client.clone_file(exp_volume, volume_name, clone_name,
vserver)
pool = dict()
pool['pool_name'] = nfs_share
- pool['QoS_support'] = False
+ pool['QoS_support'] = True
pool.update(capacity)
# add SSC content if available
file_list = []
(vserver, exp_volume) = self._get_vserver_and_exp_vol(
volume_id=None, share=share)
- for file in old_files:
- path = '/vol/%s/%s' % (exp_volume, file)
+ for old_file in old_files:
+ path = '/vol/%s/%s' % (exp_volume, old_file)
u_bytes = self.zapi_client.get_file_usage(path, vserver)
- file_list.append((file, u_bytes))
+ file_list.append((old_file, u_bytes))
LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
return file_list
"""Deletes a logical volume."""
share = volume['provider_location']
super(NetAppCmodeNfsDriver, self).delete_volume(volume)
+ try:
+ qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
+ volume)
+ self.zapi_client.mark_qos_policy_group_for_deletion(
+ qos_policy_group_info)
+ except Exception:
+ # Don't blow up here if something went wrong de-provisioning the
+ # QoS policy for the volume.
+ pass
self._post_prov_deprov_in_ssc(share)
def delete_snapshot(self, snapshot):
{'img': image_id, 'vol': volume['id']})
finally:
if os.path.exists(dst_img_conv_local):
- self._delete_file(dst_img_conv_local)
+ self._delete_file_at_path(dst_img_conv_local)
self._post_clone_image(volume)
finally:
if os.path.exists(dst_img_local):
- self._delete_file(dst_img_local)
+ self._delete_file_at_path(dst_img_local)
+
+    def unmanage(self, volume):
+        """Removes the specified volume from Cinder management.
+
+        Does not delete the underlying backend storage object. A log entry
+        will be made to notify the Admin that the volume is no longer being
+        managed.
+
+        :param volume: Cinder volume to unmanage
+        """
+        # Best-effort QoS cleanup: mark the volume's backend policy group
+        # for deletion, but never let a failure here block the unmanage.
+        try:
+            policy_info = na_utils.get_valid_qos_policy_group_info(volume)
+            self.zapi_client.mark_qos_policy_group_for_deletion(policy_info)
+        except Exception:
+            pass
+
+        super(NetAppCmodeNfsDriver, self).unmanage(volume)
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
def get_volumes_for_specs(ssc_vols, specs):
"""Shortlists volumes for extra specs provided."""
- if specs is None or not isinstance(specs, dict):
+ if specs is None or specs == {} or not isinstance(specs, dict):
return ssc_vols['all']
result = copy.deepcopy(ssc_vols['all'])
raid_type = specs.get('netapp:raid_type')
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
from cinder import context
from cinder import exception
-from cinder.i18n import _, _LW, _LI
+from cinder.i18n import _, _LE, _LW, _LI
from cinder import utils
from cinder import version
+from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
+OPENSTACK_PREFIX = 'openstack-'
OBSOLETE_SSC_SPECS = {'netapp:raid_type': 'netapp_raid_type',
'netapp:disk_type': 'netapp_disk_type'}
DEPRECATED_SSC_SPECS = {'netapp_unmirrored': 'netapp_mirrored',
'netapp_nodedup': 'netapp_dedup',
'netapp_nocompression': 'netapp_compression',
'netapp_thick_provisioned': 'netapp_thin_provisioned'}
+QOS_KEYS = frozenset(
+ ['maxIOPS', 'total_iops_sec', 'maxBPS', 'total_bytes_sec'])
+BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both'])
def validate_instantiation(**kwargs):
"""Provides extra specs associated with volume."""
ctxt = context.get_admin_context()
type_id = volume.get('volume_type_id')
- specs = None
- if type_id is not None:
- volume_type = volume_types.get_volume_type(ctxt, type_id)
- specs = volume_type.get('extra_specs')
- return specs
+ if type_id is None:
+ return {}
+ volume_type = volume_types.get_volume_type(ctxt, type_id)
+ if volume_type is None:
+ return {}
+ extra_specs = volume_type.get('extra_specs', {})
+ log_extra_spec_warnings(extra_specs)
+ return extra_specs
def resolve_hostname(hostname):
}
+def validate_qos_spec(qos_spec):
+    """Check validity of Cinder qos spec for our backend.
+
+    Only the keys are validated here; the values are interpreted later
+    when the spec is mapped to a backend policy group.
+
+    :param qos_spec: QoS spec dict to validate; None is accepted (no-op)
+    :raises exception.Invalid: if a key is not one of QOS_KEYS, or if
+        more than one limit is specified
+    """
+    if qos_spec is None:
+        return
+    normalized_qos_keys = [key.lower() for key in QOS_KEYS]
+    keylist = []
+    # Iterate keys only -- the values are not inspected during validation.
+    for key in qos_spec:
+        lower_case_key = key.lower()
+        if lower_case_key not in normalized_qos_keys:
+            msg = _('Unrecognized QOS keyword: "%s"') % key
+            raise exception.Invalid(msg)
+        keylist.append(lower_case_key)
+    # Modify the following check when we allow multiple settings in one spec.
+    if len(keylist) > 1:
+        msg = _('Only one limit can be set in a QoS spec.')
+        raise exception.Invalid(msg)
+
+
+def get_volume_type_from_volume(volume):
+    """Return the volume type dict for the volume, or {} if it has none."""
+    vol_type_id = volume.get('volume_type_id')
+    if vol_type_id is None:
+        return {}
+    admin_ctxt = context.get_admin_context()
+    return volume_types.get_volume_type(admin_ctxt, vol_type_id)
+
+
+def map_qos_spec(qos_spec, volume):
+    """Map Cinder QOS spec to limit/throughput-value as used in client API.
+
+    :param qos_spec: Cinder QoS spec dict (keys in any casing); may be None
+    :param volume: volume used to derive the backend policy group name
+    :returns: dict with 'policy_name' and 'max_throughput' keys, or None
+        when qos_spec is None
+    """
+    if qos_spec is None:
+        return None
+    qos_spec = map_dict_to_lower(qos_spec)
+    spec = dict(policy_name=get_qos_policy_group_name(volume),
+                max_throughput=None)
+    # IOPS and BPS specifications are exclusive of one another.
+    # Check each recognized key explicitly so a spec written with the
+    # libvirt-style keys (total_iops_sec/total_bytes_sec) does not raise
+    # KeyError when the NetApp-style key (maxiops/maxbps) is absent.
+    if 'maxiops' in qos_spec:
+        spec['max_throughput'] = '%siops' % qos_spec['maxiops']
+    elif 'total_iops_sec' in qos_spec:
+        spec['max_throughput'] = '%siops' % qos_spec['total_iops_sec']
+    elif 'maxbps' in qos_spec:
+        spec['max_throughput'] = '%sB/s' % qos_spec['maxbps']
+    elif 'total_bytes_sec' in qos_spec:
+        spec['max_throughput'] = '%sB/s' % qos_spec['total_bytes_sec']
+    return spec
+
+
+def map_dict_to_lower(input_dict):
+    """Return an equivalent to the input dictionary with lower-case keys."""
+    return {key.lower(): value for key, value in input_dict.items()}
+
+
+def get_qos_policy_group_name(volume):
+    """Return the name of backend QOS policy group based on its volume id."""
+    try:
+        return OPENSTACK_PREFIX + volume['id']
+    except KeyError:
+        # Volume has no id; no policy group name can be derived.
+        return None
+
+
+def get_qos_policy_group_name_from_info(qos_policy_group_info):
+    """Return the name of a QOS policy group given qos policy group info."""
+    if qos_policy_group_info is None:
+        return None
+    # A legacy extra-specs policy takes precedence over a QoS-spec policy.
+    for source in ('legacy', 'spec'):
+        entry = qos_policy_group_info.get(source)
+        if entry is not None:
+            return entry['policy_name']
+    return None
+
+
+def get_valid_qos_policy_group_info(volume, extra_specs=None):
+    """Given a volume, return information for QOS provisioning.
+
+    :param volume: Cinder volume; its 'id' and 'name' keys are read for
+        logging
+    :param extra_specs: optional extra specs; when None they are taken
+        from the volume's volume type
+    :returns: dict with 'legacy' and 'spec' entries, either of which may
+        be None
+    :raises exception.Invalid: when both a legacy policy and a QoS spec
+        are configured for the same volume type
+    """
+    info = dict(legacy=None, spec=None)
+    try:
+        volume_type = get_volume_type_from_volume(volume)
+    except KeyError:
+        # Defensive: fall back to "no QoS" rather than failing the caller.
+        LOG.exception(_LE('Cannot get QoS spec for volume %s.'), volume['id'])
+        return info
+    if volume_type is None:
+        return info
+    if extra_specs is None:
+        extra_specs = volume_type.get('extra_specs', {})
+    info['legacy'] = get_legacy_qos_policy(extra_specs)
+    info['spec'] = get_valid_backend_qos_spec_from_volume_type(volume,
+                                                              volume_type)
+    msg = 'QoS policy group info for volume %(vol)s: %(info)s'
+    LOG.debug(msg, {'vol': volume['name'], 'info': info})
+    # Reject configurations that mix legacy and spec-based QoS.
+    check_for_invalid_qos_spec_combination(info, volume_type)
+    return info
+
+
+def get_valid_backend_qos_spec_from_volume_type(volume, volume_type):
+    """Given a volume type, return the associated Cinder QoS spec."""
+    backend_spec = get_backend_qos_spec_from_volume_type(volume_type)
+    if backend_spec is not None:
+        validate_qos_spec(backend_spec)
+        return map_qos_spec(backend_spec, volume)
+    return None
+
+
+def get_backend_qos_spec_from_volume_type(volume_type):
+    """Return the raw QoS spec dict of a volume type, if backend-consumed.
+
+    Front end QoS specs are handled by libvirt and we ignore them here.
+    """
+    spec_id = volume_type.get('qos_specs_id')
+    if spec_id is None:
+        return None
+    admin_ctxt = context.get_admin_context()
+    qos_spec = qos_specs.get_qos_specs(admin_ctxt, spec_id)
+    if qos_spec is None or qos_spec['consumer'] not in BACKEND_QOS_CONSUMERS:
+        return None
+    return qos_spec['specs']
+
+
+def check_for_invalid_qos_spec_combination(info, volume_type):
+    """Invalidate QOS spec if both legacy and non-legacy info is present."""
+    if not (info['legacy'] and info['spec']):
+        return
+    msg = _('Conflicting QoS specifications in volume type '
+            '%s: when QoS spec is associated to volume '
+            'type, legacy "netapp:qos_policy_group" is not allowed in '
+            'the volume type extra specs.') % volume_type['id']
+    raise exception.Invalid(msg)
+
+
+def get_legacy_qos_policy(extra_specs):
+    """Return legacy qos policy information if present in extra specs."""
+    policy_name = extra_specs.get('netapp:qos_policy_group')
+    if policy_name is None:
+        return None
+    return {'policy_name': policy_name}
+
+
class hashabledict(dict):
"""A hashable dictionary that is comparable (i.e. in unit tests, etc.)"""
def __hash__(self):