'123456789054321': ['1122334455667777']}
POOL_PROPERTY_CMD = ('storagepool', '-list', '-name', 'unit_test_pool',
- '-userCap', '-availableCap', '-state')
+ '-userCap', '-availableCap',
+ '-state', '-prcntFullThreshold')
POOL_PROPERTY_W_FASTCACHE_CMD = ('storagepool', '-list', '-name',
'unit_test_pool', '-availableCap',
- '-userCap', '-fastcache', '-state',
- '-subscribedCap')
+ '-userCap', '-state',
+ '-subscribedCap',
+ '-prcntFullThreshold',
+ '-fastcache')
def POOL_GET_ALL_CMD(self, withfastcache=False):
if withfastcache:
return ('storagepool', '-list', '-availableCap',
- '-userCap', '-fastcache', '-state', '-subscribedCap')
+ '-userCap', '-state', '-subscribedCap',
+ '-prcntFullThreshold',
+ '-fastcache')
else:
return ('storagepool', '-list', '-availableCap',
- '-userCap', '-state', '-subscribedCap')
+ '-userCap', '-state', '-subscribedCap',
+ '-prcntFullThreshold')
def POOL_GET_ALL_RESULT(self, withfastcache=False):
if withfastcache:
return ("Pool Name: unit_test_pool\n"
"Pool ID: 0\n"
+ "Percent Full Threshold: 70\n"
"User Capacity (Blocks): 6881061888\n"
"User Capacity (GBs): 3281.146\n"
"Available Capacity (Blocks): 6512292864\n"
"\n"
"Pool Name: unit_test_pool2\n"
"Pool ID: 1\n"
+ "Percent Full Threshold: 70\n"
"User Capacity (Blocks): 8598306816\n"
"User Capacity (GBs): 4099.992\n"
"Available Capacity (Blocks): 8356663296\n"
else:
return ("Pool Name: unit_test_pool\n"
"Pool ID: 0\n"
+ "Percent Full Threshold: 70\n"
"User Capacity (Blocks): 6881061888\n"
"User Capacity (GBs): 3281.146\n"
"Available Capacity (Blocks): 6512292864\n"
"\n"
"Pool Name: unit_test_pool2\n"
"Pool ID: 1\n"
+ "Percent Full Threshold: 70\n"
"User Capacity (Blocks): 8598306816\n"
"User Capacity (GBs): 4099.992\n"
"Available Capacity (Blocks): 8356663296\n"
for i, stat in enumerate(states):
out = ("Pool Name: Pool_" + str(i) + "\n"
"Pool ID: " + str(i) + "\n"
+ "Percent Full Threshold: 70\n"
"User Capacity (Blocks): 8598306816\n"
"User Capacity (GBs): 4099.992\n"
"Available Capacity (Blocks): 8356663296\n"
'-tieringPolicy', 'noMovement']}
def LUN_CREATION_CMD(self, name, size, pool, provisioning, tiering,
- poll=True):
+ ignore_thresholds=False, poll=True):
initial = ['lun', '-create',
'-capacity', size,
'-sq', 'gb',
initial.extend(self.provisioning_values['thick'])
if tiering:
initial.extend(self.tiering_values[tiering])
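+ # mirror the '-ignoreThresholds' switch the driver appends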
+ if ignore_thresholds:
+ initial.append('-ignoreThresholds')
return tuple(initial)
def CHECK_FASTCACHE_CMD(self, storage_pool):
"Current Operation to complete 'Preparing' "
"and retry the operation. (0x712d8e0e)", 14)
- POOL_PROPERTY = ("""\
-Pool Name: unit_test_pool
-Pool ID: 1
-User Capacity (Blocks): 6881061888
-User Capacity (GBs): 3281.146
-Available Capacity (Blocks): 6832207872
-Available Capacity (GBs): 3257.851
-State: Ready
-
-""", 0)
+ POOL_PROPERTY = (
+ "Pool Name: unit_test_pool\n"
+ "Pool ID: 1\n"
+ "Percent Full Threshold: 70\n"
+ "User Capacity (Blocks): 6881061888\n"
+ "User Capacity (GBs): 3281.146\n"
+ "Available Capacity (Blocks): 6832207872\n"
+ "Available Capacity (GBs): 3257.851\n"
+ "State: Ready\n"
+ "\n", 0)
POOL_PROPERTY_W_FASTCACHE = (
"Pool Name: unit_test_pool\n"
"Pool ID: 1\n"
+ "Percent Full Threshold: 70\n"
"User Capacity (Blocks): 6881061888\n"
"User Capacity (GBs): 3281.146\n"
"Available Capacity (Blocks): 6832207872\n"
self.configuration.storage_vnx_pool_name = 'unit_test_pool'
self.configuration.san_login = 'sysadmin'
self.configuration.san_password = 'sysadmin'
- # set the timeout to 0.012s = 0.0002 * 60 = 1.2ms
- self.configuration.default_timeout = 0.0002
+ self.configuration.default_timeout = 1
self.configuration.initiator_auto_registration = True
self.configuration.check_max_pool_luns_threshold = False
self.stubs.Set(self.configuration, 'safe_get',
self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
'-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
+ self.configuration.ignore_pool_full_threshold = False
def driverSetup(self, commands=tuple(), results=tuple()):
self.driver = self.generate_driver(self.configuration)
mock.call(*self.testData.LUN_CREATION_CMD(
'vol1', 1,
'unit_test_pool',
- 'thick', None, False)),
+ 'thick', None, poll=False)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
poll=False),
mock.call(*self.testData.LUN_DELETE_CMD('vol1'))]
fake_cli.assert_has_calls(expect_cmd)
+ @mock.patch(
+ "eventlet.event.Event.wait",
+ mock.Mock(return_value=None))
+ def test_create_volume_ignore_thresholds(self):
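+ # enable the option before driverSetup() so the driver
+ # reads it from the configuration at init time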
+ self.configuration.ignore_pool_full_threshold = True
+ fake_cli = self.driverSetup()
+ self.driver.create_volume(self.testData.test_volume)
+ expect_cmd = [
+ mock.call(*self.testData.LUN_CREATION_CMD(
+ 'vol1', 1,
+ 'unit_test_pool',
+ 'thick', None,
+ ignore_thresholds=True, poll=False)),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+ poll=False)]
+
+ fake_cli.assert_has_calls(expect_cmd)
+
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- 'compressed', None, False)),
+ 'compressed', None, poll=False)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
'vol_with_type'), poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- 'thin', None, False)),
+ 'thin', None, poll=False)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
'vol_with_type'), poll=False)]
fake_cli.assert_has_calls(expect_cmd)
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- 'thick', None, False)),
+ 'thick', None, poll=False)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
'vol_with_type'), poll=False)]
fake_cli.assert_has_calls(expect_cmd)
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- 'compressed', 'highestavailable', False)),
+ 'compressed', 'highestavailable', poll=False)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
'vol_with_type'), poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- 'deduplicated', None, False))]
+ 'deduplicated', None, poll=False))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- None, 'auto', False))]
+ None, 'auto', poll=False))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
expected_pool_stats = {
'free_capacity_gb': 3105.303,
- 'reserved_percentage': 2,
+ 'reserved_percentage': 32,
'location_info': 'unit_test_pool|fakeSerial',
'total_capacity_gb': 3281.146,
'provisioned_capacity_gb': 536.14,
self.assertEqual(expected_pool_stats, pool_stats)
+ def test_get_volume_stats_ignore_threshold(self):
+ commands = [self.testData.NDU_LIST_CMD,
+ self.testData.POOL_GET_ALL_CMD(True)]
+ results = [self.testData.NDU_LIST_RESULT,
+ self.testData.POOL_GET_ALL_RESULT(True)]
+ self.driverSetup(commands, results)
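+ # flip the flag on the cli object directly so the stats
+ # update skips the (100 - pool_full_threshold) reserve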
+ self.driver.cli.ignore_pool_full_threshold = True
+ stats = self.driver.get_volume_stats(True)
+
+ pool_stats = stats['pools'][0]
+ self.assertEqual(2, pool_stats['reserved_percentage'])
+
+ def test_get_volume_stats_reserved_percentage_from_conf(self):
+ commands = [self.testData.NDU_LIST_CMD,
+ self.testData.POOL_GET_ALL_CMD(True)]
+ results = [self.testData.NDU_LIST_RESULT,
+ self.testData.POOL_GET_ALL_RESULT(True)]
+ self.configuration.reserved_percentage = 22
+ self.driverSetup(commands, results)
+ self.driver.cli.ignore_pool_full_threshold = True
+ stats = self.driver.get_volume_stats(True)
+
+ pool_stats = stats['pools'][0]
+ self.assertEqual(22, pool_stats['reserved_percentage'])
+
def test_get_volume_stats_too_many_luns(self):
commands = [self.testData.NDU_LIST_CMD,
self.testData.POOL_GET_ALL_CMD(True),
def test_create_volume_cli_failed(self):
commands = [self.testData.LUN_CREATION_CMD(
- 'failed_vol1', 1, 'unit_test_pool', None, None, False)]
+ 'failed_vol1', 1, 'unit_test_pool', None, None, poll=False)]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.driver.create_volume,
self.testData.test_failed_volume)
expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD(
- 'failed_vol1', 1, 'unit_test_pool', None, None, False))]
+ 'failed_vol1', 1, 'unit_test_pool', None, None, poll=False))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
def test_create_faulted_volume(self):
volume_name = 'faulted_volume'
cmd_create = self.testData.LUN_CREATION_CMD(
- volume_name, 1, 'unit_test_pool', None, None, False)
+ volume_name, 1, 'unit_test_pool', None, None, poll=False)
cmd_list_preparing = self.testData.LUN_PROPERTY_ALL_CMD(volume_name)
commands = [cmd_create, cmd_list_preparing]
results = [SUCCEED,
self.driver.create_volume(faulted_volume)
expect_cmd = [
mock.call(*self.testData.LUN_CREATION_CMD(
- volume_name, 1, 'unit_test_pool', None, None, False)),
+ volume_name, 1, 'unit_test_pool', None, None, poll=False)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(volume_name),
poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(volume_name),
def test_create_offline_volume(self):
volume_name = 'offline_volume'
cmd_create = self.testData.LUN_CREATION_CMD(
- volume_name, 1, 'unit_test_pool', None, None, False)
+ volume_name, 1, 'unit_test_pool', None, None, poll=False)
cmd_list = self.testData.LUN_PROPERTY_ALL_CMD(volume_name)
commands = [cmd_create, cmd_list]
results = [SUCCEED,
cmd_migrate_verify]
results = [output_dest, output_dest, output_migrate,
output_migrate_verify]
- fake_cli = self.driverSetup(commands, results)
+ fake_cli1 = self.driverSetup(commands, results)
self.driver.create_volume_from_snapshot(self.testData.test_volume2,
self.testData.test_snapshot)
- expect_cmd = [
+ expect_cmd1 = [
mock.call(
*self.testData.SNAP_MP_CREATE_CMD(
name='vol2', source='vol1'),
poll=True),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
poll=True)]
- fake_cli.assert_has_calls(expect_cmd)
+ fake_cli1.assert_has_calls(expect_cmd1)
+
+ self.configuration.ignore_pool_full_threshold = True
+ fake_cli2 = self.driverSetup(commands, results)
+ self.driver.create_volume_from_snapshot(self.testData.test_volume2,
+ self.testData.test_snapshot)
+ expect_cmd2 = [
+ mock.call(*self.testData.LUN_CREATION_CMD(
+ 'vol2_dest', 1, 'unit_test_pool', None, None,
+ ignore_thresholds=True))]
+ fake_cli2.assert_has_calls(expect_cmd2)
@mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023),
output_migrate_verify]
- fake_cli = self.driverSetup(commands, results)
+ fake_cli1 = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
new_type_data,
diff_data,
host_test_data)
- expect_cmd = [
+ expect_cmd1 = [
mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
mock.call(*self.testData.MIGRATION_CMD(1, None),
retry_disable=True,
poll=True)]
- fake_cli.assert_has_calls(expect_cmd)
+ fake_cli1.assert_has_calls(expect_cmd1)
+
+ self.configuration.ignore_pool_full_threshold = True
+ fake_cli2 = self.driverSetup(commands, results)
+ self.driver.cli.enablers = ['-Compression',
+ '-Deduplication',
+ '-ThinProvisioning',
+ '-FAST']
+ emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
+ return_value={'array_serial': "FNM00124500890"})
+
+ self.driver.retype(None, self.testData.test_volume3,
+ new_type_data,
+ diff_data,
+ host_test_data)
+ expect_cmd2 = [
+ mock.call(*self.testData.LUN_CREATION_CMD(
+ 'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None,
+ ignore_thresholds=True))]
+ fake_cli2.assert_has_calls(expect_cmd2)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
'provisioned_capacity_gb': 8,
'pool_name': "unit_test_pool",
'fast_cache_enabled': 'True',
- 'state': 'Ready'}])
+ 'state': 'Ready',
+ 'pool_full_threshold': 70}])
self.driver.update_volume_stats()
self.driver.create_volume(self.testData.test_volume_with_type)
mock.call(*self.testData.LUN_CREATION_CMD(
'vol1', 1,
'unit_test_pool',
- None, None, False)),
+ None, None, poll=False)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
poll=False),
mock.call(*self.testData.ADD_LUN_TO_CG_CMD(
pool_stats1 = stats['pools'][0]
expected_pool_stats1 = {
'free_capacity_gb': 3105.303,
- 'reserved_percentage': 2,
+ 'reserved_percentage': 32,
'location_info': 'unit_test_pool|fakeSerial',
'total_capacity_gb': 3281.146,
'provisioned_capacity_gb': 536.140,
pool_stats2 = stats['pools'][1]
expected_pool_stats2 = {
'free_capacity_gb': 3984.768,
- 'reserved_percentage': 2,
+ 'reserved_percentage': 32,
'location_info': 'unit_test_pool2|fakeSerial',
'total_capacity_gb': 4099.992,
'provisioned_capacity_gb': 636.240,
pool_stats1 = stats['pools'][0]
expected_pool_stats1 = {
'free_capacity_gb': 3105.303,
- 'reserved_percentage': 2,
+ 'reserved_percentage': 32,
'location_info': 'unit_test_pool|fakeSerial',
'total_capacity_gb': 3281.146,
'provisioned_capacity_gb': 536.140,
pool_stats2 = stats['pools'][1]
expected_pool_stats2 = {
'free_capacity_gb': 3984.768,
- 'reserved_percentage': 2,
+ 'reserved_percentage': 32,
'location_info': 'unit_test_pool2|fakeSerial',
'total_capacity_gb': 4099.992,
'provisioned_capacity_gb': 636.240,
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- 'deduplicated', None, False)),
+ 'deduplicated', None, poll=False)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
poll=False)]
fake_cli.assert_has_calls(expect_cmd)
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- 'compressed', None, False)),
+ 'compressed', None, poll=False)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
'vol_with_type'), poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
expected_pool_stats = {
'free_capacity_gb': 3105.303,
- 'reserved_percentage': 2,
+ 'reserved_percentage': 32,
'location_info': 'unit_test_pool|fakeSerial',
'total_capacity_gb': 3281.146,
'provisioned_capacity_gb': 536.14,
'By default, the value is False.'),
cfg.BoolOpt('force_delete_lun_in_storagegroup',
default=False,
- help='Delete a LUN even if it is in Storage Groups.')
+ help='Delete a LUN even if it is in Storage Groups.'),
+ cfg.BoolOpt('ignore_pool_full_threshold',
+ default=False,
+ help='Force LUN creation even if '
+ 'the pool full threshold is reached.')
]
CONF.register_opts(loc_opts)
'Total Subscribed Capacity *\(GBs\) *:\s*(.*)\s*',
'provisioned_capacity_gb',
float)
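+ # Maps the "Percent Full Threshold: <n>" line of
+ # 'storagepool -list -prcntFullThreshold' output to
+ # the 'pool_full_threshold' key.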
+ POOL_FULL_THRESHOLD = PropertyDescriptor(
+ '-prcntFullThreshold',
+ 'Percent Full Threshold:\s*(.*)\s*',
+ 'pool_full_threshold',
+ int)
- POOL_ALL = [POOL_TOTAL_CAPACITY, POOL_FREE_CAPACITY, POOL_STATE]
+ POOL_ALL = [POOL_TOTAL_CAPACITY,
+ POOL_FREE_CAPACITY,
+ POOL_STATE,
+ POOL_FULL_THRESHOLD]
MAX_POOL_LUNS = PropertyDescriptor(
'-maxPoolLUNs',
def create_lun_with_advance_feature(self, pool, name, size,
provisioning, tiering,
consistencygroup_id=None,
+ ignore_thresholds=False,
poll=True):
command_create_lun = ['lun', '-create',
'-capacity', size,
# tiering
if tiering:
command_create_lun.extend(self.tiering_values[tiering])
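+ # '-ignoreThresholds' allows the LUN to be created even when
+ # the pool has exceeded its percent full threshold.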
+ if ignore_thresholds:
+ command_create_lun.append('-ignoreThresholds')
# create lun
data = self.create_lun_by_cmd(command_create_lun, name)
self.configuration.force_delete_lun_in_storagegroup)
if self.force_delete_lun_in_sg:
LOG.warning(_LW("force_delete_lun_in_storagegroup=True"))
+
self.max_over_subscription_ratio = (
self.configuration.max_over_subscription_ratio)
+ self.ignore_pool_full_threshold = (
+ self.configuration.ignore_pool_full_threshold)
+ if self.ignore_pool_full_threshold:
+ LOG.warning(_LW("ignore_pool_full_threshold: True. "
+ "LUN creation will still be forced "
+ "even if the pool full threshold is exceeded."))
+ self.reserved_percentage = self.configuration.reserved_percentage
def _get_managed_storage_pools(self, pools):
storage_pools = set()
'provisioning': provisioning,
'tiering': tiering,
'volume_size': volume_size,
- 'client': self._client
+ 'client': self._client,
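+ # injected by name into flow tasks such as
+ # CreateDestLunTask.execute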
+ 'ignore_pool_full_threshold': self.ignore_pool_full_threshold
}
return store_spec
data = self._client.create_lun_with_advance_feature(
pool, volume_name, volume_size,
- provisioning, tiering, volume['consistencygroup_id'], False)
+ provisioning, tiering, volume['consistencygroup_id'],
+ ignore_thresholds=self.ignore_pool_full_threshold,
+ poll=False)
model_update = {'provider_location':
self._build_provider_location_for_lun(data['lun_id'])}
data = self._client.create_lun_with_advance_feature(
target_pool_name, new_volume_name, volume['size'],
- provisioning, tiering)
+ provisioning, tiering,
+ ignore_thresholds=self.ignore_pool_full_threshold)
dst_id = data['lun_id']
moved = self._client.migrate_lun_with_verification(
pool_stats['total_capacity_gb'] = pool['total_capacity_gb']
pool_stats['provisioned_capacity_gb'] = (
pool['provisioned_capacity_gb'])
- pool_stats['reserved_percentage'] = 0
# Handle pool state Initializing, Ready, Faulted, Offline or Deleting.
if pool['state'] in ('Initializing', 'Offline', 'Deleting'):
'state': pool['state']})
else:
pool_stats['free_capacity_gb'] = pool['free_capacity_gb']
- # Some extra capacity will be used by meta data of pool LUNs.
- # The overhead is about LUN_Capacity * 0.02 + 3 GB
- # reserved_percentage will be used to make sure the scheduler
- # takes the overhead into consideration.
- # Assume that all the remaining capacity is to be used to create
- # a thick LUN, reserved_percentage is estimated as follows:
- reserved = (((0.02 * pool['free_capacity_gb'] + 3) /
- (1.02 * pool['total_capacity_gb'])) * 100)
- pool_stats['reserved_percentage'] = int(math.ceil
- (min(reserved, 100)))
if self.check_max_pool_luns_threshold:
pool_feature = self._client.get_pool_feature_properties(
poll=False) if not pool_feature else pool_feature
pool_feature['max_pool_luns'])
pool_stats['free_capacity_gb'] = 0
+ if not self.reserved_percentage:
+ # When the admin does not configure reserved_percentage,
+ # the driver calculates a recommended value.
+
+ # Some extra capacity will be used by meta data of pool LUNs.
+ # The overhead is about LUN_Capacity * 0.02 + 3 GB
+ # reserved_percentage will be used to make sure the scheduler
+ # takes the overhead into consideration.
+ # Assuming that all the remaining capacity is used to create
+ # thick LUNs, reserved_percentage is estimated as follows:
+ reserved = (((0.02 * pool['free_capacity_gb'] + 3) /
+ (1.02 * pool['total_capacity_gb'])) * 100)
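+ # For example, with the pool reported in the unit tests
+ # (free 3105.303 GB, total 3281.146 GB):
+ # ((0.02 * 3105.303 + 3) / (1.02 * 3281.146)) * 100 ~= 1.95,
+ # which rounds up to 2 on its own; the 70% pool full
+ # threshold handled below adds (100 - 70), giving ~31.95,
+ # rounded up to 32.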
+ # Take pool full threshold into consideration
+ if not self.ignore_pool_full_threshold:
+ reserved += 100 - pool['pool_full_threshold']
+ pool_stats['reserved_percentage'] = int(math.ceil(min(reserved,
+ 100)))
+ else:
+ pool_stats['reserved_percentage'] = self.reserved_percentage
+
array_serial = self.get_array_serial()
pool_stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
{'pool_name': pool['pool_name'],
'volume_size': volume['size'],
'provisioning': provisioning,
'tiering': tiering,
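+ # consumed by name from the flow store; see
+ # CreateDestLunTask.execute below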
+ 'ignore_pool_full_threshold': self.ignore_pool_full_threshold
}
work_flow.add(
CreateSMPTask(name="CreateSMPTask%s" % i,
if self.protocol == 'iSCSI':
self.iscsi_targets = self._client.get_iscsi_targets(poll=False)
+ properties = [self._client.POOL_FREE_CAPACITY,
+ self._client.POOL_TOTAL_CAPACITY,
+ self._client.POOL_STATE,
+ self._client.POOL_SUBSCRIBED_CAPACITY,
+ self._client.POOL_FULL_THRESHOLD]
if '-FASTCache' in self.enablers:
- properties = [self._client.POOL_FREE_CAPACITY,
- self._client.POOL_TOTAL_CAPACITY,
- self._client.POOL_FAST_CACHE,
- self._client.POOL_STATE,
- self._client.POOL_SUBSCRIBED_CAPACITY]
- else:
- properties = [self._client.POOL_FREE_CAPACITY,
- self._client.POOL_TOTAL_CAPACITY,
- self._client.POOL_STATE,
- self._client.POOL_SUBSCRIBED_CAPACITY]
+ properties.append(self._client.POOL_FAST_CACHE)
+
pool_list = self._client.get_pool_list(properties, False)
if self.storage_pools:
inject=inject)
def execute(self, client, pool_name, dest_vol_name, volume_size,
- provisioning, tiering, *args, **kwargs):
+ provisioning, tiering, ignore_pool_full_threshold,
+ *args, **kwargs):
LOG.debug('CreateDestLunTask.execute')
data = client.create_lun_with_advance_feature(
pool_name, dest_vol_name, volume_size,
- provisioning, tiering)
+ provisioning, tiering,
+ ignore_thresholds=ignore_pool_full_threshold)
return data
def revert(self, result, client, dest_vol_name, *args, **kwargs):