Make VNX Cinder Driver aware of VNX Pool Full Threshold
author Jeegn Chen <jeegn.chen@emc.com>
Fri, 10 Apr 2015 06:35:49 +0000 (14:35 +0800)
committer Tina <tina.tang@emc.com>
Mon, 8 Jun 2015 02:42:50 +0000 (22:42 -0400)
When the full threshold of a pool is exceeded, VNX does not allow
more LUNs to be created in the pool unless -ignoreThresholds is
specified explicitly. The following logic is added to address
the issue.

If reserved_percentage is configured in cinder.conf,
get_volume_stats will simply report the specified value.

Otherwise, the driver will check a new boolean option
ignore_pool_full_threshold.
* If the option is set to False, the pool full threshold will be
considered when calculating reserved_percentage.
* If the option is set to True, the original logic is used for
reserved_percentage.

If ignore_pool_full_threshold is True, -ignoreThresholds will
be added to the LUN creation command to force the creation.
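
For reference, the reserved_percentage selection described above can be
summarized by the following minimal Python sketch. It is not part of the
driver; the standalone function name and its parameters are illustrative
only, and the overhead formula is taken from the driver change below.

import math

def reported_reserved_percentage(conf_reserved_percentage,
                                 free_capacity_gb, total_capacity_gb,
                                 pool_full_threshold,
                                 ignore_pool_full_threshold):
    # A non-zero reserved_percentage in cinder.conf is reported as-is.
    if conf_reserved_percentage:
        return conf_reserved_percentage
    # Otherwise estimate the pool-LUN metadata overhead,
    # roughly LUN_Capacity * 0.02 + 3 GB.
    reserved = ((0.02 * free_capacity_gb + 3) /
                (1.02 * total_capacity_gb)) * 100
    # Unless the operator ignores it, also reserve the capacity above the
    # pool full threshold, since VNX rejects LUN creation beyond it.
    if not ignore_pool_full_threshold:
        reserved += 100 - pool_full_threshold
    return int(math.ceil(min(reserved, 100)))

# With the unit-test pool (3281.146 GB total, 3105.303 GB free) and a 70%
# threshold, this yields 32; with ignore_pool_full_threshold=True it is 2.

On the creation path, the same option simply appends -ignoreThresholds to
the lun -create command, as shown in the diff below.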

DocImpact
Change-Id: Ibc3f04f86b28f703930899de8661aade9e35e169
Closes-Bug: #1430410
Closes-Bug: #1442519

cinder/tests/unit/test_emc_vnxdirect.py
cinder/volume/drivers/emc/emc_vnx_cli.py

index 0ef8a53838002a7682744f529ddad047ff728674..a16e1e8615f57b4fe8576212e876a93b02c27381 100644 (file)
@@ -373,25 +373,32 @@ class EMCVNXCLIDriverTestData(object):
                '123456789054321': ['1122334455667777']}
 
     POOL_PROPERTY_CMD = ('storagepool', '-list', '-name', 'unit_test_pool',
-                         '-userCap', '-availableCap', '-state')
+                         '-userCap', '-availableCap',
+                         '-state', '-prcntFullThreshold')
 
     POOL_PROPERTY_W_FASTCACHE_CMD = ('storagepool', '-list', '-name',
                                      'unit_test_pool', '-availableCap',
-                                     '-userCap', '-fastcache', '-state',
-                                     '-subscribedCap')
+                                     '-userCap', '-state',
+                                     '-subscribedCap',
+                                     '-prcntFullThreshold',
+                                     '-fastcache')
 
     def POOL_GET_ALL_CMD(self, withfastcache=False):
         if withfastcache:
             return ('storagepool', '-list', '-availableCap',
-                    '-userCap', '-fastcache', '-state', '-subscribedCap')
+                    '-userCap', '-state', '-subscribedCap',
+                    '-prcntFullThreshold',
+                    '-fastcache')
         else:
             return ('storagepool', '-list', '-availableCap',
-                    '-userCap', '-state', '-subscribedCap')
+                    '-userCap', '-state', '-subscribedCap',
+                    '-prcntFullThreshold')
 
     def POOL_GET_ALL_RESULT(self, withfastcache=False):
         if withfastcache:
             return ("Pool Name:  unit_test_pool\n"
                     "Pool ID:  0\n"
+                    "Percent Full Threshold:  70\n"
                     "User Capacity (Blocks):  6881061888\n"
                     "User Capacity (GBs):  3281.146\n"
                     "Available Capacity (Blocks):  6512292864\n"
@@ -402,6 +409,7 @@ class EMCVNXCLIDriverTestData(object):
                     "\n"
                     "Pool Name:  unit_test_pool2\n"
                     "Pool ID:  1\n"
+                    "Percent Full Threshold:  70\n"
                     "User Capacity (Blocks):  8598306816\n"
                     "User Capacity (GBs):  4099.992\n"
                     "Available Capacity (Blocks):  8356663296\n"
@@ -412,6 +420,7 @@ class EMCVNXCLIDriverTestData(object):
         else:
             return ("Pool Name:  unit_test_pool\n"
                     "Pool ID:  0\n"
+                    "Percent Full Threshold:  70\n"
                     "User Capacity (Blocks):  6881061888\n"
                     "User Capacity (GBs):  3281.146\n"
                     "Available Capacity (Blocks):  6512292864\n"
@@ -421,6 +430,7 @@ class EMCVNXCLIDriverTestData(object):
                     "\n"
                     "Pool Name:  unit_test_pool2\n"
                     "Pool ID:  1\n"
+                    "Percent Full Threshold:  70\n"
                     "User Capacity (Blocks):  8598306816\n"
                     "User Capacity (GBs):  4099.992\n"
                     "Available Capacity (Blocks):  8356663296\n"
@@ -443,6 +453,7 @@ class EMCVNXCLIDriverTestData(object):
         for i, stat in enumerate(states):
             out = ("Pool Name:  Pool_" + str(i) + "\n"
                    "Pool ID:  " + str(i) + "\n"
+                   "Percent Full Threshold:  70\n"
                    "User Capacity (Blocks):  8598306816\n"
                    "User Capacity (GBs):  4099.992\n"
                    "Available Capacity (Blocks):  8356663296\n"
@@ -589,7 +600,7 @@ class EMCVNXCLIDriverTestData(object):
             '-tieringPolicy', 'noMovement']}
 
     def LUN_CREATION_CMD(self, name, size, pool, provisioning, tiering,
-                         poll=True):
+                         ignore_thresholds=False, poll=True):
         initial = ['lun', '-create',
                    '-capacity', size,
                    '-sq', 'gb',
@@ -603,6 +614,8 @@ class EMCVNXCLIDriverTestData(object):
             initial.extend(self.provisioning_values['thick'])
         if tiering:
             initial.extend(self.tiering_values[tiering])
+        if ignore_thresholds:
+            initial.append('-ignoreThresholds')
         return tuple(initial)
 
     def CHECK_FASTCACHE_CMD(self, storage_pool):
@@ -691,20 +704,21 @@ State:  Ready
                 "Current Operation to complete 'Preparing' "
                 "and retry the operation. (0x712d8e0e)", 14)
 
-    POOL_PROPERTY = ("""\
-Pool Name:  unit_test_pool
-Pool ID:  1
-User Capacity (Blocks):  6881061888
-User Capacity (GBs):  3281.146
-Available Capacity (Blocks):  6832207872
-Available Capacity (GBs):  3257.851
-State: Ready
-
-""", 0)
+    POOL_PROPERTY = (
+        "Pool Name:  unit_test_pool\n"
+        "Pool ID:  1\n"
+        "Percent Full Threshold:  70\n"
+        "User Capacity (Blocks):  6881061888\n"
+        "User Capacity (GBs):  3281.146\n"
+        "Available Capacity (Blocks):  6832207872\n"
+        "Available Capacity (GBs):  3257.851\n"
+        "State: Ready\n"
+        "\n", 0)
 
     POOL_PROPERTY_W_FASTCACHE = (
         "Pool Name:  unit_test_pool\n"
         "Pool ID:  1\n"
+        "Percent Full Threshold:  70\n"
         "User Capacity (Blocks):  6881061888\n"
         "User Capacity (GBs):  3281.146\n"
         "Available Capacity (Blocks):  6832207872\n"
@@ -1020,8 +1034,7 @@ class DriverTestCaseBase(test.TestCase):
         self.configuration.storage_vnx_pool_name = 'unit_test_pool'
         self.configuration.san_login = 'sysadmin'
         self.configuration.san_password = 'sysadmin'
-        # set the timeout to 0.012s = 0.0002 * 60 = 1.2ms
-        self.configuration.default_timeout = 0.0002
+        self.configuration.default_timeout = 1
         self.configuration.initiator_auto_registration = True
         self.configuration.check_max_pool_luns_threshold = False
         self.stubs.Set(self.configuration, 'safe_get',
@@ -1033,6 +1046,7 @@ class DriverTestCaseBase(test.TestCase):
         self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
             '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
         self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
+        self.configuration.ignore_pool_full_threshold = False
 
     def driverSetup(self, commands=tuple(), results=tuple()):
         self.driver = self.generate_driver(self.configuration)
@@ -1117,13 +1131,31 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol1', 1,
                 'unit_test_pool',
-                'thick', None, False)),
+                'thick', None, poll=False)),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                       poll=False),
             mock.call(*self.testData.LUN_DELETE_CMD('vol1'))]
 
         fake_cli.assert_has_calls(expect_cmd)
 
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    def test_create_volume_ignore_thresholds(self):
+        self.configuration.ignore_pool_full_threshold = True
+        fake_cli = self.driverSetup()
+        self.driver.create_volume(self.testData.test_volume)
+        expect_cmd = [
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol1', 1,
+                'unit_test_pool',
+                'thick', None,
+                ignore_thresholds=True, poll=False)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+                      poll=False)]
+
+        fake_cli.assert_has_calls(expect_cmd)
+
     @mock.patch(
         "eventlet.event.Event.wait",
         mock.Mock(return_value=None))
@@ -1150,7 +1182,7 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                'compressed', None, False)),
+                'compressed', None, poll=False)),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                 'vol_with_type'), poll=False),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
@@ -1184,7 +1216,7 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                'thin', None, False)),
+                'thin', None, poll=False)),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                 'vol_with_type'), poll=False)]
         fake_cli.assert_has_calls(expect_cmd)
@@ -1213,7 +1245,7 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                'thick', None, False)),
+                'thick', None, poll=False)),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                 'vol_with_type'), poll=False)]
         fake_cli.assert_has_calls(expect_cmd)
@@ -1246,7 +1278,7 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                'compressed', 'highestavailable', False)),
+                'compressed', 'highestavailable', poll=False)),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                 'vol_with_type'), poll=False),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
@@ -1282,7 +1314,7 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                'deduplicated', None, False))]
+                'deduplicated', None, poll=False))]
         fake_cli.assert_has_calls(expect_cmd)
 
     @mock.patch(
@@ -1312,7 +1344,7 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                None, 'auto', False))]
+                None, 'auto', poll=False))]
         fake_cli.assert_has_calls(expect_cmd)
 
     @mock.patch(
@@ -1376,7 +1408,7 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
 
         expected_pool_stats = {
             'free_capacity_gb': 3105.303,
-            'reserved_percentage': 2,
+            'reserved_percentage': 32,
             'location_info': 'unit_test_pool|fakeSerial',
             'total_capacity_gb': 3281.146,
             'provisioned_capacity_gb': 536.14,
@@ -1392,6 +1424,31 @@ class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
 
         self.assertEqual(expected_pool_stats, pool_stats)
 
+    def test_get_volume_stats_ignore_threshold(self):
+        commands = [self.testData.NDU_LIST_CMD,
+                    self.testData.POOL_GET_ALL_CMD(True)]
+        results = [self.testData.NDU_LIST_RESULT,
+                   self.testData.POOL_GET_ALL_RESULT(True)]
+        self.driverSetup(commands, results)
+        self.driver.cli.ignore_pool_full_threshold = True
+        stats = self.driver.get_volume_stats(True)
+
+        pool_stats = stats['pools'][0]
+        self.assertEqual(2, pool_stats['reserved_percentage'])
+
+    def test_get_volume_stats_reserved_percentage_from_conf(self):
+        commands = [self.testData.NDU_LIST_CMD,
+                    self.testData.POOL_GET_ALL_CMD(True)]
+        results = [self.testData.NDU_LIST_RESULT,
+                   self.testData.POOL_GET_ALL_RESULT(True)]
+        self.configuration.reserved_percentage = 22
+        self.driverSetup(commands, results)
+        self.driver.cli.ignore_pool_full_threshold = True
+        stats = self.driver.get_volume_stats(True)
+
+        pool_stats = stats['pools'][0]
+        self.assertEqual(22, pool_stats['reserved_percentage'])
+
     def test_get_volume_stats_too_many_luns(self):
         commands = [self.testData.NDU_LIST_CMD,
                     self.testData.POOL_GET_ALL_CMD(True),
@@ -1981,7 +2038,7 @@ Time Remaining:  0 second(s)
 
     def test_create_volume_cli_failed(self):
         commands = [self.testData.LUN_CREATION_CMD(
-            'failed_vol1', 1, 'unit_test_pool', None, None, False)]
+            'failed_vol1', 1, 'unit_test_pool', None, None, poll=False)]
         results = [FAKE_ERROR_RETURN]
         fake_cli = self.driverSetup(commands, results)
 
@@ -1989,7 +2046,7 @@ Time Remaining:  0 second(s)
                           self.driver.create_volume,
                           self.testData.test_failed_volume)
         expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD(
-            'failed_vol1', 1, 'unit_test_pool', None, None, False))]
+            'failed_vol1', 1, 'unit_test_pool', None, None, poll=False))]
         fake_cli.assert_has_calls(expect_cmd)
 
     @mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
@@ -1997,7 +2054,7 @@ Time Remaining:  0 second(s)
     def test_create_faulted_volume(self):
         volume_name = 'faulted_volume'
         cmd_create = self.testData.LUN_CREATION_CMD(
-            volume_name, 1, 'unit_test_pool', None, None, False)
+            volume_name, 1, 'unit_test_pool', None, None, poll=False)
         cmd_list_preparing = self.testData.LUN_PROPERTY_ALL_CMD(volume_name)
         commands = [cmd_create, cmd_list_preparing]
         results = [SUCCEED,
@@ -2015,7 +2072,7 @@ Time Remaining:  0 second(s)
         self.driver.create_volume(faulted_volume)
         expect_cmd = [
             mock.call(*self.testData.LUN_CREATION_CMD(
-                volume_name, 1, 'unit_test_pool', None, None, False)),
+                volume_name, 1, 'unit_test_pool', None, None, poll=False)),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(volume_name),
                       poll=False),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(volume_name),
@@ -2027,7 +2084,7 @@ Time Remaining:  0 second(s)
     def test_create_offline_volume(self):
         volume_name = 'offline_volume'
         cmd_create = self.testData.LUN_CREATION_CMD(
-            volume_name, 1, 'unit_test_pool', None, None, False)
+            volume_name, 1, 'unit_test_pool', None, None, poll=False)
         cmd_list = self.testData.LUN_PROPERTY_ALL_CMD(volume_name)
         commands = [cmd_create, cmd_list]
         results = [SUCCEED,
@@ -2074,11 +2131,11 @@ Time Remaining:  0 second(s)
                     cmd_migrate_verify]
         results = [output_dest, output_dest, output_migrate,
                    output_migrate_verify]
-        fake_cli = self.driverSetup(commands, results)
+        fake_cli1 = self.driverSetup(commands, results)
 
         self.driver.create_volume_from_snapshot(self.testData.test_volume2,
                                                 self.testData.test_snapshot)
-        expect_cmd = [
+        expect_cmd1 = [
             mock.call(
                 *self.testData.SNAP_MP_CREATE_CMD(
                     name='vol2', source='vol1'),
@@ -2099,7 +2156,17 @@ Time Remaining:  0 second(s)
                       poll=True),
             mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                       poll=True)]
-        fake_cli.assert_has_calls(expect_cmd)
+        fake_cli1.assert_has_calls(expect_cmd1)
+
+        self.configuration.ignore_pool_full_threshold = True
+        fake_cli2 = self.driverSetup(commands, results)
+        self.driver.create_volume_from_snapshot(self.testData.test_volume2,
+                                                self.testData.test_snapshot)
+        expect_cmd2 = [
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol2_dest', 1, 'unit_test_pool', None, None,
+                ignore_thresholds=True))]
+        fake_cli2.assert_has_calls(expect_cmd2)
 
     @mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
                 new=utils.ZeroIntervalLoopingCall)
@@ -2479,7 +2546,7 @@ Time Remaining:  0 second(s)
         results = [self.testData.NDU_LIST_RESULT,
                    ('No snap', 1023),
                    output_migrate_verify]
-        fake_cli = self.driverSetup(commands, results)
+        fake_cli1 = self.driverSetup(commands, results)
         self.driver.cli.enablers = ['-Compression',
                                     '-Deduplication',
                                     '-ThinProvisioning',
@@ -2491,7 +2558,7 @@ Time Remaining:  0 second(s)
                            new_type_data,
                            diff_data,
                            host_test_data)
-        expect_cmd = [
+        expect_cmd1 = [
             mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
@@ -2500,7 +2567,26 @@ Time Remaining:  0 second(s)
             mock.call(*self.testData.MIGRATION_CMD(1, None),
                       retry_disable=True,
                       poll=True)]
-        fake_cli.assert_has_calls(expect_cmd)
+        fake_cli1.assert_has_calls(expect_cmd1)
+
+        self.configuration.ignore_pool_full_threshold = True
+        fake_cli2 = self.driverSetup(commands, results)
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
+            return_value={'array_serial': "FNM00124500890"})
+
+        self.driver.retype(None, self.testData.test_volume3,
+                           new_type_data,
+                           diff_data,
+                           host_test_data)
+        expect_cmd2 = [
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None,
+                ignore_thresholds=True))]
+        fake_cli2.assert_has_calls(expect_cmd2)
 
     @mock.patch(
         "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
@@ -2964,7 +3050,8 @@ Time Remaining:  0 second(s)
             'provisioned_capacity_gb': 8,
             'pool_name': "unit_test_pool",
             'fast_cache_enabled': 'True',
-            'state': 'Ready'}])
+            'state': 'Ready',
+            'pool_full_threshold': 70.0}])
 
         self.driver.update_volume_stats()
         self.driver.create_volume(self.testData.test_volume_with_type)
@@ -3116,7 +3203,7 @@ Time Remaining:  0 second(s)
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol1', 1,
                 'unit_test_pool',
-                None, None, False)),
+                None, None, poll=False)),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                       poll=False),
             mock.call(*self.testData.ADD_LUN_TO_CG_CMD(
@@ -3557,7 +3644,7 @@ class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase):
         pool_stats1 = stats['pools'][0]
         expected_pool_stats1 = {
             'free_capacity_gb': 3105.303,
-            'reserved_percentage': 2,
+            'reserved_percentage': 32,
             'location_info': 'unit_test_pool|fakeSerial',
             'total_capacity_gb': 3281.146,
             'provisioned_capacity_gb': 536.140,
@@ -3575,7 +3662,7 @@ class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase):
         pool_stats2 = stats['pools'][1]
         expected_pool_stats2 = {
             'free_capacity_gb': 3984.768,
-            'reserved_percentage': 2,
+            'reserved_percentage': 32,
             'location_info': 'unit_test_pool2|fakeSerial',
             'total_capacity_gb': 4099.992,
             'provisioned_capacity_gb': 636.240,
@@ -3603,7 +3690,7 @@ class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase):
         pool_stats1 = stats['pools'][0]
         expected_pool_stats1 = {
             'free_capacity_gb': 3105.303,
-            'reserved_percentage': 2,
+            'reserved_percentage': 32,
             'location_info': 'unit_test_pool|fakeSerial',
             'total_capacity_gb': 3281.146,
             'provisioned_capacity_gb': 536.140,
@@ -3621,7 +3708,7 @@ class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase):
         pool_stats2 = stats['pools'][1]
         expected_pool_stats2 = {
             'free_capacity_gb': 3984.768,
-            'reserved_percentage': 2,
+            'reserved_percentage': 32,
             'location_info': 'unit_test_pool2|fakeSerial',
             'total_capacity_gb': 4099.992,
             'provisioned_capacity_gb': 636.240,
@@ -3684,7 +3771,7 @@ class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                'deduplicated', None, False)),
+                'deduplicated', None, poll=False)),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                       poll=False)]
         fake_cli.assert_has_calls(expect_cmd)
@@ -3803,7 +3890,7 @@ class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                'compressed', None, False)),
+                'compressed', None, poll=False)),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                 'vol_with_type'), poll=False),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
@@ -4017,7 +4104,7 @@ class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
 
         expected_pool_stats = {
             'free_capacity_gb': 3105.303,
-            'reserved_percentage': 2,
+            'reserved_percentage': 32,
             'location_info': 'unit_test_pool|fakeSerial',
             'total_capacity_gb': 3281.146,
             'provisioned_capacity_gb': 536.14,
index 2f98454e9d7796acf40815b46c7d92224c657b2f..3b996d740e3662d07c20f89014ddda7b2399ad46 100644 (file)
@@ -114,7 +114,11 @@ loc_opts = [
                 'By default, the value is False.'),
     cfg.BoolOpt('force_delete_lun_in_storagegroup',
                 default=False,
-                help='Delete a LUN even if it is in Storage Groups.')
+                help='Delete a LUN even if it is in Storage Groups.'),
+    cfg.BoolOpt('ignore_pool_full_threshold',
+                default=False,
+                help='Force LUN creation even if '
+                'the full threshold of pool is reached.')
 ]
 
 CONF.register_opts(loc_opts)
@@ -246,8 +250,16 @@ class CommandLineHelper(object):
         'Total Subscribed Capacity *\(GBs\) *:\s*(.*)\s*',
         'provisioned_capacity_gb',
         float)
+    POOL_FULL_THRESHOLD = PropertyDescriptor(
+        '-prcntFullThreshold',
+        'Percent Full Threshold:\s*(.*)\s*',
+        'pool_full_threshold',
+        lambda value: int(value))
 
-    POOL_ALL = [POOL_TOTAL_CAPACITY, POOL_FREE_CAPACITY, POOL_STATE]
+    POOL_ALL = [POOL_TOTAL_CAPACITY,
+                POOL_FREE_CAPACITY,
+                POOL_STATE,
+                POOL_FULL_THRESHOLD]
 
     MAX_POOL_LUNS = PropertyDescriptor(
         '-maxPoolLUNs',
@@ -377,6 +389,7 @@ class CommandLineHelper(object):
     def create_lun_with_advance_feature(self, pool, name, size,
                                         provisioning, tiering,
                                         consistencygroup_id=None,
+                                        ignore_thresholds=False,
                                         poll=True):
         command_create_lun = ['lun', '-create',
                               '-capacity', size,
@@ -391,6 +404,8 @@ class CommandLineHelper(object):
         # tiering
         if tiering:
             command_create_lun.extend(self.tiering_values[tiering])
+        if ignore_thresholds:
+            command_create_lun.append('-ignoreThresholds')
 
         # create lun
         data = self.create_lun_by_cmd(command_create_lun, name)
@@ -1710,8 +1725,16 @@ class EMCVnxCliBase(object):
             self.configuration.force_delete_lun_in_storagegroup)
         if self.force_delete_lun_in_sg:
             LOG.warning(_LW("force_delete_lun_in_storagegroup=True"))
+
         self.max_over_subscription_ratio = (
             self.configuration.max_over_subscription_ratio)
+        self.ignore_pool_full_threshold = (
+            self.configuration.ignore_pool_full_threshold)
+        if self.ignore_pool_full_threshold:
+            LOG.warning(_LW("ignore_pool_full_threshold: True. "
+                            "LUN creation will still be forced "
+                            "even if the pool full threshold is exceeded."))
+        self.reserved_percentage = self.configuration.reserved_percentage
 
     def _get_managed_storage_pools(self, pools):
         storage_pools = set()
@@ -1769,7 +1792,8 @@ class EMCVnxCliBase(object):
                 'provisioning': provisioning,
                 'tiering': tiering,
                 'volume_size': volume_size,
-                'client': self._client
+                'client': self._client,
+                'ignore_pool_full_threshold': self.ignore_pool_full_threshold
             }
             return store_spec
 
@@ -1799,7 +1823,9 @@ class EMCVnxCliBase(object):
 
         data = self._client.create_lun_with_advance_feature(
             pool, volume_name, volume_size,
-            provisioning, tiering, volume['consistencygroup_id'], False)
+            provisioning, tiering, volume['consistencygroup_id'],
+            ignore_thresholds=self.ignore_pool_full_threshold,
+            poll=False)
         model_update = {'provider_location':
                         self._build_provider_location_for_lun(data['lun_id'])}
 
@@ -2038,7 +2064,8 @@ class EMCVnxCliBase(object):
 
         data = self._client.create_lun_with_advance_feature(
             target_pool_name, new_volume_name, volume['size'],
-            provisioning, tiering)
+            provisioning, tiering,
+            ignore_thresholds=self.ignore_pool_full_threshold)
 
         dst_id = data['lun_id']
         moved = self._client.migrate_lun_with_verification(
@@ -2126,7 +2153,6 @@ class EMCVnxCliBase(object):
         pool_stats['total_capacity_gb'] = pool['total_capacity_gb']
         pool_stats['provisioned_capacity_gb'] = (
             pool['provisioned_capacity_gb'])
-        pool_stats['reserved_percentage'] = 0
 
         # Handle pool state Initializing, Ready, Faulted, Offline or Deleting.
         if pool['state'] in ('Initializing', 'Offline', 'Deleting'):
@@ -2136,16 +2162,6 @@ class EMCVnxCliBase(object):
                          'state': pool['state']})
         else:
             pool_stats['free_capacity_gb'] = pool['free_capacity_gb']
-            # Some extra capacity will be used by meta data of pool LUNs.
-            # The overhead is about LUN_Capacity * 0.02 + 3 GB
-            # reserved_percentage will be used to make sure the scheduler
-            # takes the overhead into consideration.
-            # Assume that all the remaining capacity is to be used to create
-            # a thick LUN, reserved_percentage is estimated as follows:
-            reserved = (((0.02 * pool['free_capacity_gb'] + 3) /
-                         (1.02 * pool['total_capacity_gb'])) * 100)
-            pool_stats['reserved_percentage'] = int(math.ceil
-                                                    (min(reserved, 100)))
             if self.check_max_pool_luns_threshold:
                 pool_feature = self._client.get_pool_feature_properties(
                     poll=False) if not pool_feature else pool_feature
@@ -2157,6 +2173,26 @@ class EMCVnxCliBase(object):
                                 pool_feature['max_pool_luns'])
                     pool_stats['free_capacity_gb'] = 0
 
+        if not self.reserved_percentage:
+            # Since the admin is not sure of what value is proper,
+            # the driver will calculate the recommended value.
+
+            # Some extra capacity will be used by meta data of pool LUNs.
+            # The overhead is about LUN_Capacity * 0.02 + 3 GB
+            # reserved_percentage will be used to make sure the scheduler
+            # takes the overhead into consideration.
+            # Assume that all the remaining capacity is to be used to create
+            # a thick LUN, reserved_percentage is estimated as follows:
+            reserved = (((0.02 * pool['free_capacity_gb'] + 3) /
+                         (1.02 * pool['total_capacity_gb'])) * 100)
+            # Take pool full threshold into consideration
+            if not self.ignore_pool_full_threshold:
+                reserved += 100 - pool['pool_full_threshold']
+            pool_stats['reserved_percentage'] = int(math.ceil(min(reserved,
+                                                                  100)))
+        else:
+            pool_stats['reserved_percentage'] = self.reserved_percentage
+
         array_serial = self.get_array_serial()
         pool_stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
                                        {'pool_name': pool['pool_name'],
@@ -3104,6 +3140,7 @@ class EMCVnxCliBase(object):
                 'volume_size': volume['size'],
                 'provisioning': provisioning,
                 'tiering': tiering,
+                'ignore_pool_full_threshold': self.ignore_pool_full_threshold
             }
             work_flow.add(
                 CreateSMPTask(name="CreateSMPTask%s" % i,
@@ -3203,17 +3240,14 @@ class EMCVnxCliBase(object):
         if self.protocol == 'iSCSI':
             self.iscsi_targets = self._client.get_iscsi_targets(poll=False)
 
+        properties = [self._client.POOL_FREE_CAPACITY,
+                      self._client.POOL_TOTAL_CAPACITY,
+                      self._client.POOL_STATE,
+                      self._client.POOL_SUBSCRIBED_CAPACITY,
+                      self._client.POOL_FULL_THRESHOLD]
         if '-FASTCache' in self.enablers:
-            properties = [self._client.POOL_FREE_CAPACITY,
-                          self._client.POOL_TOTAL_CAPACITY,
-                          self._client.POOL_FAST_CACHE,
-                          self._client.POOL_STATE,
-                          self._client.POOL_SUBSCRIBED_CAPACITY]
-        else:
-            properties = [self._client.POOL_FREE_CAPACITY,
-                          self._client.POOL_TOTAL_CAPACITY,
-                          self._client.POOL_STATE,
-                          self._client.POOL_SUBSCRIBED_CAPACITY]
+            properties.append(self._client.POOL_FAST_CACHE)
+
         pool_list = self._client.get_pool_list(properties, False)
 
         if self.storage_pools:
@@ -3289,11 +3323,13 @@ class CreateDestLunTask(task.Task):
                                                 inject=inject)
 
     def execute(self, client, pool_name, dest_vol_name, volume_size,
-                provisioning, tiering, *args, **kwargs):
+                provisioning, tiering, ignore_pool_full_threshold,
+                *args, **kwargs):
         LOG.debug('CreateDestLunTask.execute')
         data = client.create_lun_with_advance_feature(
             pool_name, dest_vol_name, volume_size,
-            provisioning, tiering)
+            provisioning, tiering,
+            ignore_thresholds=ignore_pool_full_threshold)
         return data
 
     def revert(self, result, client, dest_vol_name, *args, **kwargs):