From 2375384f6e0b9475f82c1eedceb6b3ffc25eae51 Mon Sep 17 00:00:00 2001
From: Patrick East
Date: Thu, 17 Dec 2015 15:28:16 -0800
Subject: [PATCH] Enhance the stats reported from the Pure Volume Drivers
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

This change provides some improvements to the existing scheduler hints
and stats, along with a few new stats that can be used in filter and
goodness functions.

The new stats include:

Latency:
  usec_per_read_op  - Average arrival-to-completion time, measured in
                      microseconds, for a host read operation.
  usec_per_write_op - Average arrival-to-completion time, measured in
                      microseconds, for a host write operation.
  queue_depth       - Average number of queued I/O requests.

IOPS:
  reads_per_sec     - Number of read requests processed per second.
  writes_per_sec    - Number of write requests processed per second.

Bandwidth:
  input_per_sec     - Number of bytes read per second.
  output_per_sec    - Number of bytes written per second.

Metadata:
  total_hosts       - Total number of Purity hosts created.
  total_snapshots   - Total number of snapshots (includes snapshots
                      pending deletion).
  total_pgroups     - Total number of Protection Groups (includes groups
                      pending deletion).

Note: all of these stats describe the entire array; they are not limited
to the volumes, snapshots, and hosts that Cinder manages.

As part of the new and improved stats, the drivers now respect the
per-backend "reserved_percentage" option in cinder.conf. Previously it
was always reported as 0.

Last but not least, there is a new config option named
"pure_automatic_max_oversubscription_ratio". It defaults to True and,
when enabled, keeps the previous behavior of automatically calculating
the maximum thin provisioning (oversubscription) ratio allowed for the
backend. When disabled, the driver respects the
"max_over_subscription_ratio" config option instead.
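For example, with 100 GiB provisioned and 10 GiB actually used, the
automatically calculated ratio would be 100 / 10 = 10; with the new
option disabled the driver reports the configured
max_over_subscription_ratio instead.

As an illustration only (hypothetical backend name and values, not part
of this change), a cinder.conf backend section that opts out of the
automatic calculation could look like:

    [pure-iscsi-1]
    # Example values only; options not shown keep their defaults.
    volume_driver = cinder.volume.drivers.pure.PureISCSIDriver
    volume_backend_name = pure-iscsi-1
    san_ip = 10.0.0.2
    pure_api_token = <REST API token>
    reserved_percentage = 10
    pure_automatic_max_oversubscription_ratio = False
    max_over_subscription_ratio = 20.0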
DocImpact
Change-Id: I733f4f90addc39f4c1b720ce63100b15b0523b3e
Implements: blueprint pure-enhanced-stats
---
 cinder/tests/unit/test_pure.py                     | 228 +++++++++++-------
 cinder/volume/drivers/pure.py                      | 119 ++++++---
 .../pure-enhanced-stats-42a684fe4546d1b1.yaml      |  11 +
 3 files changed, 239 insertions(+), 119 deletions(-)
 create mode 100644 releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml

diff --git a/cinder/tests/unit/test_pure.py b/cinder/tests/unit/test_pure.py
index 6e2a0d81e..9188dc5e7 100644
--- a/cinder/tests/unit/test_pure.py
+++ b/cinder/tests/unit/test_pure.py
@@ -16,6 +16,7 @@
 from copy import deepcopy
 import sys
 
+import ddt
 import mock
 from oslo_utils import units
 
@@ -152,6 +153,18 @@ SPACE_INFO_EMPTY = {
     "total": 0,
 }
 
+PERF_INFO = {
+    'writes_per_sec': 318,
+    'usec_per_write_op': 255,
+    'output_per_sec': 234240,
+    'reads_per_sec': 15,
+    'input_per_sec': 2827943,
+    'time': '2015-12-17T21:50:55Z',
+    'usec_per_read_op': 192,
+    'queue_depth': 4,
+}
+PERF_INFO_RAW = [PERF_INFO]
+
 ISCSI_CONNECTION_INFO = {
     "driver_volume_type": "iscsi",
     "data": {
@@ -184,6 +197,14 @@ PURE_SNAPSHOT = {
     "size": 3221225472,
     "source": "vol1"
 }
+PURE_PGROUP = {
+    "hgroups": None,
+    "hosts": None,
+    "name": "pg1",
+    "source": "pure01",
+    "targets": None,
+    "volumes": ["v1"]
+}
 
 
 class FakePureStorageHTTPError(Exception):
@@ -225,14 +246,18 @@ class PureDriverTestCase(test.TestCase):
             mock_func.side_effect = original_side_effect
 
 
-class PureBaseVolumeDriverTestCase(PureDriverTestCase):
+class PureBaseSharedDriverTestCase(PureDriverTestCase):
     def setUp(self):
-        super(PureBaseVolumeDriverTestCase, self).setUp()
+        super(PureBaseSharedDriverTestCase, self).setUp()
         self.driver = pure.PureBaseVolumeDriver(configuration=self.mock_config)
-
         self.driver._array = self.array
         self.array.get_rest_version.return_value = '1.4'
 
+
+class PureBaseVolumeDriverTestCase(PureBaseSharedDriverTestCase):
+    def setUp(self):
+        super(PureBaseVolumeDriverTestCase, self).setUp()
+
     def test_generate_purity_host_name(self):
         result = self.driver._generate_purity_host_name(
             "really-long-string-thats-a-bit-too-long")
@@ -518,92 +543,6 @@ class PureBaseVolumeDriverTestCase(PureDriverTestCase):
                                                       private=True)
         self.array.delete_host.assert_called_once_with(PURE_HOST_NAME)
 
-    @mock.patch(BASE_DRIVER_OBJ + ".get_filter_function", autospec=True)
-    @mock.patch(BASE_DRIVER_OBJ + "._get_provisioned_space", autospec=True)
-    def test_get_volume_stats(self, mock_space, mock_filter):
-        filter_function = "capabilities.total_volumes < 10"
-        mock_space.return_value = (PROVISIONED_CAPACITY * units.Gi, 100)
-        mock_filter.return_value = filter_function
-        self.assertEqual({}, self.driver.get_volume_stats())
-        self.array.get.return_value = SPACE_INFO
-        result = {
-            "volume_backend_name": VOLUME_BACKEND_NAME,
-            "vendor_name": "Pure Storage",
-            "driver_version": self.driver.VERSION,
-            "storage_protocol": None,
-            "total_capacity_gb": TOTAL_CAPACITY,
-            "free_capacity_gb": TOTAL_CAPACITY - USED_SPACE,
-            "reserved_percentage": 0,
-            "consistencygroup_support": True,
-            "thin_provisioning_support": True,
-            "provisioned_capacity": PROVISIONED_CAPACITY,
-            "max_over_subscription_ratio": (PROVISIONED_CAPACITY /
-                                            USED_SPACE),
-            "total_volumes": 100,
-            "filter_function": filter_function,
-            "multiattach": True,
-        }
-        real_result = self.driver.get_volume_stats(refresh=True)
-        self.assertDictMatch(result, real_result)
-        self.assertDictMatch(result, self.driver._stats)
-
-    @mock.patch(BASE_DRIVER_OBJ + ".get_filter_function", autospec=True)
-    @mock.patch(BASE_DRIVER_OBJ + "._get_provisioned_space", autospec=True)
-    def test_get_volume_stats_empty_array(self, mock_space, mock_filter):
-        filter_function = "capabilities.total_volumes < 10"
-        mock_space.return_value = (PROVISIONED_CAPACITY * units.Gi, 100)
-        mock_filter.return_value = filter_function
-        self.assertEqual({}, self.driver.get_volume_stats())
-        self.array.get.return_value = SPACE_INFO_EMPTY
-        result = {
-            "volume_backend_name": VOLUME_BACKEND_NAME,
-            "vendor_name": "Pure Storage",
-            "driver_version": self.driver.VERSION,
-            "storage_protocol": None,
-            "total_capacity_gb": TOTAL_CAPACITY,
-            "free_capacity_gb": TOTAL_CAPACITY,
-            "reserved_percentage": 0,
-            "consistencygroup_support": True,
-            "thin_provisioning_support": True,
-            "provisioned_capacity": PROVISIONED_CAPACITY,
-            "max_over_subscription_ratio": DEFAULT_OVER_SUBSCRIPTION,
-            "total_volumes": 100,
-            "filter_function": filter_function,
-            "multiattach": True,
-        }
-        real_result = self.driver.get_volume_stats(refresh=True)
-        self.assertDictMatch(result, real_result)
-        self.assertDictMatch(result, self.driver._stats)
-
-    @mock.patch(BASE_DRIVER_OBJ + ".get_filter_function", autospec=True)
-    @mock.patch(BASE_DRIVER_OBJ + "._get_provisioned_space", autospec=True)
-    def test_get_volume_stats_nothing_provisioned(self, mock_space,
-                                                  mock_filter):
-        filter_function = "capabilities.total_volumes < 10"
-        mock_space.return_value = (0, 0)
-        mock_filter.return_value = filter_function
-        self.assertEqual({}, self.driver.get_volume_stats())
-        self.array.get.return_value = SPACE_INFO
-        result = {
-            "volume_backend_name": VOLUME_BACKEND_NAME,
-            "vendor_name": "Pure Storage",
-            "driver_version": self.driver.VERSION,
-            "storage_protocol": None,
-            "total_capacity_gb": TOTAL_CAPACITY,
-            "free_capacity_gb": TOTAL_CAPACITY - USED_SPACE,
-            "reserved_percentage": 0,
-            "consistencygroup_support": True,
-            "thin_provisioning_support": True,
-            "provisioned_capacity": 0,
-            "max_over_subscription_ratio": DEFAULT_OVER_SUBSCRIPTION,
-            "total_volumes": 0,
-            "filter_function": filter_function,
-            "multiattach": True,
-        }
-        real_result = self.driver.get_volume_stats(refresh=True)
-        self.assertDictMatch(result, real_result)
-        self.assertDictMatch(result, self.driver._stats)
-
     def test_extend_volume(self):
         vol_name = VOLUME["name"] + "-cinder"
         self.driver.extend_volume(VOLUME, 3)
@@ -1638,3 +1577,114 @@ class PureFCDriverTestCase(PureDriverTestCase):
                           self.driver._connect, VOLUME, FC_CONNECTOR)
         self.assertTrue(self.array.connect_host.called)
         self.assertTrue(self.array.list_volume_private_connections)
+
+
+@ddt.ddt
+class PureVolumeUpdateStatsTestCase(PureBaseSharedDriverTestCase):
+    def setUp(self):
+        super(PureVolumeUpdateStatsTestCase, self).setUp()
+        self.array.get.side_effect = self.fake_get_array
+
+    def fake_get_array(*args, **kwargs):
+        if 'action' in kwargs and kwargs['action'] == 'monitor':
+            return PERF_INFO_RAW
+
+        if 'space' in kwargs and kwargs['space'] is True:
+            return SPACE_INFO
+
+    @ddt.data(dict(used=10,
+                   provisioned=100,
+                   config_ratio=5,
+                   expected_ratio=5,
+                   auto=False),
+              dict(used=10,
+                   provisioned=100,
+                   config_ratio=5,
+                   expected_ratio=10,
+                   auto=True),
+              dict(used=0,
+                   provisioned=100,
+                   config_ratio=5,
+                   expected_ratio=5,
+                   auto=True),
+              dict(used=10,
+                   provisioned=0,
+                   config_ratio=5,
+                   expected_ratio=5,
+                   auto=True))
+    @ddt.unpack
+    def test_get_thin_provisioning(self,
+                                   used,
+                                   provisioned,
+                                   config_ratio,
+                                   expected_ratio,
+                                   auto):
+        self.mock_config.pure_automatic_max_oversubscription_ratio = auto
+        self.mock_config.max_over_subscription_ratio = config_ratio
+        actual_ratio = self.driver._get_thin_provisioning(provisioned, used)
+        self.assertEqual(expected_ratio, actual_ratio)
+
+    @mock.patch(BASE_DRIVER_OBJ + '.get_goodness_function')
+    @mock.patch(BASE_DRIVER_OBJ + '.get_filter_function')
+    @mock.patch(BASE_DRIVER_OBJ + '._get_provisioned_space')
+    @mock.patch(BASE_DRIVER_OBJ + '._get_thin_provisioning')
+    def test_get_volume_stats(self, mock_get_thin_provisioning, mock_get_space,
+                              mock_get_filter, mock_get_goodness):
+        filter_function = 'capabilities.total_volumes < 10'
+        goodness_function = '90'
+        num_hosts = 20
+        num_snaps = 175
+        num_pgroups = 15
+        reserved_percentage = 12
+
+        self.array.list_hosts.return_value = [PURE_HOST] * num_hosts
+        self.array.list_volumes.return_value = [PURE_SNAPSHOT] * num_snaps
+        self.array.list_pgroups.return_value = [PURE_PGROUP] * num_pgroups
+        self.mock_config.reserved_percentage = reserved_percentage
+        mock_get_space.return_value = (PROVISIONED_CAPACITY * units.Gi, 100)
+        mock_get_filter.return_value = filter_function
+        mock_get_goodness.return_value = goodness_function
+        mock_get_thin_provisioning.return_value = (PROVISIONED_CAPACITY /
+                                                   USED_SPACE)
+
+        expected_result = {
+            'volume_backend_name': VOLUME_BACKEND_NAME,
+            'vendor_name': 'Pure Storage',
+            'driver_version': self.driver.VERSION,
+            'storage_protocol': None,
+            'consistencygroup_support': True,
+            'thin_provisioning_support': True,
+            'multiattach': True,
+            'total_capacity_gb': TOTAL_CAPACITY,
+            'free_capacity_gb': TOTAL_CAPACITY - USED_SPACE,
+            'reserved_percentage': reserved_percentage,
+            'provisioned_capacity': PROVISIONED_CAPACITY,
+            'max_over_subscription_ratio': (PROVISIONED_CAPACITY /
+                                            USED_SPACE),
+            'filter_function': filter_function,
+            'goodness_function': goodness_function,
+            'total_volumes': 100,
+            'total_snapshots': num_snaps,
+            'total_hosts': num_hosts,
+            'total_pgroups': num_pgroups,
+            'writes_per_sec': PERF_INFO['writes_per_sec'],
+            'reads_per_sec': PERF_INFO['reads_per_sec'],
+            'input_per_sec': PERF_INFO['input_per_sec'],
+            'output_per_sec': PERF_INFO['output_per_sec'],
+            'usec_per_read_op': PERF_INFO['usec_per_read_op'],
+            'usec_per_write_op': PERF_INFO['usec_per_write_op'],
+            'queue_depth': PERF_INFO['queue_depth'],
+        }
+
+        real_result = self.driver.get_volume_stats(refresh=True)
+        self.assertDictMatch(expected_result, real_result)
+
+        # Make sure when refresh=False we are using cached values and not
+        # sending additional requests to the array.
+        self.array.reset_mock()
+        real_result = self.driver.get_volume_stats(refresh=False)
+        self.assertDictMatch(expected_result, real_result)
+        self.assertFalse(self.array.get.called)
+        self.assertFalse(self.array.list_volumes.called)
+        self.assertFalse(self.array.list_hosts.called)
+        self.assertFalse(self.array.list_pgroups.called)
diff --git a/cinder/volume/drivers/pure.py b/cinder/volume/drivers/pure.py
index 390e14767..3200755f8 100644
--- a/cinder/volume/drivers/pure.py
+++ b/cinder/volume/drivers/pure.py
@@ -47,6 +47,12 @@ LOG = logging.getLogger(__name__)
 PURE_OPTS = [
     cfg.StrOpt("pure_api_token",
                help="REST API authorization token."),
+    cfg.BoolOpt("pure_automatic_max_oversubscription_ratio",
+                default=True,
+                help="Automatically determine an oversubscription ratio based "
+                     "on the current total data reduction values. If used "
+                     "this calculated value will override the "
+                     "max_over_subscription_ratio config option.")
 ]
 
 CONF = cfg.CONF
@@ -293,37 +299,70 @@ class PureBaseVolumeDriver(san.SanDriver):
 
     def _update_stats(self):
         """Set self._stats with relevant information."""
-        info = self._array.get(space=True)
-        total_capacity = float(info["capacity"]) / units.Gi
-        used_space = float(info["total"]) / units.Gi
+
+        # Collect info from the array
+        space_info = self._array.get(space=True)
+        perf_info = self._array.get(action='monitor')[0]  # Always first index
+        hosts = self._array.list_hosts()
+        snaps = self._array.list_volumes(snap=True, pending=True)
+        pgroups = self._array.list_pgroups(pending=True)
+
+        # Perform some translations and calculations
+        total_capacity = float(space_info["capacity"]) / units.Gi
+        used_space = float(space_info["total"]) / units.Gi
         free_space = float(total_capacity - used_space)
         prov_space, total_vols = self._get_provisioned_space()
+        total_hosts = len(hosts)
+        total_snaps = len(snaps)
+        total_pgroups = len(pgroups)
         provisioned_space = float(prov_space) / units.Gi
-        # If array is empty we can not calculate a max oversubscription ratio.
-        # In this case we choose 20 as a default value for the ratio. Once
-        # some volumes are actually created and some data is stored on the
-        # array a much more accurate number will be presented based on current
-        # usage.
-        if used_space == 0 or provisioned_space == 0:
-            thin_provisioning = 20
-        else:
-            thin_provisioning = provisioned_space / used_space
-        data = {
-            "volume_backend_name": self._backend_name,
-            "vendor_name": "Pure Storage",
-            "driver_version": self.VERSION,
-            "storage_protocol": self._storage_protocol,
-            "total_capacity_gb": total_capacity,
-            "free_capacity_gb": free_space,
-            "reserved_percentage": 0,
-            "consistencygroup_support": True,
-            "thin_provisioning_support": True,
-            "provisioned_capacity": provisioned_space,
-            "max_over_subscription_ratio": thin_provisioning,
-            "total_volumes": total_vols,
-            "filter_function": self.get_filter_function(),
-            "multiattach": True,
-        }
+        thin_provisioning = self._get_thin_provisioning(provisioned_space,
+                                                        used_space)
+
+        # Start with some required info
+        data = dict(
+            volume_backend_name=self._backend_name,
+            vendor_name='Pure Storage',
+            driver_version=self.VERSION,
+            storage_protocol=self._storage_protocol,
+        )
+
+        # Add flags for supported features
+        data['consistencygroup_support'] = True
+        data['thin_provisioning_support'] = True
+        data['multiattach'] = True
+
+        # Add capacity info for scheduler
+        data['total_capacity_gb'] = total_capacity
+        data['free_capacity_gb'] = free_space
+        data['reserved_percentage'] = self.configuration.reserved_percentage
+        data['provisioned_capacity'] = provisioned_space
+        data['max_over_subscription_ratio'] = thin_provisioning
+
+        # Add the filtering/goodness functions
+        data['filter_function'] = self.get_filter_function()
+        data['goodness_function'] = self.get_goodness_function()
+
+        # Add array metadata counts for filtering and weighing functions
+        data['total_volumes'] = total_vols
+        data['total_snapshots'] = total_snaps
+        data['total_hosts'] = total_hosts
+        data['total_pgroups'] = total_pgroups
+
+        # Add performance stats for filtering and weighing functions
+        # IOPS
+        data['writes_per_sec'] = perf_info['writes_per_sec']
+        data['reads_per_sec'] = perf_info['reads_per_sec']
+
+        # Bandwidth
+        data['input_per_sec'] = perf_info['input_per_sec']
+        data['output_per_sec'] = perf_info['output_per_sec']
+
+        # Latency
+        data['usec_per_read_op'] = perf_info['usec_per_read_op']
+        data['usec_per_write_op'] = perf_info['usec_per_write_op']
+        data['queue_depth'] = perf_info['queue_depth']
+
         self._stats = data
 
     def _get_provisioned_space(self):
@@ -331,6 +370,26 @@ class PureBaseVolumeDriver(san.SanDriver):
         volumes = self._array.list_volumes(pending=True)
         return sum(item["size"] for item in volumes), len(volumes)
 
+    def _get_thin_provisioning(self, provisioned_space, used_space):
+        """Get the current value for the thin provisioning ratio.
+
+        If pure_automatic_max_oversubscription_ratio is True we will calculate
+        a value, if not we will respect the configuration option for the
+        max_over_subscription_ratio.
+        """
+        if (self.configuration.pure_automatic_max_oversubscription_ratio and
+                used_space != 0 and provisioned_space != 0):
+            # If array is empty we can not calculate a max oversubscription
+            # ratio. In this case we look to the config option as a starting
+            # point. Once some volumes are actually created and some data is
+            # stored on the array a much more accurate number will be
+            # presented based on current usage.
+            thin_provisioning = provisioned_space / used_space
+        else:
+            thin_provisioning = self.configuration.max_over_subscription_ratio
+
+        return thin_provisioning
+
     @log_debug_trace
     def extend_volume(self, volume, new_size):
         """Extend volume to new_size."""
@@ -769,7 +828,7 @@ class PureBaseVolumeDriver(san.SanDriver):
 
 class PureISCSIDriver(PureBaseVolumeDriver, san.SanISCSIDriver):
 
-    VERSION = "3.0.0"
+    VERSION = "4.0.0"
 
     def __init__(self, *args, **kwargs):
         execute = kwargs.pop("execute", utils.execute)
@@ -931,7 +990,7 @@ class PureISCSIDriver(PureBaseVolumeDriver, san.SanISCSIDriver):
 
 class PureFCDriver(PureBaseVolumeDriver, driver.FibreChannelDriver):
 
-    VERSION = "1.0.0"
+    VERSION = "2.0.0"
 
     def __init__(self, *args, **kwargs):
         execute = kwargs.pop("execute", utils.execute)
diff --git a/releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml b/releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml
new file mode 100644
index 000000000..4bb4bc612
--- /dev/null
+++ b/releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml
@@ -0,0 +1,11 @@
+---
+features:
+  - Adds additional metrics reported to the scheduler for Pure Volume Drivers
+    for better filtering and weighing functions.
+  - Adds a config option to enable/disable automatically calculating a max
+    over-subscription ratio for Pure Volume Drivers. When disabled the
+    drivers will now respect the max_over_subscription_ratio config option.
+fixes:
+  - Fixed an issue where Pure Volume Drivers would ignore the
+    reserved_percentage config option.
+
-- 
2.45.2