From: Xing Yang
Date: Fri, 16 Jan 2015 17:45:53 +0000 (-0500)
Subject: Add provisioned_capacity
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=29cc21b3b98aeaafa20cddea1098b575ebc90d49;p=openstack-build%2Fcinder-build.git

Add provisioned_capacity

This change is needed by the over-subscription patch:
https://review.openstack.org/#/c/142171/

This patch adds 'provisioned_capacity' to the Cinder base driver, the
Cinder reference driver (LVM), and the scheduler host_manager.
provisioned_capacity is the apparent allocated space indicating how
much capacity has been provisioned.

Example: User A created 2x10G volumes in Cinder on backend A, and
user B created 3x10G volumes on backend A directly, without going
through Cinder. Assuming those are all the volumes provisioned on
backend A, the total provisioned_capacity is 50G, and that is what
the driver should report.

Change-Id: I26035a8b591309a922efa517e50c68eb57ef6d2d
Partial-Implements: blueprint over-subscription-in-thin-provisioning
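
To make the distinction concrete: allocated capacity counts only
Cinder-created volumes, while provisioned capacity counts everything
on the backend. A minimal sketch of the arithmetic in the example
above (variable names are illustrative, not from the patch):

    cinder_volumes_gb = [10, 10]          # user A, created through Cinder
    non_cinder_volumes_gb = [10, 10, 10]  # user B, created directly

    allocated_capacity_gb = sum(cinder_volumes_gb)        # 20G
    provisioned_capacity_gb = sum(cinder_volumes_gb +
                                  non_cinder_volumes_gb)  # 50G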
---

diff --git a/cinder/brick/local_dev/lvm.py b/cinder/brick/local_dev/lvm.py
index 9776ddf2e..5ab879fe3 100644
--- a/cinder/brick/local_dev/lvm.py
+++ b/cinder/brick/local_dev/lvm.py
@@ -67,6 +67,7 @@ class LVM(executor.Executor):
         self.vg_thin_pool_free_space = 0.0
         self._supports_snapshot_lv_activation = None
         self._supports_lvchange_ignoreskipactivation = None
+        self.vg_provisioned_capacity = 0.0
 
         if create_vg and physical_volumes is not None:
             self.pv_list = physical_volumes
@@ -402,15 +403,50 @@ class LVM(executor.Executor):
         self.vg_lv_count = int(vg_list[0]['lv_count'])
         self.vg_uuid = vg_list[0]['uuid']
 
+        total_vols_size = 0.0
         if self.vg_thin_pool is not None:
+            # NOTE(xyang): If providing only self.vg_name,
+            # get_lv_info will output info on the thin pool and all
+            # individual volumes.
+            # get_lv_info(self._root_helper, 'stack-vg')
+            # sudo lvs --noheadings --unit=g -o vg_name,name,size
+            # --nosuffix stack-vg
+            # stack-vg stack-pool 9.51
+            # stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00
+            # stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00
+            # stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00
+            #
+            # If providing both self.vg_name and self.vg_thin_pool,
+            # get_lv_info will output only info on the thin pool, but
+            # not on the individual volumes.
+            # get_lv_info(self._root_helper, 'stack-vg', 'stack-pool')
+            # sudo lvs --noheadings --unit=g -o vg_name,name,size
+            # --nosuffix stack-vg/stack-pool
+            # stack-vg stack-pool 9.51
+            #
+            # We need info on both the thin pool and the volumes,
+            # therefore we should provide only self.vg_name, but not
+            # self.vg_thin_pool here.
             for lv in self.get_lv_info(self._root_helper,
-                                       self.vg_name,
-                                       self.vg_thin_pool):
+                                       self.vg_name):
+                lvsize = lv['size']
+                # get_lv_info runs the "lvs" command with "--nosuffix".
+                # This removes the "g" from "1.00g" and outputs only
+                # "1.00". Running "lvs" without "--nosuffix" would
+                # output "1.00g" if "g" is the unit.
+                # Remove the unit if it is still in lv['size'].
+                if not lv['size'][-1].isdigit():
+                    lvsize = lvsize[:-1]
                 if lv['name'] == self.vg_thin_pool:
-                    self.vg_thin_pool_size = lv['size']
+                    self.vg_thin_pool_size = lvsize
                     tpfs = self._get_thin_pool_free_space(self.vg_name,
                                                           self.vg_thin_pool)
                     self.vg_thin_pool_free_space = tpfs
+                else:
+                    total_vols_size = total_vols_size + float(lvsize)
+            total_vols_size = round(total_vols_size, 2)
+
+        self.vg_provisioned_capacity = total_vols_size
 
     def _calculate_thin_pool_size(self):
         """Calculates the correct size for a thin pool.
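
The suffix handling above can be illustrated in isolation. A minimal
sketch, assuming lvs-style size strings (names and data below are
made up for illustration, not taken from the patch):

    def strip_unit(size):
        # "--nosuffix" yields "1.00"; without it, lvs yields "1.00g".
        return size[:-1] if not size[-1].isdigit() else size

    lvs_rows = [('pool', '9.50g'), ('vol-1', '1.00g'), ('vol-2', '2.00g')]
    provisioned = round(sum(float(strip_unit(size))
                            for name, size in lvs_rows
                            if name != 'pool'), 2)
    assert provisioned == 3.0  # the thin pool itself is excluded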
diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py
index 582a954dc..2fadc13af 100644
--- a/cinder/scheduler/host_manager.py
+++ b/cinder/scheduler/host_manager.py
@@ -109,6 +109,11 @@ class HostState(object):
         self.allocated_capacity_gb = 0
         self.free_capacity_gb = None
         self.reserved_percentage = 0
+        # The apparent allocated space indicating how much capacity
+        # has been provisioned. This could be the sum of the sizes of
+        # all volumes on a backend, which could be greater than or
+        # equal to the allocated_capacity_gb.
+        self.provisioned_capacity_gb = 0
 
         # PoolState for all pools
         self.pools = {}
@@ -266,6 +271,7 @@ class HostState(object):
         """Incrementally update host state from a volume."""
         volume_gb = volume['size']
         self.allocated_capacity_gb += volume_gb
+        self.provisioned_capacity_gb += volume_gb
         if self.free_capacity_gb == 'infinite':
             # There's virtually infinite space on back-end
             pass
@@ -306,6 +312,14 @@ class PoolState(HostState):
                                                     'allocated_capacity_gb', 0)
             self.QoS_support = capability.get('QoS_support', False)
             self.reserved_percentage = capability.get('reserved_percentage', 0)
+            # provisioned_capacity_gb is the apparent total capacity of
+            # all the volumes created on a backend, which is greater
+            # than or equal to allocated_capacity_gb, the apparent
+            # total capacity of all the volumes created on a backend
+            # through Cinder. Use allocated_capacity_gb as the default
+            # for provisioned_capacity_gb if it is not set.
+            self.provisioned_capacity_gb = capability.get(
+                'provisioned_capacity_gb', self.allocated_capacity_gb)
 
     def update_pools(self, capability):
         # Do nothing, since we don't have pools within pool, yet
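
Note the fallback in PoolState above: a backend that does not report
the new field is treated as if every volume it provisioned went
through Cinder. A minimal sketch of that default (the dict literal is
illustrative):

    capability = {'allocated_capacity_gb': 270}  # no provisioned field
    provisioned = capability.get('provisioned_capacity_gb',
                                 capability.get('allocated_capacity_gb', 0))
    assert provisioned == 270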
diff --git a/cinder/tests/brick/test_brick_lvm.py b/cinder/tests/brick/test_brick_lvm.py
index 588bd7281..cce74b9b2 100644
--- a/cinder/tests/brick/test_brick_lvm.py
+++ b/cinder/tests/brick/test_brick_lvm.py
@@ -79,6 +79,14 @@ class BrickLvmTestCase(test.TestCase):
         elif 'env, LC_ALL=C, vgs, --noheadings, --unit=g, ' \
              '-o, name,size,free,lv_count,uuid, ' \
              '--separator, :, --nosuffix' in cmd_string:
+            data = ("  test-prov-cap-vg-unit:10.00:10.00:0:"
+                    "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
+            if 'test-prov-cap-vg-unit' in cmd_string:
+                return (data, "")
+            data = ("  test-prov-cap-vg-no-unit:10.00:10.00:0:"
+                    "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
+            if 'test-prov-cap-vg-no-unit' in cmd_string:
+                return (data, "")
             data = "  fake-vg:10.00:10.00:0:"\
                    "kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
             if 'fake-vg' in cmd_string:
@@ -98,8 +106,19 @@ class BrickLvmTestCase(test.TestCase):
                 raise processutils.ProcessExecutionError(
                     stderr="One or more volume(s) not found."
                 )
-            data = "  fake-vg fake-1 1.00g\n"
-            data += "  fake-vg fake-2 1.00g\n"
+            if 'test-prov-cap-vg-unit' in cmd_string:
+                data = "  fake-vg test-prov-cap-pool-unit 9.50g\n"
+                data += "  fake-vg fake-volume-1 1.00g\n"
+                data += "  fake-vg fake-volume-2 2.00g\n"
+            elif 'test-prov-cap-vg-no-unit' in cmd_string:
+                data = "  fake-vg test-prov-cap-pool-no-unit 9.50\n"
+                data += "  fake-vg fake-volume-1 1.00\n"
+                data += "  fake-vg fake-volume-2 2.00\n"
+            elif 'test-found-lv-name' in cmd_string:
+                data = "  fake-vg test-found-lv-name 9.50\n"
+            else:
+                data = "  fake-vg fake-1 1.00g\n"
+                data += "  fake-vg fake-2 1.00g\n"
         elif ('env, LC_ALL=C, lvdisplay, --noheading, -C, -o, Attr' in
               cmd_string):
             if 'test-volumes' in cmd_string:
@@ -113,7 +132,10 @@ class BrickLvmTestCase(test.TestCase):
             data += "  fake-vg-2:/dev/sdd:10.00:9.99\n"
         elif 'env, LC_ALL=C, lvs, --noheadings, --unit=g' \
              ', -o, size,data_percent, --separator, :' in cmd_string:
-            data = "  9:12\n"
+            if 'test-prov-cap-pool' in cmd_string:
+                data = "  9.5:20\n"
+            else:
+                data = "  9:12\n"
         elif 'lvcreate, -T, -L, ' in cmd_string:
             pass
         elif 'lvcreate, -T, -V, ' in cmd_string:
@@ -161,9 +183,28 @@ class BrickLvmTestCase(test.TestCase):
 
     def test_get_lv_info_notfound(self):
         self.assertEqual(
+            [],
+            self.vg.get_lv_info(
+                'sudo', vg_name='fake-vg', lv_name='lv-nothere')
+        )
+
+    def test_get_lv_info_found(self):
+        lv_info = [{'size': '9.50', 'name': 'test-found-lv-name',
+                    'vg': 'fake-vg'}]
+        self.assertEqual(
+            lv_info,
             self.vg.get_lv_info(
-                'sudo', vg_name='fake-vg', lv_name='lv-nothere'),
-            []
+                'sudo', vg_name='fake-vg',
+                lv_name='test-found-lv-name')
+        )
+
+    def test_get_lv_info_no_lv_name(self):
+        lv_info = [{'name': 'fake-1', 'size': '1.00g', 'vg': 'fake-vg'},
+                   {'name': 'fake-2', 'size': '1.00g', 'vg': 'fake-vg'}]
+        self.assertEqual(
+            lv_info,
+            self.vg.get_lv_info(
+                'sudo', vg_name='fake-vg')
         )
 
     def test_get_all_physical_volumes(self):
@@ -238,6 +279,25 @@ class BrickLvmTestCase(test.TestCase):
         for size in ("1g", "1.2g", "1.75g"):
             self.assertEqual(size, self.vg.create_thin_pool(size_str=size))
 
+    def test_thin_pool_provisioned_capacity(self):
+        self.vg.vg_thin_pool = "test-prov-cap-pool-unit"
+        self.vg.vg_name = 'test-prov-cap-vg-unit'
+        self.assertEqual(
+            "9.5g",
+            self.vg.create_thin_pool(name=self.vg.vg_thin_pool))
+        self.assertEqual("9.50", self.vg.vg_thin_pool_size)
+        self.assertEqual(7.6, self.vg.vg_thin_pool_free_space)
+        self.assertEqual(3.0, self.vg.vg_provisioned_capacity)
+
+        self.vg.vg_thin_pool = "test-prov-cap-pool-no-unit"
+        self.vg.vg_name = 'test-prov-cap-vg-no-unit'
+        self.assertEqual(
+            "9.5g",
+            self.vg.create_thin_pool(name=self.vg.vg_thin_pool))
+        self.assertEqual("9.50", self.vg.vg_thin_pool_size)
+        self.assertEqual(7.6, self.vg.vg_thin_pool_free_space)
+        self.assertEqual(3.0, self.vg.vg_provisioned_capacity)
+
     def test_thin_pool_free_space(self):
         # The size of fake-vg-pool is 9g and the allocated data sums up to
         # 12% so the calculated free space should be 7.92
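
The expected values in test_thin_pool_provisioned_capacity follow
from the stubbed lvs replies above: a 9.5g pool at 20% data usage,
plus two volumes of 1g and 2g. A quick check of that arithmetic,
assuming free space is computed as size * (1 - data_percent / 100),
as the comment in test_thin_pool_free_space above indicates:

    pool_size_gb, data_percent = 9.5, 20
    free_space = round(pool_size_gb * (1 - data_percent / 100.0), 2)
    provisioned = round(1.00 + 2.00, 2)  # the two stubbed volumes
    assert (free_space, provisioned) == (7.6, 3.0)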
diff --git a/cinder/tests/scheduler/test_host_manager.py b/cinder/tests/scheduler/test_host_manager.py
index eea6bcc8c..38f58c133 100644
--- a/cinder/tests/scheduler/test_host_manager.py
+++ b/cinder/tests/scheduler/test_host_manager.py
@@ -257,13 +257,16 @@ class HostManagerTestCase(test.TestCase):
         mocked_service_states = {
             'host1': dict(volume_backend_name='AAA',
                           total_capacity_gb=512, free_capacity_gb=200,
-                          timestamp=None, reserved_percentage=0),
+                          timestamp=None, reserved_percentage=0,
+                          provisioned_capacity_gb=312),
             'host2@back1': dict(volume_backend_name='BBB',
                                 total_capacity_gb=256, free_capacity_gb=100,
-                                timestamp=None, reserved_percentage=0),
+                                timestamp=None, reserved_percentage=0,
+                                provisioned_capacity_gb=156),
             'host2@back2': dict(volume_backend_name='CCC',
                                 total_capacity_gb=10000, free_capacity_gb=700,
-                                timestamp=None, reserved_percentage=0),
+                                timestamp=None, reserved_percentage=0,
+                                provisioned_capacity_gb=9300),
         }
 
         _mock_service_get_all_by_topic.return_value = services
@@ -289,7 +292,8 @@ class HostManagerTestCase(test.TestCase):
                  'total_capacity_gb': 512,
                  'reserved_percentage': 0,
                  'vendor_name': None,
-                 'storage_protocol': None},
+                 'storage_protocol': None,
+                 'provisioned_capacity_gb': 312},
             },
             {
                 'name': 'host2@back1#BBB',
@@ -301,7 +305,8 @@ class HostManagerTestCase(test.TestCase):
                  'total_capacity_gb': 256,
                  'reserved_percentage': 0,
                  'vendor_name': None,
-                 'storage_protocol': None},
+                 'storage_protocol': None,
+                 'provisioned_capacity_gb': 156},
            },
            {
                'name': 'host2@back2#CCC',
@@ -313,7 +318,8 @@ class HostManagerTestCase(test.TestCase):
                 'total_capacity_gb': 10000,
                 'reserved_percentage': 0,
                 'vendor_name': None,
-                'storage_protocol': None},
+                'storage_protocol': None,
+                'provisioned_capacity_gb': 9300},
            }
        ]
        self.assertEqual(len(expected), len(res))
@@ -329,16 +335,19 @@ class HostStateTestCase(test.TestCase):
 
         volume_capability = {'total_capacity_gb': 1024,
                              'free_capacity_gb': 512,
+                             'provisioned_capacity_gb': 512,
                              'reserved_percentage': 0,
                              'timestamp': None}
 
         fake_host.update_from_volume_capability(volume_capability)
         # Backend level stats remain uninitialized
-        self.assertEqual(fake_host.total_capacity_gb, 0)
-        self.assertEqual(fake_host.free_capacity_gb, None)
+        self.assertEqual(0, fake_host.total_capacity_gb)
+        self.assertEqual(None, fake_host.free_capacity_gb)
         # Pool stats has been updated
-        self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb, 1024)
-        self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb, 512)
+        self.assertEqual(1024, fake_host.pools['_pool0'].total_capacity_gb)
+        self.assertEqual(512, fake_host.pools['_pool0'].free_capacity_gb)
+        self.assertEqual(512,
+                         fake_host.pools['_pool0'].provisioned_capacity_gb)
 
         # Test update for existing host state
         volume_capability.update(dict(total_capacity_gb=1000))
@@ -348,8 +357,10 @@ class HostStateTestCase(test.TestCase):
         # Test update for existing host state with different backend name
         volume_capability.update(dict(volume_backend_name='magic'))
         fake_host.update_from_volume_capability(volume_capability)
-        self.assertEqual(fake_host.pools['magic'].total_capacity_gb, 1000)
-        self.assertEqual(fake_host.pools['magic'].free_capacity_gb, 512)
+        self.assertEqual(1000, fake_host.pools['magic'].total_capacity_gb)
+        self.assertEqual(512, fake_host.pools['magic'].free_capacity_gb)
+        self.assertEqual(512,
+                         fake_host.pools['magic'].provisioned_capacity_gb)
         # 'pool0' becomes nonactive pool, and is deleted
         self.assertRaises(KeyError, lambda: fake_host.pools['pool0'])
@@ -366,6 +377,7 @@ class HostStateTestCase(test.TestCase):
                 'total_capacity_gb': 500,
                 'free_capacity_gb': 230,
                 'allocated_capacity_gb': 270,
+                'provisioned_capacity_gb': 270,
                 'QoS_support': 'False',
                 'reserved_percentage': 0,
                 'dying_disks': 100,
@@ -377,6 +389,7 @@ class HostStateTestCase(test.TestCase):
                 'total_capacity_gb': 1024,
                 'free_capacity_gb': 1024,
                 'allocated_capacity_gb': 0,
+                'provisioned_capacity_gb': 0,
                 'QoS_support': 'False',
                 'reserved_percentage': 0,
                 'dying_disks': 200,
@@ -401,10 +414,14 @@ class HostStateTestCase(test.TestCase):
 
         # Pool stats has been updated
         self.assertEqual(len(fake_host.pools), 2)
-        self.assertEqual(fake_host.pools['1st pool'].total_capacity_gb, 500)
-        self.assertEqual(fake_host.pools['1st pool'].free_capacity_gb, 230)
-        self.assertEqual(fake_host.pools['2nd pool'].total_capacity_gb, 1024)
-        self.assertEqual(fake_host.pools['2nd pool'].free_capacity_gb, 1024)
+        self.assertEqual(500, fake_host.pools['1st pool'].total_capacity_gb)
+        self.assertEqual(230, fake_host.pools['1st pool'].free_capacity_gb)
+        self.assertEqual(270,
+                         fake_host.pools['1st pool'].provisioned_capacity_gb)
+        self.assertEqual(1024, fake_host.pools['2nd pool'].total_capacity_gb)
+        self.assertEqual(1024, fake_host.pools['2nd pool'].free_capacity_gb)
+        self.assertEqual(0,
+                         fake_host.pools['2nd pool'].provisioned_capacity_gb)
 
         capability = {
             'volume_backend_name': 'Local iSCSI',
@@ -416,6 +433,7 @@ class HostStateTestCase(test.TestCase):
                 'total_capacity_gb': 10000,
                 'free_capacity_gb': 10000,
                 'allocated_capacity_gb': 0,
+                'provisioned_capacity_gb': 0,
                 'QoS_support': 'False',
                 'reserved_percentage': 0,
             },
@@ -434,8 +452,10 @@ class HostStateTestCase(test.TestCase):
         self.assertRaises(KeyError, lambda: fake_host.pools['1st pool'])
         self.assertRaises(KeyError, lambda: fake_host.pools['2nd pool'])
 
-        self.assertEqual(fake_host.pools['3rd pool'].total_capacity_gb, 10000)
-        self.assertEqual(fake_host.pools['3rd pool'].free_capacity_gb, 10000)
+        self.assertEqual(10000, fake_host.pools['3rd pool'].total_capacity_gb)
+        self.assertEqual(10000, fake_host.pools['3rd pool'].free_capacity_gb)
+        self.assertEqual(0,
+                         fake_host.pools['3rd pool'].provisioned_capacity_gb)
 
     def test_update_from_volume_infinite_capability(self):
         fake_host = host_manager.HostState('host1')
@@ -484,10 +504,12 @@ class HostStateTestCase(test.TestCase):
         self.assertEqual(fake_host.total_capacity_gb, 0)
         self.assertEqual(fake_host.free_capacity_gb, None)
         # Pool stats has been updated
-        self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb,
-                         0)
-        self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb,
-                         0)
+        self.assertEqual(0,
+                         fake_host.pools['_pool0'].total_capacity_gb)
+        self.assertEqual(0,
+                         fake_host.pools['_pool0'].free_capacity_gb)
+        self.assertEqual(0,
+                         fake_host.pools['_pool0'].provisioned_capacity_gb)
 
 
 class PoolStateTestCase(test.TestCase):
@@ -500,14 +522,17 @@ class PoolStateTestCase(test.TestCase):
         volume_capability = {'total_capacity_gb': 1024,
                              'free_capacity_gb': 512,
                              'reserved_percentage': 0,
+                             'provisioned_capacity_gb': 512,
                              'timestamp': None,
                              'cap1': 'val1',
                              'cap2': 'val2'}
 
         fake_pool.update_from_volume_capability(volume_capability)
-        self.assertEqual(fake_pool.host, 'host1#pool0')
-        self.assertEqual(fake_pool.pool_name, 'pool0')
-        self.assertEqual(fake_pool.total_capacity_gb, 1024)
-        self.assertEqual(fake_pool.free_capacity_gb, 512)
+        self.assertEqual('host1#pool0', fake_pool.host)
+        self.assertEqual('pool0', fake_pool.pool_name)
+        self.assertEqual(1024, fake_pool.total_capacity_gb)
+        self.assertEqual(512, fake_pool.free_capacity_gb)
+        self.assertEqual(512,
+                         fake_pool.provisioned_capacity_gb)
 
         self.assertDictMatch(fake_pool.capabilities, volume_capability)
diff --git a/cinder/tests/test_volume.py b/cinder/tests/test_volume.py
index f5ea37e51..b98ff16df 100644
--- a/cinder/tests/test_volume.py
+++ b/cinder/tests/test_volume.py
@@ -4113,6 +4113,8 @@ class ISCSITestCase(DriverTestCase):
             stats['pools'][0]['total_capacity_gb'], float('5.52'))
         self.assertEqual(
             stats['pools'][0]['free_capacity_gb'], float('0.52'))
+        self.assertEqual(
+            stats['pools'][0]['provisioned_capacity_gb'], float('5.0'))
 
     def test_validate_connector(self):
         iscsi_driver = \
@@ -4177,6 +4179,8 @@ class ISERTestCase(DriverTestCase):
             stats['pools'][0]['total_capacity_gb'], float('5.52'))
         self.assertEqual(
             stats['pools'][0]['free_capacity_gb'], float('0.52'))
+        self.assertEqual(
+            stats['pools'][0]['provisioned_capacity_gb'], float('5.0'))
         self.assertEqual(stats['storage_protocol'], 'iSER')
 
     @test.testtools.skip("SKIP until ISER driver is removed or fixed")
@@ -4189,6 +4193,8 @@ class ISERTestCase(DriverTestCase):
             stats['pools'][0]['total_capacity_gb'], 0)
         self.assertEqual(
             stats['pools'][0]['free_capacity_gb'], 0)
+        self.assertEqual(
+            stats['pools'][0]['provisioned_capacity_gb'], float('5.0'))
         self.assertEqual(stats['storage_protocol'], 'iSER')
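
For orientation before the driver-side changes below: a backend
driver publishes the new field through its per-pool stats dict. The
following is a hypothetical sketch with invented numbers, not code
from this patch; it only shows the expected relationship
provisioned_capacity_gb >= allocated_capacity_gb:

    pool_stats = dict(
        pool_name='pool-1',
        total_capacity_gb=100.0,
        free_capacity_gb=40.0,
        allocated_capacity_gb=50.0,    # Cinder-created volumes only
        provisioned_capacity_gb=60.0,  # all volumes on the backend
        reserved_percentage=0,
        QoS_support=False,
    )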
diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py
index dfdf93f2e..cfa8f30e9 100644
--- a/cinder/volume/driver.py
+++ b/cinder/volume/driver.py
@@ -1117,6 +1117,7 @@ class ISCSIDriver(VolumeDriver):
                     pool_name=pool,
                     total_capacity_gb=0,
                     free_capacity_gb=0,
+                    provisioned_capacity_gb=0,
                     reserved_percentage=100,
                     QoS_support=False
                 ))
@@ -1128,6 +1129,7 @@ class ISCSIDriver(VolumeDriver):
                 pool_name=data["volume_backend_name"],
                 total_capacity_gb=0,
                 free_capacity_gb=0,
+                provisioned_capacity_gb=0,
                 reserved_percentage=100,
                 QoS_support=False
             ))
diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py
index 5c10a866d..968599e00 100644
--- a/cinder/volume/drivers/lvm.py
+++ b/cinder/volume/drivers/lvm.py
@@ -189,12 +189,17 @@ class LVMVolumeDriver(driver.VolumeDriver):
                 self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
             free_capacity = \
                 self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)
+            provisioned_capacity = round(
+                float(total_capacity) - float(free_capacity), 2)
         elif self.configuration.lvm_type == 'thin':
             total_capacity = self.vg.vg_thin_pool_size
             free_capacity = self.vg.vg_thin_pool_free_space
+            provisioned_capacity = self.vg.vg_provisioned_capacity
         else:
             total_capacity = self.vg.vg_size
             free_capacity = self.vg.vg_free_space
+            provisioned_capacity = round(
+                float(total_capacity) - float(free_capacity), 2)
 
         location_info = \
             ('LVMVolumeDriver:%(hostname)s:%(vg)s'
@@ -211,6 +216,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
             pool_name=data["volume_backend_name"],
             total_capacity_gb=total_capacity,
             free_capacity_gb=free_capacity,
+            provisioned_capacity_gb=provisioned_capacity,
             reserved_percentage=self.configuration.reserved_percentage,
             location_info=location_info,
             QoS_support=False,
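
To recap the LVM driver logic above: for thick and mirrored LVM the
patch approximates provisioned capacity as total minus free, while
for thin LVM it uses the per-LV sum computed in brick. A worked check
against the ISCSITestCase numbers above (5.52g total, 0.52g free):

    total_capacity, free_capacity = '5.52', '0.52'
    provisioned_capacity = round(
        float(total_capacity) - float(free_capacity), 2)
    assert provisioned_capacity == 5.0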