self.vg_thin_pool_free_space = 0.0
self._supports_snapshot_lv_activation = None
self._supports_lvchange_ignoreskipactivation = None
+ self.vg_provisioned_capacity = 0.0
if create_vg and physical_volumes is not None:
self.pv_list = physical_volumes
self.vg_lv_count = int(vg_list[0]['lv_count'])
self.vg_uuid = vg_list[0]['uuid']
+ total_vols_size = 0.0
if self.vg_thin_pool is not None:
+ # NOTE(xyang): If providing only self.vg_name,
+ # get_lv_info will output info on the thin pool and all
+ # individual volumes.
+ # get_lv_info(self._root_helper, 'stack-vg')
+ # sudo lvs --noheadings --unit=g -o vg_name,name,size
+ # --nosuffix stack-vg
+ # stack-vg stack-pool 9.51
+ # stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00
+ # stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00
+ # stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00
+ #
+ # If providing both self.vg_name and self.vg_thin_pool,
+ # get_lv_info will output only info on the thin pool, but not
+ # individual volumes.
+ # get_lv_info(self._root_helper, 'stack-vg', 'stack-pool')
+ # sudo lvs --noheadings --unit=g -o vg_name,name,size
+ # --nosuffix stack-vg/stack-pool
+ # stack-vg stack-pool 9.51
+ #
+ # We need info on both the thin pool and the volumes,
+ # therefore we should provide only self.vg_name, but not
+ # self.vg_thin_pool here.
for lv in self.get_lv_info(self._root_helper,
- self.vg_name,
- self.vg_thin_pool):
+ self.vg_name):
+ lvsize = lv['size']
+ # get_lv_info runs "lvs" command with "--nosuffix".
+ # This removes "g" from "1.00g" and only outputs "1.00".
+ # Running "lvs" command without "--nosuffix" will output
+ # "1.00g" if "g" is the unit.
+ # Remove the unit if it is in lv['size'].
+ if not lv['size'][-1].isdigit():
+ lvsize = lvsize[:-1]
if lv['name'] == self.vg_thin_pool:
- self.vg_thin_pool_size = lv['size']
+ self.vg_thin_pool_size = lvsize
tpfs = self._get_thin_pool_free_space(self.vg_name,
self.vg_thin_pool)
self.vg_thin_pool_free_space = tpfs
+ else:
+ total_vols_size = total_vols_size + float(lvsize)
+ total_vols_size = round(total_vols_size, 2)
+
+ self.vg_provisioned_capacity = total_vols_size
def _calculate_thin_pool_size(self):
"""Calculates the correct size for a thin pool.
self.allocated_capacity_gb = 0
self.free_capacity_gb = None
self.reserved_percentage = 0
+        # The apparent amount of capacity that has been provisioned
+        # on this backend: the sum of the sizes of all volumes on
+        # the backend. This can be greater than or equal to
+        # allocated_capacity_gb.
+ self.provisioned_capacity_gb = 0
# PoolState for all pools
self.pools = {}
"""Incrementally update host state from an volume."""
volume_gb = volume['size']
self.allocated_capacity_gb += volume_gb
+ self.provisioned_capacity_gb += volume_gb
if self.free_capacity_gb == 'infinite':
# There's virtually infinite space on back-end
pass
'allocated_capacity_gb', 0)
self.QoS_support = capability.get('QoS_support', False)
self.reserved_percentage = capability.get('reserved_percentage', 0)
+        # provisioned_capacity_gb is the apparent total capacity of
+        # all volumes that exist on a backend (including any created
+        # outside of Cinder), which is greater than or equal to
+        # allocated_capacity_gb, the apparent total capacity of only
+        # the volumes created through Cinder. Use allocated_capacity_gb
+        # as the default for provisioned_capacity_gb if it is not set.
+ self.provisioned_capacity_gb = capability.get(
+ 'provisioned_capacity_gb', self.allocated_capacity_gb)
def update_pools(self, capability):
# Do nothing, since we don't have pools within pool, yet
elif 'env, LC_ALL=C, vgs, --noheadings, --unit=g, ' \
'-o, name,size,free,lv_count,uuid, ' \
'--separator, :, --nosuffix' in cmd_string:
+ data = (" test-prov-cap-vg-unit:10.00:10.00:0:"
+ "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
+ if 'test-prov-cap-vg-unit' in cmd_string:
+ return (data, "")
+ data = (" test-prov-cap-vg-no-unit:10.00:10.00:0:"
+ "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
+ if 'test-prov-cap-vg-no-unit' in cmd_string:
+ return (data, "")
data = " fake-vg:10.00:10.00:0:"\
"kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
if 'fake-vg' in cmd_string:
raise processutils.ProcessExecutionError(
stderr="One or more volume(s) not found."
)
- data = " fake-vg fake-1 1.00g\n"
- data += " fake-vg fake-2 1.00g\n"
+ if 'test-prov-cap-vg-unit' in cmd_string:
+ data = " fake-vg test-prov-cap-pool-unit 9.50g\n"
+ data += " fake-vg fake-volume-1 1.00g\n"
+ data += " fake-vg fake-volume-2 2.00g\n"
+ elif 'test-prov-cap-vg-no-unit' in cmd_string:
+ data = " fake-vg test-prov-cap-pool-no-unit 9.50\n"
+ data += " fake-vg fake-volume-1 1.00\n"
+ data += " fake-vg fake-volume-2 2.00\n"
+ elif 'test-found-lv-name' in cmd_string:
+ data = " fake-vg test-found-lv-name 9.50\n"
+ else:
+ data = " fake-vg fake-1 1.00g\n"
+ data += " fake-vg fake-2 1.00g\n"
elif ('env, LC_ALL=C, lvdisplay, --noheading, -C, -o, Attr' in
cmd_string):
if 'test-volumes' in cmd_string:
data += " fake-vg-2:/dev/sdd:10.00:9.99\n"
elif 'env, LC_ALL=C, lvs, --noheadings, --unit=g' \
', -o, size,data_percent, --separator, :' in cmd_string:
- data = " 9:12\n"
+ if 'test-prov-cap-pool' in cmd_string:
+ data = " 9.5:20\n"
+ else:
+ data = " 9:12\n"
elif 'lvcreate, -T, -L, ' in cmd_string:
pass
elif 'lvcreate, -T, -V, ' in cmd_string:
def test_get_lv_info_notfound(self):
self.assertEqual(
+ [],
+ self.vg.get_lv_info(
+ 'sudo', vg_name='fake-vg', lv_name='lv-nothere')
+ )
+
+ def test_get_lv_info_found(self):
+ lv_info = [{'size': '9.50', 'name': 'test-found-lv-name',
+ 'vg': 'fake-vg'}]
+ self.assertEqual(
+ lv_info,
self.vg.get_lv_info(
- 'sudo', vg_name='fake-vg', lv_name='lv-nothere'),
- []
+ 'sudo', vg_name='fake-vg',
+ lv_name='test-found-lv-name')
+ )
+
+ def test_get_lv_info_no_lv_name(self):
+ lv_info = [{'name': 'fake-1', 'size': '1.00g', 'vg': 'fake-vg'},
+ {'name': 'fake-2', 'size': '1.00g', 'vg': 'fake-vg'}]
+ self.assertEqual(
+ lv_info,
+ self.vg.get_lv_info(
+ 'sudo', vg_name='fake-vg')
)
def test_get_all_physical_volumes(self):
for size in ("1g", "1.2g", "1.75g"):
self.assertEqual(size, self.vg.create_thin_pool(size_str=size))
+ def test_thin_pool_provisioned_capacity(self):
+ self.vg.vg_thin_pool = "test-prov-cap-pool-unit"
+ self.vg.vg_name = 'test-prov-cap-vg-unit'
+ self.assertEqual(
+ "9.5g",
+ self.vg.create_thin_pool(name=self.vg.vg_thin_pool))
+ self.assertEqual("9.50", self.vg.vg_thin_pool_size)
+ self.assertEqual(7.6, self.vg.vg_thin_pool_free_space)
+ self.assertEqual(3.0, self.vg.vg_provisioned_capacity)
+
+ self.vg.vg_thin_pool = "test-prov-cap-pool-no-unit"
+ self.vg.vg_name = 'test-prov-cap-vg-no-unit'
+ self.assertEqual(
+ "9.5g",
+ self.vg.create_thin_pool(name=self.vg.vg_thin_pool))
+ self.assertEqual("9.50", self.vg.vg_thin_pool_size)
+ self.assertEqual(7.6, self.vg.vg_thin_pool_free_space)
+ self.assertEqual(3.0, self.vg.vg_provisioned_capacity)
+
def test_thin_pool_free_space(self):
# The size of fake-vg-pool is 9g and the allocated data sums up to
# 12% so the calculated free space should be 7.92
mocked_service_states = {
'host1': dict(volume_backend_name='AAA',
total_capacity_gb=512, free_capacity_gb=200,
- timestamp=None, reserved_percentage=0),
+ timestamp=None, reserved_percentage=0,
+ provisioned_capacity_gb=312),
'host2@back1': dict(volume_backend_name='BBB',
total_capacity_gb=256, free_capacity_gb=100,
- timestamp=None, reserved_percentage=0),
+ timestamp=None, reserved_percentage=0,
+ provisioned_capacity_gb=156),
'host2@back2': dict(volume_backend_name='CCC',
total_capacity_gb=10000, free_capacity_gb=700,
- timestamp=None, reserved_percentage=0),
+ timestamp=None, reserved_percentage=0,
+ provisioned_capacity_gb=9300),
}
_mock_service_get_all_by_topic.return_value = services
'total_capacity_gb': 512,
'reserved_percentage': 0,
'vendor_name': None,
- 'storage_protocol': None},
+ 'storage_protocol': None,
+ 'provisioned_capacity_gb': 312},
},
{
'name': 'host2@back1#BBB',
'total_capacity_gb': 256,
'reserved_percentage': 0,
'vendor_name': None,
- 'storage_protocol': None},
+ 'storage_protocol': None,
+ 'provisioned_capacity_gb': 156},
},
{
'name': 'host2@back2#CCC',
'total_capacity_gb': 10000,
'reserved_percentage': 0,
'vendor_name': None,
- 'storage_protocol': None},
+ 'storage_protocol': None,
+ 'provisioned_capacity_gb': 9300},
}
]
self.assertEqual(len(expected), len(res))
volume_capability = {'total_capacity_gb': 1024,
'free_capacity_gb': 512,
+ 'provisioned_capacity_gb': 512,
'reserved_percentage': 0,
'timestamp': None}
fake_host.update_from_volume_capability(volume_capability)
# Backend level stats remain uninitialized
- self.assertEqual(fake_host.total_capacity_gb, 0)
- self.assertEqual(fake_host.free_capacity_gb, None)
+ self.assertEqual(0, fake_host.total_capacity_gb)
+ self.assertEqual(None, fake_host.free_capacity_gb)
# Pool stats has been updated
- self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb, 1024)
- self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb, 512)
+ self.assertEqual(1024, fake_host.pools['_pool0'].total_capacity_gb)
+ self.assertEqual(512, fake_host.pools['_pool0'].free_capacity_gb)
+ self.assertEqual(512,
+ fake_host.pools['_pool0'].provisioned_capacity_gb)
# Test update for existing host state
volume_capability.update(dict(total_capacity_gb=1000))
# Test update for existing host state with different backend name
volume_capability.update(dict(volume_backend_name='magic'))
fake_host.update_from_volume_capability(volume_capability)
- self.assertEqual(fake_host.pools['magic'].total_capacity_gb, 1000)
- self.assertEqual(fake_host.pools['magic'].free_capacity_gb, 512)
+ self.assertEqual(1000, fake_host.pools['magic'].total_capacity_gb)
+ self.assertEqual(512, fake_host.pools['magic'].free_capacity_gb)
+ self.assertEqual(512,
+ fake_host.pools['magic'].provisioned_capacity_gb)
# 'pool0' becomes nonactive pool, and is deleted
self.assertRaises(KeyError, lambda: fake_host.pools['pool0'])
'total_capacity_gb': 500,
'free_capacity_gb': 230,
'allocated_capacity_gb': 270,
+ 'provisioned_capacity_gb': 270,
'QoS_support': 'False',
'reserved_percentage': 0,
'dying_disks': 100,
'total_capacity_gb': 1024,
'free_capacity_gb': 1024,
'allocated_capacity_gb': 0,
+ 'provisioned_capacity_gb': 0,
'QoS_support': 'False',
'reserved_percentage': 0,
'dying_disks': 200,
# Pool stats has been updated
self.assertEqual(len(fake_host.pools), 2)
- self.assertEqual(fake_host.pools['1st pool'].total_capacity_gb, 500)
- self.assertEqual(fake_host.pools['1st pool'].free_capacity_gb, 230)
- self.assertEqual(fake_host.pools['2nd pool'].total_capacity_gb, 1024)
- self.assertEqual(fake_host.pools['2nd pool'].free_capacity_gb, 1024)
+ self.assertEqual(500, fake_host.pools['1st pool'].total_capacity_gb)
+ self.assertEqual(230, fake_host.pools['1st pool'].free_capacity_gb)
+ self.assertEqual(270,
+ fake_host.pools['1st pool'].provisioned_capacity_gb)
+ self.assertEqual(1024, fake_host.pools['2nd pool'].total_capacity_gb)
+ self.assertEqual(1024, fake_host.pools['2nd pool'].free_capacity_gb)
+ self.assertEqual(0,
+ fake_host.pools['2nd pool'].provisioned_capacity_gb)
capability = {
'volume_backend_name': 'Local iSCSI',
'total_capacity_gb': 10000,
'free_capacity_gb': 10000,
'allocated_capacity_gb': 0,
+ 'provisioned_capacity_gb': 0,
'QoS_support': 'False',
'reserved_percentage': 0,
},
self.assertRaises(KeyError, lambda: fake_host.pools['1st pool'])
self.assertRaises(KeyError, lambda: fake_host.pools['2nd pool'])
- self.assertEqual(fake_host.pools['3rd pool'].total_capacity_gb, 10000)
- self.assertEqual(fake_host.pools['3rd pool'].free_capacity_gb, 10000)
+ self.assertEqual(10000, fake_host.pools['3rd pool'].total_capacity_gb)
+ self.assertEqual(10000, fake_host.pools['3rd pool'].free_capacity_gb)
+ self.assertEqual(0,
+ fake_host.pools['3rd pool'].provisioned_capacity_gb)
def test_update_from_volume_infinite_capability(self):
fake_host = host_manager.HostState('host1')
self.assertEqual(fake_host.total_capacity_gb, 0)
self.assertEqual(fake_host.free_capacity_gb, None)
# Pool stats has been updated
- self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb,
- 0)
- self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb,
- 0)
+ self.assertEqual(0,
+ fake_host.pools['_pool0'].total_capacity_gb)
+ self.assertEqual(0,
+ fake_host.pools['_pool0'].free_capacity_gb)
+ self.assertEqual(0,
+ fake_host.pools['_pool0'].provisioned_capacity_gb)
class PoolStateTestCase(test.TestCase):
volume_capability = {'total_capacity_gb': 1024,
'free_capacity_gb': 512,
'reserved_percentage': 0,
+ 'provisioned_capacity_gb': 512,
'timestamp': None,
'cap1': 'val1',
'cap2': 'val2'}
fake_pool.update_from_volume_capability(volume_capability)
- self.assertEqual(fake_pool.host, 'host1#pool0')
- self.assertEqual(fake_pool.pool_name, 'pool0')
- self.assertEqual(fake_pool.total_capacity_gb, 1024)
- self.assertEqual(fake_pool.free_capacity_gb, 512)
+ self.assertEqual('host1#pool0', fake_pool.host)
+ self.assertEqual('pool0', fake_pool.pool_name)
+ self.assertEqual(1024, fake_pool.total_capacity_gb)
+ self.assertEqual(512, fake_pool.free_capacity_gb)
+ self.assertEqual(512,
+ fake_pool.provisioned_capacity_gb)
self.assertDictMatch(fake_pool.capabilities, volume_capability)
stats['pools'][0]['total_capacity_gb'], float('5.52'))
self.assertEqual(
stats['pools'][0]['free_capacity_gb'], float('0.52'))
+ self.assertEqual(
+ stats['pools'][0]['provisioned_capacity_gb'], float('5.0'))
def test_validate_connector(self):
iscsi_driver =\
stats['pools'][0]['total_capacity_gb'], float('5.52'))
self.assertEqual(
stats['pools'][0]['free_capacity_gb'], float('0.52'))
+ self.assertEqual(
+ stats['pools'][0]['provisioned_capacity_gb'], float('5.0'))
self.assertEqual(stats['storage_protocol'], 'iSER')
@test.testtools.skip("SKIP until ISER driver is removed or fixed")
stats['pools'][0]['total_capacity_gb'], 0)
self.assertEqual(
stats['pools'][0]['free_capacity_gb'], 0)
+ self.assertEqual(
+ stats['pools'][0]['provisioned_capacity_gb'], float('5.0'))
self.assertEqual(stats['storage_protocol'], 'iSER')
pool_name=pool,
total_capacity_gb=0,
free_capacity_gb=0,
+ provisioned_capacity_gb=0,
reserved_percentage=100,
QoS_support=False
))
pool_name=data["volume_backend_name"],
total_capacity_gb=0,
free_capacity_gb=0,
+ provisioned_capacity_gb=0,
reserved_percentage=100,
QoS_support=False
))
self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
free_capacity =\
self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)
+ provisioned_capacity = round(
+ float(total_capacity) - float(free_capacity), 2)
elif self.configuration.lvm_type == 'thin':
total_capacity = self.vg.vg_thin_pool_size
free_capacity = self.vg.vg_thin_pool_free_space
+ provisioned_capacity = self.vg.vg_provisioned_capacity
else:
total_capacity = self.vg.vg_size
free_capacity = self.vg.vg_free_space
+ provisioned_capacity = round(
+ float(total_capacity) - float(free_capacity), 2)
location_info = \
('LVMVolumeDriver:%(hostname)s:%(vg)s'
pool_name=data["volume_backend_name"],
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
+ provisioned_capacity_gb=provisioned_capacity,
reserved_percentage=self.configuration.reserved_percentage,
location_info=location_info,
QoS_support=False,