This patch adds support for over subscription in thin provisioning.
The following changes are proposed:
* A configuration option "max_over_subscription_ratio" will be
introduced.
* Driver reports the following capacities and ratios:
* provisioned_capacity
* max_over_subscription_ratio
* Driver can use the newly added configuration option to report
this ratio, or it can decide on its own what ratio to report.
The value of this ratio depends on the driver implementation
and is reported together with the driver's other capabilities
and capacities.
* reserved_percentage
* Note: This is an existing parameter reported by the driver.
* Currently it is measured against the free capacity. With this
patch, the filter scheduler measures it against the total
capacity instead.
* Driver also reports the following capabilities:
* thin_provisioning_support (True or False)
* thick_provisioning_support (True or False)
* Scheduler will use the above new capabilities reported by the
driver to make decisions when choosing a backend.
For more details, please see Cinder spec:
https://review.openstack.org/#/c/129342/12/specs/kilo/
over-subscription-in-thin-provisioning.rst
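
As an illustration (not part of the patch itself), a thin-provisioning
back-end might report stats along the following lines; the field names are
the ones introduced or used by this patch and the numbers are made up:

    # Hypothetical capacity report from a thin-provisioning back-end.
    volume_stats = {
        'volume_backend_name': 'backend1',
        'total_capacity_gb': 1000,
        'free_capacity_gb': 600,
        'provisioned_capacity_gb': 1500,
        'max_over_subscription_ratio': 2.0,
        'thin_provisioning_support': True,
        'thick_provisioning_support': False,
        'reserved_percentage': 5,
    }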
Implements: blueprint over-subscription-in-thin-provisioning
Change-Id: I176a691a4e25bbdc7c4d598628acf2543b2c7ac6
# Copyright (c) 2012 Intel
# Copyright (c) 2012 OpenStack Foundation
+# Copyright (c) 2015 EMC Corporation
#
# All Rights Reserved.
#
return False
free_space = host_state.free_capacity_gb
- if free_space == 'infinite' or free_space == 'unknown':
+ total_space = host_state.total_capacity_gb
+ reserved = float(host_state.reserved_percentage) / 100
+ if free_space in ['infinite', 'unknown']:
# NOTE(zhiteng) for those back-ends cannot report actual
# available capacity, we assume it is able to serve the
# request. Even if it was not, the retry mechanism is
# able to handle the failure by rescheduling
return True
- reserved = float(host_state.reserved_percentage) / 100
- free = math.floor(free_space * (1 - reserved))
+ elif total_space in ['infinite', 'unknown']:
+ # If total_space is 'infinite' or 'unknown' and reserved
+ # is 0, assume the back-end can serve the request.
+ # If reserved is not 0, the reserved space cannot be
+ # calculated: float(total_space) would raise an exception
+ # and total * reserved would not work either, so the
+ # back-end cannot serve the request.
+ if reserved == 0:
+ return True
+ return False
+ total = float(total_space)
+ if total <= 0:
+ LOG.warning(_LW("Insufficient free space for volume creation. "
+ "Total capacity is %(total).2f on host %(host)s."),
+ {"total": total,
+ "host": host_state.host})
+ return False
+ # Calculate how much free space is left after taking into account
+ # the reserved space.
+ free = free_space - math.floor(total * reserved)
msg_args = {"host": host_state.host,
"requested": volume_size,
if free < volume_size:
LOG.warning(_LW("Insufficient free space for volume creation "
"on host %(host)s (requested / avail): "
- "%(requested)s/%(available)s") % msg_args)
+ "%(requested)s/%(available)s"), msg_args)
+ return free >= volume_size
else:
- LOG.debug("Sufficient free space for volume creation "
+ LOG.debug("Space information for volume creation "
"on host %(host)s (requested / avail): "
- "%(requested)s/%(available)s" % msg_args)
+ "%(requested)s/%(available)s", msg_args)
+
+ # Only evaluate using max_over_subscription_ratio if
+ # thin_provisioning_support is True. Check if the ratio of
+ # provisioned capacity over total capacity has exceeded the
+ # max over subscription ratio.
+ if (host_state.thin_provisioning_support and
+ host_state.max_over_subscription_ratio >= 1):
+ provisioned_ratio = ((host_state.provisioned_capacity_gb +
+ volume_size) / total)
+ if provisioned_ratio >= host_state.max_over_subscription_ratio:
+ LOG.warning(_LW(
+ "Insufficient free space for thin provisioning. "
+ "The ratio of provisioned capacity over total capacity "
+ "%(provisioned_ratio).2f has exceeded the maximum over "
+ "subscription ratio %(oversub_ratio).2f on host "
+ "%(host)s."),
+ {"provisioned_ratio": provisioned_ratio,
+ "oversub_ratio": host_state.max_over_subscription_ratio,
+ "host": host_state.host})
+ return False
+ else:
+ free_virtual = free * host_state.max_over_subscription_ratio
+ return free_virtual >= volume_size
return free >= volume_size
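
The branching above can be summarized by the following standalone sketch
(a rough illustration, not the actual CapacityFilter code; the inputs in the
usage line are hypothetical):

    import math

    def passes_capacity_check(volume_size, total, free, provisioned,
                              reserved_pct, max_ratio, thin_support):
        # Adjust free space for the reserved percentage of total capacity.
        total = float(total)
        reserved = float(reserved_pct) / 100
        free_adj = free - math.floor(total * reserved)
        if free_adj < volume_size:
            return False
        # With thin provisioning, also check the over subscription ratio
        # and the virtual free capacity.
        if thin_support and max_ratio >= 1:
            provisioned_ratio = (provisioned + volume_size) / total
            if provisioned_ratio >= max_ratio:
                return False
            return free_adj * max_ratio >= volume_size
        return free_adj >= volume_size

    # 100 GB request, 500 GB total, 200 GB free, 500 GB provisioned,
    # 5% reserved, ratio 2.0, thin provisioning enabled -> passes.
    print(passes_capacity_check(100, 500, 200, 500, 5, 2.0, True))  # True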
CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
CONF.import_opt('scheduler_driver', 'cinder.scheduler.manager')
+CONF.import_opt('max_over_subscription_ratio', 'cinder.volume.driver')
LOG = logging.getLogger(__name__)
# all volumes on a backend, which could be greater than or
# equal to the allocated_capacity_gb.
self.provisioned_capacity_gb = 0
+ self.max_over_subscription_ratio = 1.0
+ self.thin_provisioning_support = False
+ self.thick_provisioning_support = False
# PoolState for all pools
self.pools = {}
# provisioned_capacity_gb if it is not set.
self.provisioned_capacity_gb = capability.get(
'provisioned_capacity_gb', self.allocated_capacity_gb)
+ self.max_over_subscription_ratio = capability.get(
+ 'max_over_subscription_ratio',
+ CONF.max_over_subscription_ratio)
+ self.thin_provisioning_support = capability.get(
+ 'thin_provisioning_support', False)
+ self.thick_provisioning_support = capability.get(
+ 'thick_provisioning_support', False)
def update_pools(self, capability):
# Do nothing, since we don't have pools within pool, yet
# Copyright (c) 2013 eBay Inc.
# Copyright (c) 2012 OpenStack Foundation
+# Copyright (c) 2015 EMC Corporation
+#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
Weighers that weigh hosts by their capacity, including following two
weighers:
-1. Capacity Weigher. Weigh hosts by their available capacity.
+1. Capacity Weigher. Weigh hosts by their virtual or actual free capacity.
+
+For thin provisioning, weigh hosts by their virtual free capacity, which is
+calculated as the total capacity multiplied by the max over subscription
+ratio, minus the provisioned capacity and the reserved space. Otherwise,
+weigh hosts by their actual free capacity, taking the reserved space into
+account.
The default is to spread volumes across all hosts evenly. If you prefer
stacking, you can set the 'capacity_weight_multiplier' option to a negative
"""Higher weights win. We want spreading to be the default."""
reserved = float(host_state.reserved_percentage) / 100
free_space = host_state.free_capacity_gb
- if free_space == 'infinite' or free_space == 'unknown':
+ total_space = host_state.total_capacity_gb
+ if (free_space == 'infinite' or free_space == 'unknown' or
+ total_space == 'infinite' or total_space == 'unknown'):
#(zhiteng) 'infinite' and 'unknown' are treated the same
# here, for sorting purpose.
# capacity anymore.
free = -1 if CONF.capacity_weight_multiplier > 0 else float('inf')
else:
- free = math.floor(host_state.free_capacity_gb * (1 - reserved))
+ total = float(total_space)
+ if host_state.thin_provisioning_support:
+ # Calculate virtual free capacity for thin provisioning.
+ free = (total * host_state.max_over_subscription_ratio
+ - host_state.provisioned_capacity_gb -
+ math.floor(total * reserved))
+ else:
+ # Calculate how much free space is left after taking into
+ # account the reserved space.
+ free = free_space - math.floor(total * reserved)
return free
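
As a worked example of the two formulas above, using the host2 fixture from
the tests below (2048 GB total, 300 GB free, 1748 GB provisioned, 10%
reserved, max over subscription ratio 1.5):

    import math

    total = 2048.0
    reserved = 0.10

    # Thin provisioning branch: virtual free capacity.
    thin_free = total * 1.5 - 1748 - math.floor(total * reserved)
    # 3072 - 1748 - 204 = 1120.0

    # Thick branch, for comparison: actual free capacity minus reserved.
    thick_free = 300 - math.floor(total * reserved)
    # 300 - 204 = 96.0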
'host1': {'total_capacity_gb': 1024,
'free_capacity_gb': 1024,
'allocated_capacity_gb': 0,
+ 'provisioned_capacity_gb': 0,
+ 'max_over_subscription_ratio': 1.0,
+ 'thin_provisioning_support': False,
+ 'thick_provisioning_support': True,
'reserved_percentage': 10,
'volume_backend_name': 'lvm1',
'timestamp': None},
'host2': {'total_capacity_gb': 2048,
'free_capacity_gb': 300,
'allocated_capacity_gb': 1748,
+ 'provisioned_capacity_gb': 1748,
+ 'max_over_subscription_ratio': 1.5,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': False,
'reserved_percentage': 10,
'volume_backend_name': 'lvm2',
'timestamp': None},
'host3': {'total_capacity_gb': 512,
'free_capacity_gb': 256,
'allocated_capacity_gb': 256,
+ 'provisioned_capacity_gb': 256,
+ 'max_over_subscription_ratio': 2.0,
+ 'thin_provisioning_support': False,
+ 'thick_provisioning_support': True,
'reserved_percentage': 0,
'volume_backend_name': 'lvm3',
'timestamp': None},
'host4': {'total_capacity_gb': 2048,
'free_capacity_gb': 200,
'allocated_capacity_gb': 1848,
+ 'provisioned_capacity_gb': 2047,
+ 'max_over_subscription_ratio': 1.0,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': False,
'reserved_percentage': 5,
'volume_backend_name': 'lvm4',
'timestamp': None,
'host5': {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1548,
+ 'provisioned_capacity_gb': 1548,
+ 'max_over_subscription_ratio': 1.0,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': None},
}
def _get_weighed_host(self, hosts, weight_properties=None):
if weight_properties is None:
- weight_properties = {}
+ weight_properties = {'size': 1}
return self.weight_handler.get_weighed_objects([CapacityWeigher],
hosts,
weight_properties)[0]
ctxt, CONF.volume_topic, disabled=disabled)
return host_states
+ # If thin_provisioning_support = False, use the following formula:
+ # free = free_space - math.floor(total * reserved)
+ # Otherwise, use the following formula:
+ # free = (total * host_state.max_over_subscription_ratio
+ # - host_state.provisioned_capacity_gb
+ # - math.floor(total * reserved))
def test_default_of_spreading_first(self):
hostinfo_list = self._get_all_hosts()
- # host1: free_capacity_gb=1024, free=1024*(1-0.1)
- # host2: free_capacity_gb=300, free=300*(1-0.1)
- # host3: free_capacity_gb=512, free=256
- # host4: free_capacity_gb=200, free=200*(1-0.05)
+ # host1: thin_provisioning_support = False
+ # free_capacity_gb=1024,
+ # free=1024-math.floor(1024*0.1)=922
+ # host2: thin_provisioning_support = True
+ # free_capacity_gb=300,
+ # free=2048*1.5-1748-math.floor(2048*0.1)=1120
+ # host3: thin_provisioning_support = False
+ # free_capacity_gb=512, free=256-512*0=256
+ # host4: thin_provisioning_support = True
+ # free_capacity_gb=200,
+ # free=2048*1.0-2047-math.floor(2048*0.05)=-101
# host5: free_capacity_gb=unknown free=-1
- # so, host1 should win:
+ # so, host2 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
- self.assertEqual(weighed_host.weight, 921.0)
+ self.assertEqual(weighed_host.weight, 1120.0)
self.assertEqual(
- utils.extract_host(weighed_host.obj.host), 'host1')
+ utils.extract_host(weighed_host.obj.host), 'host2')
def test_capacity_weight_multiplier1(self):
self.flags(capacity_weight_multiplier=-1.0)
hostinfo_list = self._get_all_hosts()
- # host1: free_capacity_gb=1024, free=-1024*(1-0.1)
- # host2: free_capacity_gb=300, free=-300*(1-0.1)
- # host3: free_capacity_gb=512, free=-256
- # host4: free_capacity_gb=200, free=-200*(1-0.05)
+ # host1: thin_provisioning_support = False
+ # free_capacity_gb=1024,
+ # free=-(1024-math.floor(1024*0.1))=-922
+ # host2: thin_provisioning_support = True
+ # free_capacity_gb=300,
+ # free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
+ # host3: thin_provisioning_support = False
+ # free_capacity_gb=512, free=-(256-512*0)=-256
+ # host4: thin_provisioning_support = True
+ # free_capacity_gb=200,
+ # free=-(2048*1.0-2047-math.floor(2048*0.05))=101
# host5: free_capacity_gb=unknown free=-float('inf')
# so, host4 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
- self.assertEqual(weighed_host.weight, -190.0)
+ self.assertEqual(weighed_host.weight, 101.0)
self.assertEqual(
utils.extract_host(weighed_host.obj.host), 'host4')
self.flags(capacity_weight_multiplier=2.0)
hostinfo_list = self._get_all_hosts()
- # host1: free_capacity_gb=1024, free=1024*(1-0.1)*2
- # host2: free_capacity_gb=300, free=300*(1-0.1)*2
- # host3: free_capacity_gb=512, free=256*2
- # host4: free_capacity_gb=200, free=200*(1-0.05)*2
+ # host1: thin_provisioning_support = False
+ # free_capacity_gb=1024,
+ # free=(1024-math.floor(1024*0.1))*2=1844
+ # host2: thin_provisioning_support = True
+ # free_capacity_gb=300,
+ # free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240
+ # host3: thin_provisioning_support = False
+ # free_capacity_gb=512, free=(256-512*0)*2=512
+ # host4: thin_provisioning_support = True
+ # free_capacity_gb=200,
+ # free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202
# host5: free_capacity_gb=unknown free=-2
- # so, host1 should win:
+ # so, host2 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
- self.assertEqual(weighed_host.weight, 921.0 * 2)
+ self.assertEqual(weighed_host.weight, 1120.0 * 2)
self.assertEqual(
- utils.extract_host(weighed_host.obj.host), 'host1')
+ utils.extract_host(weighed_host.obj.host), 'host2')
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
- {'free_capacity_gb': 200,
+ {'total_capacity_gb': 500,
+ 'free_capacity_gb': 200,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties = {'size': 100, 'vol_exists_on': 'host1'}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
- {'free_capacity_gb': 10,
+ {'total_capacity_gb': 100,
+ 'free_capacity_gb': 10,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
- {'free_capacity_gb': 120,
+ {'total_capacity_gb': 200,
+ 'free_capacity_gb': 120,
'reserved_percentage': 20,
'updated_at': None,
'service': service})
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ @mock.patch('cinder.utils.service_is_up')
+ def test_filter_thin_true_passes(self, _mock_serv_is_up):
+ _mock_serv_is_up.return_value = True
+ filt_cls = self.class_map['CapacityFilter']()
+ filter_properties = {'size': 100,
+ 'capabilities:thin_provisioning_support':
+ '<is> True',
+ 'capabilities:thick_provisioning_support':
+ '<is> False'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1',
+ {'total_capacity_gb': 500,
+ 'free_capacity_gb': 200,
+ 'provisioned_capacity_gb': 500,
+ 'max_over_subscription_ratio': 2.0,
+ 'reserved_percentage': 5,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': False,
+ 'updated_at': None,
+ 'service': service})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('cinder.utils.service_is_up')
+ def test_filter_thin_false_passes(self, _mock_serv_is_up):
+ _mock_serv_is_up.return_value = True
+ filt_cls = self.class_map['CapacityFilter']()
+ filter_properties = {'size': 100,
+ 'capabilities:thin_provisioning_support':
+ '<is> False',
+ 'capabilities:thick_provisioning_support':
+ '<is> True'}
+ service = {'disabled': False}
+ # If "thin_provisioning_support" is False,
+ # "max_over_subscription_ratio" will be ignored.
+ host = fakes.FakeHostState('host1',
+ {'total_capacity_gb': 500,
+ 'free_capacity_gb': 200,
+ 'provisioned_capacity_gb': 300,
+ 'max_over_subscription_ratio': 1.0,
+ 'reserved_percentage': 5,
+ 'thin_provisioning_support': False,
+ 'thick_provisioning_support': True,
+ 'updated_at': None,
+ 'service': service})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('cinder.utils.service_is_up')
+ def test_filter_over_subscription_fails(self, _mock_serv_is_up):
+ _mock_serv_is_up.return_value = True
+ filt_cls = self.class_map['CapacityFilter']()
+ filter_properties = {'size': 100,
+ 'capabilities:thin_provisioning_support':
+ '<is> True',
+ 'capabilities:thick_provisioning_support':
+ '<is> False'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1',
+ {'total_capacity_gb': 500,
+ 'free_capacity_gb': 200,
+ 'provisioned_capacity_gb': 500,
+ 'max_over_subscription_ratio': 1.0,
+ 'reserved_percentage': 5,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': False,
+ 'updated_at': None,
+ 'service': service})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('cinder.utils.service_is_up')
+ def test_filter_over_subscription_fails2(self, _mock_serv_is_up):
+ _mock_serv_is_up.return_value = True
+ filt_cls = self.class_map['CapacityFilter']()
+ filter_properties = {'size': 30,
+ 'capabilities:thin_provisioning_support':
+ '<is> True',
+ 'capabilities:thick_provisioning_support':
+ '<is> False'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1',
+ {'total_capacity_gb': 500,
+ 'free_capacity_gb': 30,
+ 'provisioned_capacity_gb': 500,
+ 'max_over_subscription_ratio': 1.0,
+ 'reserved_percentage': 0,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': False,
+ 'updated_at': None,
+ 'service': service})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('cinder.utils.service_is_up')
+ def test_filter_reserved_thin_true_fails(self, _mock_serv_is_up):
+ _mock_serv_is_up.return_value = True
+ filt_cls = self.class_map['CapacityFilter']()
+ filter_properties = {'size': 100,
+ 'capabilities:thin_provisioning_support':
+ '<is> True',
+ 'capabilities:thick_provisioning_support':
+ '<is> False'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1',
+ {'total_capacity_gb': 500,
+ 'free_capacity_gb': 100,
+ 'provisioned_capacity_gb': 500,
+ 'max_over_subscription_ratio': 2.0,
+ 'reserved_percentage': 5,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': False,
+ 'updated_at': None,
+ 'service': service})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('cinder.utils.service_is_up')
+ def test_filter_reserved_thin_false_fails(self, _mock_serv_is_up):
+ _mock_serv_is_up.return_value = True
+ filt_cls = self.class_map['CapacityFilter']()
+ filter_properties = {'size': 100,
+ 'capabilities:thin_provisioning_support':
+ '<is> False',
+ 'capabilities:thick_provisioning_support':
+ '<is> True'}
+ service = {'disabled': False}
+ # If "thin_provisioning_support" is False,
+ # "max_over_subscription_ratio" will be ignored.
+ host = fakes.FakeHostState('host1',
+ {'total_capacity_gb': 500,
+ 'free_capacity_gb': 100,
+ 'provisioned_capacity_gb': 400,
+ 'max_over_subscription_ratio': 1.0,
+ 'reserved_percentage': 5,
+ 'thin_provisioning_support': False,
+ 'thick_provisioning_support': True,
+ 'updated_at': None,
+ 'service': service})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('cinder.utils.service_is_up')
+ def test_filter_reserved_thin_thick_true_fails(self, _mock_serv_is_up):
+ _mock_serv_is_up.return_value = True
+ filt_cls = self.class_map['CapacityFilter']()
+ filter_properties = {'size': 100,
+ 'capabilities:thin_provisioning_support':
+ '<is> True',
+ 'capabilities:thick_provisioning_support':
+ '<is> True'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1',
+ {'total_capacity_gb': 500,
+ 'free_capacity_gb': 100,
+ 'provisioned_capacity_gb': 400,
+ 'max_over_subscription_ratio': 2.0,
+ 'reserved_percentage': 5,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': True,
+ 'updated_at': None,
+ 'service': service})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('cinder.utils.service_is_up')
+ def test_filter_reserved_thin_thick_true_passes(self, _mock_serv_is_up):
+ _mock_serv_is_up.return_value = True
+ filt_cls = self.class_map['CapacityFilter']()
+ filter_properties = {'size': 100,
+ 'capabilities:thin_provisioning_support':
+ '<is> True',
+ 'capabilities:thick_provisioning_support':
+ '<is> True'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1',
+ {'total_capacity_gb': 500,
+ 'free_capacity_gb': 125,
+ 'provisioned_capacity_gb': 400,
+ 'max_over_subscription_ratio': 2.0,
+ 'reserved_percentage': 5,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': True,
+ 'updated_at': None,
+ 'service': service})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('cinder.utils.service_is_up')
+ def test_filter_reserved_thin_thick_true_fails2(self, _mock_serv_is_up):
+ _mock_serv_is_up.return_value = True
+ filt_cls = self.class_map['CapacityFilter']()
+ filter_properties = {'size': 100,
+ 'capabilities:thin_provisioning_support':
+ '<is> True',
+ 'capabilities:thick_provisioning_support':
+ '<is> True'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1',
+ {'total_capacity_gb': 500,
+ 'free_capacity_gb': 99,
+ 'provisioned_capacity_gb': 400,
+ 'max_over_subscription_ratio': 2.0,
+ 'reserved_percentage': 5,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': True,
+ 'updated_at': None,
+ 'service': service})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('cinder.utils.service_is_up')
+ def test_filter_reserved_thin_thick_true_passes2(self, _mock_serv_is_up):
+ _mock_serv_is_up.return_value = True
+ filt_cls = self.class_map['CapacityFilter']()
+ filter_properties = {'size': 100,
+ 'capabilities:thin_provisioning_support':
+ '<is> True',
+ 'capabilities:thick_provisioning_support':
+ '<is> True'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1',
+ {'total_capacity_gb': 500,
+ 'free_capacity_gb': 100,
+ 'provisioned_capacity_gb': 400,
+ 'max_over_subscription_ratio': 2.0,
+ 'reserved_percentage': 0,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': True,
+ 'updated_at': None,
+ 'service': service})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
class AffinityFilterTestCase(HostFiltersTestCase):
@mock.patch('cinder.utils.service_is_up')
default=False,
help='Tell driver to use SSL for connection to backend '
'storage if the driver supports it.'),
+ cfg.FloatOpt('max_over_subscription_ratio',
+ default=2.0,
+ help='Float representation of the over subscription ratio '
+ 'when thin provisioning is involved. Default ratio is '
+ '2.0, meaning provisioned capacity can be twice the '
+ 'total physical capacity. If the ratio is 10.5, '
+ 'provisioned capacity can be 10.5 times the total '
+ 'physical capacity. A ratio of 1.0 means '
+ 'provisioned capacity cannot exceed the total physical '
+ 'capacity. A ratio lower than 1.0 will be ignored and '
+ 'the default value will be used instead.'),
]
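
As a usage note, this option can also be set in a back-end section of
cinder.conf; the section name below is hypothetical and other back-end
options are omitted:

    [lvm-thin]
    # hypothetical back-end section
    max_over_subscription_ratio = 10.5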
# for backward compatibility
data["storage_protocol"] = 'iSCSI'
data["pools"] = []
+ # provisioned_capacity_gb is set to None by default below, but
+ # None is never used in calculations. It is overridden by the
+ # driver's reported provisioned_capacity_gb if available;
+ # otherwise it falls back to allocated_capacity_gb in
+ # host_manager.py.
if self.pools:
for pool in self.pools:
new_pool = {}
pool_name=pool,
total_capacity_gb=0,
free_capacity_gb=0,
- provisioned_capacity_gb=0,
+ provisioned_capacity_gb=None,
reserved_percentage=100,
QoS_support=False
))
pool_name=data["volume_backend_name"],
total_capacity_gb=0,
free_capacity_gb=0,
- provisioned_capacity_gb=0,
+ provisioned_capacity_gb=None,
reserved_percentage=100,
QoS_support=False
))
total_capacity = 0
free_capacity = 0
+ thin_enabled = False
+
if self.configuration.lvm_mirrors > 0:
total_capacity =\
self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
total_capacity = self.vg.vg_thin_pool_size
free_capacity = self.vg.vg_thin_pool_free_space
provisioned_capacity = self.vg.vg_provisioned_capacity
+ thin_enabled = True
else:
total_capacity = self.vg.vg_size
free_capacity = self.vg.vg_free_space
pool_name=data["volume_backend_name"],
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
- provisioned_capacity_gb=provisioned_capacity,
reserved_percentage=self.configuration.reserved_percentage,
location_info=location_info,
QoS_support=False,
+ provisioned_capacity_gb=provisioned_capacity,
+ max_over_subscription_ratio=(
+ self.configuration.max_over_subscription_ratio),
+ thin_provisioning_support=thin_enabled,
+ thick_provisioning_support=not thin_enabled,
))
data["pools"].append(single_pool)