Support over subscription in thin provisioning
author    Xing Yang <xing.yang@emc.com>
          Fri, 16 Jan 2015 21:27:23 +0000 (16:27 -0500)
committer Xing Yang <xing.yang@emc.com>
          Thu, 5 Feb 2015 04:22:34 +0000 (23:22 -0500)
This patch adds support for over subscription in thin provisioning.
The following changes are proposed:

* A configuration option "max_over_subscription_ratio" will be
  introduced.
* Driver reports the following capacities and ratios:
  * provisioned_capacity
  * max_over_subscription_ratio
    * Driver can use the newly added configuration option to report
      this ratio, or it can decide what ratio to report itself.
      The value of this ratio depends on the driver implementation
      and is reported together with the driver's other capabilities
      and capacities.
  * reserved_percentage
    * Note: This is an existing parameter reported by the driver.
    * Currently it is measured against the free capacity. With this
      patch, the filter scheduler measures it against the total
      capacity instead.
* Driver also reports the following capabilities:
  * thin_provisioning_support (True or False)
  * thick_provisioning_support (True or False)
* Scheduler will use the above new capabilities reported by the
  driver to make decisions when choosing a backend (see the sketch
  after this list).
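
As a rough illustration of that decision (hypothetical numbers, not taken
from this patch), the sketch below shows how the ratio bounds the virtual
capacity a thin-provisioned backend may hand out:

```python
# Illustrative numbers only; names mirror the capabilities described above.
total_capacity_gb = 500.0             # physical capacity reported by the backend
max_over_subscription_ratio = 2.0     # new config option / driver-reported ratio

# With a 2.0 ratio, up to total * ratio of virtual capacity may be provisioned
# before the scheduler stops placing new volumes on the backend.
max_provisioned_gb = total_capacity_gb * max_over_subscription_ratio  # 1000.0

provisioned_capacity_gb = 800.0       # capacity already provisioned on the backend
requested_size_gb = 100.0             # size of the volume being scheduled

projected_ratio = (provisioned_capacity_gb + requested_size_gb) / total_capacity_gb
print(projected_ratio)                                  # 1.8
print(projected_ratio < max_over_subscription_ratio)    # True -> backend still eligible
```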

For more details, please see Cinder spec:
https://review.openstack.org/#/c/129342/12/specs/kilo/
over-subscription-in-thin-provisioning.rst

Implements: blueprint over-subscription-in-thin-provisioning
Change-Id: I176a691a4e25bbdc7c4d598628acf2543b2c7ac6

cinder/scheduler/filters/capacity_filter.py
cinder/scheduler/host_manager.py
cinder/scheduler/weights/capacity.py
cinder/tests/scheduler/fakes.py
cinder/tests/scheduler/test_capacity_weigher.py
cinder/tests/scheduler/test_host_filters.py
cinder/volume/driver.py
cinder/volume/drivers/lvm.py

diff --git a/cinder/scheduler/filters/capacity_filter.py b/cinder/scheduler/filters/capacity_filter.py
index ec7b551dae6107882fc26ad26f713da56815b256..fb72d8536acc584b31ddfd37db2d8e1474cfa6a9 100644
@@ -1,5 +1,6 @@
 # Copyright (c) 2012 Intel
 # Copyright (c) 2012 OpenStack Foundation
+# Copyright (c) 2015 EMC Corporation
 #
 # All Rights Reserved.
 #
@@ -46,14 +47,34 @@ class CapacityFilter(filters.BaseHostFilter):
             return False
 
         free_space = host_state.free_capacity_gb
-        if free_space == 'infinite' or free_space == 'unknown':
+        total_space = host_state.total_capacity_gb
+        reserved = float(host_state.reserved_percentage) / 100
+        if free_space in ['infinite', 'unknown']:
             # NOTE(zhiteng) for those back-ends cannot report actual
             # available capacity, we assume it is able to serve the
             # request.  Even if it was not, the retry mechanism is
             # able to handle the failure by rescheduling
             return True
-        reserved = float(host_state.reserved_percentage) / 100
-        free = math.floor(free_space * (1 - reserved))
+        elif total_space in ['infinite', 'unknown']:
+            # If total_space is 'infinite' or 'unknown' and reserved
+            # is 0, we assume the back-ends can serve the request.
+            # If total_space is 'infinite' or 'unknown' and reserved
+            # is not 0, we cannot calculate the reserved space.
+            # float(total_space) will throw an exception. total*reserved
+            # also won't work. So the back-ends cannot serve the request.
+            if reserved == 0:
+                return True
+            return False
+        total = float(total_space)
+        if total <= 0:
+            LOG.warning(_LW("Insufficient free space for volume creation. "
+                            "Total capacity is %(total).2f on host %(host)s."),
+                        {"total": total,
+                         "host": host_state.host})
+            return False
+        # Calculate how much free space is left after taking into account
+        # the reserved space.
+        free = free_space - math.floor(total * reserved)
 
         msg_args = {"host": host_state.host,
                     "requested": volume_size,
@@ -61,10 +82,34 @@ class CapacityFilter(filters.BaseHostFilter):
         if free < volume_size:
             LOG.warning(_LW("Insufficient free space for volume creation "
                             "on host %(host)s (requested / avail): "
-                            "%(requested)s/%(available)s") % msg_args)
+                            "%(requested)s/%(available)s"), msg_args)
+            return free >= volume_size
         else:
-            LOG.debug("Sufficient free space for volume creation "
+            LOG.debug("Space information for volume creation "
                       "on host %(host)s (requested / avail): "
-                      "%(requested)s/%(available)s" % msg_args)
+                      "%(requested)s/%(available)s", msg_args)
+
+        # Only evaluate using max_over_subscription_ratio if
+        # thin_provisioning_support is True. Check if the ratio of
+        # provisioned capacity over total capacity has exceeded over
+        # subscription ratio.
+        if (host_state.thin_provisioning_support and
+                host_state.max_over_subscription_ratio >= 1):
+            provisioned_ratio = ((host_state.provisioned_capacity_gb +
+                                  volume_size) / total)
+            if provisioned_ratio >= host_state.max_over_subscription_ratio:
+                LOG.warning(_LW(
+                    "Insufficient free space for thin provisioning. "
+                    "The ratio of provisioned capacity over total capacity "
+                    "%(provisioned_ratio).2f has exceeded the maximum over "
+                    "subscription ratio %(oversub_ratio).2f on host "
+                    "%(host)s."),
+                    {"provisioned_ratio": provisioned_ratio,
+                     "oversub_ratio": host_state.max_over_subscription_ratio,
+                     "host": host_state.host})
+                return False
+            else:
+                free_virtual = free * host_state.max_over_subscription_ratio
+                return free_virtual >= volume_size
 
         return free >= volume_size
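
To make the new filter flow concrete, here is a worked trace (not part of the
patch) using the same numbers as the test_filter_thin_true_passes case added
below: total=500, free=200, provisioned=500, reserved=5%,
max_over_subscription_ratio=2.0, requested size=100.

```python
import math

# Numbers borrowed from the thin-provisioning test case added in this patch.
total, free_space, provisioned = 500.0, 200.0, 500.0
reserved = 5 / 100.0                    # reserved_percentage as a fraction
oversub_ratio, volume_size = 2.0, 100

# Free space after setting aside the reserved portion of the *total* capacity.
free = free_space - math.floor(total * reserved)          # 200 - 25 = 175

# Projected over-subscription ratio if this volume were placed here.
provisioned_ratio = (provisioned + volume_size) / total   # 600 / 500 = 1.2

# 1.2 < 2.0, so the virtual free capacity decides the outcome.
free_virtual = free * oversub_ratio                        # 350.0
print(free_virtual >= volume_size)                         # True -> host passes
```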
diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py
index 2fadc13afafaec15100d5b6fd3b5138b5a23c52a..b9c59724ae2b46dd51e65efd2bea4e40abdb55ee 100644
@@ -51,6 +51,7 @@ host_manager_opts = [
 CONF = cfg.CONF
 CONF.register_opts(host_manager_opts)
 CONF.import_opt('scheduler_driver', 'cinder.scheduler.manager')
+CONF.import_opt('max_over_subscription_ratio', 'cinder.volume.driver')
 
 LOG = logging.getLogger(__name__)
 
@@ -114,6 +115,9 @@ class HostState(object):
         # all volumes on a backend, which could be greater than or
         # equal to the allocated_capacity_gb.
         self.provisioned_capacity_gb = 0
+        self.max_over_subscription_ratio = 1.0
+        self.thin_provisioning_support = False
+        self.thick_provisioning_support = False
 
         # PoolState for all pools
         self.pools = {}
@@ -320,6 +324,13 @@ class PoolState(HostState):
             # provisioned_capacity_gb if it is not set.
             self.provisioned_capacity_gb = capability.get(
                 'provisioned_capacity_gb', self.allocated_capacity_gb)
+            self.max_over_subscription_ratio = capability.get(
+                'max_over_subscription_ratio',
+                CONF.max_over_subscription_ratio)
+            self.thin_provisioning_support = capability.get(
+                'thin_provisioning_support', False)
+            self.thick_provisioning_support = capability.get(
+                'thick_provisioning_support', False)
 
     def update_pools(self, capability):
         # Do nothing, since we don't have pools within pool, yet
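
For reference, a hypothetical capability report (a sketch only, not taken
from this patch) showing the keys that PoolState.update_from_volume_capability()
now consumes, together with the fallbacks applied when a key is missing:

```python
# Hypothetical values; key names match what the pool state reads above.
capability = {
    'total_capacity_gb': 1024,
    'free_capacity_gb': 512,
    'allocated_capacity_gb': 256,
    'provisioned_capacity_gb': 300,        # absent -> falls back to allocated_capacity_gb
    'max_over_subscription_ratio': 1.5,    # absent -> falls back to CONF.max_over_subscription_ratio
    'thin_provisioning_support': True,     # absent -> False
    'thick_provisioning_support': False,   # absent -> False
    'reserved_percentage': 5,
}
```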
diff --git a/cinder/scheduler/weights/capacity.py b/cinder/scheduler/weights/capacity.py
index fe0074f3d0152b03399f2f310b438d6f6e2dfd3b..d5068e97d4c2d529af02ee5b8bd9a0b853b79616 100644
@@ -1,5 +1,7 @@
 # Copyright (c) 2013 eBay Inc.
 # Copyright (c) 2012 OpenStack Foundation
+# Copyright (c) 2015 EMC Corporation
+#
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 Weighers that weigh hosts by their capacity, including following two
 weighers:
 
-1. Capacity Weigher.  Weigh hosts by their available capacity.
+1. Capacity Weigher.  Weigh hosts by their virtual or actual free capacity.
+
+For thin provisioning, weigh hosts by their virtual free capacity calculated
+by the total capacity multiplied by the max over subscription ratio and
+subtracting the provisioned capacity; Otherwise, weigh hosts by their actual
+free capacity, taking into account the reserved space.
 
 The default is to spread volumes across all hosts evenly.  If you prefer
 stacking, you can set the 'capacity_weight_multiplier' option to a negative
@@ -64,7 +71,9 @@ class CapacityWeigher(weights.BaseHostWeigher):
         """Higher weights win.  We want spreading to be the default."""
         reserved = float(host_state.reserved_percentage) / 100
         free_space = host_state.free_capacity_gb
-        if free_space == 'infinite' or free_space == 'unknown':
+        total_space = host_state.total_capacity_gb
+        if (free_space == 'infinite' or free_space == 'unknown' or
+                total_space == 'infinite' or total_space == 'unknown'):
             #(zhiteng) 'infinite' and 'unknown' are treated the same
             # here, for sorting purpose.
 
@@ -73,7 +82,16 @@ class CapacityWeigher(weights.BaseHostWeigher):
             # capacity anymore.
             free = -1 if CONF.capacity_weight_multiplier > 0 else float('inf')
         else:
-            free = math.floor(host_state.free_capacity_gb * (1 - reserved))
+            total = float(total_space)
+            if host_state.thin_provisioning_support:
+                # Calculate virtual free capacity for thin provisioning.
+                free = (total * host_state.max_over_subscription_ratio
+                        - host_state.provisioned_capacity_gb -
+                        math.floor(total * reserved))
+            else:
+                # Calculate how much free space is left after taking into
+                # account the reserved space.
+                free = free_space - math.floor(total * reserved)
         return free
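
As a sanity check on the formula above, a standalone sketch (not part of the
patch) traced with the 'host2' values from the fake host manager used by the
tests below:

```python
import math

def virtual_free(total, free_space, provisioned, reserved, oversub_ratio,
                 thin_provisioning_support):
    """Mirror of the weigher math above, for illustration only."""
    if thin_provisioning_support:
        # Virtual free capacity for thin provisioning.
        return (total * oversub_ratio - provisioned
                - math.floor(total * reserved))
    # Actual free capacity minus the reserved portion of total capacity.
    return free_space - math.floor(total * reserved)

# host2: 2048 * 1.5 - 1748 - floor(2048 * 0.1) = 3072 - 1748 - 204 = 1120
print(virtual_free(2048.0, 300.0, 1748.0, 0.10, 1.5, True))   # 1120.0
```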
 
 
diff --git a/cinder/tests/scheduler/fakes.py b/cinder/tests/scheduler/fakes.py
index 8bda883c3465b9dec783c73da8c95a161a41aa4a..8d38e2f6a24ee21ee1b0ae62aacfd02bd4fdbcb9 100644
@@ -37,24 +37,40 @@ class FakeHostManager(host_manager.HostManager):
             'host1': {'total_capacity_gb': 1024,
                       'free_capacity_gb': 1024,
                       'allocated_capacity_gb': 0,
+                      'provisioned_capacity_gb': 0,
+                      'max_over_subscription_ratio': 1.0,
+                      'thin_provisioning_support': False,
+                      'thick_provisioning_support': True,
                       'reserved_percentage': 10,
                       'volume_backend_name': 'lvm1',
                       'timestamp': None},
             'host2': {'total_capacity_gb': 2048,
                       'free_capacity_gb': 300,
                       'allocated_capacity_gb': 1748,
+                      'provisioned_capacity_gb': 1748,
+                      'max_over_subscription_ratio': 1.5,
+                      'thin_provisioning_support': True,
+                      'thick_provisioning_support': False,
                       'reserved_percentage': 10,
                       'volume_backend_name': 'lvm2',
                       'timestamp': None},
             'host3': {'total_capacity_gb': 512,
                       'free_capacity_gb': 256,
                       'allocated_capacity_gb': 256,
+                      'provisioned_capacity_gb': 256,
+                      'max_over_subscription_ratio': 2.0,
+                      'thin_provisioning_support': False,
+                      'thick_provisioning_support': True,
                       'reserved_percentage': 0,
                       'volume_backend_name': 'lvm3',
                       'timestamp': None},
             'host4': {'total_capacity_gb': 2048,
                       'free_capacity_gb': 200,
                       'allocated_capacity_gb': 1848,
+                      'provisioned_capacity_gb': 2047,
+                      'max_over_subscription_ratio': 1.0,
+                      'thin_provisioning_support': True,
+                      'thick_provisioning_support': False,
                       'reserved_percentage': 5,
                       'volume_backend_name': 'lvm4',
                       'timestamp': None,
@@ -62,6 +78,10 @@ class FakeHostManager(host_manager.HostManager):
             'host5': {'total_capacity_gb': 'infinite',
                       'free_capacity_gb': 'unknown',
                       'allocated_capacity_gb': 1548,
+                      'provisioned_capacity_gb': 1548,
+                      'max_over_subscription_ratio': 1.0,
+                      'thin_provisioning_support': True,
+                      'thick_provisioning_support': False,
                       'reserved_percentage': 5,
                       'timestamp': None},
         }
diff --git a/cinder/tests/scheduler/test_capacity_weigher.py b/cinder/tests/scheduler/test_capacity_weigher.py
index 962220a26ae56826bd60ab5974e2e885946074ea..2b5d50fa27a2f2d45b7f5501de45038b5aec0b8a 100644
@@ -37,7 +37,7 @@ class CapacityWeigherTestCase(test.TestCase):
 
     def _get_weighed_host(self, hosts, weight_properties=None):
         if weight_properties is None:
-            weight_properties = {}
+            weight_properties = {'size': 1}
         return self.weight_handler.get_weighed_objects([CapacityWeigher],
                                                        hosts,
                                                        weight_properties)[0]
@@ -52,34 +52,54 @@ class CapacityWeigherTestCase(test.TestCase):
             ctxt, CONF.volume_topic, disabled=disabled)
         return host_states
 
+    # If thin_provisioning_support = False, use the following formula:
+    # free = free_space - math.floor(total * reserved)
+    # Otherwise, use the following formula:
+    # free = (total * host_state.max_over_subscription_ratio
+    #         - host_state.provisioned_capacity_gb
+    #         - math.floor(total * reserved))
     def test_default_of_spreading_first(self):
         hostinfo_list = self._get_all_hosts()
 
-        # host1: free_capacity_gb=1024, free=1024*(1-0.1)
-        # host2: free_capacity_gb=300, free=300*(1-0.1)
-        # host3: free_capacity_gb=512, free=256
-        # host4: free_capacity_gb=200, free=200*(1-0.05)
+        # host1: thin_provisioning_support = False
+        #        free_capacity_gb=1024,
+        #        free=1024-math.floor(1024*0.1)=922
+        # host2: thin_provisioning_support = True
+        #        free_capacity_gb=300,
+        #        free=2048*1.5-1748-math.floor(2048*0.1)=1120
+        # host3: thin_provisioning_support = False
+        #        free_capacity_gb=512, free=256-512*0=256
+        # host4: thin_provisioning_support = True
+        #        free_capacity_gb=200,
+        #        free=2048*1.0-2047-math.floor(2048*0.05)=-101
         # host5: free_capacity_gb=unknown free=-1
 
-        # so, host1 should win:
+        # so, host2 should win:
         weighed_host = self._get_weighed_host(hostinfo_list)
-        self.assertEqual(weighed_host.weight, 921.0)
+        self.assertEqual(weighed_host.weight, 1120.0)
         self.assertEqual(
-            utils.extract_host(weighed_host.obj.host), 'host1')
+            utils.extract_host(weighed_host.obj.host), 'host2')
 
     def test_capacity_weight_multiplier1(self):
         self.flags(capacity_weight_multiplier=-1.0)
         hostinfo_list = self._get_all_hosts()
 
-        # host1: free_capacity_gb=1024, free=-1024*(1-0.1)
-        # host2: free_capacity_gb=300, free=-300*(1-0.1)
-        # host3: free_capacity_gb=512, free=-256
-        # host4: free_capacity_gb=200, free=-200*(1-0.05)
+        # host1: thin_provisioning_support = False
+        #        free_capacity_gb=1024,
+        #        free=-(1024-math.floor(1024*0.1))=-922
+        # host2: thin_provisioning_support = True
+        #        free_capacity_gb=300,
+        #        free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
+        # host3: thin_provisioning_support = False
+        #        free_capacity_gb=512, free=-(256-512*0)=-256
+        # host4: thin_provisioning_support = True
+        #        free_capacity_gb=200,
+        #        free=-(2048*1.0-2047-math.floor(2048*0.05))=101
         # host5: free_capacity_gb=unknown free=-float('inf')
 
         # so, host4 should win:
         weighed_host = self._get_weighed_host(hostinfo_list)
-        self.assertEqual(weighed_host.weight, -190.0)
+        self.assertEqual(weighed_host.weight, 101.0)
         self.assertEqual(
             utils.extract_host(weighed_host.obj.host), 'host4')
 
@@ -87,14 +107,21 @@ class CapacityWeigherTestCase(test.TestCase):
         self.flags(capacity_weight_multiplier=2.0)
         hostinfo_list = self._get_all_hosts()
 
-        # host1: free_capacity_gb=1024, free=1024*(1-0.1)*2
-        # host2: free_capacity_gb=300, free=300*(1-0.1)*2
-        # host3: free_capacity_gb=512, free=256*2
-        # host4: free_capacity_gb=200, free=200*(1-0.05)*2
+        # host1: thin_provisioning_support = False
+        #        free_capacity_gb=1024,
+        #        free=(1024-math.floor(1024*0.1))*2=1844
+        # host2: thin_provisioning_support = True
+        #        free_capacity_gb=300,
+        #        free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240
+        # host3: thin_provisioning_support = False
+        #        free_capacity_gb=512, free=(256-512*0)*2=512
+        # host4: thin_provisioning_support = True
+        #        free_capacity_gb=200,
+        #        free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202
         # host5: free_capacity_gb=unknown free=-2
 
-        # so, host1 should win:
+        # so, host2 should win:
         weighed_host = self._get_weighed_host(hostinfo_list)
-        self.assertEqual(weighed_host.weight, 921.0 * 2)
+        self.assertEqual(weighed_host.weight, 1120.0 * 2)
         self.assertEqual(
-            utils.extract_host(weighed_host.obj.host), 'host1')
+            utils.extract_host(weighed_host.obj.host), 'host2')
diff --git a/cinder/tests/scheduler/test_host_filters.py b/cinder/tests/scheduler/test_host_filters.py
index bb4d96692c11406c701140128031a0503141f037..b5b82c1900fd818882043852fbc41371a70ce472 100644
@@ -59,7 +59,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
         filter_properties = {'size': 100}
         service = {'disabled': False}
         host = fakes.FakeHostState('host1',
-                                   {'free_capacity_gb': 200,
+                                   {'total_capacity_gb': 500,
+                                    'free_capacity_gb': 200,
                                     'updated_at': None,
                                     'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -71,7 +72,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
         filter_properties = {'size': 100, 'vol_exists_on': 'host1'}
         service = {'disabled': False}
         host = fakes.FakeHostState('host1',
-                                   {'free_capacity_gb': 10,
+                                   {'total_capacity_gb': 100,
+                                    'free_capacity_gb': 10,
                                     'updated_at': None,
                                     'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -83,7 +85,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
         filter_properties = {'size': 100}
         service = {'disabled': False}
         host = fakes.FakeHostState('host1',
-                                   {'free_capacity_gb': 120,
+                                   {'total_capacity_gb': 200,
+                                    'free_capacity_gb': 120,
                                     'reserved_percentage': 20,
                                     'updated_at': None,
                                     'service': service})
@@ -113,6 +116,230 @@ class CapacityFilterTestCase(HostFiltersTestCase):
                                     'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
 
+    @mock.patch('cinder.utils.service_is_up')
+    def test_filter_thin_true_passes(self, _mock_serv_is_up):
+        _mock_serv_is_up.return_value = True
+        filt_cls = self.class_map['CapacityFilter']()
+        filter_properties = {'size': 100,
+                             'capabilities:thin_provisioning_support':
+                                 '<is> True',
+                             'capabilities:thick_provisioning_support':
+                                 '<is> False'}
+        service = {'disabled': False}
+        host = fakes.FakeHostState('host1',
+                                   {'total_capacity_gb': 500,
+                                    'free_capacity_gb': 200,
+                                    'provisioned_capacity_gb': 500,
+                                    'max_over_subscription_ratio': 2.0,
+                                    'reserved_percentage': 5,
+                                    'thin_provisioning_support': True,
+                                    'thick_provisioning_support': False,
+                                    'updated_at': None,
+                                    'service': service})
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+    @mock.patch('cinder.utils.service_is_up')
+    def test_filter_thin_false_passes(self, _mock_serv_is_up):
+        _mock_serv_is_up.return_value = True
+        filt_cls = self.class_map['CapacityFilter']()
+        filter_properties = {'size': 100,
+                             'capabilities:thin_provisioning_support':
+                                 '<is> False',
+                             'capabilities:thick_provisioning_support':
+                                 '<is> True'}
+        service = {'disabled': False}
+        # If "thin_provisioning_support" is False,
+        # "max_over_subscription_ratio" will be ignored.
+        host = fakes.FakeHostState('host1',
+                                   {'total_capacity_gb': 500,
+                                    'free_capacity_gb': 200,
+                                    'provisioned_capacity_gb': 300,
+                                    'max_over_subscription_ratio': 1.0,
+                                    'reserved_percentage': 5,
+                                    'thin_provisioning_support': False,
+                                    'thick_provisioning_support': True,
+                                    'updated_at': None,
+                                    'service': service})
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+    @mock.patch('cinder.utils.service_is_up')
+    def test_filter_over_subscription_fails(self, _mock_serv_is_up):
+        _mock_serv_is_up.return_value = True
+        filt_cls = self.class_map['CapacityFilter']()
+        filter_properties = {'size': 100,
+                             'capabilities:thin_provisioning_support':
+                                 '<is> True',
+                             'capabilities:thick_provisioning_support':
+                                 '<is> False'}
+        service = {'disabled': False}
+        host = fakes.FakeHostState('host1',
+                                   {'total_capacity_gb': 500,
+                                    'free_capacity_gb': 200,
+                                    'provisioned_capacity_gb': 500,
+                                    'max_over_subscription_ratio': 1.0,
+                                    'reserved_percentage': 5,
+                                    'thin_provisioning_support': True,
+                                    'thick_provisioning_support': False,
+                                    'updated_at': None,
+                                    'service': service})
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+    @mock.patch('cinder.utils.service_is_up')
+    def test_filter_over_subscription_fails2(self, _mock_serv_is_up):
+        _mock_serv_is_up.return_value = True
+        filt_cls = self.class_map['CapacityFilter']()
+        filter_properties = {'size': 30,
+                             'capabilities:thin_provisioning_support':
+                                 '<is> True',
+                             'capabilities:thick_provisioning_support':
+                                 '<is> False'}
+        service = {'disabled': False}
+        host = fakes.FakeHostState('host1',
+                                   {'total_capacity_gb': 500,
+                                    'free_capacity_gb': 30,
+                                    'provisioned_capacity_gb': 500,
+                                    'max_over_subscription_ratio': 1.0,
+                                    'reserved_percentage': 0,
+                                    'thin_provisioning_support': True,
+                                    'thick_provisioning_support': False,
+                                    'updated_at': None,
+                                    'service': service})
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+    @mock.patch('cinder.utils.service_is_up')
+    def test_filter_reserved_thin_true_fails(self, _mock_serv_is_up):
+        _mock_serv_is_up.return_value = True
+        filt_cls = self.class_map['CapacityFilter']()
+        filter_properties = {'size': 100,
+                             'capabilities:thin_provisioning_support':
+                                 '<is> True',
+                             'capabilities:thick_provisioning_support':
+                                 '<is> False'}
+        service = {'disabled': False}
+        host = fakes.FakeHostState('host1',
+                                   {'total_capacity_gb': 500,
+                                    'free_capacity_gb': 100,
+                                    'provisioned_capacity_gb': 500,
+                                    'max_over_subscription_ratio': 2.0,
+                                    'reserved_percentage': 5,
+                                    'thin_provisioning_support': True,
+                                    'thick_provisioning_support': False,
+                                    'updated_at': None,
+                                    'service': service})
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+    @mock.patch('cinder.utils.service_is_up')
+    def test_filter_reserved_thin_false_fails(self, _mock_serv_is_up):
+        _mock_serv_is_up.return_value = True
+        filt_cls = self.class_map['CapacityFilter']()
+        filter_properties = {'size': 100,
+                             'capabilities:thin_provisioning_support':
+                                 '<is> False',
+                             'capabilities:thick_provisioning_support':
+                                 '<is> True'}
+        service = {'disabled': False}
+        # If "thin_provisioning_support" is False,
+        # "max_over_subscription_ratio" will be ignored.
+        host = fakes.FakeHostState('host1',
+                                   {'total_capacity_gb': 500,
+                                    'free_capacity_gb': 100,
+                                    'provisioned_capacity_gb': 400,
+                                    'max_over_subscription_ratio': 1.0,
+                                    'reserved_percentage': 5,
+                                    'thin_provisioning_support': False,
+                                    'thick_provisioning_support': True,
+                                    'updated_at': None,
+                                    'service': service})
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+    @mock.patch('cinder.utils.service_is_up')
+    def test_filter_reserved_thin_thick_true_fails(self, _mock_serv_is_up):
+        _mock_serv_is_up.return_value = True
+        filt_cls = self.class_map['CapacityFilter']()
+        filter_properties = {'size': 100,
+                             'capabilities:thin_provisioning_support':
+                                 '<is> True',
+                             'capabilities:thick_provisioning_support':
+                                 '<is> True'}
+        service = {'disabled': False}
+        host = fakes.FakeHostState('host1',
+                                   {'total_capacity_gb': 500,
+                                    'free_capacity_gb': 100,
+                                    'provisioned_capacity_gb': 400,
+                                    'max_over_subscription_ratio': 2.0,
+                                    'reserved_percentage': 5,
+                                    'thin_provisioning_support': True,
+                                    'thick_provisioning_support': True,
+                                    'updated_at': None,
+                                    'service': service})
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+    @mock.patch('cinder.utils.service_is_up')
+    def test_filter_reserved_thin_thick_true_passes(self, _mock_serv_is_up):
+        _mock_serv_is_up.return_value = True
+        filt_cls = self.class_map['CapacityFilter']()
+        filter_properties = {'size': 100,
+                             'capabilities:thin_provisioning_support':
+                                 '<is> True',
+                             'capabilities:thick_provisioning_support':
+                                 '<is> True'}
+        service = {'disabled': False}
+        host = fakes.FakeHostState('host1',
+                                   {'total_capacity_gb': 500,
+                                    'free_capacity_gb': 125,
+                                    'provisioned_capacity_gb': 400,
+                                    'max_over_subscription_ratio': 2.0,
+                                    'reserved_percentage': 5,
+                                    'thin_provisioning_support': True,
+                                    'thick_provisioning_support': True,
+                                    'updated_at': None,
+                                    'service': service})
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+    @mock.patch('cinder.utils.service_is_up')
+    def test_filter_reserved_thin_thick_true_fails2(self, _mock_serv_is_up):
+        _mock_serv_is_up.return_value = True
+        filt_cls = self.class_map['CapacityFilter']()
+        filter_properties = {'size': 100,
+                             'capabilities:thin_provisioning_support':
+                                 '<is> True',
+                             'capabilities:thick_provisioning_support':
+                                 '<is> True'}
+        service = {'disabled': False}
+        host = fakes.FakeHostState('host1',
+                                   {'total_capacity_gb': 500,
+                                    'free_capacity_gb': 99,
+                                    'provisioned_capacity_gb': 400,
+                                    'max_over_subscription_ratio': 2.0,
+                                    'reserved_percentage': 5,
+                                    'thin_provisioning_support': True,
+                                    'thick_provisioning_support': True,
+                                    'updated_at': None,
+                                    'service': service})
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+    @mock.patch('cinder.utils.service_is_up')
+    def test_filter_reserved_thin_thick_true_passes2(self, _mock_serv_is_up):
+        _mock_serv_is_up.return_value = True
+        filt_cls = self.class_map['CapacityFilter']()
+        filter_properties = {'size': 100,
+                             'capabilities:thin_provisioning_support':
+                                 '<is> True',
+                             'capabilities:thick_provisioning_support':
+                                 '<is> True'}
+        service = {'disabled': False}
+        host = fakes.FakeHostState('host1',
+                                   {'total_capacity_gb': 500,
+                                    'free_capacity_gb': 100,
+                                    'provisioned_capacity_gb': 400,
+                                    'max_over_subscription_ratio': 2.0,
+                                    'reserved_percentage': 0,
+                                    'thin_provisioning_support': True,
+                                    'thick_provisioning_support': True,
+                                    'updated_at': None,
+                                    'service': service})
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
 
 class AffinityFilterTestCase(HostFiltersTestCase):
     @mock.patch('cinder.utils.service_is_up')
diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py
index 2d7c5585ea282a4aed500e85e1b1aac0230dacd8..1103c27234bd3045b4f7e87d95f24ea70a3c4631 100644
@@ -129,6 +129,17 @@ volume_opts = [
                 default=False,
                 help='Tell driver to use SSL for connection to backend '
                      'storage if the driver supports it.'),
+    cfg.FloatOpt('max_over_subscription_ratio',
+                 default=2.0,
+                 help='Float representation of the over subscription ratio '
+                      'when thin provisioning is involved. Default ratio is '
+                      '2.0, meaning provisioned capacity can be twice of the '
+                      'total physical capacity. If the ratio is 10.5, it '
+                      'means provisioned capacity can be 10.5 times of the '
+                      'total physical capacity. A ratio of 1.0 means '
+                      'provisioned capacity cannot exceed the total physical '
+                      'capacity. A ratio lower than 1.0 will be ignored and '
+                      'the default value will be used instead.'),
 ]
 
 # for backward compatibility
@@ -1111,6 +1122,10 @@ class ISCSIDriver(VolumeDriver):
         data["storage_protocol"] = 'iSCSI'
         data["pools"] = []
 
+        # provisioned_capacity_gb is set to None by default below, but
+        # None won't be used in calculation. It will be overridden by
+        # driver's provisioned_capacity_gb if reported, otherwise it
+        # defaults to allocated_capacity_gb in host_manager.py.
         if self.pools:
             for pool in self.pools:
                 new_pool = {}
@@ -1118,7 +1133,7 @@ class ISCSIDriver(VolumeDriver):
                     pool_name=pool,
                     total_capacity_gb=0,
                     free_capacity_gb=0,
-                    provisioned_capacity_gb=0,
+                    provisioned_capacity_gb=None,
                     reserved_percentage=100,
                     QoS_support=False
                 ))
@@ -1130,7 +1145,7 @@ class ISCSIDriver(VolumeDriver):
                 pool_name=data["volume_backend_name"],
                 total_capacity_gb=0,
                 free_capacity_gb=0,
-                provisioned_capacity_gb=0,
+                provisioned_capacity_gb=None,
                 reserved_percentage=100,
                 QoS_support=False
             ))
diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py
index 4954e34b15ea386f91bb955bbef7a614a1187270..20f908c8f931362cde50a93d07014e994c1bf880 100644
@@ -184,6 +184,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
 
         total_capacity = 0
         free_capacity = 0
+        thin_enabled = False
+
         if self.configuration.lvm_mirrors > 0:
             total_capacity =\
                 self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
@@ -195,6 +197,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
             total_capacity = self.vg.vg_thin_pool_size
             free_capacity = self.vg.vg_thin_pool_free_space
             provisioned_capacity = self.vg.vg_provisioned_capacity
+            thin_enabled = True
         else:
             total_capacity = self.vg.vg_size
             free_capacity = self.vg.vg_free_space
@@ -216,10 +219,14 @@ class LVMVolumeDriver(driver.VolumeDriver):
             pool_name=data["volume_backend_name"],
             total_capacity_gb=total_capacity,
             free_capacity_gb=free_capacity,
-            provisioned_capacity_gb=provisioned_capacity,
             reserved_percentage=self.configuration.reserved_percentage,
             location_info=location_info,
             QoS_support=False,
+            provisioned_capacity_gb=provisioned_capacity,
+            max_over_subscription_ratio=(
+                self.configuration.max_over_subscription_ratio),
+            thin_provisioning_support=thin_enabled,
+            thick_provisioning_support=not thin_enabled,
         ))
         data["pools"].append(single_pool)