Add affinity/anti-affinity filters
author     Zhiteng Huang <zhithuang@ebaysf.com>
Tue, 28 Jan 2014 06:23:04 +0000 (14:23 +0800)
committer  Zhiteng Huang <zhithuang@ebaysf.com>
Tue, 15 Jul 2014 09:56:39 +0000 (17:56 +0800)
Cinder has done a good job hiding the details of storage backends from end
users by using volume types.  However, there are use cases where users who
build their applications on top of volumes would like to be able to 'choose'
where a volume is created.  How can Cinder provide such a capability without
hurting the simplicity we have been keeping?  Affinity/anti-affinity is one
kind of flexibility we can provide without exposing backend details.

The term affinity/anti-affinity here describes the relationship between two
sets of volumes in terms of location.  To limit the scope, we say one volume
has affinity with another only when they reside on the same volume back-end
(this notion can be extended to volume pools if volume pool support lands in
Cinder); on the contrary, an 'anti-affinity' relation between two sets of
volumes simply implies they are on different Cinder back-ends (pools).

The affinity/anti-affinity filters filter Cinder back-ends based on a hint
specified by the end user.  The hint expresses the affinity or anti-affinity
relation between new volumes and existing volume(s).  This allows end users
to provide hints like 'please put this volume in a place different from
where Volume-XYZ resides'.

This change adds two new filters to Cinder - SameBackendFilter and
DifferentBackendFilter.  These two filters look at the scheduler hint
provided by end users (via the scheduler hint extension) and filter
back-ends by checking the 'host' of the existing and new volumes to see
whether a back-end meets the requirement (being on the same back-end as the
existing volume, or not being on the same back-end(s) as the existing
volume(s)).
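
For reference, the hint reaches the filters through the 'scheduler_hints'
entry of filter_properties.  A minimal sketch of the structure the two
filters consume (key names are taken from the filter code in this change;
the context object and UUID below are placeholders):

    # Input to each filter's host_passes(); the hint value may be a single
    # uuid string or a list of uuid strings.
    filter_properties = {
        'context': request_context,   # RequestContext of the create request
        'scheduler_hints': {
            'different_host': ['<existing-volume-uuid>'],  # or 'same_host'
        },
    }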

For example:
  Volume A is on 'backend 1', to create Volume B on the same backend as A,
  use:
    cinder create --hint same_host=VolA-UUID SIZE

  To create Volume C on a different backend than that of A, use:
    cinder create --hint different_host=VolA-UUID SIZE

  Now, to create Volume D on a backend different from those of A and C,
  use:
    cinder create --hint different_host=VolA-UUID --hint
    different_host=VolC-UUID SIZE
  or:
    cinder create --hint different_host="[VolA-UUID, VolC-UUID]" SIZE

implements bp: affinity-antiaffinity-filter

DocImpact

Change-Id: I19f298bd87b0069c0d1bb133202188d3bf65b770

cinder/scheduler/filters/affinity_filter.py [new file with mode: 0644]
cinder/tests/scheduler/test_host_filters.py
setup.cfg

diff --git a/cinder/scheduler/filters/affinity_filter.py b/cinder/scheduler/filters/affinity_filter.py
new file mode 100644 (file)
index 0000000..64f3c04
--- /dev/null
+++ b/cinder/scheduler/filters/affinity_filter.py
@@ -0,0 +1,102 @@
+# Copyright 2014, eBay Inc.
+# Copyright 2014, OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from cinder.openstack.common import log as logging
+from cinder.openstack.common.scheduler import filters
+from cinder.openstack.common import uuidutils
+from cinder.volume import api as volume
+
+LOG = logging.getLogger(__name__)
+
+
+class AffinityFilter(filters.BaseHostFilter):
+    def __init__(self):
+        self.volume_api = volume.API()
+
+
+class DifferentBackendFilter(AffinityFilter):
+    """Schedule volume on a different back-end from a set of volumes."""
+
+    def host_passes(self, host_state, filter_properties):
+        context = filter_properties['context']
+        scheduler_hints = filter_properties.get('scheduler_hints') or {}
+
+        affinity_uuids = scheduler_hints.get('different_host', [])
+
+        # Scheduler hint verification: affinity_uuids can be a list of uuids
+        # or a single uuid.  The check here makes sure every string in the
+        # list looks like a uuid; otherwise this filter refuses to pass the
+        # host.  Note that the filter does *NOT* silently ignore a non-uuid
+        # string: it is better to fail the request than to serve it wrong.
+        if isinstance(affinity_uuids, list):
+            for uuid in affinity_uuids:
+                if uuidutils.is_uuid_like(uuid):
+                    continue
+                else:
+                    return False
+        elif uuidutils.is_uuid_like(affinity_uuids):
+            affinity_uuids = [affinity_uuids]
+        else:
+            # Neither a list nor a uuid-like string; don't pass it to the
+            # DB query to avoid potential risk.
+            return False
+
+        if affinity_uuids:
+            return not self.volume_api.get_all(
+                context, filters={'host': host_state.host,
+                                  'id': affinity_uuids,
+                                  'deleted': False})
+
+        # With no different_host key
+        return True
+
+
+class SameBackendFilter(AffinityFilter):
+    """Schedule volume on the same back-end as another volume."""
+
+    def host_passes(self, host_state, filter_properties):
+        context = filter_properties['context']
+        scheduler_hints = filter_properties.get('scheduler_hints') or {}
+
+        affinity_uuids = scheduler_hints.get('same_host', [])
+
+        # Scheduler hint verification: affinity_uuids can be a list of uuids
+        # or a single uuid.  The check here makes sure every string in the
+        # list looks like a uuid; otherwise this filter refuses to pass the
+        # host.  Note that the filter does *NOT* silently ignore a non-uuid
+        # string: it is better to fail the request than to serve it wrong.
+        if isinstance(affinity_uuids, list):
+            for uuid in affinity_uuids:
+                if uuidutils.is_uuid_like(uuid):
+                    continue
+                else:
+                    return False
+        elif uuidutils.is_uuid_like(affinity_uuids):
+            affinity_uuids = [affinity_uuids]
+        else:
+            # Neither a list nor a uuid-like string; don't pass it to the
+            # DB query to avoid potential risk.
+            return False
+
+        if affinity_uuids:
+            return self.volume_api.get_all(
+                context, filters={'host': host_state.host,
+                                  'id': affinity_uuids,
+                                  'deleted': False})
+
+        # With no same_host key
+        return True
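
An illustrative sketch of how the scheduler side exercises one of these
filters; host_state and admin_context below stand in for the HostState and
RequestContext objects the filter scheduler passes in (this mirrors the
tests added below rather than describing new behaviour):

    filt = DifferentBackendFilter()
    filter_properties = {
        'context': admin_context,
        'scheduler_hints': {'different_host': ['<existing-volume-uuid>']},
    }
    # True when no volume from the hint list lives on host_state.host,
    # i.e. the host remains a scheduling candidate.
    passes = filt.host_passes(host_state, filter_properties)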
diff --git a/cinder/tests/scheduler/test_host_filters.py b/cinder/tests/scheduler/test_host_filters.py
index 231943aae2567eb84cdccf69cb0d6a85b0191995..11c4685f40436b9932a78b55c8db5dec245932a2 100644 (file)
--- a/cinder/tests/scheduler/test_host_filters.py
+++ b/cinder/tests/scheduler/test_host_filters.py
@@ -18,10 +18,12 @@ Tests For Scheduler Host Filters.
 import mock
 
 from cinder import context
+from cinder import db
 from cinder.openstack.common import jsonutils
 from cinder.openstack.common.scheduler import filters
 from cinder import test
 from cinder.tests.scheduler import fakes
+from cinder.tests import utils
 
 
 class HostFiltersTestCase(test.TestCase):
@@ -90,3 +92,171 @@ class HostFiltersTestCase(test.TestCase):
                                     'updated_at': None,
                                     'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+    @mock.patch('cinder.utils.service_is_up')
+    def test_affinity_different_filter_passes(self, _mock_serv_is_up):
+        _mock_serv_is_up.return_value = True
+        filt_cls = self.class_map['DifferentBackendFilter']()
+        service = {'disabled': False}
+        host = fakes.FakeHostState('host2',
+                                   {'free_capacity_gb': '1000',
+                                    'updated_at': None,
+                                    'service': service})
+        volume = utils.create_volume(self.context, host='host1')
+        vol_id = volume.id
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': {
+            'different_host': [vol_id], }}
+
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_different_filter_no_list_passes(self):
+        filt_cls = self.class_map['DifferentBackendFilter']()
+        host = fakes.FakeHostState('host2', {})
+        volume = utils.create_volume(self.context, host='host2')
+        vol_id = volume.id
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': {
+            'different_host': vol_id}}
+
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_different_filter_fails(self):
+        filt_cls = self.class_map['DifferentBackendFilter']()
+        host = fakes.FakeHostState('host1', {})
+        volume = utils.create_volume(self.context, host='host1')
+        vol_id = volume.id
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': {
+            'different_host': [vol_id], }}
+
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_different_filter_handles_none(self):
+        filt_cls = self.class_map['DifferentBackendFilter']()
+        host = fakes.FakeHostState('host1', {})
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': None}
+
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_different_filter_handles_deleted_instance(self):
+        filt_cls = self.class_map['DifferentBackendFilter']()
+        host = fakes.FakeHostState('host1', {})
+        volume = utils.create_volume(self.context, host='host1')
+        vol_id = volume.id
+        db.volume_destroy(utils.get_test_admin_context(), vol_id)
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': {
+            'different_host': [vol_id], }}
+
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_different_filter_fail_nonuuid_hint(self):
+        filt_cls = self.class_map['DifferentBackendFilter']()
+        host = fakes.FakeHostState('host1', {})
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': {
+            'different_host': "NOT-a-valid-UUID", }}
+
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_different_filter_handles_multiple_uuids(self):
+        filt_cls = self.class_map['DifferentBackendFilter']()
+        host = fakes.FakeHostState('host1', {})
+        volume1 = utils.create_volume(self.context, host='host2')
+        vol_id1 = volume1.id
+        volume2 = utils.create_volume(self.context, host='host3')
+        vol_id2 = volume2.id
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': {
+            'different_host': [vol_id1, vol_id2], }}
+
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_different_filter_handles_invalid_uuids(self):
+        filt_cls = self.class_map['DifferentBackendFilter']()
+        host = fakes.FakeHostState('host1', {})
+        volume = utils.create_volume(self.context, host='host2')
+        vol_id = volume.id
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': {
+            'different_host': [vol_id, "NOT-a-valid-UUID"], }}
+
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_same_filter_no_list_passes(self):
+        filt_cls = self.class_map['SameBackendFilter']()
+        host = fakes.FakeHostState('host1', {})
+        volume = utils.create_volume(self.context, host='host1')
+        vol_id = volume.id
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': {
+            'same_host': vol_id}}
+
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_same_filter_passes(self):
+        filt_cls = self.class_map['SameBackendFilter']()
+        host = fakes.FakeHostState('host1', {})
+        volume = utils.create_volume(self.context, host='host1')
+        vol_id = volume.id
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': {
+            'same_host': [vol_id], }}
+
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_same_filter_fails(self):
+        filt_cls = self.class_map['SameBackendFilter']()
+        host = fakes.FakeHostState('host1', {})
+        volume = utils.create_volume(self.context, host='host2')
+        vol_id = volume.id
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': {
+            'same_host': [vol_id], }}
+
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_same_filter_handles_none(self):
+        filt_cls = self.class_map['SameBackendFilter']()
+        host = fakes.FakeHostState('host1', {})
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': None}
+
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_same_filter_handles_deleted_instance(self):
+        filt_cls = self.class_map['SameBackendFilter']()
+        host = fakes.FakeHostState('host1', {})
+        volume = utils.create_volume(self.context, host='host2')
+        vol_id = volume.id
+        db.volume_destroy(utils.get_test_admin_context(), vol_id)
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': {
+            'same_host': [vol_id], }}
+
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+    def test_affinity_same_filter_fail_nonuuid_hint(self):
+        filt_cls = self.class_map['SameBackendFilter']()
+        host = fakes.FakeHostState('host1', {})
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': {
+            'same_host': "NOT-a-valid-UUID", }}
+
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
diff --git a/setup.cfg b/setup.cfg
index 3d47126b1d8b9b3b330009bcb2813b89288a6f2d..82d201f196e922a3e27d6667a972ef7ec445fd9a 100644 (file)
--- a/setup.cfg
+++ b/setup.cfg
@@ -42,8 +42,10 @@ cinder.scheduler.filters =
     AvailabilityZoneFilter = cinder.openstack.common.scheduler.filters.availability_zone_filter:AvailabilityZoneFilter
     CapabilitiesFilter = cinder.openstack.common.scheduler.filters.capabilities_filter:CapabilitiesFilter
     CapacityFilter = cinder.scheduler.filters.capacity_filter:CapacityFilter
+    DifferentBackendFilter = cinder.scheduler.filters.affinity_filter:DifferentBackendFilter
     JsonFilter = cinder.openstack.common.scheduler.filters.json_filter:JsonFilter
     RetryFilter = cinder.openstack.common.scheduler.filters.ignore_attempted_hosts_filter:IgnoreAttemptedHostsFilter
+    SameBackendFilter = cinder.scheduler.filters.affinity_filter:SameBackendFilter
 cinder.scheduler.weights =
     AllocatedCapacityWeigher = cinder.scheduler.weights.capacity:AllocatedCapacityWeigher
     CapacityWeigher = cinder.scheduler.weights.capacity:CapacityWeigher
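
With these entry points registered, the new filters become discoverable by
the scheduler's filter handler and can then be enabled through the
scheduler's filter list in cinder.conf (typically the
scheduler_default_filters option).  A hedged sketch for checking the
registration after installation, using the standard pkg_resources API:

    import pkg_resources

    for ep in pkg_resources.iter_entry_points('cinder.scheduler.filters'):
        print("%s -> %s" % (ep.name, ep.module_name))
    # Expected to list DifferentBackendFilter and SameBackendFilter among
    # the available scheduler filters.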