--- /dev/null
+# -*- coding: utf-8 -*-
+# Copyright 2014, Adrien Vergé <adrien.verge@numergy.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from cinder.compute import nova
+from cinder import exception
+from cinder.i18n import _, _LW
+from cinder.openstack.common import log as logging
+from cinder.openstack.common.scheduler import filters
+from cinder.openstack.common import uuidutils
+from cinder.volume import utils as volume_utils
+
+
+LOG = logging.getLogger(__name__)
+
+HINT_KEYWORD = 'local_to_instance'
+INSTANCE_HOST_PROP = 'OS-EXT-SRV-ATTR:host'
+REQUESTS_TIMEOUT = 5
+
+
+class InstanceLocalityFilter(filters.BaseHostFilter):
+ """Schedule volume on the same host as a given instance.
+
+ This filter enables selection of a storage back-end located on the host
+ where the instance's hypervisor is running. This provides data locality:
+ the instance and the volume are located on the same physical machine.
+
+ In order to work:
+    - The Extended Server Attributes extension needs to be active in Nova (it
+      is enabled by default), so that the 'OS-EXT-SRV-ATTR:host' property is
+      returned when requesting instance info.
+ - Either an account with privileged rights for Nova must be configured in
+ Cinder configuration (see 'os_privileged_user_name'), or the user making
+ the call needs to have sufficient rights (see
+ 'extended_server_attributes' in Nova policy).
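+
+    A typical use, once the filter is enabled in the scheduler configuration
+    (see 'scheduler_default_filters'), is to pass the hint at volume creation
+    time, e.g. with the cinder CLI:
+
+        cinder create --hint local_to_instance=<instance-uuid> <size-in-GB>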
+ """
+
+ def __init__(self):
+ # Cache Nova API answers directly into the Filter object.
+ # Since a BaseHostFilter instance lives only during the volume's
+ # scheduling, the cache is re-created for every new volume creation.
+ self._cache = {}
+ super(InstanceLocalityFilter, self).__init__()
+
+ def _nova_has_extended_server_attributes(self, context):
+        """Check the presence of the Extended Server Attributes extension.
+
+        Find out whether the Extended Server Attributes extension is activated
+        in Nova. The result is cached so that Nova is queried only once.
+ """
+
+ if not hasattr(self, '_nova_ext_srv_attr'):
+ self._nova_ext_srv_attr = nova.API().has_extension(
+ context, 'ExtendedServerAttributes', timeout=REQUESTS_TIMEOUT)
+
+ return self._nova_ext_srv_attr
+
+ def host_passes(self, host_state, filter_properties):
+ context = filter_properties['context']
+ host = volume_utils.extract_host(host_state.host, 'host')
+
+ scheduler_hints = filter_properties.get('scheduler_hints') or {}
+ instance_uuid = scheduler_hints.get(HINT_KEYWORD, None)
+
+        # Without the 'local_to_instance' hint, this filter accepts all hosts.
+ if not instance_uuid:
+ return True
+
+ if not uuidutils.is_uuid_like(instance_uuid):
+ raise exception.InvalidUUID(uuid=instance_uuid)
+
+ # TODO(adrienverge): Currently it is not recommended to allow instance
+ # migrations for hypervisors where this hint will be used. In case of
+ # instance migration, a previously locally-created volume will not be
+        # automatically migrated. Also, if an instance is migrated while the
+        # volume is being scheduled, the result is unpredictable. A future
+ # enhancement would be to subscribe to Nova migration events (e.g. via
+ # Ceilometer).
+
+        # First, look up already-known information in the local cache.
+ if instance_uuid in self._cache:
+ return self._cache[instance_uuid] == host
+
+ if not self._nova_has_extended_server_attributes(context):
+            LOG.warning(_LW('Hint "%s" dropped because '
+                            'ExtendedServerAttributes is not active in '
+                            'Nova.'), HINT_KEYWORD)
+ raise exception.CinderException(_('Hint "%s" not supported.') %
+ HINT_KEYWORD)
+
+ server = nova.API().get_server(context, instance_uuid,
+ privileged_user=True,
+ timeout=REQUESTS_TIMEOUT)
+
+ if not hasattr(server, INSTANCE_HOST_PROP):
+ LOG.warning(_LW('Hint "%s" dropped because Nova did not return '
+ 'enough information. Either Nova policy needs to '
+ 'be changed or a privileged account for Nova '
+ 'should be specified in conf.'), HINT_KEYWORD)
+ raise exception.CinderException(_('Hint "%s" not supported.') %
+ HINT_KEYWORD)
+
+ self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP)
+
+        # Match if the given instance is hosted on this host.
+ return self._cache[instance_uuid] == host
import mock
from oslo.serialization import jsonutils
+from requests import exceptions as request_exceptions
+from cinder.compute import nova
from cinder import context
from cinder import db
+from cinder import exception
from cinder.openstack.common.scheduler import filters
from cinder import test
from cinder.tests.scheduler import fakes
def setUp(self):
super(HostFiltersTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
- self.json_query = jsonutils.dumps(
- ['and',
- ['>=', '$free_capacity_gb', 1024],
- ['>=', '$total_capacity_gb', 10 * 1024]])
# This has a side effect of testing 'get_filter_classes'
# when specifying a method (in this case, our standard filters)
filter_handler = filters.HostFilterHandler('cinder.scheduler.filters')
for cls in classes:
self.class_map[cls.__name__] = cls
+
+class CapacityFilterTestCase(HostFiltersTestCase):
+ def setUp(self):
+ super(CapacityFilterTestCase, self).setUp()
+ self.json_query = jsonutils.dumps(
+ ['and',
+ ['>=', '$free_capacity_gb', 1024],
+ ['>=', '$total_capacity_gb', 10 * 1024]])
+
@mock.patch('cinder.utils.service_is_up')
- def test_capacity_filter_passes(self, _mock_serv_is_up):
+ def test_filter_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
- def test_capacity_filter_current_host_passes(self, _mock_serv_is_up):
+ def test_filter_current_host_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100, 'vol_exists_on': 'host1'}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
- def test_capacity_filter_fails(self, _mock_serv_is_up):
+ def test_filter_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
- def test_capacity_filter_passes_infinite(self, _mock_serv_is_up):
+ def test_filter_passes_infinite(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
- def test_capacity_filter_passes_unknown(self, _mock_serv_is_up):
+ def test_filter_passes_unknown(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+class AffinityFilterTestCase(HostFiltersTestCase):
@mock.patch('cinder.utils.service_is_up')
- def test_affinity_different_filter_passes(self, _mock_serv_is_up):
+ def test_different_filter_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['DifferentBackendFilter']()
service = {'disabled': False}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
- def test_affinity_different_filter_legacy_volume_hint_passes(
+ def test_different_filter_legacy_volume_hint_passes(
self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['DifferentBackendFilter']()
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_affinity_different_filter_non_list_fails(self):
+ def test_different_filter_non_list_fails(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host2', {})
volume = utils.create_volume(self.context, host='host2')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_affinity_different_filter_fails(self):
+ def test_different_filter_fails(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host1')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_affinity_different_filter_handles_none(self):
+ def test_different_filter_handles_none(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_affinity_different_filter_handles_deleted_instance(self):
+ def test_different_filter_handles_deleted_instance(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host1')
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_affinity_different_filter_fail_nonuuid_hint(self):
+ def test_different_filter_fail_nonuuid_hint(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_affinity_different_filter_handles_multiple_uuids(self):
+ def test_different_filter_handles_multiple_uuids(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume1 = utils.create_volume(self.context, host='host1:pool1')
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_affinity_different_filter_handles_invalid_uuids(self):
+ def test_different_filter_handles_invalid_uuids(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host2')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_affinity_same_filter_no_list_passes(self):
+ def test_same_filter_no_list_passes(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host1')
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_affinity_same_filter_passes(self):
+ def test_same_filter_passes(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume = utils.create_volume(self.context, host='host1#pool0')
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_affinity_same_filter_legacy_vol_fails(self):
+ def test_same_filter_legacy_vol_fails(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume = utils.create_volume(self.context, host='host1')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_affinity_same_filter_fails(self):
+ def test_same_filter_fails(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume = utils.create_volume(self.context, host='host1#pool1')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_affinity_same_filter_vol_list_pass(self):
+ def test_same_filter_vol_list_pass(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume1 = utils.create_volume(self.context, host='host1')
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_affinity_same_filter_handles_none(self):
+ def test_same_filter_handles_none(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_affinity_same_filter_handles_deleted_instance(self):
+ def test_same_filter_handles_deleted_instance(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host2')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_affinity_same_filter_fail_nonuuid_hint(self):
+ def test_same_filter_fail_nonuuid_hint(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_driver_filter_passing_function(self):
+
+class DriverFilterTestCase(HostFiltersTestCase):
+ def test_passing_function(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
- def test_driver_filter_failing_function(self):
+ def test_failing_function(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
- def test_driver_filter_no_filter_function(self):
+ def test_no_filter_function(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
- def test_driver_filter_not_implemented(self):
+ def test_not_implemented(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
- def test_driver_filter_no_volume_extra_specs(self):
+ def test_no_volume_extra_specs(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
- def test_driver_filter_volume_backend_name_different(self):
+ def test_volume_backend_name_different(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
- def test_driver_filter_function_extra_spec_replacement(self):
+ def test_function_extra_spec_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
- def test_driver_filter_function_stats_replacement(self):
+ def test_function_stats_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
- def test_driver_filter_function_volume_replacement(self):
+ def test_function_volume_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
- def test_driver_filter_function_qos_spec_replacement(self):
+ def test_function_qos_spec_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
- def test_driver_filter_function_exception_caught(self):
+ def test_function_exception_caught(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
- def test_driver_filter_function_empty_qos(self):
+ def test_function_empty_qos(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
- def test_driver_filter_capabilities(self):
+ def test_capabilities(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
}
}
- self.assertTrue(filt_cls.host_passes(host1, filter_properties))
\ No newline at end of file
+ self.assertTrue(filt_cls.host_passes(host1, filter_properties))
+
+
+class InstanceLocalityFilterTestCase(HostFiltersTestCase):
+ def setUp(self):
+ super(InstanceLocalityFilterTestCase, self).setUp()
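+        # Provide a Nova endpoint and a fake service catalog so that
+        # cinder.compute.nova can build a client without querying Keystone.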
+ self.override_config('nova_endpoint_template',
+ 'http://novahost:8774/v2/%(project_id)s')
+ self.context.service_catalog = \
+ [{'type': 'compute', 'name': 'nova', 'endpoints':
+ [{'publicURL': 'http://novahost:8774/v2/e3f0833dc08b4cea'}]},
+ {'type': 'identity', 'name': 'keystone', 'endpoints':
+ [{'publicURL': 'http://keystonehost:5000/v2.0'}]}]
+
+ @mock.patch('cinder.compute.nova.novaclient')
+ def test_same_host(self, _mock_novaclient):
+ _mock_novaclient.return_value = fakes.FakeNovaClient()
+ filt_cls = self.class_map['InstanceLocalityFilter']()
+ host = fakes.FakeHostState('host1', {})
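+        # The fake Nova client reports the new instance as hosted on 'host1',
+        # i.e. the same host as the one being filtered.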
+ uuid = nova.novaclient().servers.create('host1')
+
+ filter_properties = {'context': self.context,
+ 'scheduler_hints': {'local_to_instance': uuid}}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('cinder.compute.nova.novaclient')
+ def test_different_host(self, _mock_novaclient):
+ _mock_novaclient.return_value = fakes.FakeNovaClient()
+ filt_cls = self.class_map['InstanceLocalityFilter']()
+ host = fakes.FakeHostState('host1', {})
+ uuid = nova.novaclient().servers.create('host2')
+
+ filter_properties = {'context': self.context,
+ 'scheduler_hints': {'local_to_instance': uuid}}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_handles_none(self):
+ filt_cls = self.class_map['InstanceLocalityFilter']()
+ host = fakes.FakeHostState('host1', {})
+
+ filter_properties = {'context': self.context,
+ 'scheduler_hints': None}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_invalid_uuid(self):
+ filt_cls = self.class_map['InstanceLocalityFilter']()
+ host = fakes.FakeHostState('host1', {})
+
+ filter_properties = {'context': self.context,
+ 'scheduler_hints':
+ {'local_to_instance': 'e29b11d4-not-valid-a716'}}
+ self.assertRaises(exception.InvalidUUID,
+ filt_cls.host_passes, host, filter_properties)
+
+ @mock.patch('cinder.compute.nova.novaclient')
+ def test_nova_no_extended_server_attributes(self, _mock_novaclient):
+ _mock_novaclient.return_value = fakes.FakeNovaClient(
+ ext_srv_attr=False)
+ filt_cls = self.class_map['InstanceLocalityFilter']()
+ host = fakes.FakeHostState('host1', {})
+ uuid = nova.novaclient().servers.create('host1')
+
+ filter_properties = {'context': self.context,
+ 'scheduler_hints': {'local_to_instance': uuid}}
+ self.assertRaises(exception.CinderException,
+ filt_cls.host_passes, host, filter_properties)
+
+ @mock.patch('cinder.compute.nova.novaclient')
+ def test_nova_down_does_not_alter_other_filters(self, _mock_novaclient):
+        # Simulate that the Nova API is not available.
+ _mock_novaclient.side_effect = Exception
+
+ filt_cls = self.class_map['InstanceLocalityFilter']()
+ host = fakes.FakeHostState('host1', {})
+
+ filter_properties = {'context': self.context, 'size': 100}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('requests.request')
+ def test_nova_timeout(self, _mock_request):
+        # Simulate an HTTP timeout.
+ _mock_request.side_effect = request_exceptions.Timeout
+
+ filt_cls = self.class_map['InstanceLocalityFilter']()
+ host = fakes.FakeHostState('host1', {})
+
+ filter_properties = \
+ {'context': self.context, 'scheduler_hints':
+ {'local_to_instance': 'e29b11d4-15ef-34a9-a716-598a6f0b5467'}}
+ self.assertRaises(exception.APITimeout,
+ filt_cls.host_passes, host, filter_properties)