review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Drop Chance/SimpleScheduler Implementation
author Zhiteng Huang <zhithuang@ebaysf.com>
Thu, 2 Jan 2014 07:44:30 +0000 (15:44 +0800)
committer Zhiteng Huang <zhithuang@ebaysf.com>
Thu, 9 Jan 2014 10:29:27 +0000 (18:29 +0800)
This patch removes the implementations of ChanceScheduler and SimpleScheduler,
as previous changes have ensured they are internally replaced by
FilterScheduler.

The "max_gigabytes" config option is deprecated and will leave it like that
for one more release before we can remove it.

DocImpact: "ChanceScheduler and SimpleScheduler have been deprecated and
their implementations have been removed from Cinder."

Implement bp: deprecate-chance-and-simple-schedulers

Change-Id: Ifb1cb25e3bb4cdf26fa3283336b83fce5c97141e
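
For reference, the module docstring added to cinder/scheduler/simple.py below
spells out the FilterScheduler settings that reproduce each removed scheduler.
A minimal cinder.conf sketch based on that docstring (option values copied
from it; the comments are illustrative and not part of the patch):

  # ChanceScheduler equivalent: filter hosts as usual, then pick one at random
  scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler
  scheduler_default_weighers = 'ChanceWeigher'

  # SimpleScheduler equivalent: prefer the back-end with the least allocated
  # capacity (the negative multiplier inverts AllocatedCapacityWeigher)
  scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler
  scheduler_default_weighers = 'AllocatedCapacityWeigher'
  allocated_capacity_weight_multiplier = -1.0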

cinder/scheduler/chance.py [deleted file]
cinder/scheduler/driver.py
cinder/scheduler/simple.py
cinder/tests/scheduler/test_scheduler.py
doc/source/devref/scheduler.rst
etc/cinder/cinder.conf.sample

diff --git a/cinder/scheduler/chance.py b/cinder/scheduler/chance.py
deleted file mode 100644 (file)
index 06b101a..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (c) 2010 OpenStack Foundation
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Chance (Random) Scheduler implementation
-"""
-
-import random
-
-from oslo.config import cfg
-
-from cinder import exception
-from cinder.scheduler import driver
-
-
-CONF = cfg.CONF
-
-
-class ChanceScheduler(driver.Scheduler):
-    """Implements Scheduler as a random node selector."""
-
-    def _filter_hosts(self, request_spec, hosts, **kwargs):
-        """Filter a list of hosts based on request_spec."""
-
-        filter_properties = kwargs.get('filter_properties', {})
-        if not filter_properties:
-            filter_properties = {}
-        ignore_hosts = filter_properties.get('ignore_hosts', [])
-        hosts = [host for host in hosts if host not in ignore_hosts]
-        return hosts
-
-    def _get_weighted_candidates(self, context, topic, request_spec, **kwargs):
-        """Returns a list of the available hosts."""
-
-        elevated = context.elevated()
-        hosts = self.hosts_up(elevated, topic)
-        if not hosts:
-            msg = _("Is the appropriate service running?")
-            raise exception.NoValidHost(reason=msg)
-
-        return self._filter_hosts(request_spec, hosts, **kwargs)
-
-    def _choose_host_from_list(self, hosts):
-        return hosts[int(random.random() * len(hosts))]
-
-    def _schedule(self, context, topic, request_spec, **kwargs):
-        """Picks a host that is up at random."""
-        hosts = self._get_weighted_candidates(context, topic,
-                                              request_spec, **kwargs)
-        if not hosts:
-            msg = _("Could not find another host")
-            raise exception.NoValidHost(reason=msg)
-        return self._choose_host_from_list(hosts)
-
-    def schedule_create_volume(self, context, request_spec, filter_properties):
-        """Picks a host that is up at random."""
-        topic = CONF.volume_topic
-        host = self._schedule(context, topic, request_spec,
-                              filter_properties=filter_properties)
-        volume_id = request_spec['volume_id']
-        snapshot_id = request_spec['snapshot_id']
-        image_id = request_spec['image_id']
-
-        updated_volume = driver.volume_update_db(context, volume_id, host)
-        self.volume_rpcapi.create_volume(context, updated_volume, host,
-                                         request_spec, filter_properties,
-                                         snapshot_id=snapshot_id,
-                                         image_id=image_id)
-
-    def host_passes_filters(self, context, host, request_spec,
-                            filter_properties):
-        """Check if the specified host passes the filters."""
-        weighed_hosts = self._get_weighted_candidates(
-            context,
-            CONF.volume_topic,
-            request_spec,
-            filter_properties=filter_properties)
-
-        for weighed_host in weighed_hosts:
-            if weighed_host == host:
-                elevated = context.elevated()
-                host_states = self.host_manager.get_all_host_states(elevated)
-                for host_state in host_states:
-                    if host_state.host == host:
-                        return host_state
-
-        msg = (_('cannot place volume %(id)s on %(host)s')
-               % {'id': request_spec['volume_id'], 'host': host})
-        raise exception.NoValidHost(reason=msg)
-
-    def find_retype_host(self, context, request_spec, filter_properties,
-                         migration_policy='never'):
-        """Find a host that can accept the volume with its new type."""
-        current_host = request_spec['volume_properties']['host']
-
-        # The volume already exists on this host, and so we shouldn't check if
-        # it can accept the volume again.
-        filter_properties['vol_exists_on'] = current_host
-
-        weighed_hosts = self._get_weighted_candidates(
-            context,
-            CONF.volume_topic,
-            request_spec,
-            filter_properties=filter_properties)
-        if not weighed_hosts:
-            msg = (_('No valid hosts for volume %(id)s with type %(type)s')
-                   % {'id': request_spec['volume_id'],
-                      'type': request_spec['volume_type']})
-            raise exception.NoValidHost(reason=msg)
-
-        target_host = None
-        for weighed_host in weighed_hosts:
-            if weighed_host == current_host:
-                target_host = current_host
-
-        if migration_policy == 'never' and target_host is None:
-            msg = (_('Current host not valid for volume %(id)s with type '
-                     '%(type)s, migration not allowed')
-                   % {'id': request_spec['volume_id'],
-                      'type': request_spec['volume_type']})
-            raise exception.NoValidHost(reason=msg)
-
-        if not target_host:
-            target_host = self._choose_host_from_list(weighed_hosts)
-
-        elevated = context.elevated()
-        host_states = self.host_manager.get_all_host_states(elevated)
-        for host_state in host_states:
-            if host_state.host == target_host:
-                return (host_state, migration_policy)
-
-        # NOTE(avishay):We should never get here, but raise just in case
-        msg = (_('No host_state for selected host %s') % target_host)
-        raise exception.NoValidHost(reason=msg)
diff --git a/cinder/scheduler/driver.py b/cinder/scheduler/driver.py
index 9eaf746fefd37919b73f8ab37ed004a78efcba49..b64f776467f065407c190a531c934164545bea59 100644 (file)
@@ -24,7 +24,6 @@ from oslo.config import cfg
 from cinder import db
 from cinder.openstack.common import importutils
 from cinder.openstack.common import timeutils
-from cinder import utils
 from cinder.volume import rpcapi as volume_rpcapi
 
 
@@ -65,14 +64,6 @@ class Scheduler(object):
                                                       host,
                                                       capabilities)
 
-    def hosts_up(self, context, topic):
-        """Return the list of hosts that have a running service for topic."""
-
-        services = db.service_get_all_by_topic(context, topic)
-        return [service['host']
-                for service in services
-                if utils.service_is_up(service)]
-
     def host_passes_filters(self, context, volume_id, host, filter_properties):
         """Check if the specified host passes the filters."""
         raise NotImplementedError(_("Must implement host_passes_filters"))
diff --git a/cinder/scheduler/simple.py b/cinder/scheduler/simple.py
index 7ed512123f78ff940e6ecdd110f067206fa1722b..2dd59015ed0fa71e052a592b9335ad14aed34ec6 100644 (file)
 #    under the License.
 
 """
-Simple Scheduler
-"""
-
-from oslo.config import cfg
+Chance and Simple Scheduler are DEPRECATED.
 
-from cinder import db
-from cinder import exception
-from cinder.scheduler import chance
-from cinder import utils
+Chance and Simple scheduler implementations have been deprecated, as their
+functionality can be provided by the FilterScheduler; here's how:
 
+If one would like the scheduler to randomly pick an available back-end
+(as ChanceScheduler did), use FilterScheduler with the following combination
+of filters and weighers.
 
-simple_scheduler_opts = [
-    cfg.IntOpt("max_gigabytes",
-               default=10000,
-               help="maximum number of volume gigabytes to allow per host"), ]
-
-CONF = cfg.CONF
-CONF.register_opts(simple_scheduler_opts)
+  scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler
+  scheduler_default_filters = ['AvailabilityZoneFilter', 'CapacityFilter',
+                               'CapabilitiesFilter']
+  scheduler_default_weighers = 'ChanceWeigher'
 
+If one prefers the scheduler to pick the back-end with the most available
+space it can see (as SimpleScheduler did), use the following combination
+of filters and weighers with FilterScheduler.
 
-class SimpleScheduler(chance.ChanceScheduler):
-    """Implements Naive Scheduler that tries to find least loaded host."""
+  scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler
+  scheduler_default_filters = ['AvailabilityZoneFilter', 'CapacityFilter',
+                               'CapabilitiesFilter']
+  scheduler_default_weighers = 'AllocatedCapacityWeigher'
+  allocated_capacity_weight_multiplier = -1.0
 
-    def _get_weighted_candidates(self, context, topic, request_spec, **kwargs):
-        """Picks a host that is up and has the fewest volumes."""
-        elevated = context.elevated()
+Setting or leaving the config option
+'scheduler_driver=cinder.scheduler.chance.ChanceScheduler' or
+'scheduler_driver=cinder.scheduler.simple.SimpleScheduler' in cinder.conf
+works exactly as described above, since the scheduler manager has been
+updated to perform the substitution internally and transparently for users.
 
-        volume_id = request_spec.get('volume_id')
-        snapshot_id = request_spec.get('snapshot_id')
-        image_id = request_spec.get('image_id')
-        volume_properties = request_spec.get('volume_properties')
-        volume_size = volume_properties.get('size')
-        availability_zone = volume_properties.get('availability_zone')
-        filter_properties = kwargs.get('filter_properties', {})
-
-        zone, host = None, None
-        if availability_zone:
-            zone, _x, host = availability_zone.partition(':')
-        if host and context.is_admin:
-            service = db.service_get_by_args(elevated, host, topic)
-            if not utils.service_is_up(service):
-                raise exception.WillNotSchedule(host=host)
-            return [host]
+With that, FilterScheduler behaves mostly the same as Chance/SimpleScheduler,
+with the added benefits of supporting volume types, volume encryption, and QoS.
+"""
 
-        candidates = []
-        results = db.service_get_all_volume_sorted(elevated)
-        if zone:
-            results = [(s, gigs) for (s, gigs) in results
-                       if s['availability_zone'] == zone]
-        for result in results:
-            (service, volume_gigabytes) = result
-            no_skip = service['host'] != filter_properties.get('vol_exists_on')
-            if no_skip and volume_gigabytes + volume_size > CONF.max_gigabytes:
-                continue
-            if utils.service_is_up(service) and not service['disabled']:
-                candidates.append(service['host'])
+from oslo.config import cfg
 
-        if candidates:
-            return candidates
-        else:
-            msg = _("No service with adequate space or no service running")
-            raise exception.NoValidHost(reason=msg)
+simple_scheduler_opts = [
+    cfg.IntOpt("max_gigabytes",
+               default=10000,
+               help="This config option has been deprecated along with the "
+                    "SimpleScheduler.  The new scheduler is able to gather "
+                    "capacity information for each host, so setting the "
+                    "maximum number of volume gigabytes per host is no "
+                    "longer needed.  It is safe to remove this option "
+                    "from cinder.conf."), ]
 
-    def _choose_host_from_list(self, hosts):
-        return hosts[0]
+CONF = cfg.CONF
+CONF.register_opts(simple_scheduler_opts)
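
The docstring's note that the scheduler manager "has been updated to do the
trick internally" refers to an earlier patch that is not part of this diff.
As a rough illustration only (the mapping and helper name below are
assumptions, not Cinder's actual code), that kind of transparent substitution
can be done where the configured driver is loaded:

  # Illustrative sketch -- not part of this patch; names are assumptions.
  from cinder.openstack.common import importutils

  _DEPRECATED_SCHEDULERS = {
      'cinder.scheduler.chance.ChanceScheduler':
          'cinder.scheduler.filter_scheduler.FilterScheduler',
      'cinder.scheduler.simple.SimpleScheduler':
          'cinder.scheduler.filter_scheduler.FilterScheduler',
  }

  def _load_scheduler_driver(name):
      """Instantiate the configured scheduler driver, mapping the deprecated
      class paths to FilterScheduler so old cinder.conf settings keep working.
      """
      return importutils.import_object(_DEPRECATED_SCHEDULERS.get(name, name))
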
diff --git a/cinder/tests/scheduler/test_scheduler.py b/cinder/tests/scheduler/test_scheduler.py
index 33491e74bde1e5620d998afa85a57fe1925c0034..42c7180eeade4d0462fd5b38bd795a0691a5ae94 100644 (file)
@@ -212,23 +212,6 @@ class SchedulerTestCase(test.TestCase):
         _mock_update_cap.assert_called_once_with(service_name, host,
                                                  capabilities)
 
-    @mock.patch('cinder.db.service_get_all_by_topic')
-    @mock.patch('cinder.utils.service_is_up')
-    def test_hosts_up(self, _mock_serv_is_up, _mock_serv_get_all_by_topic):
-        service1 = {'host': 'host1', 'disabled': False}
-        service2 = {'host': 'host2', 'disabled': False}
-        services = [service1, service2]
-
-        def fake_serv_is_up(service):
-            return service['host'] is 'host2'
-
-        _mock_serv_get_all_by_topic.return_value = services
-        _mock_serv_is_up.side_effect = fake_serv_is_up
-        result = self.driver.hosts_up(self.context, self.topic)
-        self.assertEqual(result, ['host2'])
-        _mock_serv_get_all_by_topic.assert_called_once_with(self.context,
-                                                            self.topic)
-
 
 class SchedulerDriverBaseTestCase(SchedulerTestCase):
     """Test cases for base scheduler driver class methods
diff --git a/doc/source/devref/scheduler.rst b/doc/source/devref/scheduler.rst
index 6e531fb1ae982065f20b51fa1f9274d48e6af1de..e2b7e5f16ec07f0b70023520ebe74a54f1e18198 100644 (file)
@@ -38,10 +38,10 @@ The :mod:`cinder.scheduler.driver` Module
     :show-inheritance:
 
 
-The :mod:`cinder.scheduler.simple` Driver
+The :mod:`cinder.scheduler.filter_scheduler` Driver
 -----------------------------------------
 
-.. automodule:: cinder.scheduler.simple
+.. automodule:: cinder.scheduler.filter_scheduler
     :noindex:
     :members:
     :undoc-members:
diff --git a/etc/cinder/cinder.conf.sample b/etc/cinder/cinder.conf.sample
index 60813351148c33cf9c21a0e1c41ae89e8f745f67..6ed923a1ded3b53a7e3afc2d0eb288989e6f1f4a 100644 (file)
 # Options defined in cinder.scheduler.simple
 #
 
-# maximum number of volume gigabytes to allow per host
-# (integer value)
+# This config option has been deprecated along with the
+# SimpleScheduler.  The new scheduler is able to gather capacity
+# information for each host, so setting the maximum number
+# of volume gigabytes per host is no longer needed.  It is safe
+# to remove this option from cinder.conf. (integer value)
 #max_gigabytes=10000