review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Allow scheduler pool information to be retrieved
author    Zhiteng Huang <zhithuang@ebaysf.com>
          Mon, 8 Sep 2014 21:42:39 +0000 (14:42 -0700)
committer Zhiteng Huang <zhithuang@ebaysf.com>
          Sat, 20 Sep 2014 18:04:12 +0000 (11:04 -0700)
With pool support added to Cinder, we are now in an awkward situation:
the admin must supply the exact location, including pool information,
for volumes to be managed (imported) or migrated, yet there is no way
to find out which pools a backend exposes other than reading the
scheduler log. That is a poor user experience and therefore a bug from
a UX point of view.

This change adds a new admin API extension that lets the admin fetch
all pool information from the scheduler's cache (memory), closing that
gap for end users.

The extension provides two levels of pool information, pool names only
or names plus capabilities (a client-side sketch follows the URLs
below):

Pool names only:
GET http://CINDER_API_ENDPOINT/v2/TENANT_ID/scheduler-stats/get_pools

Detailed pool info:
GET http://CINDER_API_ENDPOINT/v2/TENANT_ID/scheduler-stats/get_pools?detail=True
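
For illustration, a minimal client-side sketch of querying the new
extension, assuming Keystone token auth and the Python requests
library; the endpoint, tenant ID and token are placeholders, and the
response shapes in the comments follow the view builder and tests in
this change (capability keys vary with what each backend reports):

    import requests

    # Placeholders; substitute a real Cinder endpoint, tenant and admin token.
    BASE = 'http://CINDER_API_ENDPOINT/v2/TENANT_ID'
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}

    # Pool names only, e.g.
    # {"pools": [{"name": "host1@backend1#pool1"}, ...]}
    names = requests.get(BASE + '/scheduler-stats/get_pools',
                         headers=HEADERS).json()

    # Names plus capabilities, e.g.
    # {"pools": [{"name": "host1@backend1#pool1",
    #             "capabilities": {"volume_backend_name": "backend1",
    #                              "total_capacity_gb": 1024,
    #                              "free_capacity_gb": 100, ...}}]}
    detail = requests.get(BASE + '/scheduler-stats/get_pools',
                          params={'detail': 'True'},
                          headers=HEADERS).json()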

Closes-bug: #1364279

Change-Id: I445d4e472c83db2f2d8db414de139c87d09f8fda

12 files changed:
cinder/api/contrib/scheduler_stats.py [new file with mode: 0644]
cinder/api/views/scheduler_stats.py [new file with mode: 0644]
cinder/scheduler/driver.py
cinder/scheduler/filter_scheduler.py
cinder/scheduler/host_manager.py
cinder/scheduler/manager.py
cinder/scheduler/rpcapi.py
cinder/tests/api/contrib/test_scheduler_stats.py [new file with mode: 0644]
cinder/tests/policy.json
cinder/tests/scheduler/test_host_manager.py
cinder/tests/scheduler/test_rpcapi.py
etc/cinder/policy.json

diff --git a/cinder/api/contrib/scheduler_stats.py b/cinder/api/contrib/scheduler_stats.py
new file mode 100644 (file)
index 0000000..9270b02
--- /dev/null
@@ -0,0 +1,70 @@
+# Copyright (c) 2014 eBay Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""The Scheduler Stats extension"""
+
+from cinder.api import extensions
+from cinder.api.openstack import wsgi
+from cinder.api.views import scheduler_stats as scheduler_stats_view
+from cinder.openstack.common import log as logging
+from cinder.scheduler import rpcapi
+
+
+LOG = logging.getLogger(__name__)
+
+
+def authorize(context, action_name):
+    action = 'scheduler_stats:%s' % action_name
+    extensions.extension_authorizer('scheduler', action)(context)
+
+
+class SchedulerStatsController(wsgi.Controller):
+    """The Scheduler Stats controller for the OpenStack API."""
+
+    _view_builder_class = scheduler_stats_view.ViewBuilder
+
+    def __init__(self):
+        self.scheduler_api = rpcapi.SchedulerAPI()
+        super(SchedulerStatsController, self).__init__()
+
+    def get_pools(self, req):
+        """List all active pools in scheduler."""
+        context = req.environ['cinder.context']
+        authorize(context, 'get_pools')
+
+        # TODO(zhiteng) Add filters support
+        detail = req.params.get('detail', False)
+        pools = self.scheduler_api.get_pools(context, filters=None)
+
+        return self._view_builder.pools(req, pools, detail)
+
+
+class Scheduler_stats(extensions.ExtensionDescriptor):
+    """Scheduler stats support."""
+
+    name = "Scheduler_stats"
+    alias = "scheduler-stats"
+    namespace = "http://docs.openstack.org/volume/ext/scheduler-stats/api/v1"
+    updated = "2014-09-07T00:00:00+00:00"
+
+    def get_resources(self):
+        resources = []
+        res = extensions.ResourceExtension(
+            Scheduler_stats.alias,
+            SchedulerStatsController(),
+            collection_actions={"get_pools": "GET"})
+
+        resources.append(res)
+
+        return resources
diff --git a/cinder/api/views/scheduler_stats.py b/cinder/api/views/scheduler_stats.py
new file mode 100644 (file)
index 0000000..1926840
--- /dev/null
@@ -0,0 +1,53 @@
+# Copyright (C) 2014 eBay Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from cinder.api import common
+
+
+class ViewBuilder(common.ViewBuilder):
+    """Model scheduler-stats API responses as a python dictionary."""
+
+    _collection_name = "scheduler-stats"
+
+    def __init__(self):
+        """Initialize view builder."""
+        super(ViewBuilder, self).__init__()
+
+    def summary(self, request, pool):
+        """Detailed view of a single pool."""
+        return {
+            'pool': {
+                'name': pool.get('name'),
+            }
+        }
+
+    def detail(self, request, pool):
+        """Detailed view of a single pool."""
+        return {
+            'pool': {
+                'name': pool.get('name'),
+                'capabilities': pool.get('capabilities'),
+            }
+        }
+
+    def pools(self, request, pools, detail):
+        """Detailed view of a list of pools seen by scheduler."""
+        if detail:
+            plist = [self.detail(request, pool)['pool'] for pool in pools]
+        else:
+            plist = [self.summary(request, pool)['pool'] for pool in pools]
+        pools_dict = dict(pools=plist)
+
+        return pools_dict
diff --git a/cinder/scheduler/driver.py b/cinder/scheduler/driver.py
index d3b2a8dd2d5b7be8b8a11c6d585ff1ae954f721f..7749e3671fc6f7fb19af43f94e4ca1b7eb4541c6 100644 (file)
@@ -98,3 +98,8 @@ class Scheduler(object):
         """Must override schedule method for scheduler to work."""
         raise NotImplementedError(_(
             "Must implement schedule_create_consistencygroup"))
+
+    def get_pools(self, context, filters):
+        """Must override schedule method for scheduler to work."""
+        raise NotImplementedError(_(
+            "Must implement schedule_get_pools"))
diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py
index 28054c0a1e3acd58b1fd31e0f92f382f09d2f55e..521571adee64bba53143622c37187a2f80d7147b 100644 (file)
@@ -168,6 +168,10 @@ class FilterScheduler(driver.Scheduler):
         top_host = self._choose_top_host(weighed_hosts, request_spec)
         return top_host.obj
 
+    def get_pools(self, context, filters):
+        # TODO(zhiteng) Add filters support
+        return self.host_manager.get_pools(context)
+
     def _post_select_populate_filter_properties(self, filter_properties,
                                                 host_state):
         """Add additional information to the filter properties after a host has
diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py
index b0270f20cfea418548d9ba51fceff4e0a0de9b87..4729e235563b1cc09b78427c282cd629353c18a2 100644 (file)
@@ -486,3 +486,18 @@ class HostManager(object):
                 all_pools[pool_key] = pool
 
         return all_pools.itervalues()
+
+    def get_pools(self, context):
+        """Returns a dict of all pools on all hosts HostManager knows about."""
+
+        all_pools = []
+        for host, state in self.host_state_map.items():
+            for key in state.pools:
+                pool = state.pools[key]
+                # use host#pool_name to make sure the key is unique
+                pool_key = vol_utils.append_host(host, pool.pool_name)
+                new_pool = dict(name=pool_key)
+                new_pool.update(dict(capabilities=pool.capabilities))
+                all_pools.append(new_pool)
+
+        return all_pools
diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py
index 9376713840acdb256136e356974c92d845467dbb..6db095b57e9b8be519ceb0b763642363f5912148 100644 (file)
@@ -53,7 +53,7 @@ LOG = logging.getLogger(__name__)
 class SchedulerManager(manager.Manager):
     """Chooses a host to create volumes."""
 
-    RPC_API_VERSION = '1.6'
+    RPC_API_VERSION = '1.7'
 
     target = messaging.Target(version=RPC_API_VERSION)
 
@@ -240,6 +240,10 @@ class SchedulerManager(manager.Manager):
             volume_rpcapi.VolumeAPI().manage_existing(context, volume_ref,
                                                       request_spec.get('ref'))
 
+    def get_pools(self, context, filters=None):
+        """Get active pools from scheduler's cache."""
+        return self.driver.get_pools(context, filters)
+
     def _set_volume_state_and_notify(self, method, updates, context, ex,
                                      request_spec, msg=None):
         # TODO(harlowja): move into a task that just does this later.
diff --git a/cinder/scheduler/rpcapi.py b/cinder/scheduler/rpcapi.py
index b6e7b2b2ac96e44f6562cc0b82a5241b547f5d17..f6cab644b569c47e1aafb566aac4e3b3fac480d7 100644 (file)
@@ -39,6 +39,7 @@ class SchedulerAPI(object):
         1.4 - Add retype method
         1.5 - Add manage_existing method
         1.6 - Add create_consistencygroup method
+        1.7 - Add get_pools method
     '''
 
     RPC_API_VERSION = '1.0'
@@ -47,7 +48,7 @@ class SchedulerAPI(object):
         super(SchedulerAPI, self).__init__()
         target = messaging.Target(topic=CONF.scheduler_topic,
                                   version=self.RPC_API_VERSION)
-        self.client = rpc.get_client(target, version_cap='1.6')
+        self.client = rpc.get_client(target, version_cap='1.7')
 
     def create_consistencygroup(self, ctxt, topic, group_id,
                                 request_spec_list=None,
@@ -114,6 +115,11 @@ class SchedulerAPI(object):
                           request_spec=request_spec_p,
                           filter_properties=filter_properties)
 
+    def get_pools(self, ctxt, filters=None):
+        cctxt = self.client.prepare(version='1.7')
+        return cctxt.call(ctxt, 'get_pools',
+                          filters=filters)
+
     def update_service_capabilities(self, ctxt,
                                     service_name, host,
                                     capabilities):
diff --git a/cinder/tests/api/contrib/test_scheduler_stats.py b/cinder/tests/api/contrib/test_scheduler_stats.py
new file mode 100644 (file)
index 0000000..ca0518b
--- /dev/null
@@ -0,0 +1,110 @@
+# Copyright 2013 eBay Inc.
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+from cinder.api.contrib import scheduler_stats
+from cinder import context
+from cinder import test
+from cinder.tests.api import fakes
+
+
+def schedule_rpcapi_get_pools(self, context, filters=None):
+    all_pools = []
+    pool1 = dict(name='pool1',
+                 capabilities=dict(
+                     total_capacity=1024, free_capacity=100,
+                     volume_backend_name='pool1', reserved_percentage=0,
+                     driver_version='1.0.0', storage_protocol='iSCSI',
+                     QoS_support='False', updated=None))
+    all_pools.append(pool1)
+    pool2 = dict(name='pool2',
+                 capabilities=dict(
+                     total_capacity=512, free_capacity=200,
+                     volume_backend_name='pool2', reserved_percentage=0,
+                     driver_version='1.0.1', storage_protocol='iSER',
+                     QoS_support='True', updated=None))
+    all_pools.append(pool2)
+
+    return all_pools
+
+
+@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools',
+            schedule_rpcapi_get_pools)
+class SchedulerStatsAPITest(test.TestCase):
+    def setUp(self):
+        super(SchedulerStatsAPITest, self).setUp()
+        self.flags(host='fake')
+        self.controller = scheduler_stats.SchedulerStatsController()
+        self.ctxt = context.RequestContext('admin', 'fake', True)
+
+    def test_get_pools_summary(self):
+        req = fakes.HTTPRequest.blank('/v2/fake/scheduler_stats')
+        req.environ['cinder.context'] = self.ctxt
+        res = self.controller.get_pools(req)
+
+        self.assertEqual(2, len(res['pools']))
+
+        expected = {
+            'pools': [
+                {
+                    'name': 'pool1',
+                },
+                {
+                    'name': 'pool2',
+                }
+            ]
+        }
+
+        self.assertDictMatch(res, expected)
+
+    def test_get_pools_detail(self):
+        req = fakes.HTTPRequest.blank('/v2/fake/scheduler_stats?detail=True')
+        req.environ['cinder.context'] = self.ctxt
+        res = self.controller.get_pools(req)
+
+        self.assertEqual(2, len(res['pools']))
+
+        expected = {
+            'pools': [
+                {
+                    'name': 'pool1',
+                    'capabilities': {
+                        'updated': None,
+                        'total_capacity': 1024,
+                        'free_capacity': 100,
+                        'volume_backend_name': 'pool1',
+                        'reserved_percentage': 0,
+                        'driver_version': '1.0.0',
+                        'storage_protocol': 'iSCSI',
+                        'QoS_support': 'False', }
+                },
+                {
+                    'name': 'pool2',
+                    'capabilities': {
+                        'updated': None,
+                        'total_capacity': 512,
+                        'free_capacity': 200,
+                        'volume_backend_name': 'pool2',
+                        'reserved_percentage': 0,
+                        'driver_version': '1.0.1',
+                        'storage_protocol': 'iSER',
+                        'QoS_support': 'True', }
+                }
+            ]
+        }
+
+        self.assertDictMatch(res, expected)
diff --git a/cinder/tests/policy.json b/cinder/tests/policy.json
index a4d29661078c3086a75e279013e049aa7514e91f..10b7a518362098c5a2f469c50259ea239e36db91 100644 (file)
@@ -89,5 +89,7 @@
     "consistencygroup:create_cgsnapshot" : "",
     "consistencygroup:delete_cgsnapshot": "",
     "consistencygroup:get_cgsnapshot": "",
-    "consistencygroup:get_all_cgsnapshots": ""
+    "consistencygroup:get_all_cgsnapshots": "",
+
+    "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api"
 }
diff --git a/cinder/tests/scheduler/test_host_manager.py b/cinder/tests/scheduler/test_host_manager.py
index 93373b1dbb78ee2fd53924caf49811322bc005b2..acb72726dc589d9f44e7332f66d2f00b6398c2f3 100644 (file)
@@ -188,6 +188,89 @@ class HostManagerTestCase(test.TestCase):
             self.assertEqual(host_state_map[host].service,
                              volume_node)
 
+    @mock.patch('cinder.db.service_get_all_by_topic')
+    @mock.patch('cinder.utils.service_is_up')
+    def test_get_pools(self, _mock_service_is_up,
+                       _mock_service_get_all_by_topic):
+        context = 'fake_context'
+
+        services = [
+            dict(id=1, host='host1', topic='volume', disabled=False,
+                 availability_zone='zone1', updated_at=timeutils.utcnow()),
+            dict(id=2, host='host2@back1', topic='volume', disabled=False,
+                 availability_zone='zone1', updated_at=timeutils.utcnow()),
+            dict(id=3, host='host2@back2', topic='volume', disabled=False,
+                 availability_zone='zone2', updated_at=timeutils.utcnow()),
+        ]
+
+        mocked_service_states = {
+            'host1': dict(volume_backend_name='AAA',
+                          total_capacity_gb=512, free_capacity_gb=200,
+                          timestamp=None, reserved_percentage=0),
+            'host2@back1': dict(volume_backend_name='BBB',
+                                total_capacity_gb=256, free_capacity_gb=100,
+                                timestamp=None, reserved_percentage=0),
+            'host2@back2': dict(volume_backend_name='CCC',
+                                total_capacity_gb=10000, free_capacity_gb=700,
+                                timestamp=None, reserved_percentage=0),
+        }
+
+        _mock_service_get_all_by_topic.return_value = services
+        _mock_service_is_up.return_value = True
+        _mock_warning = mock.Mock()
+        host_manager.LOG.warn = _mock_warning
+
+        with mock.patch.dict(self.host_manager.service_states,
+                             mocked_service_states):
+            # call get_all_host_states to populate host_state_map
+            self.host_manager.get_all_host_states(context)
+
+            res = self.host_manager.get_pools(context)
+
+            # check if get_pools returns all 3 pools
+            self.assertEqual(3, len(res))
+
+            expected = [
+                {
+                    'name': 'host1#AAA',
+                    'capabilities': {
+                        'timestamp': None,
+                        'volume_backend_name': 'AAA',
+                        'free_capacity_gb': 200,
+                        'driver_version': None,
+                        'total_capacity_gb': 512,
+                        'reserved_percentage': 0,
+                        'vendor_name': None,
+                        'storage_protocol': None},
+                },
+                {
+                    'name': 'host2@back1#BBB',
+                    'capabilities': {
+                        'timestamp': None,
+                        'volume_backend_name': 'BBB',
+                        'free_capacity_gb': 100,
+                        'driver_version': None,
+                        'total_capacity_gb': 256,
+                        'reserved_percentage': 0,
+                        'vendor_name': None,
+                        'storage_protocol': None},
+                },
+                {
+                    'name': 'host2@back2#CCC',
+                    'capabilities': {
+                        'timestamp': None,
+                        'volume_backend_name': 'CCC',
+                        'free_capacity_gb': 700,
+                        'driver_version': None,
+                        'total_capacity_gb': 10000,
+                        'reserved_percentage': 0,
+                        'vendor_name': None,
+                        'storage_protocol': None},
+                }
+            ]
+            self.assertEqual(len(expected), len(res))
+            self.assertEqual(sorted(expected), sorted(res))
+
 
 class HostStateTestCase(test.TestCase):
     """Test case for HostState class."""
diff --git a/cinder/tests/scheduler/test_rpcapi.py b/cinder/tests/scheduler/test_rpcapi.py
index b48c64751bbe20bf413d191336a17c7fe033f484..fe6287a2ce627b2edc835aeb8229cd0af303fc86 100644 (file)
@@ -123,3 +123,9 @@ class SchedulerRpcAPITestCase(test.TestCase):
                                  request_spec='fake_request_spec',
                                  filter_properties='filter_properties',
                                  version='1.5')
+
+    def test_get_pools(self):
+        self._test_scheduler_api('get_pools',
+                                 rpc_method='call',
+                                 filters=None,
+                                 version='1.7')
diff --git a/etc/cinder/policy.json b/etc/cinder/policy.json
index d6f7792147ec834d9af938587dedaddf87ae7f92..8f3a7b2f97a83d74d45c7cf749e8814d70ed8648 100644 (file)
@@ -74,5 +74,7 @@
     "consistencygroup:create_cgsnapshot" : "",
     "consistencygroup:delete_cgsnapshot": "",
     "consistencygroup:get_cgsnapshot": "",
-    "consistencygroup:get_all_cgsnapshots": ""
+    "consistencygroup:get_all_cgsnapshots": "",
+
+    "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api"
 }