CG creation should be scheduled on backend level
author    wanghao <wanghao749@huawei.com>
Thu, 17 Sep 2015 02:23:54 +0000 (10:23 +0800)
committer wanghao <wanghao749@huawei.com>
Tue, 10 Nov 2015 01:41:58 +0000 (09:41 +0800)
Currently, the Cinder scheduler schedules CG creation at the
pool level. That means if a CG contains multiple volume types
and each volume type is associated with a different pool (even
when those pools are under the same backend), the CG cannot be
created successfully.

According to the design of CGs, creation should be scheduled at
the backend level to support the case mentioned above.
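For illustration only: Cinder host strings take the form 'host@backend#pool',
and this patch compares candidates at the 'host@backend' level via
cinder.volume.utils.extract_host. The stand-alone helper below merely mimics
that default (backend-level) behaviour under that assumption; it is not the
real implementation.

    def extract_backend(host_string):
        # Illustrative stand-in for cinder.volume.utils.extract_host(host):
        # drop the '#pool' suffix so 'host@backend#pool' -> 'host@backend'.
        return host_string.partition('#')[0]

    # Two pools under the same backend compare equal at the backend level...
    assert (extract_backend('node1@lvmdriver#pool_a') ==
            extract_backend('node1@lvmdriver#pool_b'))
    # ...whereas the old pool-level comparison saw two different hosts and
    # therefore refused to create the CG.
    assert 'node1@lvmdriver#pool_a' != 'node1@lvmdriver#pool_b'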

DocImpact
UpgradeImpact
Change-Id: I335dbdd1d2b1146543148ccce8d52162804369f4
Closes-Bug: #1496655

cinder/scheduler/filter_scheduler.py
cinder/tests/unit/scheduler/test_filter_scheduler.py
cinder/tests/unit/volume/flows/test_create_volume_flow.py
cinder/volume/flows/api/create_volume.py

cinder/scheduler/filter_scheduler.py
index 66604f3c467c67af2cfcdfac22d80993a36638aa..b014d84ba70143b2be466dd93ad56d857428427c 100644 (file)
@@ -393,7 +393,10 @@ class FilterScheduler(driver.Scheduler):
                 new_weighed_hosts = []
                 for host1 in weighed_hosts:
                     for host2 in temp_weighed_hosts:
-                        if host1.obj.host == host2.obj.host:
+                        # Should schedule creation of CG on backend level,
+                        # not pool level.
+                        if (utils.extract_host(host1.obj.host) ==
+                                utils.extract_host(host2.obj.host)):
                             new_weighed_hosts.append(host1)
                 weighed_hosts = new_weighed_hosts
                 if not weighed_hosts:
@@ -406,6 +409,16 @@ class FilterScheduler(driver.Scheduler):
     def _schedule(self, context, request_spec, filter_properties=None):
         weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                       filter_properties)
+        # When we get the weighed_hosts, we clear those hosts whose backend
+        # is not the same as the consistencygroup's backend.
+        CG_backend = request_spec.get('CG_backend')
+        if weighed_hosts and CG_backend:
+            # Get host name including host@backend#pool info from
+            # weighed_hosts.
+            for host in weighed_hosts[::-1]:
+                backend = utils.extract_host(host.obj.host)
+                if backend != CG_backend:
+                    weighed_hosts.remove(host)
         if not weighed_hosts:
             LOG.warning(_LW('No weighed hosts found for volume '
                             'with properties: %s'),
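A minimal stand-alone sketch of the pruning step added to _schedule above.
FakeWeighedHost and prune_by_cg_backend are hypothetical names used only for
illustration, and the backend comparison assumes the same 'host@backend#pool'
convention as the patch.

    class FakeWeighedHost(object):
        # Hypothetical stand-in for the scheduler's weighed-host wrapper:
        # .obj.host carries the 'host@backend#pool' string.
        class _Obj(object):
            def __init__(self, host):
                self.host = host

        def __init__(self, host):
            self.obj = self._Obj(host)

    def prune_by_cg_backend(weighed_hosts, cg_backend):
        # Keep only candidates whose backend part matches CG_backend, as the
        # new loop over weighed_hosts[::-1] does in the hunk above.
        return [h for h in weighed_hosts
                if h.obj.host.partition('#')[0] == cg_backend]

    hosts = [FakeWeighedHost('host1@lvm#pool1'),
             FakeWeighedHost('host2@ceph#pool1')]
    survivors = prune_by_cg_backend(hosts, 'host1@lvm')
    assert [h.obj.host for h in survivors] == ['host1@lvm#pool1']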
cinder/tests/unit/scheduler/test_filter_scheduler.py
index 26bf6508428463fd9157f8beba518ece481b438c..875bba26a2dd6fe37cd99a624fbbd02d2f89e4d9 100644 (file)
@@ -177,6 +177,37 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.assertIsNotNone(weighed_host.obj)
         self.assertTrue(_mock_service_get_all_by_topic.called)
 
+    @mock.patch('cinder.db.service_get_all_by_topic')
+    def test_create_volume_clear_host_different_with_cg(self,
+                                                        _mock_service_get_all):
+        # Ensure we clear those hosts whose backend is not the same as
+        # the consistencygroup's backend.
+        sched = fakes.FakeFilterScheduler()
+        sched.host_manager = fakes.FakeHostManager()
+        fakes.mock_host_manager_db_calls(_mock_service_get_all)
+        fake_context = context.RequestContext('user', 'project')
+        request_spec = {'volume_properties': {'project_id': 1,
+                                              'size': 1},
+                        'volume_type': {'name': 'LVM_iSCSI'},
+                        'CG_backend': 'host@lvmdriver'}
+        weighed_host = sched._schedule(fake_context, request_spec, {})
+        self.assertIsNone(weighed_host)
+
+    @mock.patch('cinder.db.service_get_all_by_topic')
+    def test_create_volume_host_same_as_cg(self, _mock_service_get_all):
+        # Ensure we don't clear the host whose backend is the same as
+        # the consistencygroup's backend.
+        sched = fakes.FakeFilterScheduler()
+        sched.host_manager = fakes.FakeHostManager()
+        fakes.mock_host_manager_db_calls(_mock_service_get_all)
+        fake_context = context.RequestContext('user', 'project')
+        request_spec = {'volume_properties': {'project_id': 1,
+                                              'size': 1},
+                        'volume_type': {'name': 'LVM_iSCSI'},
+                        'CG_backend': 'host1'}
+        weighed_host = sched._schedule(fake_context, request_spec, {})
+        self.assertEqual('host1#lvm1', weighed_host.obj.host)
+
     def test_max_attempts(self):
         self.flags(scheduler_max_attempts=4)
 
cinder/tests/unit/volume/flows/test_create_volume_flow.py
index df820918ea0acc9d0e6a3b076bb4bbf55c8a0814..04086c72eabc11f103b476832c4084ac571bdce7 100644 (file)
@@ -50,14 +50,16 @@ class CreateVolumeFlowTestCase(test.TestCase):
         # called to avoid div by zero errors.
         self.counter = float(0)
 
+    @mock.patch('cinder.volume.utils.extract_host')
     @mock.patch('time.time', side_effect=time_inc)
     @mock.patch('cinder.objects.ConsistencyGroup.get_by_id')
-    def test_cast_create_volume(self, consistencygroup_get_by_id, mock_time):
+    def test_cast_create_volume(self, consistencygroup_get_by_id, mock_time,
+                                mock_extract_host):
         props = {}
-        consistencygroup_obj = \
-            fake_consistencygroup.fake_consistencyobject_obj(
-                self.ctxt, consistencygroup_id=1, host=None)
-        consistencygroup_get_by_id.return_value = consistencygroup_obj
+        cg_obj = (fake_consistencygroup.
+                  fake_consistencyobject_obj(self.ctxt, consistencygroup_id=1,
+                                             host='host@backend#pool'))
+        consistencygroup_get_by_id.return_value = cg_obj
         spec = {'volume_id': None,
                 'source_volid': None,
                 'snapshot_id': None,
@@ -90,6 +92,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
 
         task._cast_create_volume(self.ctxt, spec, props)
         consistencygroup_get_by_id.assert_called_once_with(self.ctxt, 5)
+        mock_extract_host.assert_called_once_with('host@backend#pool')
 
     @mock.patch('cinder.volume.volume_types.is_encrypted')
     @mock.patch('cinder.volume.flows.api.create_volume.'
cinder/volume/flows/api/create_volume.py
index 6156a225b0f995e9c1085720f89db581430a1fe5..8e1f98ce43ba2281b2ad2b525a09dc3aec7eaf57 100644 (file)
@@ -27,6 +27,7 @@ from cinder import policy
 from cinder import quota
 from cinder import utils
 from cinder.volume.flows import common
+from cinder.volume import utils as vol_utils
 from cinder.volume import volume_types
 
 LOG = logging.getLogger(__name__)
@@ -697,8 +698,14 @@ class VolumeCastTask(flow_utils.CinderTask):
         cgsnapshot_id = request_spec['cgsnapshot_id']
 
         if cgroup_id:
+            # If cgroup_id exists, cast the volume to the scheduler to choose
+            # a proper pool whose backend is the same as the CG's backend.
             cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
-            host = cgroup.host
+            # FIXME(wanghao): CG_backend got added before request_spec was
+            # converted to versioned objects. We should make sure that this
+            # will be handled by object version translations once we add
+            # RequestSpec object.
+            request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host)
         elif snapshot_id and CONF.snapshot_same_host:
             # NOTE(Rongze Zhu): A simple solution for bug 1008866.
             #
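Taken together, the API flow records the CG's backend in the request spec and
the scheduler then rejects candidates on other backends. A hedged end-to-end
sketch, with hypothetical helper names and plain strings standing in for the
real weighed-host objects:

    def build_request_spec(cg_host):
        # API side (VolumeCastTask): record the CG's backend, not its pool,
        # so the scheduler may pick any pool under that backend.
        return {'CG_backend': cg_host.partition('#')[0]}

    def pick_host(candidates, request_spec):
        # Scheduler side (_schedule): drop candidates on other backends.
        cg_backend = request_spec.get('CG_backend')
        if cg_backend:
            candidates = [c for c in candidates
                          if c.partition('#')[0] == cg_backend]
        return candidates[0] if candidates else None

    spec = build_request_spec('node1@lvmdriver#pool_a')
    # A different pool under the same backend is still acceptable...
    assert pick_host(['node1@lvmdriver#pool_b'], spec) == 'node1@lvmdriver#pool_b'
    # ...while a pool under another backend is filtered out.
    assert pick_host(['node2@ceph#pool_x'], spec) is None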