]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Re-enable a lot of cinder scheduler tests
authorMichael Kerrin <michael.kerrin@hp.com>
Wed, 14 Aug 2013 10:16:42 +0000 (10:16 +0000)
committerMichael Kerrin <michael.kerrin@hp.com>
Wed, 21 Aug 2013 12:14:59 +0000 (12:14 +0000)
This highlights and fixes a NoneType exception in the scheduler's
_get_weighted_candidates method

This was originally added to get around the fact that the filter tests
required that cinder was correctly installed on the system. But the
is_cinder_installed condition always returned False, and this hid
problems of old packages being installed on our system. Also
930f5891b0815e1b49b9b2cc840e0c24b2796e84 added the setup.py automatically
to run_tests.sh so we can remove this.

See https://review.openstack.org/#/c/20213/ for history.

Fixes bug: 1213226

Change-Id: I86fee802c0543355471ddbd712e52ccec750cea0

cinder/scheduler/filter_scheduler.py
cinder/tests/scheduler/fakes.py
cinder/tests/scheduler/test_capacity_weigher.py
cinder/tests/scheduler/test_filter_scheduler.py
cinder/tests/scheduler/test_host_filters.py
cinder/tests/scheduler/test_host_manager.py
cinder/tests/utils.py

index 83a135ef7ec26c9c089b0cce02b41bf299e3ce8b..3f2449ccbac744931edcc5ce957c0cace39fe077 100644 (file)
@@ -222,7 +222,7 @@ class FilterScheduler(driver.Scheduler):
         hosts = self.host_manager.get_filtered_hosts(hosts,
                                                      filter_properties)
         if not hosts:
-            return None
+            return []
 
         LOG.debug(_("Filtered %s") % hosts)
         # weighted_host = WeightedHost() ... the best
index 6668a1087cf96969546d72010800ff6f2a020943..4059f11301ff97441ab2886967ab965bbe9c63fd 100644 (file)
@@ -24,21 +24,6 @@ from cinder.scheduler import filter_scheduler
 from cinder.scheduler import host_manager
 
 
-VOLUME_SERVICES = [
-    dict(id=1, host='host1', topic='volume', disabled=False,
-         availability_zone='zone1', updated_at=timeutils.utcnow()),
-    dict(id=2, host='host2', topic='volume', disabled=False,
-         availability_zone='zone1', updated_at=timeutils.utcnow()),
-    dict(id=3, host='host3', topic='volume', disabled=False,
-         availability_zone='zone2', updated_at=timeutils.utcnow()),
-    dict(id=4, host='host4', topic='volume', disabled=False,
-         availability_zone='zone3', updated_at=timeutils.utcnow()),
-    # service on host5 is disabled
-    dict(id=5, host='host5', topic='volume', disabled=True,
-         availability_zone='zone4', updated_at=timeutils.utcnow()),
-]
-
-
 class FakeFilterScheduler(filter_scheduler.FilterScheduler):
     def __init__(self, *args, **kwargs):
         super(FakeFilterScheduler, self).__init__(*args, **kwargs)
@@ -79,5 +64,19 @@ class FakeHostState(host_manager.HostState):
 def mox_host_manager_db_calls(mock, context):
     mock.StubOutWithMock(db, 'service_get_all_by_topic')
 
+    services = [
+        dict(id=1, host='host1', topic='volume', disabled=False,
+             availability_zone='zone1', updated_at=timeutils.utcnow()),
+        dict(id=2, host='host2', topic='volume', disabled=False,
+             availability_zone='zone1', updated_at=timeutils.utcnow()),
+        dict(id=3, host='host3', topic='volume', disabled=False,
+             availability_zone='zone2', updated_at=timeutils.utcnow()),
+        dict(id=4, host='host4', topic='volume', disabled=False,
+             availability_zone='zone3', updated_at=timeutils.utcnow()),
+        # service on host5 is disabled
+        dict(id=5, host='host5', topic='volume', disabled=True,
+             availability_zone='zone4', updated_at=timeutils.utcnow()),
+    ]
+
     db.service_get_all_by_topic(mox.IgnoreArg(),
-                                mox.IgnoreArg()).AndReturn(VOLUME_SERVICES)
+                                mox.IgnoreArg()).AndReturn(services)
index 4232bbd03249c36d758add57e3d235a075b31c82..9498da836c786ad52f43787f31104ec8cd689ca8 100644 (file)
@@ -48,8 +48,6 @@ class CapacityWeigherTestCase(test.TestCase):
         self.mox.ResetAll()
         return host_states
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed')
     def test_default_of_spreading_first(self):
         hostinfo_list = self._get_all_hosts()
 
@@ -63,8 +61,6 @@ class CapacityWeigherTestCase(test.TestCase):
         self.assertEqual(weighed_host.weight, 921.0)
         self.assertEqual(weighed_host.obj.host, 'host1')
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed')
     def test_capacity_weight_multiplier1(self):
         self.flags(capacity_weight_multiplier=-1.0)
         hostinfo_list = self._get_all_hosts()
@@ -79,8 +75,6 @@ class CapacityWeigherTestCase(test.TestCase):
         self.assertEqual(weighed_host.weight, -190.0)
         self.assertEqual(weighed_host.obj.host, 'host4')
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed')
     def test_capacity_weight_multiplier2(self):
         self.flags(capacity_weight_multiplier=2.0)
         hostinfo_list = self._get_all_hosts()
index c747c62a6977a00f0fd3d2a1837584962de963c2..a821dc13966d4472326680f9fd168f7cfe6a942a 100644 (file)
@@ -30,17 +30,11 @@ from cinder.tests.scheduler import test_scheduler
 from cinder.tests import utils as test_utils
 
 
-def fake_get_filtered_hosts(hosts, filter_properties):
-    return list(hosts)
-
-
 class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
     """Test case for Filter Scheduler."""
 
     driver_cls = filter_scheduler.FilterScheduler
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed (try setup.py develop')
     def test_create_volume_no_hosts(self):
         """
         Ensure empty hosts & child_zones result in NoValidHosts exception.
@@ -58,8 +52,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.assertRaises(exception.NoValidHost, sched.schedule_create_volume,
                           fake_context, request_spec, {})
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed (try setup.py develop')
     def test_create_volume_non_admin(self):
         """Test creating an instance locally using run_instance, passing
         a non-admin context.  DB actions should work.
@@ -85,29 +77,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
                           fake_context, request_spec, {})
         self.assertTrue(self.was_admin)
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed (try setup.py develop')
     def test_schedule_happy_day(self):
         """Make sure there's nothing glaringly wrong with _schedule()
         by doing a happy day pass through.
         """
 
-        self.next_weight = 1.0
-
-        def _fake_weigh_objects(_self, functions, hosts, options):
-            self.next_weight += 2.0
-            host_state = hosts[0]
-            return [weights.WeighedHost(host_state, self.next_weight)]
-
         sched = fakes.FakeFilterScheduler()
         sched.host_manager = fakes.FakeHostManager()
         fake_context = context.RequestContext('user', 'project',
                                               is_admin=True)
 
-        self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
-                       fake_get_filtered_hosts)
-        self.stubs.Set(weights.HostWeightHandler,
-                       'get_weighed_objects', _fake_weigh_objects)
         fakes.mox_host_manager_db_calls(self.mox, fake_context)
 
         request_spec = {'volume_type': {'name': 'LVM_iSCSI'},
@@ -129,8 +108,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.assertRaises(exception.InvalidParameterValue,
                           fakes.FakeFilterScheduler)
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed (try setup.py develop')
     def test_retry_disabled(self):
         # Retry info should not get populated when re-scheduling is off.
         self.flags(scheduler_max_attempts=1)
@@ -147,8 +124,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         # should not have retry info in the populated filter properties:
         self.assertFalse("retry" in filter_properties)
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed (try setup.py develop')
     def test_retry_attempt_one(self):
         # Test retry logic on initial scheduling attempt.
         self.flags(scheduler_max_attempts=2)
@@ -165,8 +140,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         num_attempts = filter_properties['retry']['num_attempts']
         self.assertEqual(1, num_attempts)
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed (try setup.py develop')
     def test_retry_attempt_two(self):
         # Test retry logic when re-scheduling.
         self.flags(scheduler_max_attempts=2)
@@ -229,29 +202,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.assertEqual(1024, host_state.total_capacity_gb)
 
     def _host_passes_filters_setup(self):
-        self.next_weight = 1.0
-
-        def _fake_weigh_objects(_self, functions, hosts, options):
-            self.next_weight += 2.0
-            host_state = hosts[0]
-            return [weights.WeighedHost(host_state, self.next_weight)]
-
         sched = fakes.FakeFilterScheduler()
         sched.host_manager = fakes.FakeHostManager()
         fake_context = context.RequestContext('user', 'project',
                                               is_admin=True)
 
-        self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
-                       fake_get_filtered_hosts)
-        self.stubs.Set(weights.HostWeightHandler,
-                       'get_weighed_objects', _fake_weigh_objects)
         fakes.mox_host_manager_db_calls(self.mox, fake_context)
 
         self.mox.ReplayAll()
         return (sched, fake_context)
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed (try setup.py develop')
     def test_host_passes_filters_happy_day(self):
         """Do a successful pass through of with host_passes_filters()."""
         sched, ctx = self._host_passes_filters_setup()
@@ -262,8 +222,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         ret_host = sched.host_passes_filters(ctx, 'host1', request_spec, {})
         self.assertEqual(ret_host.host, 'host1')
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed (try setup.py develop')
     def test_host_passes_filters_no_capacity(self):
         """Fail the host due to insufficient capacity."""
         sched, ctx = self._host_passes_filters_setup()
index 7034879a30d3aa2359d00887696501de5f56ce07..9e44f1cfc4dc5bfc4f5fa25318b652a009ddff8b 100644 (file)
@@ -78,8 +78,6 @@ class HostFiltersTestCase(test.TestCase):
             return ret_value
         self.stubs.Set(utils, 'service_is_up', fake_service_is_up)
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed')
     def test_capacity_filter_passes(self):
         self._stub_service_is_up(True)
         filt_cls = self.class_map['CapacityFilter']()
@@ -91,8 +89,6 @@ class HostFiltersTestCase(test.TestCase):
                                     'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed')
     def test_capacity_filter_fails(self):
         self._stub_service_is_up(True)
         filt_cls = self.class_map['CapacityFilter']()
@@ -105,8 +101,6 @@ class HostFiltersTestCase(test.TestCase):
                                     'service': service})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed')
     def test_capacity_filter_passes_infinite(self):
         self._stub_service_is_up(True)
         filt_cls = self.class_map['CapacityFilter']()
@@ -118,8 +112,6 @@ class HostFiltersTestCase(test.TestCase):
                                     'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed')
     def test_capacity_filter_passes_unknown(self):
         self._stub_service_is_up(True)
         filt_cls = self.class_map['CapacityFilter']()
@@ -131,8 +123,6 @@ class HostFiltersTestCase(test.TestCase):
                                     'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed')
     def test_retry_filter_disabled(self):
         # Test case where retry/re-scheduling is disabled.
         filt_cls = self.class_map['RetryFilter']()
@@ -140,8 +130,6 @@ class HostFiltersTestCase(test.TestCase):
         filter_properties = {}
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed')
     def test_retry_filter_pass(self):
         # Node not previously tried.
         filt_cls = self.class_map['RetryFilter']()
@@ -150,8 +138,6 @@ class HostFiltersTestCase(test.TestCase):
         filter_properties = dict(retry=retry)
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
 
-    @testtools.skipIf(not test_utils.is_cinder_installed(),
-                      'Test requires Cinder installed')
     def test_retry_filter_fail(self):
         # Node was already tried.
         filt_cls = self.class_map['RetryFilter']()
index c6588785f355c839b954679502c67263bc2e056c..90f0ddd2ea2602b3e05152f8e2b200ad0bacb8bd 100644 (file)
@@ -143,26 +143,39 @@ class HostManagerTestCase(test.TestCase):
         self.mox.StubOutWithMock(host_manager.LOG, 'warn')
         self.mox.StubOutWithMock(host_manager.utils, 'service_is_up')
 
-        ret_services = fakes.VOLUME_SERVICES
-        db.service_get_all_by_topic(context, topic).AndReturn(ret_services)
-        host_manager.utils.service_is_up(ret_services[0]).AndReturn(True)
-        host_manager.utils.service_is_up(ret_services[1]).AndReturn(True)
-        host_manager.utils.service_is_up(ret_services[2]).AndReturn(True)
-        host_manager.utils.service_is_up(ret_services[3]).AndReturn(True)
-        host_manager.utils.service_is_up(ret_services[4]).AndReturn(True)
+        services = [
+            dict(id=1, host='host1', topic='volume', disabled=False,
+                 availability_zone='zone1', updated_at=timeutils.utcnow()),
+            dict(id=2, host='host2', topic='volume', disabled=False,
+                 availability_zone='zone1', updated_at=timeutils.utcnow()),
+            dict(id=3, host='host3', topic='volume', disabled=False,
+                 availability_zone='zone2', updated_at=timeutils.utcnow()),
+            dict(id=4, host='host4', topic='volume', disabled=False,
+                 availability_zone='zone3', updated_at=timeutils.utcnow()),
+            # service on host5 is disabled
+            dict(id=5, host='host5', topic='volume', disabled=True,
+                 availability_zone='zone4', updated_at=timeutils.utcnow()),
+        ]
+
+        db.service_get_all_by_topic(context, topic).AndReturn(services)
+        host_manager.utils.service_is_up(services[0]).AndReturn(True)
+        host_manager.utils.service_is_up(services[1]).AndReturn(True)
+        host_manager.utils.service_is_up(services[2]).AndReturn(True)
+        host_manager.utils.service_is_up(services[3]).AndReturn(True)
+        host_manager.utils.service_is_up(services[4]).AndReturn(True)
         # Disabled service
         host_manager.LOG.warn("volume service is down or disabled. "
                               "(host: host5)")
 
-        db.service_get_all_by_topic(context, topic).AndReturn(ret_services)
-        host_manager.utils.service_is_up(ret_services[0]).AndReturn(True)
-        host_manager.utils.service_is_up(ret_services[1]).AndReturn(True)
-        host_manager.utils.service_is_up(ret_services[2]).AndReturn(True)
-        host_manager.utils.service_is_up(ret_services[3]).AndReturn(False)
+        db.service_get_all_by_topic(context, topic).AndReturn(services)
+        host_manager.utils.service_is_up(services[0]).AndReturn(True)
+        host_manager.utils.service_is_up(services[1]).AndReturn(True)
+        host_manager.utils.service_is_up(services[2]).AndReturn(True)
+        host_manager.utils.service_is_up(services[3]).AndReturn(False)
         # Stopped service
         host_manager.LOG.warn("volume service is down or disabled. "
                               "(host: host4)")
-        host_manager.utils.service_is_up(ret_services[4]).AndReturn(True)
+        host_manager.utils.service_is_up(services[4]).AndReturn(True)
         # Disabled service
         host_manager.LOG.warn("volume service is down or disabled. "
                               "(host: host5)")
@@ -174,7 +187,7 @@ class HostManagerTestCase(test.TestCase):
         self.assertEqual(len(host_state_map), 4)
         # Check that service is up
         for i in xrange(4):
-            volume_node = fakes.VOLUME_SERVICES[i]
+            volume_node = services[i]
             host = volume_node['host']
             self.assertEqual(host_state_map[host].service,
                              volume_node)
@@ -184,7 +197,7 @@ class HostManagerTestCase(test.TestCase):
 
         self.assertEqual(len(host_state_map), 3)
         for i in xrange(3):
-            volume_node = fakes.VOLUME_SERVICES[i]
+            volume_node = services[i]
             host = volume_node['host']
             self.assertEqual(host_state_map[host].service,
                              volume_node)
index ccf38cba548711425497b80b1df37026ccb3550c..5ee91e8df38346470d8e74b52bca8d2bf740692d 100644 (file)
@@ -26,13 +26,6 @@ def get_test_admin_context():
     return context.get_admin_context()
 
 
-def is_cinder_installed():
-    if os.path.exists('../../cinder.cinder.egg-info'):
-        return True
-    else:
-        return False
-
-
 def create_volume(ctxt,
                   host='test_host',
                   display_name='test_volume',