from oslo_log import log as logging
from oslo_utils import timeutils
+from cinder import context as cinder_context
from cinder import db
from cinder import exception
from cinder.i18n import _LI, _LW
# Do nothing when some other scheduler is configured
pass
+ self._no_capabilities_hosts = set() # Hosts having no capabilities
+ self._update_host_state_map(cinder_context.get_admin_context())
+
def _choose_host_filters(self, filter_cls_names):
"""Return a list of available filter names.
{'service_name': service_name, 'host': host,
'cap': capabilities})
+ self._no_capabilities_hosts.discard(host)
+
+    def has_all_capabilities(self):
+        """Return True when no known-up volume host is still missing
+        capabilities.
+
+        ``_no_capabilities_hosts`` is filled by ``_update_host_state_map``
+        with hosts whose service is up but which have not yet reported
+        capabilities, and entries are discarded again as capability
+        updates arrive via ``update_service_capabilities``.
+        """
+        return len(self._no_capabilities_hosts) == 0
+
def _update_host_state_map(self, context):
# Get resource usage across the available volume nodes:
topic,
disabled=False)
active_hosts = set()
+ no_capabilities_hosts = set()
for service in volume_services:
host = service['host']
if not utils.service_is_up(service):
LOG.warn(_LW("volume service is down. (host: %s)") % host)
continue
capabilities = self.service_states.get(host, None)
+ if capabilities is None:
+ no_capabilities_hosts.add(host)
+ continue
+
host_state = self.host_state_map.get(host)
if not host_state:
host_state = self.host_state_cls(host,
dict(service.iteritems()))
active_hosts.add(host)
+ self._no_capabilities_hosts = no_capabilities_hosts
+
# remove non-active hosts from host_state_map
nonactive_hosts = set(self.host_state_map.keys()) - active_hosts
for host in nonactive_hosts:
'host3': host3_volume_capabs}
self.assertDictMatch(service_states, expected)
+    @mock.patch('cinder.utils.service_is_up')
+    @mock.patch('cinder.db.service_get_all_by_topic')
+    def test_has_all_capabilities(self, _mock_service_get_all_by_topic,
+                                  _mock_service_is_up):
+        """has_all_capabilities() stays False until every up volume host
+        has reported capabilities, then flips to True."""
+        _mock_service_is_up.return_value = True
+        # Three up, enabled volume services; none has reported
+        # capabilities yet.
+        services = [
+            dict(id=1, host='host1', topic='volume', disabled=False,
+                 availability_zone='zone1', updated_at=timeutils.utcnow()),
+            dict(id=2, host='host2', topic='volume', disabled=False,
+                 availability_zone='zone1', updated_at=timeutils.utcnow()),
+            dict(id=3, host='host3', topic='volume', disabled=False,
+                 availability_zone='zone1', updated_at=timeutils.utcnow()),
+        ]
+        _mock_service_get_all_by_topic.return_value = services
+        # Re-create the HostManager so its __init__ runs with the mocked
+        # db.service_get_all_by_topic in place.
+        self.host_manager = host_manager.HostManager()
+        self.assertFalse(self.host_manager.has_all_capabilities())
+
+        host1_volume_capabs = dict(free_capacity_gb=4321, timestamp=1)
+        host2_volume_capabs = dict(free_capacity_gb=5432, timestamp=1)
+        host3_volume_capabs = dict(free_capacity_gb=6543, timestamp=1)
+
+        # Report capabilities one host at a time; only after the last
+        # report should has_all_capabilities() become True.
+        service_name = 'volume'
+        self.host_manager.update_service_capabilities(service_name, 'host1',
+                                                      host1_volume_capabs)
+        self.assertFalse(self.host_manager.has_all_capabilities())
+        self.host_manager.update_service_capabilities(service_name, 'host2',
+                                                      host2_volume_capabs)
+        self.assertFalse(self.host_manager.has_all_capabilities())
+        self.host_manager.update_service_capabilities(service_name, 'host3',
+                                                      host3_volume_capabs)
+        self.assertTrue(self.host_manager.has_all_capabilities())
+
@mock.patch('cinder.db.service_get_all_by_topic')
@mock.patch('cinder.utils.service_is_up')
@mock.patch('oslo_utils.timeutils.utcnow')
availability_zone='zone3', updated_at=timeutils.utcnow()),
]
- # First test: service_is_up is always True, host5 is disabled
+ service_states = {
+ 'host1': dict(volume_backend_name='AAA',
+ total_capacity_gb=512, free_capacity_gb=200,
+ timestamp=None, reserved_percentage=0,
+ provisioned_capacity_gb=312),
+ 'host2': dict(volume_backend_name='BBB',
+ total_capacity_gb=256, free_capacity_gb=100,
+ timestamp=None, reserved_percentage=0,
+ provisioned_capacity_gb=156),
+ 'host3': dict(volume_backend_name='CCC',
+ total_capacity_gb=10000, free_capacity_gb=700,
+ timestamp=None, reserved_percentage=0,
+ provisioned_capacity_gb=9300),
+ }
+
+ # First test: service_is_up is always True, host5 is disabled,
+ # host4 has no capabilities
+ self.host_manager.service_states = service_states
_mock_service_get_all_by_topic.return_value = services
_mock_service_is_up.return_value = True
_mock_warning = mock.Mock()
# Get host_state_map and make sure we have the first 4 hosts
host_state_map = self.host_manager.host_state_map
- self.assertEqual(len(host_state_map), 4)
- for i in xrange(4):
+ self.assertEqual(len(host_state_map), 3)
+ for i in xrange(3):
volume_node = services[i]
host = volume_node['host']
self.assertEqual(host_state_map[host].service, volume_node)
- # Second test: Now service_is_up returns False for host4
+ # Second test: Now service_is_up returns False for host3
_mock_service_is_up.reset_mock()
- _mock_service_is_up.side_effect = [True, True, True, False]
+ _mock_service_is_up.side_effect = [True, True, False, True]
_mock_service_get_all_by_topic.reset_mock()
_mock_warning.reset_mock()
- # Get all states, make sure host 4 is reported as down
+ # Get all states, make sure host 3 is reported as down
self.host_manager.get_all_host_states(context)
_mock_service_get_all_by_topic.assert_called_with(context,
topic,
for service in services:
expected.append(mock.call(service))
self.assertEqual(expected, _mock_service_is_up.call_args_list)
- expected = []
- for num in ['4']:
- expected.append(mock.call("volume service is down. "
- "(host: host" + num + ")"))
- self.assertEqual(expected, _mock_warning.call_args_list)
+ _mock_warning.assert_called_once_with("volume service is down. "
+ "(host: host3)")
- # Get host_state_map and make sure we have the first 4 hosts
+ # Get host_state_map and make sure we have the first 2 hosts (host3 is
+ # down, host4 is missing capabilities)
host_state_map = self.host_manager.host_state_map
- self.assertEqual(len(host_state_map), 3)
- for i in xrange(3):
+ self.assertEqual(len(host_state_map), 2)
+ for i in xrange(2):
volume_node = services[i]
host = volume_node['host']
self.assertEqual(host_state_map[host].service,