ignore_dirs = [
"cinder/db",
"cinder/openstack",
- "cinder/scheduler",
"cinder/volume",
"cinder/zonemanager"]
for directory in ignore_dirs:
try:
result = _vars[which_dict][entry]
except KeyError as e:
- msg = _("KeyError: %s") % e
- raise exception.EvaluatorParseException(msg)
+ raise exception.EvaluatorParseException(
+ _("KeyError: %s") % six.text_type(e))
except TypeError as e:
- msg = _("TypeError: %s") % e
- raise exception.EvaluatorParseException(msg)
+ raise exception.EvaluatorParseException(
+ _("TypeError: %s") % six.text_type(e))
try:
result = int(result)
try:
result = float(result)
except ValueError as e:
- msg = _("ValueError: %s") % e
- raise exception.EvaluatorParseException(msg)
+ raise exception.EvaluatorParseException(
+ _("ValueError: %s") % six.text_type(e))
return result
elif op == '/':
prod /= float(val.eval())
except ZeroDivisionError as e:
- msg = _("ZeroDivisionError: %s") % e
- raise exception.EvaluatorParseException(msg)
+ raise exception.EvaluatorParseException(
+ _("ZeroDivisionError: %s") % six.text_type(e))
return prod
try:
result = _parser.parseString(expression, parseAll=True)[0]
except pyparsing.ParseException as e:
- msg = _("ParseException: %s") % e
- raise exception.EvaluatorParseException(msg)
+ raise exception.EvaluatorParseException(
+ _("ParseException: %s") % six.text_type(e))
return result.eval()
from oslo_log import log as logging
from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _, _LE, _LW
from cinder.scheduler import driver
from cinder.scheduler import scheduler_options
from cinder.volume import utils
filter_properties_list)
if not weighed_host:
- raise exception.NoValidHost(reason="No weighed hosts available")
+ raise exception.NoValidHost(reason=_("No weighed hosts available"))
host = weighed_host.obj.host
filter_properties)
if not weighed_host:
- raise exception.NoValidHost(reason="No weighed hosts available")
+ raise exception.NoValidHost(reason=_("No weighed hosts available"))
host = weighed_host.obj.host
volume_id = request_spec['volume_id']
if host_state.host == host:
return host_state
- msg = (_('Cannot place volume %(id)s on %(host)s')
- % {'id': request_spec['volume_id'], 'host': host})
- raise exception.NoValidHost(reason=msg)
+ raise exception.NoValidHost(reason=_('Cannot place volume %(id)s on '
+ '%(host)s') %
+ {'id': request_spec['volume_id'],
+ 'host': host})
def find_retype_host(self, context, request_spec, filter_properties=None,
migration_policy='never'):
weighed_hosts = self._get_weighted_candidates(context, request_spec,
filter_properties)
if not weighed_hosts:
- msg = (_('No valid hosts for volume %(id)s with type %(type)s')
- % {'id': request_spec['volume_id'],
- 'type': request_spec['volume_type']})
- raise exception.NoValidHost(reason=msg)
+ raise exception.NoValidHost(reason=_('No valid hosts for volume '
+ '%(id)s with type %(type)s') %
+ {'id': request_spec['volume_id'],
+ 'type': request_spec['volume_type']})
for weighed_host in weighed_hosts:
host_state = weighed_host.obj
return host_state
if migration_policy == 'never':
- msg = (_('Current host not valid for volume %(id)s with type '
- '%(type)s, migration not allowed')
- % {'id': request_spec['volume_id'],
- 'type': request_spec['volume_type']})
- raise exception.NoValidHost(reason=msg)
+ raise exception.NoValidHost(reason=_('Current host not valid for '
+ 'volume %(id)s with type '
+ '%(type)s, migration not '
+ 'allowed') %
+ {'id': request_spec['volume_id'],
+ 'type': request_spec['volume_type']})
top_host = self._choose_top_host(weighed_hosts, request_spec)
return top_host.obj
def _max_attempts(self):
max_attempts = CONF.scheduler_max_attempts
if max_attempts < 1:
- msg = _("Invalid value for 'scheduler_max_attempts', "
- "must be >=1")
- raise exception.InvalidParameterValue(err=msg)
+ raise exception.InvalidParameterValue(
+ err=_("Invalid value for 'scheduler_max_attempts', "
+ "must be >=1"))
return max_attempts
def _log_volume_error(self, volume_id, retry):
return # no previously attempted hosts, skip
last_host = hosts[-1]
- msg = _("Error scheduling %(volume_id)s from last vol-service: "
- "%(last_host)s : %(exc)s") % {
- 'volume_id': volume_id,
- 'last_host': last_host,
- 'exc': exc,
- }
- LOG.error(msg)
+ LOG.error(_LE("Error scheduling %(volume_id)s from last vol-service: "
+ "%(last_host)s : %(exc)s"),
+ {'volume_id': volume_id,
+ 'last_host': last_host,
+ 'exc': exc})
def _populate_retry(self, filter_properties, properties):
"""Populate filter properties with history of retries for this
self._log_volume_error(volume_id, retry)
if retry['num_attempts'] > max_attempts:
- msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
- "volume %(volume_id)s") % {
- 'max_attempts': max_attempts,
- 'volume_id': volume_id,
- }
- raise exception.NoValidHost(reason=msg)
+ raise exception.NoValidHost(
+ reason=_("Exceeded max scheduling attempts %(max_attempts)d "
+ "for volume %(volume_id)s") %
+ {'max_attempts': max_attempts,
+ 'volume_id': volume_id})
def _get_weighted_candidates(self, context, request_spec,
filter_properties=None):
if not hosts:
return []
- LOG.debug("Filtered %s" % hosts)
+ LOG.debug("Filtered %s", hosts)
# weighted_host = WeightedHost() ... the best
# host for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
if not hosts:
return []
- LOG.debug("Filtered %s" % hosts)
+ LOG.debug("Filtered %s", hosts)
# weighted_host = WeightedHost() ... the best
# host for the job.
def _choose_top_host(self, weighed_hosts, request_spec):
top_host = weighed_hosts[0]
host_state = top_host.obj
- LOG.debug("Choosing %s" % host_state.host)
+ LOG.debug("Choosing %s", host_state.host)
volume_properties = request_spec['volume_properties']
host_state.consume_from_volume(volume_properties)
return top_host
def _choose_top_host_group(self, weighed_hosts, request_spec_list):
top_host = weighed_hosts[0]
host_state = top_host.obj
- LOG.debug("Choosing %s" % host_state.host)
+ LOG.debug("Choosing %s", host_state.host)
return top_host
# In the future we might want to have a lock on the volume_id so that
# the volume can not be deleted while its still being created?
if not volume_id:
- msg = _("No volume_id provided to populate a request_spec from")
- raise exception.InvalidInput(reason=msg)
+ raise exception.InvalidInput(
+ reason=_("No volume_id provided to populate a "
+ "request_spec from"))
volume_ref = self.db_api.volume_get(context, volume_id)
volume_type_id = volume_ref.get('volume_type_id')
vol_type = self.db_api.volume_type_get(context, volume_type_id)
try:
self._notify_failure(context, request_spec, cause)
finally:
- LOG.error(_LE("Failed to run task %(name)s: %(cause)s") %
+ LOG.error(_LE("Failed to run task %(name)s: %(cause)s"),
{'cause': cause, 'name': self.name})
def _notify_failure(self, context, request_spec, cause):
payload)
except exception.CinderException:
LOG.exception(_LE("Failed notifying on %(topic)s "
- "payload %(payload)s") %
+ "payload %(payload)s"),
{'topic': self.FAILURE_TOPIC, 'payload': payload})
def execute(self, context, request_spec, filter_properties):
nonactive_pools = set(self.pools.keys()) - active_pools
for pool in nonactive_pools:
LOG.debug("Removing non-active pool %(pool)s @ %(host)s "
- "from scheduler cache." % {'pool': pool,
- 'host': self.host})
+ "from scheduler cache.", {'pool': pool,
+ 'host': self.host})
del self.pools[pool]
def _append_backend_info(self, pool_cap):
if not found_class:
bad_filters.append(filter_name)
if bad_filters:
- msg = ", ".join(bad_filters)
- raise exception.SchedulerHostFilterNotFound(filter_name=msg)
+ raise exception.SchedulerHostFilterNotFound(
+ filter_name=", ".join(bad_filters))
return good_filters
def _choose_host_weighers(self, weight_cls_names):
if not found_class:
bad_weighers.append(weigher_name)
if bad_weighers:
- msg = ", ".join(bad_weighers)
- raise exception.SchedulerHostWeigherNotFound(weigher_name=msg)
+ raise exception.SchedulerHostWeigherNotFound(
+ weigher_name=", ".join(bad_weighers))
return good_weighers
def get_filtered_hosts(self, hosts, filter_properties,
self.service_states[host] = capab_copy
LOG.debug("Received %(service_name)s service update from "
- "%(host)s: %(cap)s" %
+ "%(host)s: %(cap)s",
{'service_name': service_name, 'host': host,
'cap': capabilities})
for service in volume_services:
host = service['host']
if not utils.service_is_up(service):
- LOG.warn(_LW("volume service is down. (host: %s)") % host)
+ LOG.warning(_LW("volume service is down. (host: %s)"), host)
continue
capabilities = self.service_states.get(host, None)
if capabilities is None:
nonactive_hosts = set(self.host_state_map.keys()) - active_hosts
for host in nonactive_hosts:
LOG.info(_LI("Removing non-active host: %(host)s from "
- "scheduler cache.") % {'host': host})
+ "scheduler cache."), {'host': host})
del self.host_state_map[host]
def get_all_host_states(self, context):
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import importutils
+import six
from cinder import context
from cinder import db
request_spec_list,
filter_properties_list)
except exception.NoValidHost:
- msg = (_("Could not find a host for consistency group "
- "%(group_id)s.") %
- {'group_id': group_id})
- LOG.error(msg)
+ LOG.error(_LE("Could not find a host for consistency group "
+ "%(group_id)s."),
+ {'group_id': group_id})
db.consistencygroup_update(context, group_id,
{'status': 'error'})
except Exception:
snapshot_id,
image_id)
except Exception:
- LOG.exception(_LE("Failed to create scheduler "
- "manager volume flow"))
- raise exception.CinderException(
- _("Failed to create scheduler manager volume flow"))
+ msg = _("Failed to create scheduler manager volume flow")
+ LOG.exception(msg)
+ raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
request_spec, msg=None):
# TODO(harlowja): move into a task that just does this later.
if not msg:
- msg = (_("Failed to schedule_%(method)s: %(ex)s") %
- {'method': method, 'ex': ex})
+ msg = (_LE("Failed to schedule_%(method)s: %(ex)s") %
+ {'method': method, 'ex': six.text_type(ex)})
LOG.error(msg)
volume_state = updates['volume_state']
"""Get the last modified datetime. Broken out for testing."""
try:
return os.path.getmtime(filename)
- except os.error as e:
+ except os.error:
LOG.exception(_LE("Could not stat scheduler options file "
- "%(filename)s: '%(e)s'"),
- {'filename': filename, 'e': e})
+ "%(filename)s."),
+ {'filename': filename})
raise
def _load_file(self, handle):
"""Decode the JSON file. Broken out for testing."""
try:
return json.load(handle)
- except ValueError as e:
- LOG.exception(_LE("Could not decode scheduler options: '%s'") % e)
+ except ValueError:
+ LOG.exception(_LE("Could not decode scheduler options."))
return {}
def _get_time_now(self):
_mock_service_get_all_by_topic.return_value = services
_mock_service_is_up.return_value = True
_mock_warning = mock.Mock()
- host_manager.LOG.warn = _mock_warning
+ host_manager.LOG.warning = _mock_warning
# Get all states
self.host_manager.get_all_host_states(context)
for service in services:
expected.append(mock.call(service))
self.assertEqual(expected, _mock_service_is_up.call_args_list)
- _mock_warning.assert_called_once_with("volume service is down. "
- "(host: host3)")
+ self.assertTrue(_mock_warning.call_count > 0)
# Get host_state_map and make sure we have the first 2 hosts (host3 is
# down, host4 is missing capabilities)
from cinder import context
from cinder import db
from cinder import exception
-from cinder.i18n import _
from cinder.scheduler import driver
from cinder.scheduler import filter_scheduler
from cinder.scheduler import manager
self.context,
'volume',
group_id)
- LOG.exception.assert_called_once_with(_(
- "Failed to create consistency group "
- "%(group_id)s."), {'group_id': group_id})
+ self.assertTrue(LOG.exception.call_count > 0)
db.consistencygroup_update.assert_called_once_with(
self.context, group_id, {'status': 'error'})
reason="No weighed hosts available")
self.manager.create_consistencygroup(
self.context, 'volume', group_id)
- LOG.error.assert_called_once_with(_(
- "Could not find a host for consistency group "
- "%(group_id)s.") % {'group_id': group_id})
+ self.assertTrue(LOG.error.call_count > 0)
db.consistencygroup_update.assert_called_once_with(
self.context, group_id, {'status': 'error'})