- [N323] Add check for explicit import of _() to ensure proper translation.
- [N324] Enforce no use of LOG.audit messages. LOG.info should be used instead.
- [N327] assert_called_once is not a valid Mock method.
+- [N328] LOG.info messages require translations `_LI()`.
+- [N329] LOG.exception and LOG.error messages require translations `_LE()`.
+- [N330] LOG.warning messages require translations `_LW()`.
- [N333] Ensure that oslo namespaces are used for namespaced libraries.
- [N339] Prevent use of deprecated contextlib.nested.
- [C301] timeutils.utcnow() from oslo_utils should be used instead of datetime.now().
def root_app_factory(loader, global_conf, **local_conf):
if CONF.enable_v1_api:
- LOG.warn(_LW('The v1 api is deprecated and will be removed in the '
- 'Liberty release. You should set enable_v1_api=false and '
- 'enable_v2_api=true in your cinder.conf file.'))
+ LOG.warning(_LW('The v1 api is deprecated and will be removed in the '
+ 'Liberty release. You should set enable_v1_api=false '
+ 'and enable_v2_api=true in your cinder.conf file.'))
else:
del local_conf['/v1']
if not CONF.enable_v2_api:
context = req.environ['cinder.context']
self.authorize(context, 'reset_status')
update = self.validate_update(body['os-reset_status'])
- msg = _("Updating %(resource)s '%(id)s' with '%(update)r'")
+ msg = "Updating %(resource)s '%(id)s' with '%(update)r'"
LOG.debug(msg, {'resource': self.resource_name, 'id': id,
'update': update})
active = 'enabled'
if host['disabled']:
active = 'disabled'
- LOG.debug('status, active and update: %s, %s, %s' %
- (status, active, host['updated_at']))
+ LOG.debug('status, active and update: %s, %s, %s',
+ status, active, host['updated_at'])
hosts.append({'host_name': host['host'],
'service': host['topic'],
# Convert string to bool type in strict manner
force = strutils.bool_from_string(force)
- LOG.debug("Delete qos_spec: %(id)s, force: %(force)s" %
+ LOG.debug("Delete qos_spec: %(id)s, force: %(force)s",
{'id': id, 'force': force})
try:
raise webob.exc.HTTPBadRequest()
keys = body['keys']
- LOG.debug("Delete_key spec: %(id)s, keys: %(keys)s" %
+ LOG.debug("Delete_key spec: %(id)s, keys: %(keys)s",
{'id': id, 'keys': keys})
try:
context = req.environ['cinder.context']
authorize(context)
- LOG.debug("Get associations for qos_spec id: %s" % id)
+ LOG.debug("Get associations for qos_spec id: %s", id)
try:
associates = qos_specs.get_associations(context, id)
'qos_specs.delete',
notifier_err)
raise webob.exc.HTTPBadRequest(explanation=msg)
- LOG.debug("Associate qos_spec: %(id)s with type: %(type_id)s" %
+ LOG.debug("Associate qos_spec: %(id)s with type: %(type_id)s",
{'id': id, 'type_id': type_id})
try:
'qos_specs.delete',
notifier_err)
raise webob.exc.HTTPBadRequest(explanation=msg)
- LOG.debug("Disassociate qos_spec: %(id)s from type: %(type_id)s" %
+ LOG.debug("Disassociate qos_spec: %(id)s from type: %(type_id)s",
{'id': id, 'type_id': type_id})
try:
context = req.environ['cinder.context']
authorize(context)
- LOG.debug("Disassociate qos_spec: %s from all." % id)
+ LOG.debug("Disassociate qos_spec: %s from all.", id)
try:
qos_specs.disassociate_all(context, id)
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import db
-from cinder.i18n import _
+from cinder.i18n import _, _LI
LOG = logging.getLogger(__name__)
context = req.environ['cinder.context']
authorize(context, 'update_snapshot_status')
- LOG.debug("body: %s" % body)
+ LOG.debug("body: %s", body)
try:
status = body['os-update_snapshot_status']['status']
except KeyError:
update_dict.update({'progress': progress})
- LOG.info("Updating snapshot %(id)s with info %(dict)s" %
+ LOG.info(_LI("Updating snapshot %(id)s with info %(dict)s"),
{'id': id, 'dict': update_dict})
db.snapshot_update(context, id, update_dict)
import logging
+
+import six
+
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
all_metadata = self.volume_api.get_volumes_image_metadata(context)
except Exception as e:
LOG.debug('Problem retrieving volume image metadata. '
- 'It will be skipped. Error: %s', e)
+ 'It will be skipped. Error: %s', six.text_type(e))
all_metadata = {}
return all_metadata
vol = self.volume_api.get(context, id)
LOG.info(_LI('Attempting to promote secondary replica to primary'
' for volume %s.'),
- str(id),
+ id,
context=context)
self.replication_api.promote(context, vol)
except exception.NotFound:
vol = self.volume_api.get(context, id)
LOG.info(_LI('Attempting to sync secondary replica with primary'
' for volume %s.'),
- str(id),
+ id,
context=context)
self.replication_api.reenable(context, vol)
except exception.NotFound:
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _LE, _LI, _LW
import cinder.policy
' '.join(extension.__doc__.strip().split()))
LOG.debug('Ext namespace: %s', extension.namespace)
LOG.debug('Ext updated: %s', extension.updated)
- except AttributeError as ex:
- LOG.exception(_LE("Exception loading extension: %s"), unicode(ex))
+ except AttributeError:
+ LOG.exception(_LE("Exception loading extension."))
return False
return True
try:
self.load_extension(ext_factory)
except Exception as exc:
- LOG.warn(_LW('Failed to load extension %(ext_factory)s: '
- '%(exc)s'),
- {'ext_factory': ext_factory, 'exc': exc})
+ LOG.warning(_LW('Failed to load extension %(ext_factory)s: '
+ '%(exc)s'),
+ {'ext_factory': ext_factory, 'exc': exc})
class ControllerExtension(object):
try:
ext_mgr.load_extension(classpath)
except Exception as exc:
- logger.warn(_('Failed to load extension %(classpath)s: '
- '%(exc)s'),
- {'classpath': classpath, 'exc': exc})
+ logger.warning(_LW('Failed to load extension %(classpath)s: '
+ '%(exc)s'),
+ {'classpath': classpath, 'exc': exc})
# Now, let's consider any subdirectories we may have...
subdirs = []
try:
ext(ext_mgr)
except Exception as exc:
- logger.warn(_('Failed to load extension %(ext_name)s: '
- '%(exc)s'),
- {'ext_name': ext_name, 'exc': exc})
+ logger.warning(_LW('Failed to load extension '
+ '%(ext_name)s: %(exc)s'),
+ {'ext_name': ext_name, 'exc': exc})
# Update the list of directories we'll explore...
dirnames[:] = subdirs
def _error(self, inner, req):
if not isinstance(inner, exception.QuotaError):
- LOG.exception(_LE("Caught error: %s"), unicode(inner))
+ LOG.error(_LE("Caught error: %s"), inner)
safe = getattr(inner, 'safe', False)
headers = getattr(inner, 'headers', None)
status = getattr(inner, 'code', 500)
status = 500
msg_dict = dict(url=req.url, status=status)
- LOG.info(_LI("%(url)s returned with HTTP %(status)d") % msg_dict)
+ LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
outer = self.status_to_type(status)
if headers:
outer.headers = headers
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_LE(
- 'Exception handling resource: %s') %
+ 'Exception handling resource: %s'),
ex_value, exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
- LOG.info(_LI("Fault thrown: %s"), unicode(ex_value))
+ LOG.info(_LI("Fault thrown: %s"), ex_value)
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
- LOG.info(_LI("HTTP exception thrown: %s"), unicode(ex_value))
+ LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
raise Fault(ex_value)
# We didn't handle the exception
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
- LOG.info("%(method)s %(url)s" % {"method": request.method,
- "url": request.url})
+ LOG.info(_LI("%(method)s %(url)s"),
+ {"method": request.method,
+ "url": request.url})
# Identify the action, its arguments, and the requested
# content type
try:
msg_dict = dict(url=request.url, status=response.status_int)
- msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
+ msg = _LI("%(url)s returned with HTTP %(status)d")
except AttributeError as e:
msg_dict = dict(url=request.url, e=e)
- msg = _("%(url)s returned a fault: %(e)s") % msg_dict
+ msg = _LI("%(url)s returned a fault: %(e)s")
- LOG.info(msg)
+ LOG.info(msg, msg_dict)
return response
'create',
'delete',
'update']):
- LOG.exception(six.text_type(e))
+ LOG.exception(_LE('Get method error.'))
else:
ctxt.reraise = False
else:
# OK, it's an action; figure out which action...
mtype = _MEDIA_TYPE_MAP.get(content_type)
action_name = self.action_peek[mtype](body)
- LOG.debug("Action body: %s" % body)
+ LOG.debug("Action body: %s", body)
else:
action_name = action
raise exc.HTTPNotFound()
force = snapshot.get('force', False)
- msg = _("Create snapshot from volume %s")
+ msg = _LI("Create snapshot from volume %s")
LOG.info(msg, volume_id, context=context)
if not utils.is_valid_boolstr(force):
msg = _("Volume could not be found")
raise exc.HTTPNotFound(explanation=msg)
force = snapshot.get('force', False)
- msg = _("Create snapshot from volume %s")
+ msg = _LI("Create snapshot from volume %s")
LOG.info(msg, volume_id, context=context)
# NOTE(thingee): v2 API allows name instead of display_name
try:
LOG.debug("Send exists notification for <volume_id: "
"%(volume_id)s> <project_id %(project_id)s> "
- "<%(extra_info)s>" %
+ "<%(extra_info)s>",
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': extra_info})
'exists', extra_usage_info=extra_info)
except Exception as e:
LOG.error(_LE("Failed to send exists notification"
- " for volume %s.") %
+ " for volume %s."),
volume_ref.id)
print(traceback.format_exc(e))
}
LOG.debug("Send create notification for "
"<volume_id: %(volume_id)s> "
- "<project_id %(project_id)s> <%(extra_info)s>" %
+ "<project_id %(project_id)s> <%(extra_info)s>",
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': local_extra_info})
'create.end', extra_usage_info=local_extra_info)
except Exception as e:
LOG.error(_LE("Failed to send create notification for "
- "volume %s.") % volume_ref.id)
+ "volume %s."), volume_ref.id)
print(traceback.format_exc(e))
if (CONF.send_actions and volume_ref.deleted_at and
}
LOG.debug("Send delete notification for "
"<volume_id: %(volume_id)s> "
- "<project_id %(project_id)s> <%(extra_info)s>" %
+ "<project_id %(project_id)s> <%(extra_info)s>",
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': local_extra_info})
'delete.end', extra_usage_info=local_extra_info)
except Exception as e:
LOG.error(_LE("Failed to send delete notification for volume "
- "%s.") % volume_ref.id)
+ "%s."), volume_ref.id)
print(traceback.format_exc(e))
snapshots = db.snapshot_get_active_by_window(admin_context,
for snapshot_ref in snapshots:
try:
LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
- "<project_id %(project_id)s> <%(extra_info)s>" %
+ "<project_id %(project_id)s> <%(extra_info)s>",
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': extra_info})
extra_info)
except Exception as e:
LOG.error(_LE("Failed to send exists notification "
- "for snapshot %s.")
- % snapshot_ref.id)
+ "for snapshot %s."),
+ snapshot_ref.id)
print(traceback.format_exc(e))
if (CONF.send_actions and
}
LOG.debug("Send create notification for "
"<snapshot_id: %(snapshot_id)s> "
- "<project_id %(project_id)s> <%(extra_info)s>" %
+ "<project_id %(project_id)s> <%(extra_info)s>",
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': local_extra_info})
'create.end', extra_usage_info=local_extra_info)
except Exception as e:
LOG.error(_LE("Failed to send create notification for snapshot"
- "%s.") % snapshot_ref.id)
+ "%s."), snapshot_ref.id)
print(traceback.format_exc(e))
if (CONF.send_actions and snapshot_ref.deleted_at and
}
LOG.debug("Send delete notification for "
"<snapshot_id: %(snapshot_id)s> "
- "<project_id %(project_id)s> <%(extra_info)s>" %
+ "<project_id %(project_id)s> <%(extra_info)s>",
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': local_extra_info})
'delete.end', extra_usage_info=local_extra_info)
except Exception as e:
LOG.error(_LE("Failed to send delete notification for snapshot"
- "%s.") % snapshot_ref.id)
+ "%s."), snapshot_ref.id)
print(traceback.format_exc(e))
print(_("Volume usage audit completed"))
endpoint_type=endpoint_type,
**region_filter)
- LOG.debug('Creating a Nova client using "%s" user' %
+ LOG.debug('Creating a Nova client using "%s" user',
CONF.os_privileged_user_name)
else:
if nova_endpoint_template:
endpoint_type=endpoint_type,
**region_filter)
- LOG.debug('Nova client connection created using URL: %s' % url)
+ LOG.debug('Nova client connection created using URL: %s', url)
c = nova_client.Client(context.user_id,
context.auth_token,
from cinder.db import base
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
import cinder.policy
from cinder import quota
from cinder.scheduler import rpcapi as scheduler_rpcapi
valid = self._valid_availability_zone(availability_zone)
if not valid:
- msg = _("Availability zone '%s' is invalid") % (availability_zone)
- LOG.warn(msg)
+ msg = _LW(
+ "Availability zone '%s' is invalid") % (availability_zone)
+ LOG.warning(msg)
raise exception.InvalidInput(reason=msg)
return availability_zone
if not group['host']:
self.update_quota(context, group['id'], -1, group['project_id'])
- msg = ("No host for consistency group %s. Deleting from "
- "the database.") % group['id']
- LOG.debug(msg)
+ LOG.debug("No host for consistency group %s. Deleting from "
+ "the database.", group['id'])
self.db.consistencygroup_destroy(context.elevated(), group['id'])
return
raise exception.InvalidInput(reason=msg)
if filters:
- LOG.debug("Searching by: %s" % str(filters))
+ LOG.debug("Searching by: %s", filters)
if (context.is_admin and 'all_tenants' in filters):
# Need to remove all_tenants to pass the filtering below.
context.elevated(), context.project_id)
if search_opts:
- LOG.debug("Searching by: %s" % search_opts)
+ LOG.debug("Searching by: %s", search_opts)
results = []
not_found = object()
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in kwargs.iteritems():
- LOG.error("%s: %s" % (name, value))
+ LOG.error(_LE("%(name)s: %(value)s"),
+ {'name': name, 'value': value})
if CONF.fatal_exception_format_errors:
raise exc_info[0], exc_info[1], exc_info[2]
# at least get the core message out if something happened
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](concurrency|db"
"|config|utils|serialization|log)")
+log_translation_LI = re.compile(
+ r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
+log_translation_LE = re.compile(
+ r"(.)*LOG\.(exception|error)\(\s*(_\(|'|\")")
+log_translation_LW = re.compile(
+ r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
+
def no_vi_headers(physical_line, line_number, lines):
"""Check for vi editor configuration in source files.
yield (pos, msg)
+def validate_log_translations(logical_line, filename):
+ # TODO(smcginnis): The following is temporary as a series
+ # of patches are done to address these issues. It should be
+ # removed completely when bug 1433216 is closed.
+ ignore_dirs = [
+ "cinder/backup",
+ "cinder/brick",
+ "cinder/common",
+ "cinder/db",
+ "cinder/openstack",
+ "cinder/scheduler",
+ "cinder/volume",
+ "cinder/zonemanager"]
+ for directory in ignore_dirs:
+ if directory in filename:
+ return
+
+ # Translations are not required in the test directory.
+ # This will not catch all instances of violations, just direct
+ # misuse of the form LOG.info('Message').
+ if "cinder/tests" in filename:
+ return
+ msg = "N328: LOG.info messages require translations `_LI()`!"
+ if log_translation_LI.match(logical_line):
+ yield (0, msg)
+ msg = ("N329: LOG.exception and LOG.error messages require "
+ "translations `_LE()`!")
+ if log_translation_LE.match(logical_line):
+ yield (0, msg)
+ msg = "N330: LOG.warning messages require translations `_LW()`!"
+ if log_translation_LW.match(logical_line):
+ yield (0, msg)
+
+
def check_oslo_namespace_imports(logical_line):
if re.match(oslo_namespace_imports, logical_line):
msg = ("N333: '%s' must be used instead of '%s'.") % (
register(check_oslo_namespace_imports)
register(check_no_contextlib_nested)
register(check_datetime_now)
+ register(validate_log_translations)
import six.moves.urllib.parse as urlparse
from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _LE, _LW
glance_opts = [
except retry_excs as e:
netloc = self.netloc
extra = "retrying"
- error_msg = _("Error contacting glance server "
- "'%(netloc)s' for '%(method)s', "
- "%(extra)s.") % {'netloc': netloc,
- 'method': method,
- 'extra': extra,
- }
+ error_msg = _LE("Error contacting glance server "
+ "'%(netloc)s' for '%(method)s', "
+ "%(extra)s.")
if attempt == num_attempts:
extra = 'done trying'
- error_msg = _("Error contacting glance server "
- "'%(netloc)s' for '%(method)s', "
- "%(extra)s.") % {'netloc': netloc,
- 'method': method,
- 'extra': extra,
- }
- LOG.exception(error_msg)
+ LOG.exception(error_msg, {'netloc': netloc,
+ 'method': method,
+ 'extra': extra})
raise exception.GlanceConnectionFailed(reason=e)
- LOG.exception(error_msg)
+
+ LOG.exception(error_msg, {'netloc': netloc,
+ 'method': method,
+ 'extra': extra})
time.sleep(1)
from oslo_utils import units
from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _, _LI, _LW
from cinder.openstack.common import fileutils
from cinder.openstack.common import imageutils
from cinder import utils
mbps = (fsz_mb / duration)
msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
"duration %(duration).2f sec, destination %(dest)s")
- LOG.debug(msg % {"src": source,
- "sz": fsz_mb,
- "duration": duration,
- "dest": dest})
+ LOG.debug(msg, {"src": source,
+ "sz": fsz_mb,
+ "duration": duration,
+ "dest": dest})
- msg = _("Converted %(sz).2f MB image at %(mbps).2f MB/s")
- LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
+ msg = _LI("Converted %(sz).2f MB image at %(mbps).2f MB/s")
+ LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
def convert_image(source, dest, out_format, run_as_root=True, throttle=None):
mbps = (fsz_mb / duration)
msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
"duration %(duration).2f sec")
- LOG.debug(msg % {"dest": image_file.name,
- "sz": fsz_mb,
- "duration": duration})
- msg = _("Image download %(sz).2f MB at %(mbps).2f MB/s")
- LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
+ LOG.debug(msg, {"dest": image_file.name,
+ "sz": fsz_mb,
+ "duration": duration})
+ msg = _LI("Image download %(sz).2f MB at %(mbps).2f MB/s")
+ LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
def fetch_verify_image(context, image_service, image_id, dest,
# result we only need to copy the image to the destination and then
# return.
LOG.debug('Copying image from %(tmp)s to volume %(dest)s - '
- 'size: %(size)s' % {'tmp': tmp, 'dest': dest,
- 'size': image_meta['size']})
+ 'size: %(size)s', {'tmp': tmp, 'dest': dest,
+ 'size': image_meta['size']})
image_size_m = math.ceil(image_meta['size'] / units.Mi)
volume_utils.copy_volume(tmp, dest, image_size_m, blocksize)
return
# check via 'qemu-img info' that what we copied was in fact a raw
# image and not a different format with a backing file, which may be
# malicious.
- LOG.debug("%s was %s, converting to %s " % (image_id, fmt,
- volume_format))
+ LOG.debug("%s was %s, converting to %s ", image_id, fmt,
+ volume_format)
convert_image(tmp, dest, volume_format,
run_as_root=run_as_root)
volume_format='raw', run_as_root=True):
image_id = image_meta['id']
if (image_meta['disk_format'] == volume_format):
- LOG.debug("%s was %s, no need to convert to %s" %
- (image_id, volume_format, image_meta['disk_format']))
+ LOG.debug("%s was %s, no need to convert to %s",
+ image_id, volume_format, image_meta['disk_format'])
if os.name == 'nt' or os.access(volume_path, os.R_OK):
with fileutils.file_open(volume_path, 'rb') as image_file:
image_service.update(context, image_id, {}, image_file)
return
with temporary_file() as tmp:
- LOG.debug("%s was %s, converting to %s" %
- (image_id, volume_format, image_meta['disk_format']))
+ LOG.debug("%s was %s, converting to %s",
+ image_id, volume_format, image_meta['disk_format'])
convert_image(volume_path, tmp, image_meta['disk_format'],
run_as_root=run_as_root)
self._barbican_client = barbican_client.Client(
session=sess,
endpoint=self._barbican_endpoint)
- except Exception as e:
+ except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error creating Barbican client: %s"), (e))
+ LOG.exception(_LE("Error creating Barbican client."))
return self._barbican_client
order = barbican_client.orders.get(order_ref)
secret_uuid = order.secret_ref.rpartition('/')[2]
return secret_uuid
- except Exception as e:
+ except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error creating key: %s"), (e))
+ LOG.exception(_LE("Error creating key."))
def store_key(self, ctxt, key, expiration=None, name='Cinder Volume Key',
payload_content_type='application/octet-stream',
secret_ref = secret.store()
secret_uuid = secret_ref.rpartition('/')[2]
return secret_uuid
- except Exception as e:
+ except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error storing key: %s"), (e))
+ LOG.exception(_LE("Error storing key."))
def copy_key(self, ctxt, key_id):
"""Copies (i.e., clones) a key stored by barbican.
secret.algorithm, secret.bit_length,
secret.mode, True)
return copy_uuid
- except Exception as e:
+ except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error copying key: %s"), (e))
+ LOG.exception(_LE("Error copying key."))
def _create_secret_ref(self, key_id, barbican_client):
"""Creates the URL required for accessing a secret.
else:
secret_data = generated_data
return secret_data
- except Exception as e:
+ except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error getting secret data: %s"), (e))
+ LOG.exception(_LE("Error getting secret data."))
def _get_secret(self, ctxt, secret_ref):
"""Creates the URL required for accessing a secret's metadata.
try:
return barbican_client.secrets.get(secret_ref)
- except Exception as e:
+ except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error getting secret metadata: %s"), (e))
+ LOG.exception(_LE("Error getting secret metadata."))
def get_key(self, ctxt, key_id,
payload_content_type='application/octet-stream'):
key_data = secret_data
key = keymgr_key.SymmetricKey(secret.algorithm, key_data)
return key
- except Exception as e:
+ except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error getting key: %s"), (e))
+ LOG.exception(_LE("Error getting key."))
def delete_key(self, ctxt, key_id):
"""Deletes the specified key.
try:
secret_ref = self._create_secret_ref(key_id, barbican_client)
barbican_client.secrets.delete(secret_ref)
- except Exception as e:
+ except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error deleting key: %s"), (e))
+ LOG.exception(_LE("Error deleting key."))
def _generate_hex_key(self, **kwargs):
if CONF.keymgr.fixed_key is None:
- LOG.warn(_LW('config option keymgr.fixed_key has not been defined:'
- ' some operations may fail unexpectedly'))
+ LOG.warning(
+ _LW('config option keymgr.fixed_key has not been defined:'
+ ' some operations may fail unexpectedly'))
raise ValueError(_('keymgr.fixed_key not defined'))
return CONF.keymgr.fixed_key
raise exception.KeyManagerError(
reason="cannot delete non-existent key")
- LOG.warn(_LW("Not deleting key %s"), key_id)
+ LOG.warning(_LW("Not deleting key %s"), key_id)
expire=expire,
project_id=project_id)
- LOG.debug("Created reservations %s" % reservations)
+ LOG.debug("Created reservations %s", reservations)
return reservations
# mechanisms will resolve the issue. The exception is
# logged, however, because this is less than optimal.
LOG.exception(_LE("Failed to commit "
- "reservations %s") % reservations)
+ "reservations %s"), reservations)
def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
# mechanisms will resolve the issue. The exception is
# logged, however, because this is less than optimal.
LOG.exception(_LE("Failed to roll back reservations "
- "%s") % reservations)
+ "%s"), reservations)
def destroy_all_by_project(self, context, project_id):
"""Destroy all quotas, usages, and reservations associated with a
from oslo_log import log as logging
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LW
from cinder import quota
s_size = volume['size']
d_quota = quotas[over]
d_consumed = _consumed(over)
- msg = _("Quota exceeded for %(s_pid)s, tried to create "
+ LOG.warning(
+ _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG volume - (%(d_consumed)dG of "
- "%(d_quota)dG already consumed)")
- LOG.warn(msg % {'s_pid': ctxt.project_id,
- 's_size': s_size,
- 'd_consumed': d_consumed,
- 'd_quota': d_quota})
+ "%(d_quota)dG already consumed)"),
+ {'s_pid': ctxt.project_id,
+ 's_size': s_size,
+ 'd_consumed': d_consumed,
+ 'd_quota': d_quota})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=s_size, quota=d_quota, consumed=d_consumed)
elif 'volumes' in over:
- msg = _("Quota exceeded for %(s_pid)s, tried to create "
+ LOG.warning(
+ _LW("Quota exceeded for %(s_pid)s, tried to create "
"volume (%(d_consumed)d volumes "
- "already consumed)")
-
- LOG.warn(msg % {'s_pid': ctxt.project_id,
- 'd_consumed': _consumed(over)})
+ "already consumed)"),
+ {'s_pid': ctxt.project_id,
+ 'd_consumed': _consumed(over)})
raise exception.VolumeLimitExceeded(
allowed=quotas[over])
return reservations
from cinder import context
from cinder import db
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects import base as objects_base
from cinder.openstack.common import loopingcall
from cinder.openstack.common import service
"Messaging", messaging, context.get_admin_context().to_dict(),
rpc.TRANSPORT, "cinder", binary, host)
osprofiler.notifier.set(_notifier)
- LOG.warning("OSProfiler is enabled.\nIt means that person who knows "
- "any of hmac_keys that are specified in "
- "/etc/cinder/api-paste.ini can trace his requests. \n"
- "In real life only operator can read this file so there "
- "is no security issue. Note that even if person can "
- "trigger profiler, only admin user can retrieve trace "
- "information.\n"
- "To disable OSprofiler set in cinder.conf:\n"
- "[profiler]\nenabled=false")
+ LOG.warning(
+ _LW("OSProfiler is enabled.\nIt means that person who knows "
+ "any of hmac_keys that are specified in "
+ "/etc/cinder/api-paste.ini can trace his requests. \n"
+ "In real life only operator can read this file so there "
+ "is no security issue. Note that even if person can "
+ "trigger profiler, only admin user can retrieve trace "
+ "information.\n"
+ "To disable OSprofiler set in cinder.conf:\n"
+ "[profiler]\nenabled=false"))
else:
osprofiler.web.disable()
def start(self):
version_string = version.version_string()
- LOG.info(_('Starting %(topic)s node (version %(version_string)s)'),
+ LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
{'topic': self.topic, 'version_string': version_string})
self.model_disconnected = False
self.manager.init_host()
except exception.NotFound:
self._create_service_ref(ctxt)
- LOG.debug("Creating RPC server for service %s" % self.topic)
+ LOG.debug("Creating RPC server for service %s", self.topic)
target = messaging.Target(topic=self.topic, server=self.host)
endpoints = [self.manager]
if self.report_interval:
if CONF.service_down_time <= self.report_interval:
new_down_time = int(self.report_interval * 2.5)
- LOG.warn(_("Report interval must be less than service down "
- "time. Current config service_down_time: "
- "%(service_down_time)s, report_interval for this: "
- "service is: %(report_interval)s. Setting global "
- "service_down_time to: %(new_down_time)s") %
- {'service_down_time': CONF.service_down_time,
- 'report_interval': self.report_interval,
- 'new_down_time': new_down_time})
+ LOG.warning(
+ _LW("Report interval must be less than service down "
+ "time. Current config service_down_time: "
+ "%(service_down_time)s, report_interval for this: "
+ "service is: %(report_interval)s. Setting global "
+ "service_down_time to: %(new_down_time)s"),
+ {'service_down_time': CONF.service_down_time,
+ 'report_interval': self.report_interval,
+ 'new_down_time': new_down_time})
CONF.set_override('service_down_time', new_down_time)
def _create_service_ref(self, context):
try:
db.service_destroy(context.get_admin_context(), self.service_id)
except exception.NotFound:
- LOG.warn(_('Service killed that has no database entry'))
+ LOG.warning(_LW('Service killed that has no database entry'))
def stop(self):
# Try to shut the connection down, but if we get any sort of
service_ref = db.service_get(ctxt, self.service_id)
except exception.NotFound:
LOG.debug('The service database object disappeared, '
- 'Recreating it.')
+ 'recreating it.')
self._create_service_ref(ctxt)
service_ref = db.service_get(ctxt, self.service_id)
# TODO(termie): make this pattern be more elegant.
if getattr(self, 'model_disconnected', False):
self.model_disconnected = False
- LOG.error(_('Recovered model server connection!'))
+ LOG.error(_LE('Recovered model server connection!'))
except db_exc.DBConnectionError:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
- LOG.exception(_('model server went away'))
+ LOG.exception(_LE('model server went away'))
class WSGIService(object):
if ("_password" in flag or "_key" in flag or
(flag == "sql_connection" and
("mysql:" in flag_get or "postgresql:" in flag_get))):
- LOG.debug('%s : FLAG SET ' % flag)
+ LOG.debug('%s : FLAG SET ', flag)
else:
- LOG.debug('%(flag)s : %(flag_get)s' %
+ LOG.debug('%(flag)s : %(flag_get)s',
{'flag': flag, 'flag_get': flag_get})
try:
_launcher.wait()
from oslo_config import cfg
from oslo_log import log as logging
import paramiko
+import six
from cinder import exception
from cinder.i18n import _, _LI
if 'hosts_key_file' in kwargs.keys():
self.hosts_key_file = kwargs.pop('hosts_key_file')
LOG.info(_LI("Secondary ssh hosts key file %(kwargs)s will be "
- "loaded along with %(conf)s from /etc/cinder.conf.") %
+ "loaded along with %(conf)s from /etc/cinder.conf."),
{'kwargs': self.hosts_key_file,
'conf': CONF.ssh_hosts_key_file})
LOG.debug("Setting strict_ssh_host_key_policy to '%(policy)s' "
- "using ssh_hosts_key_file '%(key_file)s'." %
+ "using ssh_hosts_key_file '%(key_file)s'.",
{'policy': CONF.strict_ssh_host_key_policy,
'key_file': CONF.ssh_hosts_key_file})
transport.set_keepalive(self.conn_timeout)
return ssh
except Exception as e:
- msg = _("Error connecting via ssh: %s") % e
+ msg = _("Error connecting via ssh: %s") % six.text_type(e)
LOG.error(msg)
raise paramiko.SSHException(msg)
def test_check_datetime_now_noqa(self):
self.assertEqual(0, len(list(checks.check_datetime_now(
- "datetime.now() # noqa", True))))
\ No newline at end of file
+ "datetime.now() # noqa", True))))
+
+ def test_validate_log_translations(self):
+ self.assertEqual(1, len(list(checks.validate_log_translations(
+ "LOG.info('foo')", "foo.py"))))
+ self.assertEqual(1, len(list(checks.validate_log_translations(
+ "LOG.warning('foo')", "foo.py"))))
+ self.assertEqual(1, len(list(checks.validate_log_translations(
+ "LOG.error('foo')", "foo.py"))))
+ self.assertEqual(1, len(list(checks.validate_log_translations(
+ "LOG.exception('foo')", "foo.py"))))
+ self.assertEqual(0, len(list(checks.validate_log_translations(
+ "LOG.info('foo')", "cinder/tests/foo.py"))))
+ self.assertEqual(0, len(list(checks.validate_log_translations(
+ "LOG.info(_LI('foo')", "foo.py"))))
+ self.assertEqual(0, len(list(checks.validate_log_translations(
+ "LOG.warning(_LW('foo')", "foo.py"))))
+ self.assertEqual(0, len(list(checks.validate_log_translations(
+ "LOG.error(_LE('foo')", "foo.py"))))
+ self.assertEqual(0, len(list(checks.validate_log_translations(
+ "LOG.exception(_LE('foo')", "foo.py"))))
\ No newline at end of file
from cinder.db import base
from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _, _LE, _LI, _LW
from cinder import quota
from cinder.volume import api as volume_api
volume_ref = self.db.volume_get(context, transfer.volume_id)
if volume_ref['status'] != 'awaiting-transfer':
- msg = _("Volume in unexpected state")
- LOG.error(msg)
+ LOG.error(_LE("Volume in unexpected state"))
self.db.transfer_destroy(context, transfer_id)
def get_all(self, context, filters=None):
def create(self, context, volume_id, display_name):
"""Creates an entry in the transfers table."""
volume_api.check_policy(context, 'create_transfer')
- LOG.info("Generating transfer record for volume %s" % volume_id)
+ LOG.info(_LI("Generating transfer record for volume %s"), volume_id)
volume_ref = self.db.volume_get(context, volume_id)
if volume_ref['status'] != "available":
raise exception.InvalidVolume(reason=_("status must be available"))
transfer = self.db.transfer_create(context, transfer_rec)
except Exception:
LOG.error(_LE("Failed to create transfer record "
- "for %s") % volume_id)
+ "for %s"), volume_id)
raise
return {'id': transfer['id'],
'volume_id': transfer['volume_id'],
return (usages[name]['reserved'] + usages[name]['in_use'])
if 'gigabytes' in overs:
- msg = _("Quota exceeded for %(s_pid)s, tried to create "
- "%(s_size)sG volume (%(d_consumed)dG of %(d_quota)dG "
- "already consumed)")
- LOG.warn(msg % {'s_pid': context.project_id,
- 's_size': vol_ref['size'],
- 'd_consumed': _consumed('gigabytes'),
- 'd_quota': quotas['gigabytes']})
+ msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+ "%(s_size)sG volume (%(d_consumed)dG of "
+ "%(d_quota)dG already consumed)")
+ LOG.warning(msg, {'s_pid': context.project_id,
+ 's_size': vol_ref['size'],
+ 'd_consumed': _consumed('gigabytes'),
+ 'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=vol_ref['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'volumes' in overs:
- msg = _("Quota exceeded for %(s_pid)s, tried to create "
- "volume (%(d_consumed)d volumes "
- "already consumed)")
- LOG.warn(msg % {'s_pid': context.project_id,
- 'd_consumed': _consumed('volumes')})
+ msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+ "volume (%(d_consumed)d volumes "
+ "already consumed)")
+ LOG.warning(msg, {'s_pid': context.project_id,
+ 'd_consumed': _consumed('volumes')})
raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
try:
donor_id = vol_ref['project_id']
except Exception:
donor_reservations = None
LOG.exception(_LE("Failed to update quota donating volume"
- " transfer id %s") % transfer_id)
+ " transfer id %s"), transfer_id)
try:
# Transfer ownership of the volume now, must use an elevated
QUOTAS.commit(context, reservations)
if donor_reservations:
QUOTAS.commit(context, donor_reservations, project_id=donor_id)
- LOG.info(_LI("Volume %s has been transferred.") % volume_id)
+ LOG.info(_LI("Volume %s has been transferred."), volume_id)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
try:
shutil.rmtree(tmpdir)
except OSError as e:
- LOG.debug('Could not remove tmpdir: %s', e)
+ LOG.debug('Could not remove tmpdir: %s',
+ six.text_type(e))
def walk_class_hierarchy(clazz, encountered=None):
# we can't do anything if the driver didn't init
if not driver.initialized:
driver_name = driver.__class__.__name__
- LOG.error(_LE("Volume driver %s not initialized") % driver_name)
+ LOG.error(_LE("Volume driver %s not initialized"), driver_name)
raise exception.DriverNotInitialized()
unknown_options = [opt for opt in filters
if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
- log_msg = "Removing options '%s' from query." % bad_options
- LOG.debug(log_msg)
+ LOG.debug("Removing options '%s' from query.", bad_options)
for opt in unknown_options:
del filters[opt]
return True
return False
except Exception:
- LOG.debug('Path %s not found in is_blk_device check' % dev)
+ LOG.debug('Path %s not found in is_blk_device check', dev)
return False
{'host': host, 'port': port})
(self._host, self._port) = self._socket.getsockname()[0:2]
- LOG.info(_LI("%(name)s listening on %(_host)s:%(_port)s") %
+ LOG.info(_LI("%(name)s listening on %(_host)s:%(_port)s"),
{'name': self.name, '_host': self._host, '_port': self._port})
def start(self):
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to start %(name)s on %(_host)s:"
"%(_port)s with SSL "
- "support.") % self.__dict__)
+ "support."), self.__dict__)
wsgi_kwargs = {
'func': eventlet.wsgi.server,
"""
try:
return deploy.loadapp("config:%s" % self.config_path, name=name)
- except LookupError as err:
- LOG.error(err)
+ except LookupError:
+ LOG.exception(_LE("Error loading app %s"), name)
raise exception.PasteAppNotFound(name=name, path=self.config_path)