]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Logging not using oslo.i18n guidelines
authorSean McGinnis <sean_mcginnis@dell.com>
Thu, 19 Mar 2015 13:37:49 +0000 (08:37 -0500)
committerSean McGinnis <sean_mcginnis@dell.com>
Thu, 19 Mar 2015 17:28:12 +0000 (12:28 -0500)
Part of a multi-patch set, split into easier-to-review chunks.

There have been quite a few instances found where the
i18n guidelines are not being followed. I believe this
has helped lead to some of the confusion around how to
correctly do this. Other developers see this code and
assume it is an example of the correct usage.

This patch attempts to clean up most of those violations
in the existing codebase to hopefully help avoid some of
that confusion in reviews.

Some issues addressed:
* Correct log translation markers for different log levels
* Passing format values as arguments to call, not preformatting
* Not forcing translation via six.text_type and others

Guidelines can be found here:
http://docs.openstack.org/developer/oslo.i18n/guidelines.html

Hacking checks will not be able to identify all violations of
the guidelines, but it could be useful for catching obvious ones
such as LOG.info("No markers!").

Change-Id: I38f52c6408b47ccb59ec2064b360f7d4427d6830
Partial-bug: 1433216

30 files changed:
HACKING.rst
cinder/api/__init__.py
cinder/api/contrib/admin_actions.py
cinder/api/contrib/hosts.py
cinder/api/contrib/qos_specs_manage.py
cinder/api/contrib/snapshot_actions.py
cinder/api/contrib/volume_image_metadata.py
cinder/api/contrib/volume_replication.py
cinder/api/extensions.py
cinder/api/middleware/fault.py
cinder/api/openstack/wsgi.py
cinder/api/v1/snapshots.py
cinder/api/v2/snapshots.py
cinder/cmd/volume_usage_audit.py
cinder/compute/nova.py
cinder/consistencygroup/api.py
cinder/exception.py
cinder/hacking/checks.py
cinder/image/glance.py
cinder/image/image_utils.py
cinder/keymgr/barbican.py
cinder/keymgr/conf_key_mgr.py
cinder/quota.py
cinder/quota_utils.py
cinder/service.py
cinder/ssh_utils.py
cinder/tests/test_hacking.py
cinder/transfer/api.py
cinder/utils.py
cinder/wsgi.py

index 75f0111e53324cc101a4b44749f45be1a678a35f..ef9bd58890430553d89271aecd6b27b400bf817b 100644 (file)
@@ -13,6 +13,9 @@ Cinder Specific Commandments
 - [N323] Add check for explicit import of _() to ensure proper translation.
 - [N324] Enforce no use of LOG.audit messages.  LOG.info should be used instead.
 - [N327] assert_called_once is not a valid Mock method.
+- [N328] LOG.info messages require translations `_LI()`.
+- [N329] LOG.exception and LOG.error messages require translations `_LE()`.
+- [N330] LOG.warning messages require translations `_LW()`.
 - [N333] Ensure that oslo namespaces are used for namespaced libraries.
 - [N339] Prevent use of deprecated contextlib.nested.
 - [C301] timeutils.utcnow() from oslo_utils should be used instead of datetime.now().
index 8a178ef9f513d18f0c273b7f01f5f911ae09dbf8..53712d1fe41da527f2cd6ce1b7ae79b089881c77 100644 (file)
@@ -28,9 +28,9 @@ LOG = logging.getLogger(__name__)
 
 def root_app_factory(loader, global_conf, **local_conf):
     if CONF.enable_v1_api:
-        LOG.warn(_LW('The v1 api is deprecated and will be removed in the '
-                     'Liberty release. You should set enable_v1_api=false and '
-                     'enable_v2_api=true in your cinder.conf file.'))
+        LOG.warning(_LW('The v1 api is deprecated and will be removed in the '
+                        'Liberty release. You should set enable_v1_api=false '
+                        'and enable_v2_api=true in your cinder.conf file.'))
     else:
         del local_conf['/v1']
     if not CONF.enable_v2_api:
index aee4ce109ebe4fad4423f284fb9629a4f2f912a1..cc5852474b7774c7821b6eb3188e68b69da96265 100644 (file)
@@ -81,7 +81,7 @@ class AdminController(wsgi.Controller):
         context = req.environ['cinder.context']
         self.authorize(context, 'reset_status')
         update = self.validate_update(body['os-reset_status'])
-        msg = _("Updating %(resource)s '%(id)s' with '%(update)r'")
+        msg = "Updating %(resource)s '%(id)s' with '%(update)r'"
         LOG.debug(msg, {'resource': self.resource_name, 'id': id,
                         'update': update})
 
index ac0df3225e4c3e06d657453d2720a8335c11b609..382e95729f850c57e69ddd075de22aa6768fcb52 100644 (file)
@@ -112,7 +112,7 @@ def _list_hosts(req, service=None):
         active = 'enabled'
         if host['disabled']:
             active = 'disabled'
-        LOG.debug('status, active and update: %s, %s, %s' %
+        LOG.debug('status, active and update: %s, %s, %s',
                   (status, active, host['updated_at']))
         hosts.append({'host_name': host['host'],
                       'service': host['topic'],
index bab8e1930fc237a3315bfca273e5081dfab1872e..cd136f5e754bd6110bf35d57b88aeea4812a88ff 100644 (file)
@@ -219,7 +219,7 @@ class QoSSpecsController(wsgi.Controller):
 
         # Convert string to bool type in strict manner
         force = strutils.bool_from_string(force)
-        LOG.debug("Delete qos_spec: %(id)s, force: %(force)s" %
+        LOG.debug("Delete qos_spec: %(id)s, force: %(force)s",
                   {'id': id, 'force': force})
 
         try:
@@ -258,7 +258,7 @@ class QoSSpecsController(wsgi.Controller):
             raise webob.exc.HTTPBadRequest()
 
         keys = body['keys']
-        LOG.debug("Delete_key spec: %(id)s, keys: %(keys)s" %
+        LOG.debug("Delete_key spec: %(id)s, keys: %(keys)s",
                   {'id': id, 'keys': keys})
 
         try:
@@ -287,7 +287,7 @@ class QoSSpecsController(wsgi.Controller):
         context = req.environ['cinder.context']
         authorize(context)
 
-        LOG.debug("Get associations for qos_spec id: %s" % id)
+        LOG.debug("Get associations for qos_spec id: %s", id)
 
         try:
             associates = qos_specs.get_associations(context, id)
@@ -325,7 +325,7 @@ class QoSSpecsController(wsgi.Controller):
                                          'qos_specs.delete',
                                          notifier_err)
             raise webob.exc.HTTPBadRequest(explanation=msg)
-        LOG.debug("Associate qos_spec: %(id)s with type: %(type_id)s" %
+        LOG.debug("Associate qos_spec: %(id)s with type: %(type_id)s",
                   {'id': id, 'type_id': type_id})
 
         try:
@@ -379,7 +379,7 @@ class QoSSpecsController(wsgi.Controller):
                                          'qos_specs.delete',
                                          notifier_err)
             raise webob.exc.HTTPBadRequest(explanation=msg)
-        LOG.debug("Disassociate qos_spec: %(id)s from type: %(type_id)s" %
+        LOG.debug("Disassociate qos_spec: %(id)s from type: %(type_id)s",
                   {'id': id, 'type_id': type_id})
 
         try:
@@ -415,7 +415,7 @@ class QoSSpecsController(wsgi.Controller):
         context = req.environ['cinder.context']
         authorize(context)
 
-        LOG.debug("Disassociate qos_spec: %s from all." % id)
+        LOG.debug("Disassociate qos_spec: %s from all.", id)
 
         try:
             qos_specs.disassociate_all(context, id)
index 74bc03d21bf6f4d1ff2878f264ba0647d12dd1ed..4ce889f3f29af582cc40b798521c6bc4b5133210 100644 (file)
@@ -18,7 +18,7 @@ import webob
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
 from cinder import db
-from cinder.i18n import _
+from cinder.i18n import _, _LI
 
 LOG = logging.getLogger(__name__)
 
@@ -45,7 +45,7 @@ class SnapshotActionsController(wsgi.Controller):
         context = req.environ['cinder.context']
         authorize(context, 'update_snapshot_status')
 
-        LOG.debug("body: %s" % body)
+        LOG.debug("body: %s", body)
         try:
             status = body['os-update_snapshot_status']['status']
         except KeyError:
@@ -87,7 +87,7 @@ class SnapshotActionsController(wsgi.Controller):
 
             update_dict.update({'progress': progress})
 
-        LOG.info("Updating snapshot %(id)s with info %(dict)s" %
+        LOG.info(_LI("Updating snapshot %(id)s with info %(dict)s"),
                  {'id': id, 'dict': update_dict})
 
         db.snapshot_update(context, id, update_dict)
index c2f2e53cbc26d950cb2f3980504987ecae604f37..454bc5092d9c3269d4d49c3b5b63db51daa43df7 100644 (file)
@@ -16,6 +16,8 @@
 
 import logging
 
+import six
+
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
 from cinder.api import xmlutil
@@ -39,7 +41,7 @@ class VolumeImageMetadataController(wsgi.Controller):
             all_metadata = self.volume_api.get_volumes_image_metadata(context)
         except Exception as e:
             LOG.debug('Problem retrieving volume image metadata. '
-                      'It will be skipped. Error: %s', e)
+                      'It will be skipped. Error: %s', six.text_type(e))
             all_metadata = {}
         return all_metadata
 
index 086efa1d39f515a8cf350cb1defacd8cfc2819ef..338d46f4b2995aadfc91f5c9f8012b5b687c2c7e 100644 (file)
@@ -69,7 +69,7 @@ class VolumeReplicationController(wsgi.Controller):
             vol = self.volume_api.get(context, id)
             LOG.info(_LI('Attempting to promote secondary replica to primary'
                          ' for volume %s.'),
-                     str(id),
+                     id,
                      context=context)
             self.replication_api.promote(context, vol)
         except exception.NotFound:
@@ -87,7 +87,7 @@ class VolumeReplicationController(wsgi.Controller):
             vol = self.volume_api.get(context, id)
             LOG.info(_LI('Attempting to sync secondary replica with primary'
                          ' for volume %s.'),
-                     str(id),
+                     id,
                      context=context)
             self.replication_api.reenable(context, vol)
         except exception.NotFound:
index 9b75f03d1fd4ca3e57fbe8b08b50722e2ed5a0b2..b0a5f5870677a9afd46155d3521a45e2deb9b6e1 100644 (file)
@@ -26,7 +26,7 @@ import cinder.api.openstack
 from cinder.api.openstack import wsgi
 from cinder.api import xmlutil
 from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _LE, _LI, _LW
 import cinder.policy
 
 
@@ -240,8 +240,8 @@ class ExtensionManager(object):
                       ' '.join(extension.__doc__.strip().split()))
             LOG.debug('Ext namespace: %s', extension.namespace)
             LOG.debug('Ext updated: %s', extension.updated)
-        except AttributeError as ex:
-            LOG.exception(_LE("Exception loading extension: %s"), unicode(ex))
+        except AttributeError:
+            LOG.exception(_LE("Exception loading extension."))
             return False
 
         return True
@@ -273,9 +273,9 @@ class ExtensionManager(object):
             try:
                 self.load_extension(ext_factory)
             except Exception as exc:
-                LOG.warn(_LW('Failed to load extension %(ext_factory)s: '
-                             '%(exc)s'),
-                         {'ext_factory': ext_factory, 'exc': exc})
+                LOG.warning(_LW('Failed to load extension %(ext_factory)s: '
+                                '%(exc)s'),
+                            {'ext_factory': ext_factory, 'exc': exc})
 
 
 class ControllerExtension(object):
@@ -342,9 +342,9 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
             try:
                 ext_mgr.load_extension(classpath)
             except Exception as exc:
-                logger.warn(_('Failed to load extension %(classpath)s: '
-                              '%(exc)s'),
-                            {'classpath': classpath, 'exc': exc})
+                logger.warning(_LW('Failed to load extension %(classpath)s: '
+                                   '%(exc)s'),
+                               {'classpath': classpath, 'exc': exc})
 
         # Now, let's consider any subdirectories we may have...
         subdirs = []
@@ -367,9 +367,9 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
                 try:
                     ext(ext_mgr)
                 except Exception as exc:
-                    logger.warn(_('Failed to load extension %(ext_name)s: '
-                                  '%(exc)s'),
-                                {'ext_name': ext_name, 'exc': exc})
+                    logger.warning(_LW('Failed to load extension '
+                                       '%(ext_name)s: %(exc)s'),
+                                   {'ext_name': ext_name, 'exc': exc})
 
         # Update the list of directories we'll explore...
         dirnames[:] = subdirs
index cb13f75b78e1bbc6bfd4faf84a514903d799d72d..480f8a89f63c3d5fa949e93b0b57eded2f38d365 100644 (file)
@@ -43,7 +43,7 @@ class FaultWrapper(base_wsgi.Middleware):
 
     def _error(self, inner, req):
         if not isinstance(inner, exception.QuotaError):
-            LOG.exception(_LE("Caught error: %s"), unicode(inner))
+            LOG.error(_LE("Caught error: %s"), inner)
         safe = getattr(inner, 'safe', False)
         headers = getattr(inner, 'headers', None)
         status = getattr(inner, 'code', 500)
@@ -51,7 +51,7 @@ class FaultWrapper(base_wsgi.Middleware):
             status = 500
 
         msg_dict = dict(url=req.url, status=status)
-        LOG.info(_LI("%(url)s returned with HTTP %(status)d") % msg_dict)
+        LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
         outer = self.status_to_type(status)
         if headers:
             outer.headers = headers
index 45979c6129bdf5266a0d22ad9faa25ba81033239..266931e3d334bbc6629f7ac4ea28b07ac9964292 100644 (file)
@@ -752,14 +752,14 @@ class ResourceExceptionHandler(object):
         elif isinstance(ex_value, TypeError):
             exc_info = (ex_type, ex_value, ex_traceback)
             LOG.error(_LE(
-                'Exception handling resource: %s') %
+                'Exception handling resource: %s'),
                 ex_value, exc_info=exc_info)
             raise Fault(webob.exc.HTTPBadRequest())
         elif isinstance(ex_value, Fault):
-            LOG.info(_LI("Fault thrown: %s"), unicode(ex_value))
+            LOG.info(_LI("Fault thrown: %s"), ex_value)
             raise ex_value
         elif isinstance(ex_value, webob.exc.HTTPException):
-            LOG.info(_LI("HTTP exception thrown: %s"), unicode(ex_value))
+            LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
             raise Fault(ex_value)
 
         # We didn't handle the exception
@@ -959,8 +959,9 @@ class Resource(wsgi.Application):
     def __call__(self, request):
         """WSGI method that controls (de)serialization and method dispatch."""
 
-        LOG.info("%(method)s %(url)s" % {"method": request.method,
-                                         "url": request.url})
+        LOG.info(_LI("%(method)s %(url)s"),
+                 {"method": request.method,
+                  "url": request.url})
 
         # Identify the action, its arguments, and the requested
         # content type
@@ -1058,12 +1059,12 @@ class Resource(wsgi.Application):
 
         try:
             msg_dict = dict(url=request.url, status=response.status_int)
-            msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
+            msg = _LI("%(url)s returned with HTTP %(status)d")
         except AttributeError as e:
             msg_dict = dict(url=request.url, e=e)
-            msg = _("%(url)s returned a fault: %(e)s") % msg_dict
+            msg = _LI("%(url)s returned a fault: %(e)s")
 
-        LOG.info(msg)
+        LOG.info(msg, msg_dict)
 
         return response
 
@@ -1082,7 +1083,7 @@ class Resource(wsgi.Application):
                                                             'create',
                                                             'delete',
                                                             'update']):
-                    LOG.exception(six.text_type(e))
+                    LOG.exception(_LE('Get method error.'))
                 else:
                     ctxt.reraise = False
         else:
@@ -1092,7 +1093,7 @@ class Resource(wsgi.Application):
             # OK, it's an action; figure out which action...
             mtype = _MEDIA_TYPE_MAP.get(content_type)
             action_name = self.action_peek[mtype](body)
-            LOG.debug("Action body: %s" % body)
+            LOG.debug("Action body: %s", body)
         else:
             action_name = action
 
index 95e448f37ba65adc13c5255740d82c84e9f76fe9..0998e0f0250648304ecbd4d08c7a4427b5289f65 100644 (file)
@@ -177,7 +177,7 @@ class SnapshotsController(wsgi.Controller):
             raise exc.HTTPNotFound()
 
         force = snapshot.get('force', False)
-        msg = _("Create snapshot from volume %s")
+        msg = _LI("Create snapshot from volume %s")
         LOG.info(msg, volume_id, context=context)
 
         if not utils.is_valid_boolstr(force):
index 4cb663fe4528b3948b515926045a7176451c448a..159a05c1960d10d07a2aeff1050cb1f9e6e6519b 100644 (file)
@@ -188,7 +188,7 @@ class SnapshotsController(wsgi.Controller):
             msg = _("Volume could not be found")
             raise exc.HTTPNotFound(explanation=msg)
         force = snapshot.get('force', False)
-        msg = _("Create snapshot from volume %s")
+        msg = _LI("Create snapshot from volume %s")
         LOG.info(msg, volume_id, context=context)
 
         # NOTE(thingee): v2 API allows name instead of display_name
index 6642d318e415bbdf4081c0631c1b052fa44aa46a..b375d56f71382ba63f288359db7f2d0cedc54f83 100644 (file)
@@ -113,7 +113,7 @@ def main():
         try:
             LOG.debug("Send exists notification for <volume_id: "
                       "%(volume_id)s> <project_id %(project_id)s> "
-                      "<%(extra_info)s>" %
+                      "<%(extra_info)s>",
                       {'volume_id': volume_ref.id,
                        'project_id': volume_ref.project_id,
                        'extra_info': extra_info})
@@ -123,7 +123,7 @@ def main():
                 'exists', extra_usage_info=extra_info)
         except Exception as e:
             LOG.error(_LE("Failed to send exists notification"
-                          " for volume %s.") %
+                          " for volume %s."),
                       volume_ref.id)
             print(traceback.format_exc(e))
 
@@ -137,7 +137,7 @@ def main():
                 }
                 LOG.debug("Send create notification for "
                           "<volume_id: %(volume_id)s> "
-                          "<project_id %(project_id)s> <%(extra_info)s>" %
+                          "<project_id %(project_id)s> <%(extra_info)s>",
                           {'volume_id': volume_ref.id,
                            'project_id': volume_ref.project_id,
                            'extra_info': local_extra_info})
@@ -151,7 +151,7 @@ def main():
                     'create.end', extra_usage_info=local_extra_info)
             except Exception as e:
                 LOG.error(_LE("Failed to send create notification for "
-                              "volume %s.") % volume_ref.id)
+                              "volume %s."), volume_ref.id)
                 print(traceback.format_exc(e))
 
         if (CONF.send_actions and volume_ref.deleted_at and
@@ -164,7 +164,7 @@ def main():
                 }
                 LOG.debug("Send delete notification for "
                           "<volume_id: %(volume_id)s> "
-                          "<project_id %(project_id)s> <%(extra_info)s>" %
+                          "<project_id %(project_id)s> <%(extra_info)s>",
                           {'volume_id': volume_ref.id,
                            'project_id': volume_ref.project_id,
                            'extra_info': local_extra_info})
@@ -178,7 +178,7 @@ def main():
                     'delete.end', extra_usage_info=local_extra_info)
             except Exception as e:
                 LOG.error(_LE("Failed to send delete notification for volume "
-                              "%s.") % volume_ref.id)
+                              "%s."), volume_ref.id)
                 print(traceback.format_exc(e))
 
     snapshots = db.snapshot_get_active_by_window(admin_context,
@@ -188,7 +188,7 @@ def main():
     for snapshot_ref in snapshots:
         try:
             LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
-                      "<project_id %(project_id)s> <%(extra_info)s>" %
+                      "<project_id %(project_id)s> <%(extra_info)s>",
                       {'snapshot_id': snapshot_ref.id,
                        'project_id': snapshot_ref.project_id,
                        'extra_info': extra_info})
@@ -198,8 +198,8 @@ def main():
                                                             extra_info)
         except Exception as e:
             LOG.error(_LE("Failed to send exists notification "
-                          "for snapshot %s.")
-                      snapshot_ref.id)
+                          "for snapshot %s."),
+                      snapshot_ref.id)
             print(traceback.format_exc(e))
 
         if (CONF.send_actions and
@@ -212,7 +212,7 @@ def main():
                 }
                 LOG.debug("Send create notification for "
                           "<snapshot_id: %(snapshot_id)s> "
-                          "<project_id %(project_id)s> <%(extra_info)s>" %
+                          "<project_id %(project_id)s> <%(extra_info)s>",
                           {'snapshot_id': snapshot_ref.id,
                            'project_id': snapshot_ref.project_id,
                            'extra_info': local_extra_info})
@@ -226,7 +226,7 @@ def main():
                     'create.end', extra_usage_info=local_extra_info)
             except Exception as e:
                 LOG.error(_LE("Failed to send create notification for snapshot"
-                              "%s.") % snapshot_ref.id)
+                              "%s."), snapshot_ref.id)
                 print(traceback.format_exc(e))
 
         if (CONF.send_actions and snapshot_ref.deleted_at and
@@ -239,7 +239,7 @@ def main():
                 }
                 LOG.debug("Send delete notification for "
                           "<snapshot_id: %(snapshot_id)s> "
-                          "<project_id %(project_id)s> <%(extra_info)s>" %
+                          "<project_id %(project_id)s> <%(extra_info)s>",
                           {'snapshot_id': snapshot_ref.id,
                            'project_id': snapshot_ref.project_id,
                            'extra_info': local_extra_info})
@@ -253,7 +253,7 @@ def main():
                     'delete.end', extra_usage_info=local_extra_info)
             except Exception as e:
                 LOG.error(_LE("Failed to send delete notification for snapshot"
-                              "%s.") % snapshot_ref.id)
+                              "%s."), snapshot_ref.id)
                 print(traceback.format_exc(e))
 
     print(_("Volume usage audit completed"))
index 65060bff484771e91e2c66fe099bdac772c23472..8f63dc87d5b554757f37efcc60c57e69d44a8185 100644 (file)
@@ -119,7 +119,7 @@ def novaclient(context, admin_endpoint=False, privileged_user=False,
                          endpoint_type=endpoint_type,
                          **region_filter)
 
-        LOG.debug('Creating a Nova client using "%s" user' %
+        LOG.debug('Creating a Nova client using "%s" user',
                   CONF.os_privileged_user_name)
     else:
         if nova_endpoint_template:
@@ -130,7 +130,7 @@ def novaclient(context, admin_endpoint=False, privileged_user=False,
                              endpoint_type=endpoint_type,
                              **region_filter)
 
-        LOG.debug('Nova client connection created using URL: %s' % url)
+        LOG.debug('Nova client connection created using URL: %s', url)
 
     c = nova_client.Client(context.user_id,
                            context.auth_token,
index 5e4b27320784d4d3f48f71e7c4eca7bac40e7ecc..c13868700802dc6884d27ae395cd38715f61c670 100644 (file)
@@ -27,7 +27,7 @@ from oslo_utils import timeutils
 
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
 import cinder.policy
 from cinder import quota
 from cinder.scheduler import rpcapi as scheduler_rpcapi
@@ -98,8 +98,9 @@ class API(base.Base):
 
         valid = self._valid_availability_zone(availability_zone)
         if not valid:
-            msg = _("Availability zone '%s' is invalid") % (availability_zone)
-            LOG.warn(msg)
+            msg = _LW(
+                "Availability zone '%s' is invalid") % (availability_zone)
+            LOG.warning(msg)
             raise exception.InvalidInput(reason=msg)
 
         return availability_zone
@@ -343,9 +344,8 @@ class API(base.Base):
         if not group['host']:
             self.update_quota(context, group['id'], -1, group['project_id'])
 
-            msg = ("No host for consistency group %s. Deleting from "
-                   "the database.") % group['id']
-            LOG.debug(msg)
+            LOG.debug("No host for consistency group %s. Deleting from "
+                      "the database.", group['id'])
             self.db.consistencygroup_destroy(context.elevated(), group['id'])
 
             return
@@ -600,7 +600,7 @@ class API(base.Base):
             raise exception.InvalidInput(reason=msg)
 
         if filters:
-            LOG.debug("Searching by: %s" % str(filters))
+            LOG.debug("Searching by: %s", filters)
 
         if (context.is_admin and 'all_tenants' in filters):
             # Need to remove all_tenants to pass the filtering below.
@@ -691,7 +691,7 @@ class API(base.Base):
                 context.elevated(), context.project_id)
 
         if search_opts:
-            LOG.debug("Searching by: %s" % search_opts)
+            LOG.debug("Searching by: %s", search_opts)
 
             results = []
             not_found = object()
index 8ac992c547d6a48ffb7d1a8df0d15988f5ac2e9f..84cfd236b16399edb2bcb36192fb73dcc750573f 100644 (file)
@@ -93,7 +93,8 @@ class CinderException(Exception):
                 # log the issue and the kwargs
                 LOG.exception(_LE('Exception in string format operation'))
                 for name, value in kwargs.iteritems():
-                    LOG.error("%s: %s" % (name, value))
+                    LOG.error(_LE("%(name)s: %(value)s"),
+                              {'name': name, 'value': value})
                 if CONF.fatal_exception_format_errors:
                     raise exc_info[0], exc_info[1], exc_info[2]
                 # at least get the core message out if something happened
index 3f356cc0ac90ce01101d9d0a1f615752e3de9e48..05c585d7d6e3c47f8eac8f2d2bfbdd168d4d5e75 100644 (file)
@@ -47,6 +47,13 @@ no_audit_log = re.compile(r"(.)*LOG\.audit(.)*")
 oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](concurrency|db"
                                     "|config|utils|serialization|log)")
 
+log_translation_LI = re.compile(
+    r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
+log_translation_LE = re.compile(
+    r"(.)*LOG\.(exception|error)\(\s*(_\(|'|\")")
+log_translation_LW = re.compile(
+    r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
+
 
 def no_vi_headers(physical_line, line_number, lines):
     """Check for vi editor configuration in source files.
@@ -130,6 +137,40 @@ def check_assert_called_once(logical_line, filename):
             yield (pos, msg)
 
 
+def validate_log_translations(logical_line, filename):
+    # TODO(smcginnis): The following is temporary as a series
+    # of patches are done to address these issues. It should be
+    # removed completely when bug 1433216 is closed.
+    ignore_dirs = [
+        "cinder/backup",
+        "cinder/brick",
+        "cinder/common",
+        "cinder/db",
+        "cinder/openstack",
+        "cinder/scheduler",
+        "cinder/volume",
+        "cinder/zonemanager"]
+    for directory in ignore_dirs:
+        if directory in filename:
+            return
+
+    # Translations are not required in the test directory.
+    # This will not catch all instances of violations, just direct
+    # misuse of the form LOG.info('Message').
+    if "cinder/tests" in filename:
+        return
+    msg = "N328: LOG.info messages require translations `_LI()`!"
+    if log_translation_LI.match(logical_line):
+        yield (0, msg)
+    msg = ("N329: LOG.exception and LOG.error messages require "
+           "translations `_LE()`!")
+    if log_translation_LE.match(logical_line):
+        yield (0, msg)
+    msg = "N330: LOG.warning messages require translations `_LW()`!"
+    if log_translation_LW.match(logical_line):
+        yield (0, msg)
+
+
 def check_oslo_namespace_imports(logical_line):
     if re.match(oslo_namespace_imports, logical_line):
         msg = ("N333: '%s' must be used instead of '%s'.") % (
@@ -167,3 +208,4 @@ def factory(register):
     register(check_oslo_namespace_imports)
     register(check_no_contextlib_nested)
     register(check_datetime_now)
+    register(validate_log_translations)
index f94ff4c9da4ab87dca145a1360200c604a894fba..5b9c85ed18b898c30abe47df56fb8d14ce661b00 100644 (file)
@@ -34,7 +34,7 @@ from oslo_utils import timeutils
 import six.moves.urllib.parse as urlparse
 
 from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _LE, _LW
 
 
 glance_opts = [
@@ -177,23 +177,19 @@ class GlanceClientWrapper(object):
             except retry_excs as e:
                 netloc = self.netloc
                 extra = "retrying"
-                error_msg = _("Error contacting glance server "
-                              "'%(netloc)s' for '%(method)s', "
-                              "%(extra)s.") % {'netloc': netloc,
-                                               'method': method,
-                                               'extra': extra,
-                                               }
+                error_msg = _LE("Error contacting glance server "
+                                "'%(netloc)s' for '%(method)s', "
+                                "%(extra)s.")
                 if attempt == num_attempts:
                     extra = 'done trying'
-                    error_msg = _("Error contacting glance server "
-                                  "'%(netloc)s' for '%(method)s', "
-                                  "%(extra)s.") % {'netloc': netloc,
-                                                   'method': method,
-                                                   'extra': extra,
-                                                   }
-                    LOG.exception(error_msg)
+                    LOG.exception(error_msg, {'netloc': netloc,
+                                              'method': method,
+                                              'extra': extra})
                     raise exception.GlanceConnectionFailed(reason=e)
-                LOG.exception(error_msg)
+
+                LOG.exception(error_msg, {'netloc': netloc,
+                                          'method': method,
+                                          'extra': extra})
                 time.sleep(1)
 
 
index 601d66f53473ca23b1d1dbb18fb2590eb440fd40..6224da6c89b355bfcefc1483fdba9c291e78cae6 100644 (file)
@@ -37,7 +37,7 @@ from oslo_utils import timeutils
 from oslo_utils import units
 
 from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _, _LI, _LW
 from cinder.openstack.common import fileutils
 from cinder.openstack.common import imageutils
 from cinder import utils
@@ -129,13 +129,13 @@ def _convert_image(prefix, source, dest, out_format, run_as_root=True):
     mbps = (fsz_mb / duration)
     msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
            "duration %(duration).2f sec, destination %(dest)s")
-    LOG.debug(msg % {"src": source,
-                     "sz": fsz_mb,
-                     "duration": duration,
-                     "dest": dest})
+    LOG.debug(msg, {"src": source,
+                    "sz": fsz_mb,
+                    "duration": duration,
+                    "dest": dest})
 
-    msg = _("Converted %(sz).2f MB image at %(mbps).2f MB/s")
-    LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
+    msg = _LI("Converted %(sz).2f MB image at %(mbps).2f MB/s")
+    LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
 
 
 def convert_image(source, dest, out_format, run_as_root=True, throttle=None):
@@ -172,11 +172,11 @@ def fetch(context, image_service, image_id, path, _user_id, _project_id):
     mbps = (fsz_mb / duration)
     msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
            "duration %(duration).2f sec")
-    LOG.debug(msg % {"dest": image_file.name,
-                     "sz": fsz_mb,
-                     "duration": duration})
-    msg = _("Image download %(sz).2f MB at %(mbps).2f MB/s")
-    LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
+    LOG.debug(msg, {"dest": image_file.name,
+                    "sz": fsz_mb,
+                    "duration": duration})
+    msg = _LI("Image download %(sz).2f MB at %(mbps).2f MB/s")
+    LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
 
 
 def fetch_verify_image(context, image_service, image_id, dest,
@@ -274,8 +274,8 @@ def fetch_to_volume_format(context, image_service,
             # result we only need to copy the image to the destination and then
             # return.
             LOG.debug('Copying image from %(tmp)s to volume %(dest)s - '
-                      'size: %(size)s' % {'tmp': tmp, 'dest': dest,
-                                          'size': image_meta['size']})
+                      'size: %(size)s', {'tmp': tmp, 'dest': dest,
+                                         'size': image_meta['size']})
             image_size_m = math.ceil(image_meta['size'] / units.Mi)
             volume_utils.copy_volume(tmp, dest, image_size_m, blocksize)
             return
@@ -312,8 +312,8 @@ def fetch_to_volume_format(context, image_service,
         # check via 'qemu-img info' that what we copied was in fact a raw
         # image and not a different format with a backing file, which may be
         # malicious.
-        LOG.debug("%s was %s, converting to %s " % (image_id, fmt,
-                                                    volume_format))
+        LOG.debug("%s was %s, converting to %s ", image_id, fmt,
+                  volume_format)
         convert_image(tmp, dest, volume_format,
                       run_as_root=run_as_root)
 
@@ -331,8 +331,8 @@ def upload_volume(context, image_service, image_meta, volume_path,
                   volume_format='raw', run_as_root=True):
     image_id = image_meta['id']
     if (image_meta['disk_format'] == volume_format):
-        LOG.debug("%s was %s, no need to convert to %s" %
-                  (image_id, volume_format, image_meta['disk_format']))
+        LOG.debug("%s was %s, no need to convert to %s",
+                  image_id, volume_format, image_meta['disk_format'])
         if os.name == 'nt' or os.access(volume_path, os.R_OK):
             with fileutils.file_open(volume_path, 'rb') as image_file:
                 image_service.update(context, image_id, {}, image_file)
@@ -343,8 +343,8 @@ def upload_volume(context, image_service, image_meta, volume_path,
         return
 
     with temporary_file() as tmp:
-        LOG.debug("%s was %s, converting to %s" %
-                  (image_id, volume_format, image_meta['disk_format']))
+        LOG.debug("%s was %s, converting to %s",
+                  image_id, volume_format, image_meta['disk_format'])
         convert_image(volume_path, tmp, image_meta['disk_format'],
                       run_as_root=run_as_root)
 
index b984fb90e24af2c1ee619b1d48d24a7700efc59b..53e1ab2306124982dad0eb0791bfaed470a863fa 100644 (file)
@@ -71,9 +71,9 @@ class BarbicanKeyManager(key_mgr.KeyManager):
                 self._barbican_client = barbican_client.Client(
                     session=sess,
                     endpoint=self._barbican_endpoint)
-            except Exception as e:
+            except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_LE("Error creating Barbican client: %s"), (e))
+                    LOG.exception(_LE("Error creating Barbican client."))
 
         return self._barbican_client
 
@@ -108,9 +108,9 @@ class BarbicanKeyManager(key_mgr.KeyManager):
             order = barbican_client.orders.get(order_ref)
             secret_uuid = order.secret_ref.rpartition('/')[2]
             return secret_uuid
-        except Exception as e:
+        except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Error creating key: %s"), (e))
+                LOG.exception(_LE("Error creating key."))
 
     def store_key(self, ctxt, key, expiration=None, name='Cinder Volume Key',
                   payload_content_type='application/octet-stream',
@@ -163,9 +163,9 @@ class BarbicanKeyManager(key_mgr.KeyManager):
             secret_ref = secret.store()
             secret_uuid = secret_ref.rpartition('/')[2]
             return secret_uuid
-        except Exception as e:
+        except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Error storing key: %s"), (e))
+                LOG.exception(_LE("Error storing key."))
 
     def copy_key(self, ctxt, key_id):
         """Copies (i.e., clones) a key stored by barbican.
@@ -191,9 +191,9 @@ class BarbicanKeyManager(key_mgr.KeyManager):
                                        secret.algorithm, secret.bit_length,
                                        secret.mode, True)
             return copy_uuid
-        except Exception as e:
+        except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Error copying key: %s"), (e))
+                LOG.exception(_LE("Error copying key."))
 
     def _create_secret_ref(self, key_id, barbican_client):
         """Creates the URL required for accessing a secret.
@@ -228,9 +228,9 @@ class BarbicanKeyManager(key_mgr.KeyManager):
             else:
                 secret_data = generated_data
             return secret_data
-        except Exception as e:
+        except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Error getting secret data: %s"), (e))
+                LOG.exception(_LE("Error getting secret data."))
 
     def _get_secret(self, ctxt, secret_ref):
         """Creates the URL required for accessing a secret's metadata.
@@ -247,9 +247,9 @@ class BarbicanKeyManager(key_mgr.KeyManager):
 
         try:
             return barbican_client.secrets.get(secret_ref)
-        except Exception as e:
+        except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Error getting secret metadata: %s"), (e))
+                LOG.exception(_LE("Error getting secret metadata."))
 
     def get_key(self, ctxt, key_id,
                 payload_content_type='application/octet-stream'):
@@ -276,9 +276,9 @@ class BarbicanKeyManager(key_mgr.KeyManager):
                 key_data = secret_data
             key = keymgr_key.SymmetricKey(secret.algorithm, key_data)
             return key
-        except Exception as e:
+        except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Error getting key: %s"), (e))
+                LOG.exception(_LE("Error getting key."))
 
     def delete_key(self, ctxt, key_id):
         """Deletes the specified key.
@@ -293,6 +293,6 @@ class BarbicanKeyManager(key_mgr.KeyManager):
         try:
             secret_ref = self._create_secret_ref(key_id, barbican_client)
             barbican_client.secrets.delete(secret_ref)
-        except Exception as e:
+        except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Error deleting key: %s"), (e))
+                LOG.exception(_LE("Error deleting key."))
index 701581483c28d8707e6ca9b4b77f27106fb6ea61..6ac307d4b2f50d7738a93088453bf85d85bf0a3b 100644 (file)
@@ -75,8 +75,9 @@ class ConfKeyManager(key_mgr.KeyManager):
 
     def _generate_hex_key(self, **kwargs):
         if CONF.keymgr.fixed_key is None:
-            LOG.warn(_LW('config option keymgr.fixed_key has not been defined:'
-                         ' some operations may fail unexpectedly'))
+            LOG.warning(
+                _LW('config option keymgr.fixed_key has not been defined:'
+                    ' some operations may fail unexpectedly'))
             raise ValueError(_('keymgr.fixed_key not defined'))
         return CONF.keymgr.fixed_key
 
@@ -131,4 +132,4 @@ class ConfKeyManager(key_mgr.KeyManager):
             raise exception.KeyManagerError(
                 reason="cannot delete non-existent key")
 
-        LOG.warn(_LW("Not deleting key %s"), key_id)
+        LOG.warning(_LW("Not deleting key %s"), key_id)
index 7b65a0e748da443c207b21cfa1d843ac9a3c916d..eefedbed4b60ff25be2444bb49b1a83ee21d12af 100644 (file)
@@ -760,7 +760,7 @@ class QuotaEngine(object):
                                             expire=expire,
                                             project_id=project_id)
 
-        LOG.debug("Created reservations %s" % reservations)
+        LOG.debug("Created reservations %s", reservations)
 
         return reservations
 
@@ -783,7 +783,7 @@ class QuotaEngine(object):
             # mechanisms will resolve the issue.  The exception is
             # logged, however, because this is less than optimal.
             LOG.exception(_LE("Failed to commit "
-                              "reservations %s") % reservations)
+                              "reservations %s"), reservations)
 
     def rollback(self, context, reservations, project_id=None):
         """Roll back reservations.
@@ -804,7 +804,7 @@ class QuotaEngine(object):
             # mechanisms will resolve the issue.  The exception is
             # logged, however, because this is less than optimal.
             LOG.exception(_LE("Failed to roll back reservations "
-                              "%s") % reservations)
+                              "%s"), reservations)
 
     def destroy_all_by_project(self, context, project_id):
         """Destroy all quotas, usages, and reservations associated with a
index 32c06c9852a18d20c938ea266300ec3054c17829..dd7fe0c722f1b0d55b683fa70860af5ef29f2154 100644 (file)
@@ -16,7 +16,7 @@
 from oslo_log import log as logging
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LW
 from cinder import quota
 
 
@@ -45,22 +45,23 @@ def get_volume_type_reservation(ctxt, volume, type_id):
                 s_size = volume['size']
                 d_quota = quotas[over]
                 d_consumed = _consumed(over)
-                msg = _("Quota exceeded for %(s_pid)s, tried to create "
+                LOG.warning(
+                    _LW("Quota exceeded for %(s_pid)s, tried to create "
                         "%(s_size)sG volume - (%(d_consumed)dG of "
-                        "%(d_quota)dG already consumed)")
-                LOG.warn(msg % {'s_pid': ctxt.project_id,
-                                's_size': s_size,
-                                'd_consumed': d_consumed,
-                                'd_quota': d_quota})
+                        "%(d_quota)dG already consumed)"),
+                    {'s_pid': ctxt.project_id,
+                     's_size': s_size,
+                     'd_consumed': d_consumed,
+                     'd_quota': d_quota})
                 raise exception.VolumeSizeExceedsAvailableQuota(
                     requested=s_size, quota=d_quota, consumed=d_consumed)
             elif 'volumes' in over:
-                msg = _("Quota exceeded for %(s_pid)s, tried to create "
+                LOG.warning(
+                    _LW("Quota exceeded for %(s_pid)s, tried to create "
                         "volume (%(d_consumed)d volumes "
-                        "already consumed)")
-
-                LOG.warn(msg % {'s_pid': ctxt.project_id,
-                                'd_consumed': _consumed(over)})
+                        "already consumed)"),
+                    {'s_pid': ctxt.project_id,
+                     'd_consumed': _consumed(over)})
                 raise exception.VolumeLimitExceeded(
                     allowed=quotas[over])
     return reservations
index e4630b4d0fdd5930114f9318c92d24117a66f022..4312702b9f55f1de3c38896a865244d1aab367a5 100644 (file)
@@ -35,7 +35,7 @@ import osprofiler.web
 from cinder import context
 from cinder import db
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.objects import base as objects_base
 from cinder.openstack.common import loopingcall
 from cinder.openstack.common import service
@@ -87,15 +87,16 @@ def setup_profiler(binary, host):
             "Messaging", messaging, context.get_admin_context().to_dict(),
             rpc.TRANSPORT, "cinder", binary, host)
         osprofiler.notifier.set(_notifier)
-        LOG.warning("OSProfiler is enabled.\nIt means that person who knows "
-                    "any of hmac_keys that are specified in "
-                    "/etc/cinder/api-paste.ini can trace his requests. \n"
-                    "In real life only operator can read this file so there "
-                    "is no security issue. Note that even if person can "
-                    "trigger profiler, only admin user can retrieve trace "
-                    "information.\n"
-                    "To disable OSprofiler set in cinder.conf:\n"
-                    "[profiler]\nenabled=false")
+        LOG.warning(
+            _LW("OSProfiler is enabled.\nIt means that person who knows "
+                "any of hmac_keys that are specified in "
+                "/etc/cinder/api-paste.ini can trace his requests. \n"
+                "In real life only operator can read this file so there "
+                "is no security issue. Note that even if person can "
+                "trigger profiler, only admin user can retrieve trace "
+                "information.\n"
+                "To disable OSprofiler set in cinder.conf:\n"
+                "[profiler]\nenabled=false"))
     else:
         osprofiler.web.disable()
 
@@ -137,7 +138,7 @@ class Service(service.Service):
 
     def start(self):
         version_string = version.version_string()
-        LOG.info(_('Starting %(topic)s node (version %(version_string)s)'),
+        LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
                  {'topic': self.topic, 'version_string': version_string})
         self.model_disconnected = False
         self.manager.init_host()
@@ -150,7 +151,7 @@ class Service(service.Service):
         except exception.NotFound:
             self._create_service_ref(ctxt)
 
-        LOG.debug("Creating RPC server for service %s" % self.topic)
+        LOG.debug("Creating RPC server for service %s", self.topic)
 
         target = messaging.Target(topic=self.topic, server=self.host)
         endpoints = [self.manager]
@@ -186,14 +187,15 @@ class Service(service.Service):
         if self.report_interval:
             if CONF.service_down_time <= self.report_interval:
                 new_down_time = int(self.report_interval * 2.5)
-                LOG.warn(_("Report interval must be less than service down "
-                           "time. Current config service_down_time: "
-                           "%(service_down_time)s, report_interval for this: "
-                           "service is: %(report_interval)s. Setting global "
-                           "service_down_time to: %(new_down_time)s") %
-                         {'service_down_time': CONF.service_down_time,
-                          'report_interval': self.report_interval,
-                          'new_down_time': new_down_time})
+                LOG.warning(
+                    _LW("Report interval must be less than service down "
+                        "time. Current config service_down_time: "
+                        "%(service_down_time)s, report_interval for this: "
+                        "service is: %(report_interval)s. Setting global "
+                        "service_down_time to: %(new_down_time)s"),
+                    {'service_down_time': CONF.service_down_time,
+                     'report_interval': self.report_interval,
+                     'new_down_time': new_down_time})
                 CONF.set_override('service_down_time', new_down_time)
 
     def _create_service_ref(self, context):
@@ -254,7 +256,7 @@ class Service(service.Service):
         try:
             db.service_destroy(context.get_admin_context(), self.service_id)
         except exception.NotFound:
-            LOG.warn(_('Service killed that has no database entry'))
+            LOG.warning(_LW('Service killed that has no database entry'))
 
     def stop(self):
         # Try to shut the connection down, but if we get any sort of
@@ -293,7 +295,7 @@ class Service(service.Service):
                 service_ref = db.service_get(ctxt, self.service_id)
             except exception.NotFound:
                 LOG.debug('The service database object disappeared, '
-                          'Recreating it.')
+                          'recreating it.')
                 self._create_service_ref(ctxt)
                 service_ref = db.service_get(ctxt, self.service_id)
 
@@ -307,12 +309,12 @@ class Service(service.Service):
             # TODO(termie): make this pattern be more elegant.
             if getattr(self, 'model_disconnected', False):
                 self.model_disconnected = False
-                LOG.error(_('Recovered model server connection!'))
+                LOG.error(_LE('Recovered model server connection!'))
 
         except db_exc.DBConnectionError:
             if not getattr(self, 'model_disconnected', False):
                 self.model_disconnected = True
-                LOG.exception(_('model server went away'))
+                LOG.exception(_LE('model server went away'))
 
 
 class WSGIService(object):
@@ -435,9 +437,9 @@ def wait():
         if ("_password" in flag or "_key" in flag or
                 (flag == "sql_connection" and
                     ("mysql:" in flag_get or "postgresql:" in flag_get))):
-            LOG.debug('%s : FLAG SET ' % flag)
+            LOG.debug('%s : FLAG SET ', flag)
         else:
-            LOG.debug('%(flag)s : %(flag_get)s' %
+            LOG.debug('%(flag)s : %(flag_get)s',
                       {'flag': flag, 'flag_get': flag_get})
     try:
         _launcher.wait()
index f53ebabd61e5c7fd81b07c06ced4dcb19bb2e040..e6b73637418b94b5e5e55f5c3f3f08106f916c1f 100644 (file)
@@ -25,6 +25,7 @@ from eventlet import pools
 from oslo_config import cfg
 from oslo_log import log as logging
 import paramiko
+import six
 
 from cinder import exception
 from cinder.i18n import _, _LI
@@ -80,12 +81,12 @@ class SSHPool(pools.Pool):
         if 'hosts_key_file' in kwargs.keys():
             self.hosts_key_file = kwargs.pop('hosts_key_file')
             LOG.info(_LI("Secondary ssh hosts key file %(kwargs)s will be "
-                         "loaded along with %(conf)s from /etc/cinder.conf.") %
+                         "loaded along with %(conf)s from /etc/cinder.conf."),
                      {'kwargs': self.hosts_key_file,
                       'conf': CONF.ssh_hosts_key_file})
 
         LOG.debug("Setting strict_ssh_host_key_policy to '%(policy)s' "
-                  "using ssh_hosts_key_file '%(key_file)s'." %
+                  "using ssh_hosts_key_file '%(key_file)s'.",
                   {'policy': CONF.strict_ssh_host_key_policy,
                    'key_file': CONF.ssh_hosts_key_file})
 
@@ -148,7 +149,7 @@ class SSHPool(pools.Pool):
                 transport.set_keepalive(self.conn_timeout)
             return ssh
         except Exception as e:
-            msg = _("Error connecting via ssh: %s") % e
+            msg = _("Error connecting via ssh: %s") % six.text_type(e)
             LOG.error(msg)
             raise paramiko.SSHException(msg)
 
index 91eb70e02aa8c8142390835c7555e3b1c37c61be..c159466cf2ec3cd75c9c8af36f818338d19b8e08 100644 (file)
@@ -178,4 +178,24 @@ class HackingTestCase(test.TestCase):
 
     def test_check_datetime_now_noqa(self):
         self.assertEqual(0, len(list(checks.check_datetime_now(
-                                     "datetime.now()  # noqa", True))))
\ No newline at end of file
+                                     "datetime.now()  # noqa", True))))
+
+    def test_validate_log_translations(self):
+        self.assertEqual(1, len(list(checks.validate_log_translations(
+            "LOG.info('foo')", "foo.py"))))
+        self.assertEqual(1, len(list(checks.validate_log_translations(
+            "LOG.warning('foo')", "foo.py"))))
+        self.assertEqual(1, len(list(checks.validate_log_translations(
+            "LOG.error('foo')", "foo.py"))))
+        self.assertEqual(1, len(list(checks.validate_log_translations(
+            "LOG.exception('foo')", "foo.py"))))
+        self.assertEqual(0, len(list(checks.validate_log_translations(
+            "LOG.info('foo')", "cinder/tests/foo.py"))))
+        self.assertEqual(0, len(list(checks.validate_log_translations(
+            "LOG.info(_LI('foo')", "foo.py"))))
+        self.assertEqual(0, len(list(checks.validate_log_translations(
+            "LOG.warning(_LW('foo')", "foo.py"))))
+        self.assertEqual(0, len(list(checks.validate_log_translations(
+            "LOG.error(_LE('foo')", "foo.py"))))
+        self.assertEqual(0, len(list(checks.validate_log_translations(
+            "LOG.exception(_LE('foo')", "foo.py"))))
\ No newline at end of file
index 08211757e70b60d1fc904afd9a8287d0542196aa..d173280130bb2f9c6a1c3df7008fe1daae170027 100644 (file)
@@ -28,7 +28,7 @@ from oslo_utils import excutils
 
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder import quota
 from cinder.volume import api as volume_api
 
@@ -65,8 +65,7 @@ class API(base.Base):
 
         volume_ref = self.db.volume_get(context, transfer.volume_id)
         if volume_ref['status'] != 'awaiting-transfer':
-            msg = _("Volume in unexpected state")
-            LOG.error(msg)
+            LOG.error(_LE("Volume in unexpected state"))
         self.db.transfer_destroy(context, transfer_id)
 
     def get_all(self, context, filters=None):
@@ -101,7 +100,7 @@ class API(base.Base):
     def create(self, context, volume_id, display_name):
         """Creates an entry in the transfers table."""
         volume_api.check_policy(context, 'create_transfer')
-        LOG.info("Generating transfer record for volume %s" % volume_id)
+        LOG.info(_LI("Generating transfer record for volume %s"), volume_id)
         volume_ref = self.db.volume_get(context, volume_id)
         if volume_ref['status'] != "available":
             raise exception.InvalidVolume(reason=_("status must be available"))
@@ -122,7 +121,7 @@ class API(base.Base):
             transfer = self.db.transfer_create(context, transfer_rec)
         except Exception:
             LOG.error(_LE("Failed to create transfer record "
-                          "for %s") % volume_id)
+                          "for %s"), volume_id)
             raise
         return {'id': transfer['id'],
                 'volume_id': transfer['volume_id'],
@@ -159,23 +158,23 @@ class API(base.Base):
                 return (usages[name]['reserved'] + usages[name]['in_use'])
 
             if 'gigabytes' in overs:
-                msg = _("Quota exceeded for %(s_pid)s, tried to create "
-                        "%(s_size)sG volume (%(d_consumed)dG of %(d_quota)dG "
-                        "already consumed)")
-                LOG.warn(msg % {'s_pid': context.project_id,
-                                's_size': vol_ref['size'],
-                                'd_consumed': _consumed('gigabytes'),
-                                'd_quota': quotas['gigabytes']})
+                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+                          "%(s_size)sG volume (%(d_consumed)dG of "
+                          "%(d_quota)dG already consumed)")
+                LOG.warning(msg, {'s_pid': context.project_id,
+                                  's_size': vol_ref['size'],
+                                  'd_consumed': _consumed('gigabytes'),
+                                  'd_quota': quotas['gigabytes']})
                 raise exception.VolumeSizeExceedsAvailableQuota(
                     requested=vol_ref['size'],
                     consumed=_consumed('gigabytes'),
                     quota=quotas['gigabytes'])
             elif 'volumes' in overs:
-                msg = _("Quota exceeded for %(s_pid)s, tried to create "
-                        "volume (%(d_consumed)d volumes "
-                        "already consumed)")
-                LOG.warn(msg % {'s_pid': context.project_id,
-                                'd_consumed': _consumed('volumes')})
+                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+                          "volume (%(d_consumed)d volumes "
+                          "already consumed)")
+                LOG.warning(msg, {'s_pid': context.project_id,
+                                  'd_consumed': _consumed('volumes')})
                 raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
         try:
             donor_id = vol_ref['project_id']
@@ -186,7 +185,7 @@ class API(base.Base):
         except Exception:
             donor_reservations = None
             LOG.exception(_LE("Failed to update quota donating volume"
-                              " transfer id %s") % transfer_id)
+                              " transfer id %s"), transfer_id)
 
         try:
             # Transfer ownership of the volume now, must use an elevated
@@ -202,7 +201,7 @@ class API(base.Base):
             QUOTAS.commit(context, reservations)
             if donor_reservations:
                 QUOTAS.commit(context, donor_reservations, project_id=donor_id)
-            LOG.info(_LI("Volume %s has been transferred.") % volume_id)
+            LOG.info(_LI("Volume %s has been transferred."), volume_id)
         except Exception:
             with excutils.save_and_reraise_exception():
                 QUOTAS.rollback(context, reservations)
index d01ca8f78ff79097d4050c2c7063cfa42df82118..c84f10ae99cb89977e1e7e0f48c87ac3de05dc65 100644 (file)
@@ -523,7 +523,8 @@ def tempdir(**kwargs):
         try:
             shutil.rmtree(tmpdir)
         except OSError as e:
-            LOG.debug('Could not remove tmpdir: %s', e)
+            LOG.debug('Could not remove tmpdir: %s',
+                      six.text_type(e))
 
 
 def walk_class_hierarchy(clazz, encountered=None):
@@ -593,7 +594,7 @@ def require_driver_initialized(driver):
     # we can't do anything if the driver didn't init
     if not driver.initialized:
         driver_name = driver.__class__.__name__
-        LOG.error(_LE("Volume driver %s not initialized") % driver_name)
+        LOG.error(_LE("Volume driver %s not initialized"), driver_name)
         raise exception.DriverNotInitialized()
 
 
@@ -736,8 +737,7 @@ def remove_invalid_filter_options(context, filters,
     unknown_options = [opt for opt in filters
                        if opt not in allowed_search_options]
     bad_options = ", ".join(unknown_options)
-    log_msg = "Removing options '%s' from query." % bad_options
-    LOG.debug(log_msg)
+    LOG.debug("Removing options '%s' from query.", bad_options)
     for opt in unknown_options:
         del filters[opt]
 
@@ -748,7 +748,7 @@ def is_blk_device(dev):
             return True
         return False
     except Exception:
-        LOG.debug('Path %s not found in is_blk_device check' % dev)
+        LOG.debug('Path %s not found in is_blk_device check', dev)
         return False
 
 
index cf9287199587fb1a266fa234e23eb5c81d7f1138..d26bca53eb009873b89083c0880bbcb4fcd42ec0 100644 (file)
@@ -187,7 +187,7 @@ class Server(object):
                                {'host': host, 'port': port})
 
         (self._host, self._port) = self._socket.getsockname()[0:2]
-        LOG.info(_LI("%(name)s listening on %(_host)s:%(_port)s") %
+        LOG.info(_LI("%(name)s listening on %(_host)s:%(_port)s"),
                  {'name': self.name, '_host': self._host, '_port': self._port})
 
     def start(self):
@@ -234,7 +234,7 @@ class Server(object):
                 with excutils.save_and_reraise_exception():
                     LOG.error(_LE("Failed to start %(name)s on %(_host)s:"
                                   "%(_port)s with SSL "
-                                  "support.") % self.__dict__)
+                                  "support."), self.__dict__)
 
         wsgi_kwargs = {
             'func': eventlet.wsgi.server,
@@ -544,6 +544,6 @@ class Loader(object):
         """
         try:
             return deploy.loadapp("config:%s" % self.config_path, name=name)
-        except LookupError as err:
-            LOG.error(err)
+        except LookupError:
+            LOG.exception(_LE("Error loading app %s"), name)
             raise exception.PasteAppNotFound(name=name, path=self.config_path)