review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Logging not using oslo.i18n guidelines
author     Sean McGinnis <sean_mcginnis@dell.com>
Fri, 20 Mar 2015 23:42:50 +0000 (18:42 -0500)
committer  Sean McGinnis <sean_mcginnis@dell.com>
Tue, 31 Mar 2015 03:06:54 +0000 (22:06 -0500)
This work is split into multiple patch sets for easier review.
This one addresses the backup and common cinder directories.

Quite a few instances have been found where the i18n
guidelines are not being followed. I believe this has
contributed to the confusion around how to do this
correctly: other developers see this code and assume
it is an example of correct usage.

This patch cleans up most of those violations in the
existing codebase to help avoid some of that confusion
in reviews.

Some issues addressed (sketched below):
* Correct log translation markers for different log levels
* Passing format values as arguments to the call, not preformatting
* Not forcing translation via six.text_type and others
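
For illustration, a minimal sketch of the before/after patterns
(the log calls and values are hypothetical, not lines from this
patch; _LI and _LW are the per-level markers from cinder.i18n):

    import logging

    from cinder.i18n import _LI, _LW

    LOG = logging.getLogger(__name__)
    project_id, size = 'fake-project', 10   # illustrative values

    # Wrong: warn() alias, no _LW marker, message preformatted with %:
    #   LOG.warn(_("Quota exceeded for %(pid)s (%(size)sG)") %
    #            {'pid': project_id, 'size': size})

    # Right: warning(), _LW marker, format dict passed as an argument so
    # interpolation only happens if the message is actually emitted.
    LOG.warning(_LW("Quota exceeded for %(pid)s (%(size)sG)"),
                {'pid': project_id, 'size': size})

    # Debug messages are not translated at all, but still use lazy formatting.
    LOG.debug("Checking backup size %(bs)s against volume size %(vs)s",
              {'bs': 5, 'vs': size})

    # Exceptions: pass the exception object, do not wrap it in six.text_type().
    try:
        raise ValueError("boom")
    except ValueError as exc:
        LOG.info(_LI("Operation failed: %s"), exc)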

Guidelines can be found here:
http://docs.openstack.org/developer/oslo.i18n/guidelines.html

Hacking checks will not be able to identify all violations of
the guidelines, but they can be useful for catching obvious ones
such as LOG.info("No markers!").
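
As a rough sketch of the kind of hacking check that can catch the
obvious cases (the regex, function name, and error code here are
illustrative, not the actual validate_log_translations code in
cinder/hacking/checks.py):

    import re

    # Flag LOG calls at translated levels whose message starts with a bare
    # string literal instead of a _LE/_LW/_LI marker.
    _untranslated_log = re.compile(
        r"""LOG\.(info|warning|error|critical|exception)\(\s*['"]""")

    def check_log_translations(logical_line, filename):
        if "cinder/tests" in filename:
            return
        if _untranslated_log.search(logical_line):
            yield (0, "N3xx: LOG message requires a translation marker")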

Change-Id: I2560d9dee564f5a9b214fd63ccd2600d78a30464
Partial-bug: 1433216

cinder/backup/api.py
cinder/backup/chunkeddriver.py
cinder/backup/driver.py
cinder/backup/drivers/ceph.py
cinder/backup/drivers/tsm.py
cinder/backup/manager.py
cinder/backup/rpcapi.py
cinder/common/sqlalchemyutils.py
cinder/hacking/checks.py

cinder/backup/api.py
index 6276353309def3199b5e2d03a2b2ff1da138cc13..fd41e97a02e1bdc419edd9e900da9c191b36387b 100644 (file)
@@ -153,10 +153,10 @@ class API(base.Base):
                     msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                               "%(s_size)sG backup (%(d_consumed)dG of "
                               "%(d_quota)dG already consumed)")
-                    LOG.warn(msg % {'s_pid': context.project_id,
-                                    's_size': volume['size'],
-                                    'd_consumed': _consumed(over),
-                                    'd_quota': quotas[over]})
+                    LOG.warning(msg, {'s_pid': context.project_id,
+                                      's_size': volume['size'],
+                                      'd_consumed': _consumed(over),
+                                      'd_quota': quotas[over]})
                     raise exception.VolumeBackupSizeExceedsAvailableQuota(
                         requested=volume['size'],
                         consumed=_consumed('backup_gigabytes'),
@@ -166,8 +166,8 @@ class API(base.Base):
                               "backups (%(d_consumed)d backups "
                               "already consumed)")
 
-                    LOG.warn(msg % {'s_pid': context.project_id,
-                                    'd_consumed': _consumed(over)})
+                    LOG.warning(msg, {'s_pid': context.project_id,
+                                      'd_consumed': _consumed(over)})
                     raise exception.BackupLimitExceeded(
                         allowed=quotas[over])
 
@@ -260,8 +260,8 @@ class API(base.Base):
             msg = _('Volume to be restored to must be available')
             raise exception.InvalidVolume(reason=msg)
 
-        LOG.debug('Checking backup size %s against volume size %s',
-                  size, volume['size'])
+        LOG.debug('Checking backup size %(bs)s against volume size %(vs)s',
+                  {'bs': size, 'vs': volume['size']})
         if size > volume['size']:
             msg = (_('volume size %(volume_size)d is too small to restore '
                      'backup of size %(size)d.') %
cinder/backup/chunkeddriver.py
index 1750fdd04744fd2f89f51ee655d522c9b1cf3fc3..db8113e0bdf50a59191a20e99667b47188c45c7f 100644 (file)
@@ -306,7 +306,7 @@ class ChunkedBackupDriver(driver.BackupDriver):
             comp_size_bytes = len(data)
             LOG.debug('compressed %(data_size_bytes)d bytes of data '
                       'to %(comp_size_bytes)d bytes using '
-                      '%(algorithm)s' %
+                      '%(algorithm)s',
                       {
                           'data_size_bytes': data_size_bytes,
                           'comp_size_bytes': comp_size_bytes,
@@ -323,7 +323,7 @@ class ChunkedBackupDriver(driver.BackupDriver):
             writer.write(data)
         md5 = hashlib.md5(data).hexdigest()
         obj[object_name]['md5'] = md5
-        LOG.debug('backup MD5 for %(object_name)s: %(md5)s' %
+        LOG.debug('backup MD5 for %(object_name)s: %(md5)s',
                   {'object_name': object_name, 'md5': md5})
         object_list.append(obj)
         object_id += 1
@@ -526,9 +526,8 @@ class ChunkedBackupDriver(driver.BackupDriver):
             # Whatever goes wrong, we want to log, cleanup, and re-raise.
             except Exception as err:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(
-                        _LE("Backup volume metadata failed: %s."),
-                        err)
+                    LOG.exception(_LE("Backup volume metadata failed: %s."),
+                                  err)
                     self.delete(backup)
 
         self._finalize_backup(backup, container, object_meta, object_sha256)
@@ -573,7 +572,7 @@ class ChunkedBackupDriver(driver.BackupDriver):
             decompressor = self._get_compressor(compression_algorithm)
             volume_file.seek(metadata_object.values()[0]['offset'])
             if decompressor is not None:
-                LOG.debug('decompressing data using %s algorithm' %
+                LOG.debug('decompressing data using %s algorithm',
                           compression_algorithm)
                 decompressed = decompressor.decompress(body)
                 volume_file.write(decompressed)
@@ -588,7 +587,7 @@ class ChunkedBackupDriver(driver.BackupDriver):
                 fileno = volume_file.fileno()
             except IOError:
                 LOG.info(_LI("volume_file does not support "
-                             "fileno() so skipping"
+                             "fileno() so skipping "
                              "fsync()"))
             else:
                 os.fsync(fileno)
@@ -605,8 +604,8 @@ class ChunkedBackupDriver(driver.BackupDriver):
         backup_id = backup['id']
         container = backup['container']
         object_prefix = backup['service_metadata']
-        LOG.debug('starting restore of backup %(object_prefix)s'
-                  ' container: %(container)s, to volume %(volume_id)s, '
+        LOG.debug('starting restore of backup %(object_prefix)s '
+                  'container: %(container)s, to volume %(volume_id)s, '
                   'backup: %(backup_id)s.',
                   {
                       'object_prefix': object_prefix,
@@ -662,16 +661,19 @@ class ChunkedBackupDriver(driver.BackupDriver):
     def delete(self, backup):
         """Delete the given backup."""
         container = backup['container']
-        LOG.debug('delete started, backup: %s, container: %s, prefix: %s.',
-                  backup['id'], container, backup['service_metadata'])
+        LOG.debug('delete started, backup: %(id)s, container: %(cont)s, '
+                  'prefix: %(pre)s.',
+                  {'id': backup['id'],
+                   'cont': container,
+                   'pre': backup['service_metadata']})
 
         if container is not None:
             object_names = []
             try:
                 object_names = self._generate_object_names(backup)
             except Exception:
-                LOG.warn(_LW('swift error while listing objects, continuing'
-                             ' with delete.'))
+                LOG.warning(_LW('swift error while listing objects, continuing'
+                                ' with delete.'))
 
             for object_name in object_names:
                 self.delete_object(container, object_name)
cinder/backup/driver.py
index 88c609de82dfb581e7fa5486b9229239041a8533..3317118f69d17663b765488844679520e1431ebe 100644 (file)
@@ -24,7 +24,7 @@ import six
 
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _, _LI, _LE, _LW
+from cinder.i18n import _, _LI, _LW
 from cinder import keymgr
 
 service_opts = [
@@ -64,7 +64,7 @@ class BackupMetadataAPI(base.Base):
         try:
             jsonutils.dumps(value)
         except TypeError:
-            LOG.info(_LI("Value with type=%s is not serializable") %
+            LOG.info(_LI("Value with type=%s is not serializable"),
                      type(value))
             return False
 
@@ -77,7 +77,7 @@ class BackupMetadataAPI(base.Base):
         save them in the provided container dictionary.
         """
         type_tag = self.TYPE_TAG_VOL_BASE_META
-        LOG.debug("Getting metadata type '%s'" % type_tag)
+        LOG.debug("Getting metadata type '%s'", type_tag)
         meta = self.db.volume_get(self.context, volume_id)
         if meta:
             container[type_tag] = {}
@@ -85,7 +85,7 @@ class BackupMetadataAPI(base.Base):
                 # Exclude fields that are "not JSON serializable"
                 if not self._is_serializable(value):
                     LOG.info(_LI("Unable to serialize field '%s' - excluding "
-                                 "from backup") % (key))
+                                 "from backup"), key)
                     continue
                 # Copy the encryption key uuid for backup
                 if key is 'encryption_key_id' and value is not None:
@@ -93,9 +93,9 @@ class BackupMetadataAPI(base.Base):
                     LOG.debug("Copying encryption key uuid for backup.")
                 container[type_tag][key] = value
 
-            LOG.debug("Completed fetching metadata type '%s'" % type_tag)
+            LOG.debug("Completed fetching metadata type '%s'", type_tag)
         else:
-            LOG.debug("No metadata type '%s' available" % type_tag)
+            LOG.debug("No metadata type '%s' available", type_tag)
 
     def _save_vol_meta(self, container, volume_id):
         """Save volume metadata to container.
@@ -104,7 +104,7 @@ class BackupMetadataAPI(base.Base):
         volume_id and save them in the provided container dictionary.
         """
         type_tag = self.TYPE_TAG_VOL_META
-        LOG.debug("Getting metadata type '%s'" % type_tag)
+        LOG.debug("Getting metadata type '%s'", type_tag)
         meta = self.db.volume_metadata_get(self.context, volume_id)
         if meta:
             container[type_tag] = {}
@@ -112,13 +112,13 @@ class BackupMetadataAPI(base.Base):
                 # Exclude fields that are "not JSON serializable"
                 if not self._is_serializable(meta[entry]):
                     LOG.info(_LI("Unable to serialize field '%s' - excluding "
-                                 "from backup") % (entry))
+                                 "from backup"), entry)
                     continue
                 container[type_tag][entry] = meta[entry]
 
-            LOG.debug("Completed fetching metadata type '%s'" % type_tag)
+            LOG.debug("Completed fetching metadata type '%s'", type_tag)
         else:
-            LOG.debug("No metadata type '%s' available" % type_tag)
+            LOG.debug("No metadata type '%s' available", type_tag)
 
     def _save_vol_glance_meta(self, container, volume_id):
         """Save volume Glance metadata to container.
@@ -127,7 +127,7 @@ class BackupMetadataAPI(base.Base):
         volume_id and save them in the provided container dictionary.
         """
         type_tag = self.TYPE_TAG_VOL_GLANCE_META
-        LOG.debug("Getting metadata type '%s'" % type_tag)
+        LOG.debug("Getting metadata type '%s'", type_tag)
         try:
             meta = self.db.volume_glance_metadata_get(self.context, volume_id)
             if meta:
@@ -136,13 +136,13 @@ class BackupMetadataAPI(base.Base):
                     # Exclude fields that are "not JSON serializable"
                     if not self._is_serializable(entry.value):
                         LOG.info(_LI("Unable to serialize field '%s' - "
-                                     "excluding from backup") % (entry))
+                                     "excluding from backup"), entry)
                         continue
                     container[type_tag][entry.key] = entry.value
 
-            LOG.debug("Completed fetching metadata type '%s'" % type_tag)
+            LOG.debug("Completed fetching metadata type '%s'", type_tag)
         except exception.GlanceMetadataNotFound:
-            LOG.debug("No metadata type '%s' available" % type_tag)
+            LOG.debug("No metadata type '%s' available", type_tag)
 
     @staticmethod
     def _filter(metadata, fields):
@@ -158,7 +158,7 @@ class BackupMetadataAPI(base.Base):
             if field in metadata:
                 subset[field] = metadata[field]
             else:
-                LOG.debug("Excluding field '%s'" % (field))
+                LOG.debug("Excluding field '%s'", field)
 
         return subset
 
@@ -197,8 +197,8 @@ class BackupMetadataAPI(base.Base):
                     LOG.debug("Volume type of source volume has been "
                               "deleted. Encrypted backup restore has "
                               "failed.")
-                    msg = _LE("The source volume type '%s' is not "
-                              "available.") % (src_volume_type_id)
+                    msg = _("The source volume type '%s' is not "
+                            "available.") % (src_volume_type_id)
                     raise exception.EncryptedBackupOperationFailed(msg)
                 # Update dest volume with src volume's volume_type_id.
                 LOG.debug("The volume type of the destination volume "
@@ -209,13 +209,13 @@ class BackupMetadataAPI(base.Base):
             else:
                 # Volume type id's do not match, and destination volume
                 # has a volume type. Throw exception.
-                LOG.warn(_LW("Destination volume type is different from "
-                             "source volume type for an encrypted volume. "
-                             "Encrypted backup restore has failed."))
-                msg = _LE("The source volume type '%(src)s' is different "
-                          "than the destination volume type '%(dest)s'.") % \
-                    {'src': src_volume_type_id,
-                     'dest': dest_vol['volume_type_id']}
+                LOG.warning(_LW("Destination volume type is different from "
+                                "source volume type for an encrypted volume. "
+                                "Encrypted backup restore has failed."))
+                msg = (_("The source volume type '%(src)s' is different "
+                         "than the destination volume type '%(dest)s'.") %
+                       {'src': src_volume_type_id,
+                        'dest': dest_vol['volume_type_id']})
                 raise exception.EncryptedBackupOperationFailed(msg)
 
     def _restore_vol_meta(self, metadata, volume_id, fields):
@@ -313,8 +313,7 @@ class BackupMetadataAPI(base.Base):
             if type in meta_container:
                 func(meta_container[type], volume_id, fields)
             else:
-                msg = "No metadata of type '%s' to restore" % (type)
-                LOG.debug(msg)
+                LOG.debug("No metadata of type '%s' to restore", type)
 
 
 @six.add_metaclass(abc.ABCMeta)
cinder/backup/drivers/ceph.py
index f0ee60d38c436c3ff824e96c8d1cce44d3a16f80..001c39a17ef497bc2f62d016bda8c1b6934b633b 100644 (file)
@@ -138,8 +138,7 @@ class VolumeMetadataBackup(object):
         """
         meta_obj = rados.Object(self._client.ioctx, self.name)
         if not self._exists(meta_obj):
-            msg = "Metadata backup object %s does not exist" % self.name
-            LOG.debug(msg)
+            LOG.debug("Metadata backup object %s does not exist", self.name)
             return None
 
         return meta_obj.read()
@@ -149,9 +148,8 @@ class VolumeMetadataBackup(object):
         try:
             meta_obj.remove()
         except rados.ObjectNotFound:
-            msg = ("Metadata backup object '%s' not found - ignoring" %
-                   self.name)
-            LOG.debug(msg)
+            LOG.debug("Metadata backup object '%s' not found - ignoring",
+                      self.name)
 
 
 class CephBackupDriver(driver.BackupDriver):
@@ -279,7 +277,7 @@ class CephBackupDriver(driver.BackupDriver):
         and pad with zeroes.
         """
         if length:
-            LOG.debug("Discarding %(length)s bytes from offset %(offset)s" %
+            LOG.debug("Discarding %(length)s bytes from offset %(offset)s",
                       {'length': length, 'offset': offset})
             if self._file_is_rbd(volume):
                 volume.rbd_image.discard(offset, length)
@@ -287,7 +285,7 @@ class CephBackupDriver(driver.BackupDriver):
                 zeroes = '\0' * length
                 chunks = int(length / self.chunk_size)
                 for chunk in xrange(0, chunks):
-                    LOG.debug("Writing zeroes chunk %d" % chunk)
+                    LOG.debug("Writing zeroes chunk %d", chunk)
                     volume.write(zeroes)
                     volume.flush()
                     # yield to any other pending backups
@@ -301,11 +299,11 @@ class CephBackupDriver(driver.BackupDriver):
 
     def _transfer_data(self, src, src_name, dest, dest_name, length):
         """Transfer data between files (Python IO objects)."""
-        LOG.debug("Transferring data between '%(src)s' and '%(dest)s'" %
+        LOG.debug("Transferring data between '%(src)s' and '%(dest)s'",
                   {'src': src_name, 'dest': dest_name})
 
         chunks = int(length / self.chunk_size)
-        LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred" %
+        LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred",
                   {'chunks': chunks, 'bytes': self.chunk_size})
 
         for chunk in xrange(0, chunks):
@@ -324,17 +322,18 @@ class CephBackupDriver(driver.BackupDriver):
             dest.flush()
             delta = (time.time() - before)
             rate = (self.chunk_size / delta) / 1024
-            LOG.debug((_("Transferred chunk %(chunk)s of %(chunks)s "
-                         "(%(rate)dK/s)") %
-                       {'chunk': chunk + 1, 'chunks': chunks,
-                        'rate': rate}))
+            LOG.debug("Transferred chunk %(chunk)s of %(chunks)s "
+                      "(%(rate)dK/s)",
+                      {'chunk': chunk + 1,
+                       'chunks': chunks,
+                       'rate': rate})
 
             # yield to any other pending backups
             eventlet.sleep(0)
 
         rem = int(length % self.chunk_size)
         if rem:
-            LOG.debug("Transferring remaining %s bytes" % rem)
+            LOG.debug("Transferring remaining %s bytes", rem)
             data = src.read(rem)
             if data == '':
                 if CONF.restore_discard_excess_bytes:
@@ -350,7 +349,7 @@ class CephBackupDriver(driver.BackupDriver):
 
         This will be the base image used for storing differential exports.
         """
-        LOG.debug("Creating base image '%s'" % name)
+        LOG.debug("Creating base image '%s'", name)
         old_format, features = self._get_rbd_support()
         self.rbd.RBD().create(ioctx=rados_client.ioctx,
                               name=name,
@@ -377,7 +376,7 @@ class CephBackupDriver(driver.BackupDriver):
             snap_name = self._get_backup_snap_name(base_rbd, base_name,
                                                    backup_id)
             if snap_name:
-                LOG.debug("Deleting backup snapshot='%s'" % snap_name)
+                LOG.debug("Deleting backup snapshot='%s'", snap_name)
                 base_rbd.remove_snap(snap_name)
             else:
                 LOG.debug("No backup snapshot to delete")
@@ -417,7 +416,7 @@ class CephBackupDriver(driver.BackupDriver):
 
             base_name = self._get_backup_base_name(volume_id, backup_id)
             LOG.debug("Trying diff format basename='%(basename)s' for "
-                      "backup base image of volume %(volume)s." %
+                      "backup base image of volume %(volume)s.",
                       {'basename': base_name, 'volume': volume_id})
 
         with rbd_driver.RADOSClient(self) as client:
@@ -433,25 +432,25 @@ class CephBackupDriver(driver.BackupDriver):
                 snap, rem = self._delete_backup_snapshot(client, base_name,
                                                          backup_id)
                 if rem:
-                    msg = (_LI("Backup base image of volume %(volume)s still "
-                               "has %(snapshots)s snapshots so skipping base "
-                               "image delete.") %
-                           {'snapshots': rem, 'volume': volume_id})
-                    LOG.info(msg)
+                    LOG.info(
+                        _LI("Backup base image of volume %(volume)s still "
+                            "has %(snapshots)s snapshots so skipping base "
+                            "image delete."),
+                        {'snapshots': rem, 'volume': volume_id})
                     return
 
                 LOG.info(_LI("Deleting backup base image='%(basename)s' of "
-                             "volume %(volume)s.") %
+                             "volume %(volume)s."),
                          {'basename': base_name, 'volume': volume_id})
                 # Delete base if no more snapshots
                 try:
                     self.rbd.RBD().remove(client.ioctx, base_name)
-                except self.rbd.ImageBusy as exc:
+                except self.rbd.ImageBusy:
                     # Allow a retry if the image is busy
                     if retries > 0:
                         LOG.info(_LI("Backup image of volume %(volume)s is "
                                      "busy, retrying %(retries)s more time(s) "
-                                     "in %(delay)ss.") %
+                                     "in %(delay)ss."),
                                  {'retries': retries,
                                   'delay': delay,
                                   'volume': volume_id})
@@ -459,13 +458,13 @@ class CephBackupDriver(driver.BackupDriver):
                     else:
                         LOG.error(_LE("Max retries reached deleting backup "
                                       "%(basename)s image of volume "
-                                      "%(volume)s.")
-                                  {'volume': volume_id,
-                                     'basename': base_name})
-                        raise exc
+                                      "%(volume)s."),
+                                  {'volume': volume_id,
+                                   'basename': base_name})
+                        raise
                 else:
                     LOG.debug("Base backup image='%(basename)s' of volume "
-                              "%(volume)s deleted." %
+                              "%(volume)s deleted.",
                               {'basename': base_name, 'volume': volume_id})
                     retries = 0
                 finally:
@@ -476,7 +475,7 @@ class CephBackupDriver(driver.BackupDriver):
             src_name = encodeutils.safe_encode(volume_id)
             if src_name in self.rbd.RBD().list(client.ioctx):
                 LOG.debug("Deleting source volume snapshot '%(snapshot)s' "
-                          "for backup %(basename)s." %
+                          "for backup %(basename)s.",
                           {'snapshot': snap, 'basename': base_name})
                 src_rbd = self.rbd.Image(client.ioctx, src_name)
                 try:
@@ -486,14 +485,14 @@ class CephBackupDriver(driver.BackupDriver):
 
     def _piped_execute(self, cmd1, cmd2):
         """Pipe output of cmd1 into cmd2."""
-        LOG.debug("Piping cmd1='%s' into..." % ' '.join(cmd1))
-        LOG.debug("cmd2='%s'" % ' '.join(cmd2))
+        LOG.debug("Piping cmd1='%s' into...", ' '.join(cmd1))
+        LOG.debug("cmd2='%s'", ' '.join(cmd2))
 
         try:
             p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
         except OSError as e:
-            LOG.error(_LE("Pipe1 failed - %s ") % unicode(e))
+            LOG.error(_LE("Pipe1 failed - %s "), e)
             raise
 
         # NOTE(dosaboy): ensure that the pipe is blocking. This is to work
@@ -507,7 +506,7 @@ class CephBackupDriver(driver.BackupDriver):
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
         except OSError as e:
-            LOG.error(_LE("Pipe2 failed - %s ") % unicode(e))
+            LOG.error(_LE("Pipe2 failed - %s "), e)
             raise
 
         p1.stdout.close()
@@ -524,7 +523,7 @@ class CephBackupDriver(driver.BackupDriver):
         changed since the snapshot was created.
         """
         LOG.debug("Performing differential transfer from '%(src)s' to "
-                  "'%(dest)s'" %
+                  "'%(dest)s'",
                   {'src': src_name, 'dest': dest_name})
 
         # NOTE(dosaboy): Need to be tolerant of clusters/clients that do
@@ -561,13 +560,11 @@ class CephBackupDriver(driver.BackupDriver):
         """Return tuple (exists, name)."""
         rbds = self.rbd.RBD().list(client.ioctx)
         if name not in rbds:
-            msg = "Image '%s' not found - trying diff format name" % name
-            LOG.debug(msg)
+            LOG.debug("Image '%s' not found - trying diff format name", name)
             if try_diff_format:
                 name = self._get_backup_base_name(volume_id, diff_format=True)
                 if name not in rbds:
-                    msg = "Diff format image '%s' not found" % name
-                    LOG.debug(msg)
+                    LOG.debug("Diff format image '%s' not found", name)
                     return False, name
             else:
                 return False, name
@@ -602,7 +599,7 @@ class CephBackupDriver(driver.BackupDriver):
         # Identify our --from-snap point (if one exists)
         from_snap = self._get_most_recent_snap(source_rbd_image)
         LOG.debug("Using --from-snap '%(snap)s' for incremental backup of "
-                  "volume %(volume)s." %
+                  "volume %(volume)s.",
                   {'snap': from_snap, 'volume': volume_id})
 
         base_name = self._get_backup_base_name(volume_id, diff_format=True)
@@ -619,7 +616,7 @@ class CephBackupDriver(driver.BackupDriver):
                 # ignore it since it is stale and waiting to be cleaned up.
                 if from_snap:
                     LOG.debug("Source snapshot '%(snapshot)s' of volume "
-                              "%(volume)s is stale so deleting." %
+                              "%(volume)s is stale so deleting.",
                               {'snapshot': from_snap, 'volume': volume_id})
                     source_rbd_image.remove_snap(from_snap)
                     from_snap = None
@@ -642,7 +639,7 @@ class CephBackupDriver(driver.BackupDriver):
 
         # Snapshot source volume so that we have a new point-in-time
         new_snap = self._get_new_snap_name(backup_id)
-        LOG.debug("Creating backup snapshot='%s'" % new_snap)
+        LOG.debug("Creating backup snapshot='%s'", new_snap)
         source_rbd_image.create_snap(new_snap)
 
         # Attempt differential backup. If this fails, perhaps because librbd
@@ -662,7 +659,7 @@ class CephBackupDriver(driver.BackupDriver):
                                     src_snap=new_snap,
                                     from_snap=from_snap)
 
-            LOG.debug("Differential backup transfer completed in %.4fs" %
+            LOG.debug("Differential backup transfer completed in %.4fs",
                       (time.time() - before))
 
             # We don't need the previous snapshot (if there was one) anymore so
@@ -681,7 +678,7 @@ class CephBackupDriver(driver.BackupDriver):
 
                 # Delete snapshot
                 LOG.debug("Deleting diff backup snapshot='%(snapshot)s' of "
-                          "source volume='%(volume)s'." %
+                          "source volume='%(volume)s'.",
                           {'snapshot': new_snap, 'volume': volume_id})
                 source_rbd_image.remove_snap(new_snap)
 
@@ -702,8 +699,8 @@ class CephBackupDriver(driver.BackupDriver):
             # First create base backup image
             old_format, features = self._get_rbd_support()
             LOG.debug("Creating backup base image='%(name)s' for volume "
-                      "%(volume)s."
-                      {'name': backup_name, 'volume': volume_id})
+                      "%(volume)s.",
+                      {'name': backup_name, 'volume': volume_id})
             self.rbd.RBD().create(ioctx=client.ioctx,
                                   name=backup_name,
                                   size=length,
@@ -712,7 +709,7 @@ class CephBackupDriver(driver.BackupDriver):
                                   stripe_unit=self.rbd_stripe_unit,
                                   stripe_count=self.rbd_stripe_count)
 
-            LOG.debug("Copying data from volume %s." % volume_id)
+            LOG.debug("Copying data from volume %s.", volume_id)
             dest_rbd = self.rbd.Image(client.ioctx, backup_name)
             try:
                 rbd_meta = rbd_driver.RBDImageMetadata(dest_rbd,
@@ -773,17 +770,17 @@ class CephBackupDriver(driver.BackupDriver):
         """
         snaps = self.get_backup_snaps(rbd_image)
 
-        LOG.debug("Looking for snapshot of backup base '%s'" % name)
+        LOG.debug("Looking for snapshot of backup base '%s'", name)
 
         if not snaps:
-            LOG.debug("Backup base '%s' has no snapshots" % name)
+            LOG.debug("Backup base '%s' has no snapshots", name)
             return None
 
         snaps = [snap['name'] for snap in snaps
                  if snap['backup_id'] == backup_id]
 
         if not snaps:
-            LOG.debug("Backup '%s' has no snapshot" % backup_id)
+            LOG.debug("Backup '%s' has no snapshot", backup_id)
             return None
 
         if len(snaps) > 1:
@@ -792,7 +789,7 @@ class CephBackupDriver(driver.BackupDriver):
             LOG.error(msg)
             raise exception.BackupOperationError(msg)
 
-        LOG.debug("Found snapshot '%s'" % (snaps[0]))
+        LOG.debug("Found snapshot '%s'", snaps[0])
         return snaps[0]
 
     def _get_most_recent_snap(self, rbd_image):
@@ -828,11 +825,11 @@ class CephBackupDriver(driver.BackupDriver):
         """
         json_meta = self.get_metadata(backup['volume_id'])
         if not json_meta:
-            LOG.debug("No metadata to backup for volume %s." %
+            LOG.debug("No metadata to backup for volume %s.",
                       backup['volume_id'])
             return
 
-        LOG.debug("Backing up metadata for volume %s." %
+        LOG.debug("Backing up metadata for volume %s.",
                   backup['volume_id'])
         try:
             with rbd_driver.RADOSClient(self) as client:
@@ -854,7 +851,7 @@ class CephBackupDriver(driver.BackupDriver):
         volume_id = volume['id']
         volume_name = volume['name']
 
-        LOG.debug("Starting backup of volume='%s'." % volume_id)
+        LOG.debug("Starting backup of volume='%s'.", volume_id)
 
         # Ensure we are at the beginning of the volume
         volume_file.seek(0)
@@ -867,7 +864,7 @@ class CephBackupDriver(driver.BackupDriver):
                 self._backup_rbd(backup_id, volume_id, volume_file,
                                  volume_name, length)
             except exception.BackupRBDOperationFailed:
-                LOG.debug("Forcing full backup of volume %s." % volume_id)
+                LOG.debug("Forcing full backup of volume %s.", volume_id)
                 do_full_backup = True
         else:
             do_full_backup = True
@@ -887,8 +884,8 @@ class CephBackupDriver(driver.BackupDriver):
                     # Cleanup.
                     self.delete(backup)
 
-        LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished."
-                  {'backup_id': backup_id, 'volume_id': volume_id})
+        LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished.",
+                  {'backup_id': backup_id, 'volume_id': volume_id})
 
     def _full_restore(self, backup_id, volume_id, dest_file, dest_name,
                       length, src_snap=None):
@@ -960,7 +957,7 @@ class CephBackupDriver(driver.BackupDriver):
         rbd_conf = restore_file.rbd_conf
 
         LOG.debug("Attempting incremental restore from base='%(base)s' "
-                  "snap='%(snap)s'" %
+                  "snap='%(snap)s'",
                   {'base': base_name, 'snap': restore_point})
         before = time.time()
         try:
@@ -982,7 +979,7 @@ class CephBackupDriver(driver.BackupDriver):
         self._check_restore_vol_size(base_name, restore_name, restore_length,
                                      rbd_pool)
 
-        LOG.debug("Restore transfer completed in %.4fs" %
+        LOG.debug("Restore transfer completed in %.4fs",
                   (time.time() - before))
 
     def _num_backup_snaps(self, backup_base_name):
@@ -1031,7 +1028,7 @@ class CephBackupDriver(driver.BackupDriver):
         rbd_volume.diff_iterate(0, rbd_volume.size(), None, iter_cb)
 
         if extents:
-            LOG.debug("RBD has %s extents" % sum(extents))
+            LOG.debug("RBD has %s extents", sum(extents))
             return True
 
         return False
@@ -1068,9 +1065,8 @@ class CephBackupDriver(driver.BackupDriver):
             # made from, force a full restore since a diff will not work in
             # this case.
             if volume['id'] == backup['volume_id']:
-                msg = ("Destination volume is same as backup source volume "
-                       "%s - forcing full copy." % volume['id'])
-                LOG.debug(msg)
+                LOG.debug("Destination volume is same as backup source volume "
+                          "%s - forcing full copy.", volume['id'])
                 return False, restore_point
 
             if restore_point:
@@ -1086,7 +1082,7 @@ class CephBackupDriver(driver.BackupDriver):
             else:
                 LOG.info(_LI("No restore point found for "
                              "backup='%(backup)s' of "
-                             "volume %(volume)s - forcing full copy.") %
+                             "volume %(volume)s - forcing full copy."),
                          {'backup': backup['id'],
                           'volume': backup['volume_id']})
 
@@ -1118,7 +1114,7 @@ class CephBackupDriver(driver.BackupDriver):
                                        restore_point, length)
                 do_full_restore = False
             except exception.BackupRBDOperationFailed:
-                LOG.debug("Forcing full restore to volume %s." %
+                LOG.debug("Forcing full restore to volume %s.",
                           volume['id'])
 
         if do_full_restore:
@@ -1139,7 +1135,7 @@ class CephBackupDriver(driver.BackupDriver):
                 if meta is not None:
                     self.put_metadata(volume_id, meta)
                 else:
-                    LOG.debug("Volume %s has no backed up metadata." %
+                    LOG.debug("Volume %s has no backed up metadata.",
                               backup['volume_id'])
         except exception.BackupMetadataUnsupportedVersion:
             msg = _("Metadata restore failed due to incompatible version")
@@ -1153,7 +1149,7 @@ class CephBackupDriver(driver.BackupDriver):
         """
         target_volume = self.db.volume_get(self.context, volume_id)
         LOG.debug('Starting restore from Ceph backup=%(src)s to '
-                  'volume=%(dest)s' %
+                  'volume=%(dest)s',
                   {'src': backup['id'], 'dest': target_volume['name']})
 
         try:
@@ -1170,25 +1166,25 @@ class CephBackupDriver(driver.BackupDriver):
 
             self._restore_metadata(backup, volume_id)
 
-            LOG.debug('Restore to volume %s finished successfully.' %
+            LOG.debug('Restore to volume %s finished successfully.',
                       volume_id)
         except exception.BackupOperationError as e:
             LOG.error(_LE('Restore to volume %(volume)s finished with error - '
-                          '%(error)s.') % {'error': e, 'volume': volume_id})
+                          '%(error)s.'), {'error': e, 'volume': volume_id})
             raise
 
     def delete(self, backup):
         """Delete the given backup from Ceph object store."""
-        LOG.debug('Delete started for backup=%s' % backup['id'])
+        LOG.debug('Delete started for backup=%s', backup['id'])
 
         delete_failed = False
         try:
             self._try_delete_base_image(backup['id'], backup['volume_id'])
         except self.rbd.ImageNotFound:
-            msg = (_LW("RBD image for backup %(backup)s of volume %(volume)s "
-                       "not found. Deleting backup metadata.")
-                   % {'backup': backup['id'], 'volume': backup['volume_id']})
-            LOG.warning(msg)
+            LOG.warning(
+                _LW("RBD image for backup %(backup)s of volume %(volume)s "
+                    "not found. Deleting backup metadata."),
+                {'backup': backup['id'], 'volume': backup['volume_id']})
             delete_failed = True
 
         with rbd_driver.RADOSClient(self) as client:
@@ -1197,11 +1193,11 @@ class CephBackupDriver(driver.BackupDriver):
         if delete_failed:
             LOG.info(_LI("Delete of backup '%(backup)s' "
                          "for volume '%(volume)s' "
-                         "finished with warning.") %
+                         "finished with warning."),
                      {'backup': backup['id'], 'volume': backup['volume_id']})
         else:
             LOG.debug("Delete of backup '%(backup)s' for volume "
-                      "'%(volume)s' finished." %
+                      "'%(volume)s' finished.",
                       {'backup': backup['id'], 'volume': backup['volume_id']})
 
 
cinder/backup/drivers/tsm.py
index 52b5a95bc010698f4550bedc3c76a3d5c375d847..4bcd903b57d29b86e0918801ff1dc0aa3da26ffb 100644 (file)
@@ -250,15 +250,14 @@ def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id):
                       hardlink_path,
                       run_as_root=True)
     except processutils.ProcessExecutionError as exc:
-        err = (_LE('backup: %(vol_id)s failed to remove backup hardlink'
-                   ' from %(vpath)s to %(bpath)s.\n'
-                   'stdout: %(out)s\n stderr: %(err)s.')
-               % {'vol_id': volume_id,
-                  'vpath': volume_path,
-                  'bpath': hardlink_path,
-                  'out': exc.stdout,
-                  'err': exc.stderr})
-        LOG.error(err)
+        LOG.error(_LE('backup: %(vol_id)s failed to remove backup hardlink '
+                      'from %(vpath)s to %(bpath)s.\n'
+                      'stdout: %(out)s\n stderr: %(err)s.'),
+                  {'vol_id': volume_id,
+                   'vpath': volume_path,
+                   'bpath': hardlink_path,
+                   'out': exc.stdout,
+                   'err': exc.stderr})
 
 
 class TSMBackupDriver(driver.BackupDriver):
@@ -370,10 +369,10 @@ class TSMBackupDriver(driver.BackupDriver):
         volume_path, backup_mode = _get_volume_realpath(volume_file,
                                                         volume_id)
         LOG.debug('Starting backup of volume: %(volume_id)s to TSM,'
-                  ' volume path: %(volume_path)s, mode: %(mode)s.'
-                  {'volume_id': volume_id,
-                     'volume_path': volume_path,
-                     'mode': backup_mode})
+                  ' volume path: %(volume_path)s, mode: %(mode)s.',
+                  {'volume_id': volume_id,
+                   'volume_path': volume_path,
+                   'mode': backup_mode})
 
         backup_path = _create_unique_device_link(backup_id,
                                                  volume_path,
@@ -414,7 +413,7 @@ class TSMBackupDriver(driver.BackupDriver):
         finally:
             _cleanup_device_hardlink(backup_path, volume_path, volume_id)
 
-        LOG.debug('Backup %s finished.' % backup_id)
+        LOG.debug('Backup %s finished.', backup_id)
 
     def restore(self, backup, volume_id, volume_file):
         """Restore the given volume backup from TSM server.
@@ -433,7 +432,7 @@ class TSMBackupDriver(driver.BackupDriver):
         LOG.debug('Starting restore of backup from TSM '
                   'to volume %(volume_id)s, '
                   'backup: %(backup_id)s, '
-                  'mode: %(mode)s.' %
+                  'mode: %(mode)s.',
                   {'volume_id': volume_id,
                    'backup_id': backup_id,
                    'mode': backup_mode})
@@ -475,9 +474,9 @@ class TSMBackupDriver(driver.BackupDriver):
         finally:
             _cleanup_device_hardlink(restore_path, volume_path, volume_id)
 
-        LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.'
-                  {'backup_id': backup_id,
-                     'volume_id': volume_id})
+        LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.',
+                  {'backup_id': backup_id,
+                   'volume_id': volume_id})
 
     def delete(self, backup):
         """Delete the given backup from TSM server.
@@ -529,14 +528,13 @@ class TSMBackupDriver(driver.BackupDriver):
             # log error if tsm cannot delete the backup object
             # but do not raise exception so that cinder backup
             # object can be removed.
-            err = (_LE('delete: %(vol_id)s failed with '
-                       'stdout: %(out)s\n stderr: %(err)s')
-                   % {'vol_id': volume_id,
-                      'out': out,
-                      'err': err})
-            LOG.error(err)
+            LOG.error(_LE('delete: %(vol_id)s failed with '
+                          'stdout: %(out)s\n stderr: %(err)s'),
+                      {'vol_id': volume_id,
+                       'out': out,
+                       'err': err})
 
-        LOG.debug('Delete %s finished.' % backup['id'])
+        LOG.debug('Delete %s finished.', backup['id'])
 
 
 def get_backup_driver(context):
cinder/backup/manager.py
index 0c6096bc05f7695fbdf82b9a818615b46954e1ed..0f8eaa5e1f02992617921823c3210b8726561c1d 100644 (file)
@@ -108,15 +108,15 @@ class BackupManager(manager.SchedulerDependentManager):
                 msg = _("NULL host not allowed for volume backend lookup.")
                 raise exception.BackupFailedToGetVolumeBackend(msg)
         else:
-            LOG.debug("Checking hostname '%s' for backend info." % (host))
+            LOG.debug("Checking hostname '%s' for backend info.", host)
             part = host.partition('@')
             if (part[1] == '@') and (part[2] != ''):
                 backend = part[2]
-                LOG.debug("Got backend '%s'." % (backend))
+                LOG.debug("Got backend '%s'.", backend)
                 return backend
 
-        LOG.info(_LI("Backend not found in hostname (%s) so using default.") %
-                 (host))
+        LOG.info(_LI("Backend not found in hostname (%s) so using default."),
+                 host)
 
         if 'default' not in self.volume_managers:
             # For multi-backend we just pick the top of the list.
@@ -125,8 +125,8 @@ class BackupManager(manager.SchedulerDependentManager):
         return 'default'
 
     def _get_manager(self, backend):
-        LOG.debug("Manager requested for volume_backend '%s'." %
-                  (backend))
+        LOG.debug("Manager requested for volume_backend '%s'.",
+                  backend)
         if backend is None:
             LOG.debug("Fetching default backend.")
             backend = self._get_volume_backend(allow_null_host=True)
@@ -137,8 +137,8 @@ class BackupManager(manager.SchedulerDependentManager):
         return self.volume_managers[backend]
 
     def _get_driver(self, backend=None):
-        LOG.debug("Driver requested for volume_backend '%s'." %
-                  (backend))
+        LOG.debug("Driver requested for volume_backend '%s'.",
+                  backend)
         if backend is None:
             LOG.debug("Fetching default backend.")
             backend = self._get_volume_backend(allow_null_host=True)
@@ -156,27 +156,26 @@ class BackupManager(manager.SchedulerDependentManager):
                 config = mgr.configuration
                 backend_name = config.safe_get('volume_backend_name')
                 LOG.debug("Registering backend %(backend)s (host=%(host)s "
-                          "backend_name=%(backend_name)s)." %
+                          "backend_name=%(backend_name)s).",
                           {'backend': backend, 'host': host,
                            'backend_name': backend_name})
                 self.volume_managers[backend] = mgr
         else:
             default = importutils.import_object(CONF.volume_manager)
-            LOG.debug("Registering default backend %s." % (default))
+            LOG.debug("Registering default backend %s.", default)
             self.volume_managers['default'] = default
 
     def _init_volume_driver(self, ctxt, driver):
-        LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s).") %
+        LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)."),
                  {'driver_name': driver.__class__.__name__,
                   'version': driver.get_version()})
         try:
             driver.do_setup(ctxt)
             driver.check_for_setup_error()
-        except Exception as ex:
-            LOG.error(_LE("Error encountered during initialization of driver: "
-                          "%(name)s.") %
-                      {'name': driver.__class__.__name__})
-            LOG.exception(ex)
+        except Exception:
+            LOG.exception(_LE("Error encountered during initialization of "
+                              "driver: %(name)s."),
+                          {'name': driver.__class__.__name__})
             # we don't want to continue since we failed
             # to initialize the driver correctly.
             return
@@ -225,19 +224,19 @@ class BackupManager(manager.SchedulerDependentManager):
         backups = self.db.backup_get_all_by_host(ctxt, self.host)
         for backup in backups:
             if backup['status'] == 'creating':
-                LOG.info(_LI('Resetting backup %s to error (was creating).')
-                         backup['id'])
+                LOG.info(_LI('Resetting backup %s to error (was creating).'),
+                         backup['id'])
                 err = 'incomplete backup reset on manager restart'
                 self.db.backup_update(ctxt, backup['id'], {'status': 'error',
                                                            'fail_reason': err})
             if backup['status'] == 'restoring':
                 LOG.info(_LI('Resetting backup %s to '
-                             ' available (was restoring).')
-                         backup['id'])
+                             'available (was restoring).'),
+                         backup['id'])
                 self.db.backup_update(ctxt, backup['id'],
                                       {'status': 'available'})
             if backup['status'] == 'deleting':
-                LOG.info(_LI('Resuming delete on backup: %s.') % backup['id'])
+                LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
                 self.delete_backup(ctxt, backup['id'])
 
     def create_backup(self, context, backup_id):
@@ -246,7 +245,7 @@ class BackupManager(manager.SchedulerDependentManager):
         volume_id = backup['volume_id']
         volume = self.db.volume_get(context, volume_id)
         LOG.info(_LI('Create backup started, backup: %(backup_id)s '
-                     'volume: %(volume_id)s.') %
+                     'volume: %(volume_id)s.'),
                  {'backup_id': backup_id, 'volume_id': volume_id})
 
         self._notify_about_backup_usage(context, backup, "create.start")
@@ -311,7 +310,7 @@ class BackupManager(manager.SchedulerDependentManager):
     def restore_backup(self, context, backup_id, volume_id):
         """Restore volume backups from configured backup service."""
         LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
-                     'volume: %(volume_id)s.') %
+                     'volume: %(volume_id)s.'),
                  {'backup_id': backup_id, 'volume_id': volume_id})
 
         backup = self.db.backup_get(context, backup_id)
@@ -389,7 +388,7 @@ class BackupManager(manager.SchedulerDependentManager):
         backup = self.db.backup_update(context, backup_id,
                                        {'status': 'available'})
         LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
-                     ' to volume %(volume_id)s.') %
+                     ' to volume %(volume_id)s.'),
                  {'backup_id': backup_id, 'volume_id': volume_id})
         self._notify_about_backup_usage(context, backup, "restore.end")
 
@@ -617,11 +616,11 @@ class BackupManager(manager.SchedulerDependentManager):
                 if isinstance(backup_service, driver.BackupDriverWithVerify):
                     backup_service.verify(backup_id)
                 else:
-                    LOG.warn(_LW('Backup service %(service)s does not support '
-                                 'verify. Backup id %(id)s is not verified. '
-                                 'Skipping verify.') % {'service':
-                                                        self.driver_name,
-                                                        'id': backup_id})
+                    LOG.warning(_LW('Backup service %(service)s does not '
+                                    'support verify. Backup id %(id)s is '
+                                    'not verified. Skipping verify.'),
+                                {'service': self.driver_name,
+                                 'id': backup_id})
             except exception.InvalidBackup as err:
                 with excutils.save_and_reraise_exception():
                     self.db.backup_update(context, backup_id,
@@ -630,7 +629,7 @@ class BackupManager(manager.SchedulerDependentManager):
                                            unicode(err)})
 
             LOG.info(_LI('Import record id %s metadata from driver '
-                         'finished.') % backup_id)
+                         'finished.'), backup_id)
 
     def reset_status(self, context, backup_id, status):
         """Reset volume backup status.
@@ -698,18 +697,16 @@ class BackupManager(manager.SchedulerDependentManager):
                                               {'status': status})
             except exception.InvalidBackup:
                 with excutils.save_and_reraise_exception():
-                    msg = (_("Backup id %(id)s is not invalid. "
-                             "Skipping reset.") % {'id': backup_id})
-                    LOG.error(msg)
+                    LOG.error(_LE("Backup id %s is not invalid. "
+                                  "Skipping reset."), backup_id)
             except exception.BackupVerifyUnsupportedDriver:
                 with excutils.save_and_reraise_exception():
-                    msg = (_('Backup service %(configured_service)s '
-                             'does not support verify. Backup id'
-                             ' %(id)s is not verified. '
-                             'Skipping verify.') %
-                           {'configured_service': self.driver_name,
-                            'id': backup_id})
-                    LOG.error(msg)
+                    LOG.error(_LE('Backup service %(configured_service)s '
+                                  'does not support verify. Backup id '
+                                  '%(id)s is not verified. '
+                                  'Skipping verify.'),
+                              {'configured_service': self.driver_name,
+                               'id': backup_id})
             except AttributeError:
                 msg = (_('Backup service %(service)s does not support '
                          'verify. Backup id %(id)s is not verified. '
@@ -723,5 +720,5 @@ class BackupManager(manager.SchedulerDependentManager):
             # send notification to ceilometer
             notifier_info = {'id': backup_id, 'update': {'status': status}}
             notifier = rpc.get_notifier('backupStatusUpdate')
-            notifier.info(context, "backups" + '.reset_status.end',
+            notifier.info(context, "backups.reset_status.end",
                           notifier_info)
cinder/backup/rpcapi.py
index dc2ebf48be766c0fa8983ce7c2f297636e3ac419..690dd2da7ca61a1d9b29917875792428328dbdf4 100644 (file)
@@ -78,10 +78,10 @@ class BackupAPI(object):
                       backup_url,
                       backup_hosts):
         LOG.debug("import_record rpcapi backup id %(id)s "
-                  "on host %(host)s "
-                  "for backup_url %(url)s." % {'id': backup_id,
-                                               'host': host,
-                                               'url': backup_url})
+                  "on host %(host)s for backup_url %(url)s.",
+                  {'id': backup_id,
+                   'host': host,
+                   'url': backup_url})
         cctxt = self.client.prepare(server=host)
         cctxt.cast(ctxt, 'import_record',
                    backup_id=backup_id,
cinder/common/sqlalchemyutils.py
index baa0e982d7f8b92890c5a788ddd9ff95f6adc477..4ff6711d2bfddd9bd995a97e6683e2f68c9397da 100644 (file)
@@ -64,7 +64,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
     if 'id' not in sort_keys:
         # TODO(justinsb): If this ever gives a false-positive, check
         # the actual primary key, rather than assuming its id
-        LOG.warn(_LW('Id not in sort_keys; is sort_keys unique?'))
+        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
 
     assert(not (sort_dir and sort_dirs))
 
cinder/hacking/checks.py
index 05c585d7d6e3c47f8eac8f2d2bfbdd168d4d5e75..b6dbdc42fcd4b4ceec2839932985845a4cec1052 100644 (file)
@@ -142,9 +142,7 @@ def validate_log_translations(logical_line, filename):
     # of patches are done to address these issues. It should be
     # removed completely when bug 1433216 is closed.
     ignore_dirs = [
-        "cinder/backup",
         "cinder/brick",
-        "cinder/common",
         "cinder/db",
         "cinder/openstack",
         "cinder/scheduler",