msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG backup (%(d_consumed)dG of "
"%(d_quota)dG already consumed)")
- LOG.warn(msg % {'s_pid': context.project_id,
- 's_size': volume['size'],
- 'd_consumed': _consumed(over),
- 'd_quota': quotas[over]})
+ LOG.warning(msg, {'s_pid': context.project_id,
+ 's_size': volume['size'],
+ 'd_consumed': _consumed(over),
+ 'd_quota': quotas[over]})
raise exception.VolumeBackupSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('backup_gigabytes'),
"backups (%(d_consumed)d backups "
"already consumed)")
- LOG.warn(msg % {'s_pid': context.project_id,
- 'd_consumed': _consumed(over)})
+ LOG.warning(msg, {'s_pid': context.project_id,
+ 'd_consumed': _consumed(over)})
raise exception.BackupLimitExceeded(
allowed=quotas[over])
msg = _('Volume to be restored to must be available')
raise exception.InvalidVolume(reason=msg)
- LOG.debug('Checking backup size %s against volume size %s',
- size, volume['size'])
+ LOG.debug('Checking backup size %(bs)s against volume size %(vs)s',
+ {'bs': size, 'vs': volume['size']})
if size > volume['size']:
msg = (_('volume size %(volume_size)d is too small to restore '
'backup of size %(size)d.') %
comp_size_bytes = len(data)
LOG.debug('compressed %(data_size_bytes)d bytes of data '
'to %(comp_size_bytes)d bytes using '
- '%(algorithm)s' %
+ '%(algorithm)s',
{
'data_size_bytes': data_size_bytes,
'comp_size_bytes': comp_size_bytes,
writer.write(data)
md5 = hashlib.md5(data).hexdigest()
obj[object_name]['md5'] = md5
- LOG.debug('backup MD5 for %(object_name)s: %(md5)s' %
+ LOG.debug('backup MD5 for %(object_name)s: %(md5)s',
{'object_name': object_name, 'md5': md5})
object_list.append(obj)
object_id += 1
# Whatever goes wrong, we want to log, cleanup, and re-raise.
except Exception as err:
with excutils.save_and_reraise_exception():
- LOG.exception(
- _LE("Backup volume metadata failed: %s."),
- err)
+ LOG.exception(_LE("Backup volume metadata failed: %s."),
+ err)
self.delete(backup)
self._finalize_backup(backup, container, object_meta, object_sha256)
decompressor = self._get_compressor(compression_algorithm)
volume_file.seek(metadata_object.values()[0]['offset'])
if decompressor is not None:
- LOG.debug('decompressing data using %s algorithm' %
+ LOG.debug('decompressing data using %s algorithm',
compression_algorithm)
decompressed = decompressor.decompress(body)
volume_file.write(decompressed)
fileno = volume_file.fileno()
except IOError:
LOG.info(_LI("volume_file does not support "
- "fileno() so skipping"
+ "fileno() so skipping "
"fsync()"))
else:
os.fsync(fileno)
backup_id = backup['id']
container = backup['container']
object_prefix = backup['service_metadata']
- LOG.debug('starting restore of backup %(object_prefix)s'
- ' container: %(container)s, to volume %(volume_id)s, '
+ LOG.debug('starting restore of backup %(object_prefix)s '
+ 'container: %(container)s, to volume %(volume_id)s, '
'backup: %(backup_id)s.',
{
'object_prefix': object_prefix,
def delete(self, backup):
"""Delete the given backup."""
container = backup['container']
- LOG.debug('delete started, backup: %s, container: %s, prefix: %s.',
- backup['id'], container, backup['service_metadata'])
+ LOG.debug('delete started, backup: %(id)s, container: %(cont)s, '
+ 'prefix: %(pre)s.',
+ {'id': backup['id'],
+ 'cont': container,
+ 'pre': backup['service_metadata']})
if container is not None:
object_names = []
try:
object_names = self._generate_object_names(backup)
except Exception:
- LOG.warn(_LW('swift error while listing objects, continuing'
- ' with delete.'))
+ LOG.warning(_LW('swift error while listing objects, continuing'
+ ' with delete.'))
for object_name in object_names:
self.delete_object(container, object_name)
from cinder.db import base
from cinder import exception
-from cinder.i18n import _, _LI, _LE, _LW
+from cinder.i18n import _, _LI, _LW
from cinder import keymgr
service_opts = [
try:
jsonutils.dumps(value)
except TypeError:
- LOG.info(_LI("Value with type=%s is not serializable") %
+ LOG.info(_LI("Value with type=%s is not serializable"),
type(value))
return False
save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_BASE_META
- LOG.debug("Getting metadata type '%s'" % type_tag)
+ LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_get(self.context, volume_id)
if meta:
container[type_tag] = {}
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(value):
LOG.info(_LI("Unable to serialize field '%s' - excluding "
- "from backup") % (key))
+ "from backup"), key)
continue
# Copy the encryption key uuid for backup
if key is 'encryption_key_id' and value is not None:
LOG.debug("Copying encryption key uuid for backup.")
container[type_tag][key] = value
- LOG.debug("Completed fetching metadata type '%s'" % type_tag)
+ LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
- LOG.debug("No metadata type '%s' available" % type_tag)
+ LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_meta(self, container, volume_id):
"""Save volume metadata to container.
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_META
- LOG.debug("Getting metadata type '%s'" % type_tag)
+ LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(meta[entry]):
LOG.info(_LI("Unable to serialize field '%s' - excluding "
- "from backup") % (entry))
+ "from backup"), entry)
continue
container[type_tag][entry] = meta[entry]
- LOG.debug("Completed fetching metadata type '%s'" % type_tag)
+ LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
- LOG.debug("No metadata type '%s' available" % type_tag)
+ LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_glance_meta(self, container, volume_id):
"""Save volume Glance metadata to container.
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_GLANCE_META
- LOG.debug("Getting metadata type '%s'" % type_tag)
+ LOG.debug("Getting metadata type '%s'", type_tag)
try:
meta = self.db.volume_glance_metadata_get(self.context, volume_id)
if meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(entry.value):
LOG.info(_LI("Unable to serialize field '%s' - "
- "excluding from backup") % (entry))
+ "excluding from backup"), entry)
continue
container[type_tag][entry.key] = entry.value
- LOG.debug("Completed fetching metadata type '%s'" % type_tag)
+ LOG.debug("Completed fetching metadata type '%s'", type_tag)
except exception.GlanceMetadataNotFound:
- LOG.debug("No metadata type '%s' available" % type_tag)
+ LOG.debug("No metadata type '%s' available", type_tag)
@staticmethod
def _filter(metadata, fields):
if field in metadata:
subset[field] = metadata[field]
else:
- LOG.debug("Excluding field '%s'" % (field))
+ LOG.debug("Excluding field '%s'", field)
return subset
LOG.debug("Volume type of source volume has been "
"deleted. Encrypted backup restore has "
"failed.")
- msg = _LE("The source volume type '%s' is not "
- "available.") % (src_volume_type_id)
+ msg = _("The source volume type '%s' is not "
+ "available.") % (src_volume_type_id)
raise exception.EncryptedBackupOperationFailed(msg)
# Update dest volume with src volume's volume_type_id.
LOG.debug("The volume type of the destination volume "
else:
# Volume type id's do not match, and destination volume
# has a volume type. Throw exception.
- LOG.warn(_LW("Destination volume type is different from "
- "source volume type for an encrypted volume. "
- "Encrypted backup restore has failed."))
- msg = _LE("The source volume type '%(src)s' is different "
- "than the destination volume type '%(dest)s'.") % \
- {'src': src_volume_type_id,
- 'dest': dest_vol['volume_type_id']}
+ LOG.warning(_LW("Destination volume type is different from "
+ "source volume type for an encrypted volume. "
+ "Encrypted backup restore has failed."))
+ msg = (_("The source volume type '%(src)s' is different "
+ "than the destination volume type '%(dest)s'.") %
+ {'src': src_volume_type_id,
+ 'dest': dest_vol['volume_type_id']})
raise exception.EncryptedBackupOperationFailed(msg)
def _restore_vol_meta(self, metadata, volume_id, fields):
if type in meta_container:
func(meta_container[type], volume_id, fields)
else:
- msg = "No metadata of type '%s' to restore" % (type)
- LOG.debug(msg)
+ LOG.debug("No metadata of type '%s' to restore", type)
@six.add_metaclass(abc.ABCMeta)
"""
meta_obj = rados.Object(self._client.ioctx, self.name)
if not self._exists(meta_obj):
- msg = "Metadata backup object %s does not exist" % self.name
- LOG.debug(msg)
+ LOG.debug("Metadata backup object %s does not exist", self.name)
return None
return meta_obj.read()
try:
meta_obj.remove()
except rados.ObjectNotFound:
- msg = ("Metadata backup object '%s' not found - ignoring" %
- self.name)
- LOG.debug(msg)
+ LOG.debug("Metadata backup object '%s' not found - ignoring",
+ self.name)
class CephBackupDriver(driver.BackupDriver):
and pad with zeroes.
"""
if length:
- LOG.debug("Discarding %(length)s bytes from offset %(offset)s" %
+ LOG.debug("Discarding %(length)s bytes from offset %(offset)s",
{'length': length, 'offset': offset})
if self._file_is_rbd(volume):
volume.rbd_image.discard(offset, length)
zeroes = '\0' * length
chunks = int(length / self.chunk_size)
for chunk in xrange(0, chunks):
- LOG.debug("Writing zeroes chunk %d" % chunk)
+ LOG.debug("Writing zeroes chunk %d", chunk)
volume.write(zeroes)
volume.flush()
# yield to any other pending backups
def _transfer_data(self, src, src_name, dest, dest_name, length):
"""Transfer data between files (Python IO objects)."""
- LOG.debug("Transferring data between '%(src)s' and '%(dest)s'" %
+ LOG.debug("Transferring data between '%(src)s' and '%(dest)s'",
{'src': src_name, 'dest': dest_name})
chunks = int(length / self.chunk_size)
- LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred" %
+ LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred",
{'chunks': chunks, 'bytes': self.chunk_size})
for chunk in xrange(0, chunks):
dest.flush()
delta = (time.time() - before)
rate = (self.chunk_size / delta) / 1024
- LOG.debug((_("Transferred chunk %(chunk)s of %(chunks)s "
- "(%(rate)dK/s)") %
- {'chunk': chunk + 1, 'chunks': chunks,
- 'rate': rate}))
+ LOG.debug("Transferred chunk %(chunk)s of %(chunks)s "
+ "(%(rate)dK/s)",
+ {'chunk': chunk + 1,
+ 'chunks': chunks,
+ 'rate': rate})
# yield to any other pending backups
eventlet.sleep(0)
rem = int(length % self.chunk_size)
if rem:
- LOG.debug("Transferring remaining %s bytes" % rem)
+ LOG.debug("Transferring remaining %s bytes", rem)
data = src.read(rem)
if data == '':
if CONF.restore_discard_excess_bytes:
This will be the base image used for storing differential exports.
"""
- LOG.debug("Creating base image '%s'" % name)
+ LOG.debug("Creating base image '%s'", name)
old_format, features = self._get_rbd_support()
self.rbd.RBD().create(ioctx=rados_client.ioctx,
name=name,
snap_name = self._get_backup_snap_name(base_rbd, base_name,
backup_id)
if snap_name:
- LOG.debug("Deleting backup snapshot='%s'" % snap_name)
+ LOG.debug("Deleting backup snapshot='%s'", snap_name)
base_rbd.remove_snap(snap_name)
else:
LOG.debug("No backup snapshot to delete")
base_name = self._get_backup_base_name(volume_id, backup_id)
LOG.debug("Trying diff format basename='%(basename)s' for "
- "backup base image of volume %(volume)s." %
+ "backup base image of volume %(volume)s.",
{'basename': base_name, 'volume': volume_id})
with rbd_driver.RADOSClient(self) as client:
snap, rem = self._delete_backup_snapshot(client, base_name,
backup_id)
if rem:
- msg = (_LI("Backup base image of volume %(volume)s still "
- "has %(snapshots)s snapshots so skipping base "
- "image delete.") %
- {'snapshots': rem, 'volume': volume_id})
- LOG.info(msg)
+ LOG.info(
+ _LI("Backup base image of volume %(volume)s still "
+ "has %(snapshots)s snapshots so skipping base "
+ "image delete."),
+ {'snapshots': rem, 'volume': volume_id})
return
LOG.info(_LI("Deleting backup base image='%(basename)s' of "
- "volume %(volume)s.") %
+ "volume %(volume)s."),
{'basename': base_name, 'volume': volume_id})
# Delete base if no more snapshots
try:
self.rbd.RBD().remove(client.ioctx, base_name)
- except self.rbd.ImageBusy as exc:
+ except self.rbd.ImageBusy:
# Allow a retry if the image is busy
if retries > 0:
LOG.info(_LI("Backup image of volume %(volume)s is "
"busy, retrying %(retries)s more time(s) "
- "in %(delay)ss.") %
+ "in %(delay)ss."),
{'retries': retries,
'delay': delay,
'volume': volume_id})
else:
LOG.error(_LE("Max retries reached deleting backup "
"%(basename)s image of volume "
- "%(volume)s.")
- % {'volume': volume_id,
- 'basename': base_name})
- raise exc
+ "%(volume)s."),
+ {'volume': volume_id,
+ 'basename': base_name})
+ raise
else:
LOG.debug("Base backup image='%(basename)s' of volume "
- "%(volume)s deleted." %
+ "%(volume)s deleted.",
{'basename': base_name, 'volume': volume_id})
retries = 0
finally:
src_name = encodeutils.safe_encode(volume_id)
if src_name in self.rbd.RBD().list(client.ioctx):
LOG.debug("Deleting source volume snapshot '%(snapshot)s' "
- "for backup %(basename)s." %
+ "for backup %(basename)s.",
{'snapshot': snap, 'basename': base_name})
src_rbd = self.rbd.Image(client.ioctx, src_name)
try:
def _piped_execute(self, cmd1, cmd2):
"""Pipe output of cmd1 into cmd2."""
- LOG.debug("Piping cmd1='%s' into..." % ' '.join(cmd1))
- LOG.debug("cmd2='%s'" % ' '.join(cmd2))
+ LOG.debug("Piping cmd1='%s' into...", ' '.join(cmd1))
+ LOG.debug("cmd2='%s'", ' '.join(cmd2))
try:
p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
- LOG.error(_LE("Pipe1 failed - %s ") % unicode(e))
+ LOG.error(_LE("Pipe1 failed - %s "), e)
raise
# NOTE(dosaboy): ensure that the pipe is blocking. This is to work
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
- LOG.error(_LE("Pipe2 failed - %s ") % unicode(e))
+ LOG.error(_LE("Pipe2 failed - %s "), e)
raise
p1.stdout.close()
changed since the snapshot was created.
"""
LOG.debug("Performing differential transfer from '%(src)s' to "
- "'%(dest)s'" %
+ "'%(dest)s'",
{'src': src_name, 'dest': dest_name})
# NOTE(dosaboy): Need to be tolerant of clusters/clients that do
"""Return tuple (exists, name)."""
rbds = self.rbd.RBD().list(client.ioctx)
if name not in rbds:
- msg = "Image '%s' not found - trying diff format name" % name
- LOG.debug(msg)
+ LOG.debug("Image '%s' not found - trying diff format name", name)
if try_diff_format:
name = self._get_backup_base_name(volume_id, diff_format=True)
if name not in rbds:
- msg = "Diff format image '%s' not found" % name
- LOG.debug(msg)
+ LOG.debug("Diff format image '%s' not found", name)
return False, name
else:
return False, name
# Identify our --from-snap point (if one exists)
from_snap = self._get_most_recent_snap(source_rbd_image)
LOG.debug("Using --from-snap '%(snap)s' for incremental backup of "
- "volume %(volume)s." %
+ "volume %(volume)s.",
{'snap': from_snap, 'volume': volume_id})
base_name = self._get_backup_base_name(volume_id, diff_format=True)
# ignore it since it is stale and waiting to be cleaned up.
if from_snap:
LOG.debug("Source snapshot '%(snapshot)s' of volume "
- "%(volume)s is stale so deleting." %
+ "%(volume)s is stale so deleting.",
{'snapshot': from_snap, 'volume': volume_id})
source_rbd_image.remove_snap(from_snap)
from_snap = None
# Snapshot source volume so that we have a new point-in-time
new_snap = self._get_new_snap_name(backup_id)
- LOG.debug("Creating backup snapshot='%s'" % new_snap)
+ LOG.debug("Creating backup snapshot='%s'", new_snap)
source_rbd_image.create_snap(new_snap)
# Attempt differential backup. If this fails, perhaps because librbd
src_snap=new_snap,
from_snap=from_snap)
- LOG.debug("Differential backup transfer completed in %.4fs" %
+ LOG.debug("Differential backup transfer completed in %.4fs",
(time.time() - before))
# We don't need the previous snapshot (if there was one) anymore so
# Delete snapshot
LOG.debug("Deleting diff backup snapshot='%(snapshot)s' of "
- "source volume='%(volume)s'." %
+ "source volume='%(volume)s'.",
{'snapshot': new_snap, 'volume': volume_id})
source_rbd_image.remove_snap(new_snap)
# First create base backup image
old_format, features = self._get_rbd_support()
LOG.debug("Creating backup base image='%(name)s' for volume "
- "%(volume)s."
- % {'name': backup_name, 'volume': volume_id})
+ "%(volume)s.",
+ {'name': backup_name, 'volume': volume_id})
self.rbd.RBD().create(ioctx=client.ioctx,
name=backup_name,
size=length,
stripe_unit=self.rbd_stripe_unit,
stripe_count=self.rbd_stripe_count)
- LOG.debug("Copying data from volume %s." % volume_id)
+ LOG.debug("Copying data from volume %s.", volume_id)
dest_rbd = self.rbd.Image(client.ioctx, backup_name)
try:
rbd_meta = rbd_driver.RBDImageMetadata(dest_rbd,
"""
snaps = self.get_backup_snaps(rbd_image)
- LOG.debug("Looking for snapshot of backup base '%s'" % name)
+ LOG.debug("Looking for snapshot of backup base '%s'", name)
if not snaps:
- LOG.debug("Backup base '%s' has no snapshots" % name)
+ LOG.debug("Backup base '%s' has no snapshots", name)
return None
snaps = [snap['name'] for snap in snaps
if snap['backup_id'] == backup_id]
if not snaps:
- LOG.debug("Backup '%s' has no snapshot" % backup_id)
+ LOG.debug("Backup '%s' has no snapshot", backup_id)
return None
if len(snaps) > 1:
LOG.error(msg)
raise exception.BackupOperationError(msg)
- LOG.debug("Found snapshot '%s'" % (snaps[0]))
+ LOG.debug("Found snapshot '%s'", snaps[0])
return snaps[0]
def _get_most_recent_snap(self, rbd_image):
"""
json_meta = self.get_metadata(backup['volume_id'])
if not json_meta:
- LOG.debug("No metadata to backup for volume %s." %
+ LOG.debug("No metadata to backup for volume %s.",
backup['volume_id'])
return
- LOG.debug("Backing up metadata for volume %s." %
+ LOG.debug("Backing up metadata for volume %s.",
backup['volume_id'])
try:
with rbd_driver.RADOSClient(self) as client:
volume_id = volume['id']
volume_name = volume['name']
- LOG.debug("Starting backup of volume='%s'." % volume_id)
+ LOG.debug("Starting backup of volume='%s'.", volume_id)
# Ensure we are at the beginning of the volume
volume_file.seek(0)
self._backup_rbd(backup_id, volume_id, volume_file,
volume_name, length)
except exception.BackupRBDOperationFailed:
- LOG.debug("Forcing full backup of volume %s." % volume_id)
+ LOG.debug("Forcing full backup of volume %s.", volume_id)
do_full_backup = True
else:
do_full_backup = True
# Cleanup.
self.delete(backup)
- LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished."
- % {'backup_id': backup_id, 'volume_id': volume_id})
+ LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished.",
+ {'backup_id': backup_id, 'volume_id': volume_id})
def _full_restore(self, backup_id, volume_id, dest_file, dest_name,
length, src_snap=None):
rbd_conf = restore_file.rbd_conf
LOG.debug("Attempting incremental restore from base='%(base)s' "
- "snap='%(snap)s'" %
+ "snap='%(snap)s'",
{'base': base_name, 'snap': restore_point})
before = time.time()
try:
self._check_restore_vol_size(base_name, restore_name, restore_length,
rbd_pool)
- LOG.debug("Restore transfer completed in %.4fs" %
+ LOG.debug("Restore transfer completed in %.4fs",
(time.time() - before))
def _num_backup_snaps(self, backup_base_name):
rbd_volume.diff_iterate(0, rbd_volume.size(), None, iter_cb)
if extents:
- LOG.debug("RBD has %s extents" % sum(extents))
+ LOG.debug("RBD has %s extents", sum(extents))
return True
return False
# made from, force a full restore since a diff will not work in
# this case.
if volume['id'] == backup['volume_id']:
- msg = ("Destination volume is same as backup source volume "
- "%s - forcing full copy." % volume['id'])
- LOG.debug(msg)
+ LOG.debug("Destination volume is same as backup source volume "
+ "%s - forcing full copy.", volume['id'])
return False, restore_point
if restore_point:
else:
LOG.info(_LI("No restore point found for "
"backup='%(backup)s' of "
- "volume %(volume)s - forcing full copy.") %
+ "volume %(volume)s - forcing full copy."),
{'backup': backup['id'],
'volume': backup['volume_id']})
restore_point, length)
do_full_restore = False
except exception.BackupRBDOperationFailed:
- LOG.debug("Forcing full restore to volume %s." %
+ LOG.debug("Forcing full restore to volume %s.",
volume['id'])
if do_full_restore:
if meta is not None:
self.put_metadata(volume_id, meta)
else:
- LOG.debug("Volume %s has no backed up metadata." %
+ LOG.debug("Volume %s has no backed up metadata.",
backup['volume_id'])
except exception.BackupMetadataUnsupportedVersion:
msg = _("Metadata restore failed due to incompatible version")
"""
target_volume = self.db.volume_get(self.context, volume_id)
LOG.debug('Starting restore from Ceph backup=%(src)s to '
- 'volume=%(dest)s' %
+ 'volume=%(dest)s',
{'src': backup['id'], 'dest': target_volume['name']})
try:
self._restore_metadata(backup, volume_id)
- LOG.debug('Restore to volume %s finished successfully.' %
+ LOG.debug('Restore to volume %s finished successfully.',
volume_id)
except exception.BackupOperationError as e:
LOG.error(_LE('Restore to volume %(volume)s finished with error - '
- '%(error)s.') % {'error': e, 'volume': volume_id})
+ '%(error)s.'), {'error': e, 'volume': volume_id})
raise
def delete(self, backup):
"""Delete the given backup from Ceph object store."""
- LOG.debug('Delete started for backup=%s' % backup['id'])
+ LOG.debug('Delete started for backup=%s', backup['id'])
delete_failed = False
try:
self._try_delete_base_image(backup['id'], backup['volume_id'])
except self.rbd.ImageNotFound:
- msg = (_LW("RBD image for backup %(backup)s of volume %(volume)s "
- "not found. Deleting backup metadata.")
- % {'backup': backup['id'], 'volume': backup['volume_id']})
- LOG.warning(msg)
+ LOG.warning(
+ _LW("RBD image for backup %(backup)s of volume %(volume)s "
+ "not found. Deleting backup metadata."),
+ {'backup': backup['id'], 'volume': backup['volume_id']})
delete_failed = True
with rbd_driver.RADOSClient(self) as client:
if delete_failed:
LOG.info(_LI("Delete of backup '%(backup)s' "
"for volume '%(volume)s' "
- "finished with warning.") %
+ "finished with warning."),
{'backup': backup['id'], 'volume': backup['volume_id']})
else:
LOG.debug("Delete of backup '%(backup)s' for volume "
- "'%(volume)s' finished." %
+ "'%(volume)s' finished.",
{'backup': backup['id'], 'volume': backup['volume_id']})
hardlink_path,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
- err = (_LE('backup: %(vol_id)s failed to remove backup hardlink'
- ' from %(vpath)s to %(bpath)s.\n'
- 'stdout: %(out)s\n stderr: %(err)s.')
- % {'vol_id': volume_id,
- 'vpath': volume_path,
- 'bpath': hardlink_path,
- 'out': exc.stdout,
- 'err': exc.stderr})
- LOG.error(err)
+ LOG.error(_LE('backup: %(vol_id)s failed to remove backup hardlink '
+ 'from %(vpath)s to %(bpath)s.\n'
+ 'stdout: %(out)s\n stderr: %(err)s.'),
+ {'vol_id': volume_id,
+ 'vpath': volume_path,
+ 'bpath': hardlink_path,
+ 'out': exc.stdout,
+ 'err': exc.stderr})
class TSMBackupDriver(driver.BackupDriver):
volume_path, backup_mode = _get_volume_realpath(volume_file,
volume_id)
LOG.debug('Starting backup of volume: %(volume_id)s to TSM,'
- ' volume path: %(volume_path)s, mode: %(mode)s.'
- % {'volume_id': volume_id,
- 'volume_path': volume_path,
- 'mode': backup_mode})
+ ' volume path: %(volume_path)s, mode: %(mode)s.',
+ {'volume_id': volume_id,
+ 'volume_path': volume_path,
+ 'mode': backup_mode})
backup_path = _create_unique_device_link(backup_id,
volume_path,
finally:
_cleanup_device_hardlink(backup_path, volume_path, volume_id)
- LOG.debug('Backup %s finished.' % backup_id)
+ LOG.debug('Backup %s finished.', backup_id)
def restore(self, backup, volume_id, volume_file):
"""Restore the given volume backup from TSM server.
LOG.debug('Starting restore of backup from TSM '
'to volume %(volume_id)s, '
'backup: %(backup_id)s, '
- 'mode: %(mode)s.' %
+ 'mode: %(mode)s.',
{'volume_id': volume_id,
'backup_id': backup_id,
'mode': backup_mode})
finally:
_cleanup_device_hardlink(restore_path, volume_path, volume_id)
- LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.'
- % {'backup_id': backup_id,
- 'volume_id': volume_id})
+ LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.',
+ {'backup_id': backup_id,
+ 'volume_id': volume_id})
def delete(self, backup):
"""Delete the given backup from TSM server.
# log error if tsm cannot delete the backup object
# but do not raise exception so that cinder backup
# object can be removed.
- err = (_LE('delete: %(vol_id)s failed with '
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'vol_id': volume_id,
- 'out': out,
- 'err': err})
- LOG.error(err)
+ LOG.error(_LE('delete: %(vol_id)s failed with '
+ 'stdout: %(out)s\n stderr: %(err)s'),
+ {'vol_id': volume_id,
+ 'out': out,
+ 'err': err})
- LOG.debug('Delete %s finished.' % backup['id'])
+ LOG.debug('Delete %s finished.', backup['id'])
def get_backup_driver(context):
msg = _("NULL host not allowed for volume backend lookup.")
raise exception.BackupFailedToGetVolumeBackend(msg)
else:
- LOG.debug("Checking hostname '%s' for backend info." % (host))
+ LOG.debug("Checking hostname '%s' for backend info.", host)
part = host.partition('@')
if (part[1] == '@') and (part[2] != ''):
backend = part[2]
- LOG.debug("Got backend '%s'." % (backend))
+ LOG.debug("Got backend '%s'.", backend)
return backend
- LOG.info(_LI("Backend not found in hostname (%s) so using default.") %
- (host))
+ LOG.info(_LI("Backend not found in hostname (%s) so using default."),
+ host)
if 'default' not in self.volume_managers:
# For multi-backend we just pick the top of the list.
return 'default'
def _get_manager(self, backend):
- LOG.debug("Manager requested for volume_backend '%s'." %
- (backend))
+ LOG.debug("Manager requested for volume_backend '%s'.",
+ backend)
if backend is None:
LOG.debug("Fetching default backend.")
backend = self._get_volume_backend(allow_null_host=True)
return self.volume_managers[backend]
def _get_driver(self, backend=None):
- LOG.debug("Driver requested for volume_backend '%s'." %
- (backend))
+ LOG.debug("Driver requested for volume_backend '%s'.",
+ backend)
if backend is None:
LOG.debug("Fetching default backend.")
backend = self._get_volume_backend(allow_null_host=True)
config = mgr.configuration
backend_name = config.safe_get('volume_backend_name')
LOG.debug("Registering backend %(backend)s (host=%(host)s "
- "backend_name=%(backend_name)s)." %
+ "backend_name=%(backend_name)s).",
{'backend': backend, 'host': host,
'backend_name': backend_name})
self.volume_managers[backend] = mgr
else:
default = importutils.import_object(CONF.volume_manager)
- LOG.debug("Registering default backend %s." % (default))
+ LOG.debug("Registering default backend %s.", default)
self.volume_managers['default'] = default
def _init_volume_driver(self, ctxt, driver):
- LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s).") %
+ LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)."),
{'driver_name': driver.__class__.__name__,
'version': driver.get_version()})
try:
driver.do_setup(ctxt)
driver.check_for_setup_error()
- except Exception as ex:
- LOG.error(_LE("Error encountered during initialization of driver: "
- "%(name)s.") %
- {'name': driver.__class__.__name__})
- LOG.exception(ex)
+ except Exception:
+ LOG.exception(_LE("Error encountered during initialization of "
+ "driver: %(name)s."),
+ {'name': driver.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
backups = self.db.backup_get_all_by_host(ctxt, self.host)
for backup in backups:
if backup['status'] == 'creating':
- LOG.info(_LI('Resetting backup %s to error (was creating).')
- % backup['id'])
+ LOG.info(_LI('Resetting backup %s to error (was creating).'),
+ backup['id'])
err = 'incomplete backup reset on manager restart'
self.db.backup_update(ctxt, backup['id'], {'status': 'error',
'fail_reason': err})
if backup['status'] == 'restoring':
LOG.info(_LI('Resetting backup %s to '
- ' available (was restoring).')
- % backup['id'])
+ 'available (was restoring).'),
+ backup['id'])
self.db.backup_update(ctxt, backup['id'],
{'status': 'available'})
if backup['status'] == 'deleting':
- LOG.info(_LI('Resuming delete on backup: %s.') % backup['id'])
+ LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
self.delete_backup(ctxt, backup['id'])
def create_backup(self, context, backup_id):
volume_id = backup['volume_id']
volume = self.db.volume_get(context, volume_id)
LOG.info(_LI('Create backup started, backup: %(backup_id)s '
- 'volume: %(volume_id)s.') %
+ 'volume: %(volume_id)s.'),
{'backup_id': backup_id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "create.start")
def restore_backup(self, context, backup_id, volume_id):
"""Restore volume backups from configured backup service."""
LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
- 'volume: %(volume_id)s.') %
+ 'volume: %(volume_id)s.'),
{'backup_id': backup_id, 'volume_id': volume_id})
backup = self.db.backup_get(context, backup_id)
backup = self.db.backup_update(context, backup_id,
{'status': 'available'})
LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
- ' to volume %(volume_id)s.') %
+ ' to volume %(volume_id)s.'),
{'backup_id': backup_id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "restore.end")
if isinstance(backup_service, driver.BackupDriverWithVerify):
backup_service.verify(backup_id)
else:
- LOG.warn(_LW('Backup service %(service)s does not support '
- 'verify. Backup id %(id)s is not verified. '
- 'Skipping verify.') % {'service':
- self.driver_name,
- 'id': backup_id})
+ LOG.warning(_LW('Backup service %(service)s does not '
+ 'support verify. Backup id %(id)s is '
+ 'not verified. Skipping verify.'),
+ {'service': self.driver_name,
+ 'id': backup_id})
except exception.InvalidBackup as err:
with excutils.save_and_reraise_exception():
self.db.backup_update(context, backup_id,
unicode(err)})
LOG.info(_LI('Import record id %s metadata from driver '
- 'finished.') % backup_id)
+ 'finished.'), backup_id)
def reset_status(self, context, backup_id, status):
"""Reset volume backup status.
{'status': status})
except exception.InvalidBackup:
with excutils.save_and_reraise_exception():
- msg = (_("Backup id %(id)s is not invalid. "
- "Skipping reset.") % {'id': backup_id})
- LOG.error(msg)
+ LOG.error(_LE("Backup id %s is not invalid. "
+ "Skipping reset."), backup_id)
except exception.BackupVerifyUnsupportedDriver:
with excutils.save_and_reraise_exception():
- msg = (_('Backup service %(configured_service)s '
- 'does not support verify. Backup id'
- ' %(id)s is not verified. '
- 'Skipping verify.') %
- {'configured_service': self.driver_name,
- 'id': backup_id})
- LOG.error(msg)
+ LOG.error(_LE('Backup service %(configured_service)s '
+ 'does not support verify. Backup id '
+ '%(id)s is not verified. '
+ 'Skipping verify.'),
+ {'configured_service': self.driver_name,
+ 'id': backup_id})
except AttributeError:
msg = (_('Backup service %(service)s does not support '
'verify. Backup id %(id)s is not verified. '
# send notification to ceilometer
notifier_info = {'id': backup_id, 'update': {'status': status}}
notifier = rpc.get_notifier('backupStatusUpdate')
- notifier.info(context, "backups" + '.reset_status.end',
+ notifier.info(context, "backups.reset_status.end",
notifier_info)
backup_url,
backup_hosts):
LOG.debug("import_record rpcapi backup id %(id)s "
- "on host %(host)s "
- "for backup_url %(url)s." % {'id': backup_id,
- 'host': host,
- 'url': backup_url})
+ "on host %(host)s for backup_url %(url)s.",
+ {'id': backup_id,
+ 'host': host,
+ 'url': backup_url})
cctxt = self.client.prepare(server=host)
cctxt.cast(ctxt, 'import_record',
backup_id=backup_id,
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
- LOG.warn(_LW('Id not in sort_keys; is sort_keys unique?'))
+ LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# of patches are done to address these issues. It should be
# removed completely when bug 1433216 is closed.
ignore_dirs = [
- "cinder/backup",
"cinder/brick",
- "cinder/common",
"cinder/db",
"cinder/openstack",
"cinder/scheduler",