From ab937afadff3e368125e39ca1aa96ae135663144 Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Thu, 6 Feb 2014 11:06:02 +0000
Subject: [PATCH] Ceph backup driver tidyup

Fixed typos and cleaned up log messages and docstrings.

Closes-Bug: 1276969
Change-Id: Idb1ba4f3315edb76fefc993b38b0590b4794345b
---
 cinder/backup/drivers/ceph.py | 198 +++++++++++++++++-----------------
 etc/cinder/cinder.conf.sample |  23 ++--
 2 files changed, 114 insertions(+), 107 deletions(-)

diff --git a/cinder/backup/drivers/ceph.py b/cinder/backup/drivers/ceph.py
index 94d5a401d..9312e7dc6 100644
--- a/cinder/backup/drivers/ceph.py
+++ b/cinder/backup/drivers/ceph.py
@@ -15,12 +15,12 @@
 """Ceph Backup Service Implementation.
 
-This driver supports backing up volumes of any type to a Ceph backend store. It
+This driver supports backing up volumes of any type to a Ceph object store. It
 is also capable of detecting whether the volume to be backed up is a Ceph RBD
-volume and if so, attempts to perform incremental/differential backups.
+volume and, if so, attempts to perform incremental/differential backups.
 
-Support is also included for the following in the case of source volume being a
-Ceph RBD volume:
+Support is also included for the following in the case of a source volume being
+a Ceph RBD volume:
 
     * backing up within the same Ceph pool (not recommended)
     * backing up between different Ceph pools
@@ -28,10 +28,11 @@ Ceph RBD volume:
 
 At the time of writing, differential backup support in Ceph/librbd was quite
 new so this driver accounts for this by first attempting differential backup
-and falling back to full backup/copy if the former fails.
+and falling back to full backup/copy if the former fails. It is recommended
+that you upgrade to Ceph Dumpling (>= v0.67) or above to get the best results.
 
 If incremental backups are used, multiple backups of the same volume are stored
-as snapshots so that minimal space is consumed in the backup store and
+as snapshots so that minimal space is consumed in the object store and
 restoring the volume takes a far reduced amount of time compared to a full
 copy.
 
@@ -41,18 +42,19 @@ was deemed the safest action to take. It is therefore recommended to always
 restore to a new volume (default).
 """
 
+import eventlet
 import fcntl
 import os
 import re
 import subprocess
 import time
 
-import eventlet
 from oslo.config import cfg
 
 from cinder.backup.driver import BackupDriver
 from cinder import exception
 from cinder.openstack.common import log as logging
+from cinder.openstack.common import strutils
 from cinder import units
 from cinder import utils
 import cinder.volume.drivers.rbd as rbd_driver
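
As the module docstring above explains, the driver's core strategy is to try a
differential backup first and fall back to a full copy when that fails. A
minimal sketch of that control flow (the helper names here are hypothetical,
not the driver's API):

    class BackupRBDOperationFailed(Exception):
        """Raised when a differential rbd operation fails."""


    def backup_volume(volume, backup_id):
        # Try the cheap differential path first; an rbd-level failure
        # falls back to a full copy of the volume.
        try:
            differential_backup(volume, backup_id)  # hypothetical helper
        except BackupRBDOperationFailed:
            full_backup(volume, backup_id)          # hypothetical helper
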
@@ -68,21 +70,23 @@ LOG = logging.getLogger(__name__)
 service_opts = [
     cfg.StrOpt('backup_ceph_conf', default='/etc/ceph/ceph.conf',
-               help='Ceph config file to use.'),
+               help='Ceph configuration file to use.'),
     cfg.StrOpt('backup_ceph_user', default='cinder',
-               help='the Ceph user to connect with'),
+               help='The Ceph user to connect with. Default here is to use '
+                    'the same user as for Cinder volumes. If not using cephx '
+                    'this should be set to None.'),
     cfg.IntOpt('backup_ceph_chunk_size', default=(units.MiB * 128),
-               help='the chunk size in bytes that a backup will be broken '
-                    'into before transfer to backup store'),
+               help='The chunk size, in bytes, that a backup is broken into '
+                    'before transfer to the Ceph object store.'),
     cfg.StrOpt('backup_ceph_pool', default='backups',
-               help='the Ceph pool to backup to'),
+               help='The Ceph pool where volume backups are stored.'),
     cfg.IntOpt('backup_ceph_stripe_unit', default=0,
-               help='RBD stripe unit to use when creating a backup image'),
+               help='RBD stripe unit to use when creating a backup image.'),
     cfg.IntOpt('backup_ceph_stripe_count', default=0,
-               help='RBD stripe count to use when creating a backup image'),
+               help='RBD stripe count to use when creating a backup image.'),
     cfg.BoolOpt('restore_discard_excess_bytes', default=True,
                 help='If True, always discard excess bytes when restoring '
-                     'volumes.')
+                     'volumes i.e. pad with zeroes.')
 ]
 
 CONF = cfg.CONF
@@ -90,11 +94,11 @@ CONF.register_opts(service_opts)
 
 
 class CephBackupDriver(BackupDriver):
-    """Backup up Cinder volumes to Ceph Object Store.
+    """Backup Cinder volumes to Ceph Object Store.
 
     This class enables backing up Cinder volumes to a Ceph object store.
     Backups may be stored in their own pool or even cluster. Store location is
-    defined by the Ceph conf file and Service config options supplied.
+    defined by the Ceph conf file and service config options supplied.
 
     If the source volume is itself an RBD volume, the backup will be performed
     using incremental differential backups which *should* give a performance
@@ -113,22 +117,14 @@ class CephBackupDriver(BackupDriver):
             self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit
             self.rbd_stripe_count = CONF.backup_ceph_stripe_count
         else:
-            LOG.info(_("rbd striping not supported - ignoring configuration "
+            LOG.info(_("RBD striping not supported - ignoring configuration "
                        "settings for rbd striping"))
             self.rbd_stripe_count = 0
             self.rbd_stripe_unit = 0
 
-        self._ceph_backup_user = self._utf8(CONF.backup_ceph_user)
-        self._ceph_backup_pool = self._utf8(CONF.backup_ceph_pool)
-        self._ceph_backup_conf = self._utf8(CONF.backup_ceph_conf)
-
-    @staticmethod
-    def _utf8(s):
-        """Ensure string s is utf8 (i.e. not unicode)."""
-        if isinstance(s, str):
-            return s
-
-        return s.encode('utf8')
+        self._ceph_backup_user = strutils.safe_encode(CONF.backup_ceph_user)
+        self._ceph_backup_pool = strutils.safe_encode(CONF.backup_ceph_pool)
+        self._ceph_backup_conf = strutils.safe_encode(CONF.backup_ceph_conf)
 
     def _validate_string_args(self, *args):
         """Ensure all args are non-None and non-empty."""
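
The private _utf8() helper removed above is replaced by the shared
strutils.safe_encode() from oslo-incubator. For this driver's usage the two
are roughly equivalent; on Python 2 the behaviour amounts to the following
sketch (the real helper also validates its input and supports incoming and
outgoing encodings):

    def safe_encode_sketch(text, encoding='utf-8'):
        # Byte strings pass through untouched; unicode is encoded to bytes.
        if isinstance(text, str):
            return text
        return text.encode(encoding)
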
@@ -184,7 +180,7 @@ class CephBackupDriver(BackupDriver):
                                        conffile=self._ceph_backup_conf)
         try:
             client.connect()
-            pool_to_open = self._utf8(pool or self._ceph_backup_pool)
+            pool_to_open = strutils.safe_encode(pool or self._ceph_backup_pool)
             ioctx = client.open_ioctx(pool_to_open)
             return client, ioctx
         except self.rados.Error:
@@ -207,12 +203,13 @@
         """
         # Ensure no unicode
         if diff_format:
-            return self._utf8("volume-%s.backup.base" % (volume_id))
+            return strutils.safe_encode("volume-%s.backup.base" % (volume_id))
         else:
             if backup_id is None:
-                msg = _("backup_id required")
+                msg = _("Backup id required")
                 raise exception.InvalidParameterValue(msg)
-            return self._utf8("volume-%s.backup.%s" % (volume_id, backup_id))
+            return strutils.safe_encode("volume-%s.backup.%s" %
+                                        (volume_id, backup_id))
 
     def _discard_bytes(self, volume, offset, length):
         """Trim length bytes from offset.
@@ -221,7 +218,7 @@
         and pad with zeroes.
         """
         if length:
-            LOG.debug(_("discarding %(length)s bytes from offset %(offset)s") %
+            LOG.debug(_("Discarding %(length)s bytes from offset %(offset)s") %
                       {'length': length, 'offset': offset})
             if self._file_is_rbd(volume):
                 volume.rbd_image.discard(offset, length)
@@ -229,7 +226,7 @@
                 zeroes = '\0' * length
                 chunks = int(length / self.chunk_size)
                 for chunk in xrange(0, chunks):
-                    LOG.debug(_("writing zeroes chunk %d") % (chunk))
+                    LOG.debug(_("Writing zeroes chunk %d") % (chunk))
                     volume.write(zeroes)
                     volume.flush()
                     # yield to any other pending backups
@@ -243,7 +240,7 @@
 
     def _transfer_data(self, src, src_name, dest, dest_name, length):
         """Transfer data between files (Python IO objects)."""
-        LOG.debug(_("transferring data between '%(src)s' and '%(dest)s'") %
+        LOG.debug(_("Transferring data between '%(src)s' and '%(dest)s'") %
                   {'src': src_name, 'dest': dest_name})
 
         chunks = int(length / self.chunk_size)
@@ -266,7 +263,7 @@
             dest.flush()
             delta = (time.time() - before)
             rate = (self.chunk_size / delta) / 1024
-            LOG.debug((_("transferred chunk %(chunk)s of %(chunks)s "
+            LOG.debug((_("Transferred chunk %(chunk)s of %(chunks)s "
                          "(%(rate)dK/s)") %
                        {'chunk': chunk + 1, 'chunks': chunks,
                         'rate': rate}))
@@ -276,7 +273,7 @@
 
         rem = int(length % self.chunk_size)
         if rem:
-            LOG.debug(_("transferring remaining %s bytes") % (rem))
+            LOG.debug(_("Transferring remaining %s bytes") % (rem))
             data = src.read(rem)
             if data == '':
                 if CONF.restore_discard_excess_bytes:
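
The hunks above all touch the chunked copy path: _transfer_data() moves data
between source and destination file objects in backup_ceph_chunk_size pieces,
flushing as it goes. Schematically, the loop works like this (a simplified
sketch that drops the throttling, progress logging and zero-discard handling
of the real method):

    CHUNK_SIZE = 128 * 1024 * 1024  # the backup_ceph_chunk_size default


    def transfer_data(src, dest, length):
        # Copy 'length' bytes in fixed-size chunks, flushing after each
        # write so partial progress is durable.
        chunks, rem = divmod(length, CHUNK_SIZE)
        for _ in xrange(chunks):
            dest.write(src.read(CHUNK_SIZE))
            dest.flush()
        if rem:
            dest.write(src.read(rem))
            dest.flush()
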
""" - LOG.debug(_("creating base image '%s'") % (name)) + LOG.debug(_("Creating base image '%s'") % (name)) old_format, features = self._get_rbd_support() self.rbd.RBD().create(ioctx=rados_client.ioctx, name=name, @@ -319,10 +316,10 @@ class CephBackupDriver(BackupDriver): snap_name = self._get_backup_snap_name(base_rbd, base_name, backup_id) if snap_name: - LOG.debug(_("deleting backup snapshot='%s'") % (snap_name)) + LOG.debug(_("Deleting backup snapshot='%s'") % (snap_name)) base_rbd.remove_snap(snap_name) else: - LOG.debug(_("no backup snapshot to delete")) + LOG.debug(_("No backup snapshot to delete")) # Now check whether any snapshots remain on the base image backup_snaps = self.get_backup_snaps(base_rbd) @@ -358,7 +355,7 @@ class CephBackupDriver(BackupDriver): try_diff_format = True base_name = self._get_backup_base_name(volume_id, backup_id) - LOG.debug(_("trying diff format name format basename='%s'") % + LOG.debug(_("Trying diff format name format basename='%s'") % (base_name)) with rbd_driver.RADOSClient(self) as client: @@ -374,27 +371,27 @@ class CephBackupDriver(BackupDriver): snap, rem = self._delete_backup_snapshot(client, base_name, backup_id) if rem: - msg = (_("base image still has %s snapshots so skipping " + msg = (_("Base image still has %s snapshots so skipping " "base image delete") % (rem)) LOG.info(msg) return - LOG.info(_("deleting base image='%s'") % (base_name)) + LOG.info(_("Deleting base image='%s'") % (base_name)) # Delete base if no more snapshots try: self.rbd.RBD().remove(client.ioctx, base_name) except self.rbd.ImageBusy as exc: # Allow a retry if the image is busy if retries > 0: - LOG.info((_("image busy, retrying %(retries)s " + LOG.info((_("Image busy, retrying %(retries)s " "more time(s) in %(delay)ss") % {'retries': retries, 'delay': delay})) eventlet.sleep(delay) else: - LOG.error(_("max retries reached - raising error")) + LOG.error(_("Max retries reached - raising error")) raise exc else: - LOG.debug(_("base backup image='%s' deleted)") % + LOG.debug(_("Base backup image='%s' deleted)") % (base_name)) retries = 0 finally: @@ -402,9 +399,9 @@ class CephBackupDriver(BackupDriver): # Since we have deleted the base image we can delete the source # volume backup snapshot. - src_name = self._utf8(volume_id) + src_name = strutils.safe_encode(volume_id) if src_name in self.rbd.RBD().list(client.ioctx): - LOG.debug(_("deleting source snap '%s'") % snap) + LOG.debug(_("Deleting source snapshot '%s'") % snap) src_rbd = self.rbd.Image(client.ioctx, src_name) try: src_rbd.remove_snap(snap) @@ -413,14 +410,14 @@ class CephBackupDriver(BackupDriver): def _piped_execute(self, cmd1, cmd2): """Pipe output of cmd1 into cmd2.""" - LOG.debug("piping cmd1='%s' into..." % (' '.join(cmd1))) + LOG.debug("Piping cmd1='%s' into..." % (' '.join(cmd1))) LOG.debug("cmd2='%s'" % (' '.join(cmd2))) try: p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: - LOG.error("pipe1 failed - %s " % unicode(e)) + LOG.error("Pipe1 failed - %s " % unicode(e)) raise # NOTE(dosaboy): ensure that the pipe is blocking. This is to work @@ -434,7 +431,7 @@ class CephBackupDriver(BackupDriver): stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: - LOG.error("pipe2 failed - %s " % unicode(e)) + LOG.error("Pipe2 failed - %s " % unicode(e)) raise p1.stdout.close() @@ -450,7 +447,7 @@ class CephBackupDriver(BackupDriver): since the rbd volume/base was created, otherwise it will be those changed since the snapshot was created. 
""" - LOG.debug(_("performing differential transfer from '%(src)s' to " + LOG.debug(_("Performing differential transfer from '%(src)s' to " "'%(dest)s'") % {'src': src_name, 'dest': dest_name}) @@ -465,17 +462,20 @@ class CephBackupDriver(BackupDriver): if from_snap is not None: cmd1.extend(['--from-snap', from_snap]) if src_snap: - path = self._utf8("%s/%s@%s" % (src_pool, src_name, src_snap)) + path = strutils.safe_encode("%s/%s@%s" % + (src_pool, src_name, src_snap)) else: - path = self._utf8("%s/%s" % (src_pool, src_name)) + path = strutils.safe_encode("%s/%s" % (src_pool, src_name)) + cmd1.extend([path, '-']) cmd2 = ['rbd', 'import-diff'] + dest_ceph_args - cmd2.extend(['-', self._utf8("%s/%s" % (dest_pool, dest_name))]) + rbd_path = strutils.safe_encode("%s/%s" % (dest_pool, dest_name)) + cmd2.extend(['-', rbd_path]) ret, stderr = self._piped_execute(cmd1, cmd2) if ret: - msg = (_("rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)") % + msg = (_("RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)") % ({'ret': ret, 'stderr': stderr})) LOG.info(msg) raise exception.BackupRBDOperationFailed(msg) @@ -485,12 +485,12 @@ class CephBackupDriver(BackupDriver): """Return tuple (exists, name).""" rbds = self.rbd.RBD().list(client.ioctx) if name not in rbds: - msg = _("image '%s' not found - trying diff format name") % (name) + msg = _("Image '%s' not found - trying diff format name") % (name) LOG.debug(msg) if try_diff_format: name = self._get_backup_base_name(volume_id, diff_format=True) if name not in rbds: - msg = _("diff format image '%s' not found") % (name) + msg = _("Diff format image '%s' not found") % (name) LOG.debug(msg) return False, name else: @@ -525,7 +525,7 @@ class CephBackupDriver(BackupDriver): # Identify our --from-snap point (if one exists) from_snap = self._get_most_recent_snap(source_rbd_image) - LOG.debug(_("using --from-snap '%s'") % from_snap) + LOG.debug(_("Using --from-snap '%s'") % from_snap) base_name = self._get_backup_base_name(volume_id, diff_format=True) image_created = False @@ -540,7 +540,7 @@ class CephBackupDriver(BackupDriver): # If a from_snap is defined but the base does not exist, we # ignore it since it is stale and waiting to be cleaned up. if from_snap: - LOG.debug(_("source snap '%s' is stale so deleting") % + LOG.debug(_("Source snapshot '%s' is stale so deleting") % (from_snap)) source_rbd_image.remove_snap(from_snap) from_snap = None @@ -552,7 +552,7 @@ class CephBackupDriver(BackupDriver): # If a from_snap is defined but does not exist in the back base # then we cannot proceed (see above) if not self._snap_exists(base_name, from_snap, client): - errmsg = (_("snap='%(snap)s' does not exist in base " + errmsg = (_("Snapshot='%(snap)s' does not exist in base " "image='%(base)s' - aborting incremental " "backup") % {'snap': from_snap, 'base': base_name}) @@ -563,7 +563,7 @@ class CephBackupDriver(BackupDriver): # Snapshot source volume so that we have a new point-in-time new_snap = self._get_new_snap_name(backup_id) - LOG.debug(_("creating backup snapshot='%s'") % (new_snap)) + LOG.debug(_("Creating backup snapshot='%s'") % (new_snap)) source_rbd_image.create_snap(new_snap) # Attempt differential backup. 
@@ -583,7 +583,7 @@
                                            src_snap=new_snap,
                                            from_snap=from_snap)
 
-            LOG.debug(_("differential backup transfer completed in %.4fs") %
+            LOG.debug(_("Differential backup transfer completed in %.4fs") %
                       (time.time() - before))
 
             # We don't need the previous snapshot (if there was one) anymore so
@@ -592,7 +592,7 @@
                 source_rbd_image.remove_snap(from_snap)
 
         except exception.BackupRBDOperationFailed:
-            LOG.debug(_("differential backup transfer failed"))
+            LOG.debug(_("Differential backup transfer failed"))
 
             # Clean up if image was created as part of this operation
             if image_created:
@@ -600,7 +600,7 @@
                                             base_name=base_name)
 
             # Delete snapshot
-            LOG.debug(_("deleting backup snapshot='%s'") % (new_snap))
+            LOG.debug(_("Deleting backup snapshot='%s'") % (new_snap))
             source_rbd_image.remove_snap(new_snap)
 
             # Re-raise the exception so that caller can try another approach
@@ -622,7 +622,7 @@
         with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
             # First create base backup image
             old_format, features = self._get_rbd_support()
-            LOG.debug(_("creating base image='%s'") % (backup_name))
+            LOG.debug(_("Creating base image='%s'") % (backup_name))
             self.rbd.RBD().create(ioctx=client.ioctx,
                                   name=backup_name,
                                   size=length,
@@ -631,7 +631,7 @@
                                   stripe_unit=self.rbd_stripe_unit,
                                   stripe_count=self.rbd_stripe_count)
 
-            LOG.debug(_("copying data"))
+            LOG.debug(_("Copying data"))
             dest_rbd = self.rbd.Image(client.ioctx, backup_name)
             try:
                 rbd_meta = rbd_driver.RBDImageMetadata(dest_rbd,
@@ -678,7 +678,8 @@
         return backup_snaps
 
     def _get_new_snap_name(self, backup_id):
-        return self._utf8("backup.%s.snap.%s" % (backup_id, time.time()))
+        return strutils.safe_encode("backup.%s.snap.%s" %
+                                    (backup_id, time.time()))
 
     def _get_backup_snap_name(self, rbd_image, name, backup_id):
         """Return the name of the snapshot associated with backup_id.
@@ -691,26 +692,26 @@
         """
         snaps = self.get_backup_snaps(rbd_image)
 
-        LOG.debug(_("looking for snapshot of backup base '%s'") % (name))
+        LOG.debug(_("Looking for snapshot of backup base '%s'") % (name))
 
         if not snaps:
-            LOG.debug(_("backup base '%s' has no snapshots") % (name))
+            LOG.debug(_("Backup base '%s' has no snapshots") % (name))
             return None
 
         snaps = [snap['name'] for snap in snaps
                  if snap['backup_id'] == backup_id]
 
         if not snaps:
-            LOG.debug(_("backup '%s' has no snapshot") % (backup_id))
+            LOG.debug(_("Backup '%s' has no snapshot") % (backup_id))
             return None
 
         if len(snaps) > 1:
-            msg = (_("backup should only have one snapshot but instead has %s")
+            msg = (_("Backup should only have one snapshot but instead has %s")
                    % (len(snaps)))
             LOG.error(msg)
             raise exception.BackupOperationError(msg)
 
-        LOG.debug(_("found snapshot '%s'") % (snaps[0]))
+        LOG.debug(_("Found snapshot '%s'") % (snaps[0]))
         return snaps[0]
 
     def _get_most_recent_snap(self, rbd_image):
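
Backup snapshots use the backup.<backup_id>.snap.<timestamp> naming scheme
produced by _get_new_snap_name() above, which is what lets the driver map a
snapshot on the base image back to the backup that created it. A sketch of
that round trip (the driver's own parsing lives in get_backup_snaps(); the
regex here is illustrative):

    import re
    import time


    def new_snap_name(backup_id):
        return "backup.%s.snap.%s" % (backup_id, time.time())


    def backup_id_from_snap(snap_name):
        # Return the embedded backup id, or None for foreign snapshots.
        m = re.match(r"^backup\.([^.]+)\.snap\.(.+)$", snap_name)
        return m.group(1) if m else None
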
""" if int(volume['size']) == 0: - errmsg = _("need non-zero volume size") + errmsg = _("Need non-zero volume size") raise exception.InvalidParameterValue(errmsg) return int(volume['size']) * units.GiB @@ -761,7 +762,7 @@ class CephBackupDriver(BackupDriver): self._backup_rbd(backup_id, volume_id, volume_file, volume_name, length) except exception.BackupRBDOperationFailed: - LOG.debug(_("forcing full backup")) + LOG.debug(_("Forcing full backup")) do_full_backup = True else: do_full_backup = True @@ -773,7 +774,7 @@ class CephBackupDriver(BackupDriver): self.db.backup_update(self.context, backup_id, {'container': self._ceph_backup_pool}) - LOG.debug(_("backup '%s' finished.") % (backup_id)) + LOG.debug(_("Backup '%s' finished.") % (backup_id)) def _full_restore(self, backup_id, volume_id, dest_file, dest_name, length, src_snap=None): @@ -818,7 +819,8 @@ class CephBackupDriver(BackupDriver): """ with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: adjust_size = 0 - base_image = self.rbd.Image(client.ioctx, self._utf8(backup_base), + base_image = self.rbd.Image(client.ioctx, + strutils.safe_encode(backup_base), read_only=True) try: if restore_length != base_image.size(): @@ -829,9 +831,9 @@ class CephBackupDriver(BackupDriver): if adjust_size: with rbd_driver.RADOSClient(self, src_pool) as client: dest_image = self.rbd.Image(client.ioctx, - self._utf8(restore_vol)) + strutils.safe_encode(restore_vol)) try: - LOG.debug(_("adjusting restore vol size")) + LOG.debug(_("Adjusting restore vol size")) dest_image.resize(adjust_size) finally: dest_image.close() @@ -843,7 +845,7 @@ class CephBackupDriver(BackupDriver): rbd_pool = restore_file.rbd_pool rbd_conf = restore_file.rbd_conf - LOG.debug(_("trying incremental restore from base='%(base)s' " + LOG.debug(_("Attempting incremental restore from base='%(base)s' " "snap='%(snap)s'") % {'base': base_name, 'snap': restore_point}) before = time.time() @@ -855,7 +857,7 @@ class CephBackupDriver(BackupDriver): dest_user=rbd_user, dest_conf=rbd_conf, src_snap=restore_point) except exception.BackupRBDOperationFailed: - LOG.exception(_("differential restore failed, trying full " + LOG.exception(_("Differential restore failed, trying full " "restore")) raise @@ -866,7 +868,7 @@ class CephBackupDriver(BackupDriver): self._check_restore_vol_size(base_name, restore_name, restore_length, rbd_pool) - LOG.debug(_("restore transfer completed in %.4fs") % + LOG.debug(_("Restore transfer completed in %.4fs") % (time.time() - before)) def _num_backup_snaps(self, backup_base_name): @@ -887,7 +889,9 @@ class CephBackupDriver(BackupDriver): def _get_restore_point(self, base_name, backup_id): """Get restore point snapshot name for incremental backup. - If the backup was not incremental None is returned. + If the backup was not incremental (determined by the fact that the + base has no snapshots/restore points), None is returned. Otherwise, the + restore point associated with backup_id is returned. 
""" with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: base_rbd = self.rbd.Image(client.ioctx, base_name, read_only=True) @@ -913,7 +917,7 @@ class CephBackupDriver(BackupDriver): rbd_volume.diff_iterate(0, rbd_volume.size(), None, iter_cb) if extents: - LOG.debug(_("rbd has %s extents") % (sum(extents))) + LOG.debug(_("RBD has %s extents") % (sum(extents))) return True return False @@ -956,12 +960,12 @@ class CephBackupDriver(BackupDriver): if self._rbd_has_extents(volume_file.rbd_image): # We return the restore point so that a full copy is done # from snapshot. - LOG.debug(_("destination has extents - forcing full copy")) + LOG.debug(_("Destination has extents - forcing full copy")) return False, restore_point return True, restore_point else: - LOG.info(_("no restore point found for backup='%s', forcing " + LOG.info(_("No restore point found for backup='%s', forcing " "full copy") % (backup['id'])) return not_allowed @@ -992,7 +996,7 @@ class CephBackupDriver(BackupDriver): restore_point, length) do_full_restore = False except exception.BackupRBDOperationFailed: - LOG.debug(_("forcing full restore")) + LOG.debug(_("Forcing full restore")) if do_full_restore: # Otherwise full copy @@ -1002,7 +1006,7 @@ class CephBackupDriver(BackupDriver): def restore(self, backup, volume_id, volume_file): """Restore the given volume backup from Ceph object store.""" target_volume = self.db.volume_get(self.context, volume_id) - LOG.debug(_('starting restore from Ceph backup=%(src)s to ' + LOG.debug(_('Starting restore from Ceph backup=%(src)s to ' 'volume=%(dest)s') % {'src': backup['id'], 'dest': target_volume['name']}) @@ -1013,30 +1017,30 @@ class CephBackupDriver(BackupDriver): try: fileno = volume_file.fileno() except IOError: - LOG.info(_("volume_file does not support fileno() so skipping " - "fsync()")) + LOG.debug(_("Volume_file does not support fileno() so " + "skipping fsync()")) else: os.fsync(fileno) - LOG.debug(_('restore finished successfully.')) + LOG.debug(_('Restore finished successfully.')) except exception.BackupOperationError as e: - LOG.error(_('restore finished with error - %s') % (e)) + LOG.error(_('Restore finished with error - %s') % (e)) raise def delete(self, backup): """Delete the given backup from Ceph object store.""" backup_id = backup['id'] - LOG.debug(_('delete started for backup=%s') % backup['id']) + LOG.debug(_('Delete started for backup=%s') % backup['id']) try: self._try_delete_base_image(backup['id'], backup['volume_id']) except self.rbd.ImageNotFound: - msg = _("rbd image not found but continuing anyway so " + msg = _("RBD image not found but continuing anyway so " "that db entry can be removed") LOG.warning(msg) - LOG.info(_("delete '%s' finished with warning") % (backup_id)) + LOG.info(_("Delete '%s' finished with warning") % (backup_id)) else: - LOG.debug(_("delete '%s' finished") % (backup_id)) + LOG.debug(_("Delete '%s' finished") % (backup_id)) def get_backup_driver(context): diff --git a/etc/cinder/cinder.conf.sample b/etc/cinder/cinder.conf.sample index 069708ac2..15e31fbef 100644 --- a/etc/cinder/cinder.conf.sample +++ b/etc/cinder/cinder.conf.sample @@ -145,29 +145,32 @@ # Options defined in cinder.backup.drivers.ceph # -# Ceph config file to use. (string value) +# Ceph configuration file to use. (string value) #backup_ceph_conf=/etc/ceph/ceph.conf -# the Ceph user to connect with (string value) +# The Ceph user to connect with. Default here is to use the +# same user as for Cinder volumes. 
+# should be set to None. (string value)
 #backup_ceph_user=cinder
 
-# the chunk size in bytes that a backup will be broken into
-# before transfer to backup store (integer value)
+# The chunk size, in bytes, that a backup is broken into
+# before transfer to the Ceph object store. (integer value)
 #backup_ceph_chunk_size=134217728
 
-# the Ceph pool to backup to (string value)
+# The Ceph pool where volume backups are stored. (string
+# value)
 #backup_ceph_pool=backups
 
-# RBD stripe unit to use when creating a backup image (integer
-# value)
+# RBD stripe unit to use when creating a backup image.
+# (integer value)
 #backup_ceph_stripe_unit=0
 
-# RBD stripe count to use when creating a backup image
+# RBD stripe count to use when creating a backup image.
 # (integer value)
 #backup_ceph_stripe_count=0
 
-# If True, always discard excess bytes when restoring volumes.
-# (boolean value)
+# If True, always discard excess bytes when restoring volumes
+# i.e. pad with zeroes. (boolean value)
 #restore_discard_excess_bytes=true
-- 
2.45.2
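
For reference, an illustrative cinder.conf fragment that selects this driver
with the defaults documented above (backup_driver is Cinder's standard option
for choosing a backup service; the values shown are the sample defaults):

    [DEFAULT]
    backup_driver = cinder.backup.drivers.ceph
    backup_ceph_conf = /etc/ceph/ceph.conf
    backup_ceph_user = cinder
    backup_ceph_pool = backups
    backup_ceph_chunk_size = 134217728
    backup_ceph_stripe_unit = 0
    backup_ceph_stripe_count = 0
    restore_discard_excess_bytes = true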