cfg.IntOpt('backup_metadata_version', default=2,
help='Backup metadata version to be used when backing up '
'volume metadata. If this number is bumped, make sure the '
- 'service doing the restore supports the new version.')
+ 'service doing the restore supports the new version.'),
+ cfg.IntOpt('backup_object_number_per_notification',
+ default=10,
+ help='The number of chunks or objects for which one '
+ 'Ceilometer notification will be sent'),
+ cfg.IntOpt('backup_timer_interval',
+ default=120,
+ help='Interval, in seconds, between two progress notifications '
+ 'reporting the backup status'),
]
CONF = cfg.CONF
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
+from cinder.openstack.common import loopingcall
+from cinder.volume import utils as volume_utils
+
LOG = logging.getLogger(__name__)
cfg.StrOpt('backup_compression_algorithm',
default='zlib',
help='Compression algorithm (None to disable)'),
+ cfg.BoolOpt('backup_swift_enable_progress_timer',
+ default=True,
+ help='Enable or disable the timer to send the periodic '
+ 'progress notifications to Ceilometer when backing '
+ 'up the volume to the Swift backend storage. The '
+ 'default value is True to enable the timer.'),
]
CONF = cfg.CONF
LOG.debug("Using swift URL %s", self.swift_url)
self.az = CONF.storage_availability_zone
self.data_block_size_bytes = CONF.backup_swift_object_size
+ self.backup_timer_interval = CONF.backup_timer_interval
+ self.data_block_num = CONF.backup_object_number_per_notification
+ self.enable_progress_timer = CONF.backup_swift_enable_progress_timer
self.swift_attempts = CONF.backup_swift_retry_attempts
self.swift_backoff = CONF.backup_swift_retry_backoff
self.compressor = \
})
object_meta = {'id': 1, 'list': [], 'prefix': object_prefix,
'volume_meta': None}
- return object_meta, container
+ return object_meta, container, volume_size_bytes
def _backup_chunk(self, backup, container, data, data_offset, object_meta):
"""Backup data chunk based on the object metadata and offset."""
object_meta["volume_meta"] = json_meta
+ def _send_progress_end(self, context, backup, object_meta):
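+ """Send a final progress notification reporting 100 percent."""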
+ object_meta['backup_percent'] = 100
+ volume_utils.notify_about_backup_usage(context,
+ backup,
+ "createprogress",
+ extra_usage_info=object_meta)
+
+ def _send_progress_notification(self, context, backup, object_meta,
+ total_block_sent_num, total_volume_size):
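+ """Send a progress notification with the current backup percentage."""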
+ backup_percent = total_block_sent_num * 100 / total_volume_size
+ object_meta['backup_percent'] = backup_percent
+ volume_utils.notify_about_backup_usage(context,
+ backup,
+ "createprogress",
+ extra_usage_info=object_meta)
+
def backup(self, backup, volume_file, backup_metadata=True):
"""Backup the given volume to Swift."""
+ (object_meta, container,
+ volume_size_bytes) = self._prepare_backup(backup)
+ counter = 0
+ total_block_sent_num = 0
+
+ # There are two mechanisms for sending progress notifications:
+ # 1. Notifications are sent periodically at a fixed interval.
+ # 2. Notifications are sent after every data_block_num chunks.
+ # Both mechanisms run simultaneously during the volume backup
+ # when Swift is used as the backup backend.
+ def _notify_progress():
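+ # The closure reads the counters from the enclosing scope at
+ # call time, so each timer tick reports the latest progress.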
+ self._send_progress_notification(self.context, backup,
+ object_meta,
+ total_block_sent_num,
+ volume_size_bytes)
+ timer = loopingcall.FixedIntervalLoopingCall(_notify_progress)
+ if self.enable_progress_timer:
+ timer.start(interval=self.backup_timer_interval)
- object_meta, container = self._prepare_backup(backup)
while True:
data = volume_file.read(self.data_block_size_bytes)
data_offset = volume_file.tell()
break
self._backup_chunk(backup, container, data,
data_offset, object_meta)
+ # Track the number of bytes sent, so that the percentage is
+ # computed against the volume size in bytes.
+ total_block_sent_num += len(data)
+ counter += 1
+ if counter == self.data_block_num:
+ # Send a notification to Ceilometer once the number of
+ # chunks sent reaches data_block_num. The backup
+ # percentage is put in the metadata as extra information.
+ self._send_progress_notification(self.context, backup,
+ object_meta,
+ total_block_sent_num,
+ volume_size_bytes)
+ # Reset the counter.
+ counter = 0
+
+ # Stop the timer.
+ timer.stop()
+ # All the data has been sent; report a final backup_percent of 100.
+ self._send_progress_end(self.context, backup, object_meta)
if backup_metadata:
try:
LOG.info(_LI('Create backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.') %
{'backup_id': backup_id, 'volume_id': volume_id})
+
+ self._notify_about_backup_usage(context, backup, "create.start")
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
'fail_reason': unicode(err)})
self.db.volume_update(context, volume_id, {'status': 'available'})
- self.db.backup_update(context, backup_id, {'status': 'available',
- 'size': volume['size'],
- 'availability_zone':
- self.az})
+ backup = self.db.backup_update(context, backup_id,
+ {'status': 'available',
+ 'size': volume['size'],
+ 'availability_zone': self.az})
LOG.info(_LI('Create backup finished. backup: %s.'), backup_id)
+ self._notify_about_backup_usage(context, backup, "create.end")
def restore_backup(self, context, backup_id, volume_id):
"""Restore volume backups from configured backup service."""
volume = self.db.volume_get(context, volume_id)
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
+ self._notify_about_backup_usage(context, backup, "restore.start")
self.db.backup_update(context, backup_id, {'host': self.host})
{'status': 'available'})
self.db.volume_update(context, volume_id, {'status': 'available'})
- self.db.backup_update(context, backup_id, {'status': 'available'})
+ backup = self.db.backup_update(context, backup_id,
+ {'status': 'available'})
LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
' to volume %(volume_id)s.') %
{'backup_id': backup_id, 'volume_id': volume_id})
+ self._notify_about_backup_usage(context, backup, "restore.end")
def delete_backup(self, context, backup_id):
"""Delete volume backup from configured backup service."""
LOG.info(_LI('Delete backup started, backup: %s.'), backup_id)
backup = self.db.backup_get(context, backup_id)
+ self._notify_about_backup_usage(context, backup, "delete.start")
self.db.backup_update(context, backup_id, {'host': self.host})
expected_status = 'deleting'
project_id=backup['project_id'])
LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup_id)
+ self._notify_about_backup_usage(context, backup, "delete.end")
+
+ def _notify_about_backup_usage(self,
+ context,
+ backup,
+ event_suffix,
+ extra_usage_info=None):
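+ """Send a usage notification for a backup through volume_utils."""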
+ volume_utils.notify_about_backup_usage(
+ context, backup, event_suffix,
+ extra_usage_info=extra_usage_info,
+ host=self.host)
def export_record(self, context, backup_id):
"""Export all volume backup metadata details to allow clean import.
self.assertEqual(backup['size'], vol_size)
self.assertTrue(_mock_volume_backup.called)
+ @mock.patch('cinder.volume.utils.notify_about_backup_usage')
+ @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
+ def test_create_backup_with_notify(self, _mock_volume_backup, notify):
+ """Test normal backup creation with notifications."""
+ vol_size = 1
+ vol_id = self._create_volume_db_entry(size=vol_size)
+ backup_id = self._create_backup_db_entry(volume_id=vol_id)
+
+ self.backup_mgr.create_backup(self.ctxt, backup_id)
+ self.assertEqual(2, notify.call_count)
+
def test_restore_backup_with_bad_volume_status(self):
"""Test error handling when restoring a backup to a volume
with a bad status.
self.assertEqual(backup['status'], 'available')
self.assertTrue(_mock_volume_restore.called)
+ @mock.patch('cinder.volume.utils.notify_about_backup_usage')
+ @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
+ def test_restore_backup_with_notify(self, _mock_volume_restore, notify):
+ """Test normal backup restoration with notifications."""
+ vol_size = 1
+ vol_id = self._create_volume_db_entry(status='restoring-backup',
+ size=vol_size)
+ backup_id = self._create_backup_db_entry(status='restoring',
+ volume_id=vol_id)
+
+ self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id)
+ self.assertEqual(2, notify.call_count)
+
def test_delete_backup_with_bad_backup_status(self):
"""Test error handling when deleting a backup with a backup
with a bad status.
self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
self.assertEqual(backup.status, 'deleted')
+ @mock.patch('cinder.volume.utils.notify_about_backup_usage')
+ def test_delete_backup_with_notify(self, notify):
+ """Test normal backup deletion with notifications."""
+ vol_id = self._create_volume_db_entry(size=1)
+ backup_id = self._create_backup_db_entry(status='deleting',
+ volume_id=vol_id)
+ self.backup_mgr.delete_backup(self.ctxt, backup_id)
+ self.assertEqual(2, notify.call_count)
+
def test_list_backup(self):
backups = db.backup_get_all_by_project(self.ctxt, 'project1')
self.assertEqual(len(backups), 0)
import tempfile
import zlib
+import mock
from oslo.config import cfg
from swiftclient import client as swift
backup = db.backup_get(self.ctxt, 123)
self.assertEqual(backup['container'], 'volumebackups')
+ @mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.'
+ '_send_progress_end')
+ @mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.'
+ '_send_progress_notification')
+ def test_backup_default_container_notify(self, _send_progress,
+ _send_progress_end):
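+ """Test progress notifications when backing up to the default container."""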
+ self._create_backup_db_entry(container=None)
+ # If backup_object_number_per_notification is set to 1,
+ # the _send_progress method is guaranteed to be called.
+ CONF.set_override("backup_object_number_per_notification", 1)
+ CONF.set_override("backup_swift_enable_progress_timer", False)
+ service = SwiftBackupDriver(self.ctxt)
+ self.volume_file.seek(0)
+ backup = db.backup_get(self.ctxt, 123)
+ service.backup(backup, self.volume_file)
+ self.assertTrue(_send_progress.called)
+ self.assertTrue(_send_progress_end.called)
+
+ # If backup_object_number_per_notification is raised above the
+ # number of chunks in the backup, _send_progress is not called.
+ _send_progress.reset_mock()
+ _send_progress_end.reset_mock()
+ CONF.set_override("backup_object_number_per_notification", 10)
+ service = SwiftBackupDriver(self.ctxt)
+ self.volume_file.seek(0)
+ backup = db.backup_get(self.ctxt, 123)
+ service.backup(backup, self.volume_file)
+ self.assertFalse(_send_progress.called)
+ self.assertTrue(_send_progress_end.called)
+
+ # If the timer is enabled, _send_progress will be called, since
+ # the timer triggers the progress notification independently of
+ # the chunk counter.
+ _send_progress.reset_mock()
+ _send_progress_end.reset_mock()
+ CONF.set_override("backup_object_number_per_notification", 10)
+ CONF.set_override("backup_swift_enable_progress_timer", True)
+ service = SwiftBackupDriver(self.ctxt)
+ self.volume_file.seek(0)
+ backup = db.backup_get(self.ctxt, 123)
+ service.backup(backup, self.volume_file)
+ self.assertTrue(_send_progress.called)
+ self.assertTrue(_send_progress_end.called)
+
def test_backup_custom_container(self):
container_name = 'fake99'
self._create_backup_db_entry(container=container_name)
return usage_info
+
+
+def _usage_from_backup(context, backup_ref, **kw):
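+ """Build a usage-info dict from a backup reference for notifications."""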
+ usage_info = dict(tenant_id=backup_ref['project_id'],
+ user_id=backup_ref['user_id'],
+ availability_zone=backup_ref['availability_zone'],
+ backup_id=backup_ref['id'],
+ host=backup_ref['host'],
+ display_name=backup_ref['display_name'],
+ created_at=str(backup_ref['created_at']),
+ status=backup_ref['status'],
+ volume_id=backup_ref['volume_id'],
+ size=backup_ref['size'],
+ service_metadata=backup_ref['service_metadata'],
+ service=backup_ref['service'],
+ fail_reason=backup_ref['fail_reason'])
+
+ usage_info.update(kw)
+ return usage_info
+
+
def notify_about_volume_usage(context, volume, event_suffix,
extra_usage_info=None, host=None):
if not host:
usage_info)
+
+
+def notify_about_backup_usage(context, backup, event_suffix,
+ extra_usage_info=None,
+ host=None):
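+ """Emit a 'backup.<event_suffix>' notification for the given backup."""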
+ if not host:
+ host = CONF.host
+
+ if not extra_usage_info:
+ extra_usage_info = {}
+
+ usage_info = _usage_from_backup(context, backup, **extra_usage_info)
+
+ rpc.get_notifier("backup", host).info(context, 'backup.%s' % event_suffix,
+ usage_info)
+
+
def _usage_from_snapshot(context, snapshot_ref, **extra_usage_info):
usage_info = {
'tenant_id': snapshot_ref['project_id'],