"""
+from novaclient import extension
from novaclient import service_catalog
from novaclient.v1_1 import client as nova_client
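+# The assisted_volume_snapshots contrib extension only ships with newer
+# python-novaclient releases, so import it conditionally and fall back to
+# None when it is unavailable.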
+try:
+ from novaclient.v1_1.contrib import assisted_volume_snapshots
+except ImportError:
+ assisted_volume_snapshots = None
from oslo.config import cfg
from cinder.db import base
help='Info to match when looking for nova in the service '
'catalog. Format is : separated values of the form: '
'<service_type>:<service_name>:<endpoint_type>'),
+ cfg.StrOpt('nova_catalog_admin_info',
+ default='compute:nova:adminURL',
+ help='Same as nova_catalog_info, but for admin endpoint.'),
cfg.StrOpt('nova_endpoint_template',
default=None,
help='Override service catalog lookup with template for nova '
'endpoint e.g. http://localhost:8774/v2/%(tenant_id)s'),
+ cfg.StrOpt('nova_endpoint_admin_template',
+ default=None,
+ help='Same as nova_endpoint_template, but for admin endpoint.'),
cfg.StrOpt('os_region_name',
default=None,
help='region name of this node'),
LOG = logging.getLogger(__name__)
-def novaclient(context):
-
+def novaclient(context, admin=False):
# FIXME: the novaclient ServiceCatalog object is mis-named.
# It actually contains the entire access blob.
# Only needed parts of the service catalog are passed in, see
'access': {'serviceCatalog': context.service_catalog or []}
}
sc = service_catalog.ServiceCatalog(compat_catalog)
- if CONF.nova_endpoint_template:
- url = CONF.nova_endpoint_template % context.to_dict()
+
+ nova_endpoint_template = CONF.nova_endpoint_template
+ nova_catalog_info = CONF.nova_catalog_info
+
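+ # With admin=True, resolve the endpoint from the admin-oriented options so
+ # privileged calls (e.g. assisted volume snapshots) go to Nova's admin URL.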
+ if admin:
+ nova_endpoint_template = CONF.nova_endpoint_admin_template
+ nova_catalog_info = CONF.nova_catalog_admin_info
+
+ if nova_endpoint_template:
+ url = nova_endpoint_template % context.to_dict()
else:
- info = CONF.nova_catalog_info
+ info = nova_catalog_info
service_type, service_name, endpoint_type = info.split(':')
# extract the region if set in configuration
if CONF.os_region_name:
LOG.debug(_('Novaclient connection created using URL: %s') % url)
+ extensions = []
+ if assisted_volume_snapshots:
+ extensions.append(assisted_volume_snapshots)
+
c = nova_client.Client(context.user_id,
context.auth_token,
context.project_id,
auth_url=url,
insecure=CONF.nova_api_insecure,
- cacert=CONF.nova_ca_certificates_file)
+ cacert=CONF.nova_ca_certificates_file,
+ extensions=extensions)
# noauth extracts user_id:project_id from auth_token
c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
context.project_id)
novaclient(context).volumes.update_server_volume(server_id,
attachment_id,
new_volume_id)
+
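+ # Thin wrappers around Nova's assisted-volume-snapshots extension, invoked
+ # through an admin client (novaclient(context, admin=True)).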
+ def create_volume_snapshot(self, context, volume_id, create_info):
+ nova = novaclient(context, admin=True)
+
+ nova.assisted_volume_snapshots.create(
+ volume_id,
+ create_info=create_info)
+
+ def delete_volume_snapshot(self, context, snapshot_id, delete_info):
+ nova = novaclient(context, admin=True)
+
+ nova.assisted_volume_snapshots.delete(
+ snapshot_id,
+ delete_info=delete_info)
from mox import IsA
from mox import stubout
+from cinder import compute
from cinder import context
+from cinder import db
from cinder import exception
from cinder.openstack.common import processutils as putils
from cinder import test
+from cinder.tests.compute import test_nova
from cinder import units
from cinder.volume import configuration as conf
from cinder.volume.drivers import glusterfs
drv._read_info_file(info_path, empty_if_missing=True).\
AndReturn(info_dict)
- drv._read_info_file(info_path, empty_if_missing=True).\
- AndReturn(info_dict)
-
drv._create_qcow2_snap_file(snap_ref, vol_filename, snap_path)
qemu_img_info_output = ("""image: volume-%s
disk size: 152K
""" % self.VOLUME_UUID, '')
+ drv._read_info_file(info_path, empty_if_missing=True).\
+ AndReturn(info_dict)
+
# SNAP_UUID_2 has been removed from dict.
info_file_dict = {'active': 'volume-%s.%s' %
(self.VOLUME_UUID, self.SNAP_UUID),
'SNAP_UUID_2': self.SNAP_UUID_2,
'VOLUME_UUID': self.VOLUME_UUID}
- info_file_dict = {'active': 'volume-%s.%s' %
- (self.VOLUME_UUID, self.SNAP_UUID_2),
- self.SNAP_UUID_2: 'volume-%s.%s' %
- (self.VOLUME_UUID, self.SNAP_UUID_2),
+ info_file_dict = {'active': snap_file_2,
+ self.SNAP_UUID_2: snap_file_2,
self.SNAP_UUID: snap_file}
snap_ref = {'name': 'test snap',
snap_path_chain = [{self.SNAP_UUID: snap_file},
{'active': snap_file}]
- drv._read_info_file(mox_lib.IgnoreArg()).AndReturn(info_file_dict)
-
- drv._execute('qemu-img', 'info', volume_path, run_as_root=True).\
- AndReturn((qemu_img_info_output_2, ''))
-
- drv._execute('qemu-img', 'info', snap_path_2, run_as_root=True).\
- AndReturn((qemu_img_info_output_2, ''))
+ drv._read_info_file(info_path).AndReturn(info_file_dict)
drv._execute('qemu-img', 'commit', snap_path_2, run_as_root=True)
disk size: 175K
""" % self.VOLUME_UUID
- drv._execute('qemu-img', 'info', mox_lib.IgnoreArg(),
- run_as_root=True).\
- AndReturn((qemu_img_info_output_snap_2, ''))
-
snap_path_chain = [{'filename': snap_file_2,
'backing-filename': snap_file},
{'filename': snap_file,
drv._read_info_file(info_path).AndReturn(info_file_dict)
- drv._execute('qemu-img', 'info', snap_path_2,
- run_as_root=True).\
- AndReturn((qemu_img_info_output_snap_1, ''))
-
drv._execute('qemu-img', 'commit', snap_path_2, run_as_root=True)
drv._execute('rm', '-f', snap_path_2, run_as_root=True)
drv.extend_volume(volume, 3)
mox.VerifyAll()
+
+ def test_create_snapshot_online(self):
+ (mox, drv) = self._mox, self._driver
+
+ volume = self._simple_volume()
+ volume['status'] = 'in-use'
+
+ hashed = drv._get_hash_str(self.TEST_EXPORT1)
+ volume_file = 'volume-%s' % self.VOLUME_UUID
+ volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
+ hashed,
+ volume_file)
+ info_path = '%s.info' % volume_path
+
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ snap_ref = {'name': 'test snap (online)',
+ 'volume_id': self.VOLUME_UUID,
+ 'volume': volume,
+ 'id': self.SNAP_UUID,
+ 'context': ctxt,
+ 'status': 'asdf',
+ 'progress': 'asdf'}
+
+ snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
+ snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
+
+ mox.StubOutWithMock(drv, '_execute')
+ mox.StubOutWithMock(drv, '_create_qcow2_snap_file')
+ mox.StubOutWithMock(db, 'snapshot_get')
+ mox.StubOutWithMock(drv, '_write_info_file')
+ mox.StubOutWithMock(drv, '_nova')
+
+ drv._create_qcow2_snap_file(snap_ref, volume_file, snap_path)
+
+ create_info = {'snapshot_id': snap_ref['id'],
+ 'type': 'qcow2',
+ 'new_file': snap_file}
+
+ drv._nova.create_volume_snapshot(ctxt, self.VOLUME_UUID, create_info)
+
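+ # Simulate Nova updating the snapshot record: the driver polls
+ # db.snapshot_get until progress reaches '90%' while status is 'creating'.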
+ snap_ref['status'] = 'creating'
+ snap_ref['progress'] = '0%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ snap_ref['progress'] = '50%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ snap_ref['progress'] = '90%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ snap_info = {'active': snap_file,
+ self.SNAP_UUID: snap_file}
+
+ drv._write_info_file(info_path, snap_info)
+
+ mox.ReplayAll()
+
+ drv.create_snapshot(snap_ref)
+
+ def test_create_snapshot_online_novafailure(self):
+ (mox, drv) = self._mox, self._driver
+
+ volume = self._simple_volume()
+ volume['status'] = 'in-use'
+
+ hashed = drv._get_hash_str(self.TEST_EXPORT1)
+ volume_file = 'volume-%s' % self.VOLUME_UUID
+ volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
+ hashed,
+ volume_file)
+ info_path = '%s.info' % volume_path
+
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ snap_ref = {'name': 'test snap (online)',
+ 'volume_id': self.VOLUME_UUID,
+ 'volume': volume,
+ 'id': self.SNAP_UUID,
+ 'context': ctxt}
+
+ snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
+ snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
+
+ mox.StubOutWithMock(drv, '_execute')
+ mox.StubOutWithMock(drv, '_create_qcow2_snap_file')
+ mox.StubOutWithMock(drv, '_nova')
+ mox.StubOutWithMock(db, 'snapshot_get')
+ mox.StubOutWithMock(drv, '_write_info_file')
+
+ drv._create_qcow2_snap_file(snap_ref, volume_file, snap_path)
+
+ create_info = {'snapshot_id': snap_ref['id'],
+ 'type': 'qcow2',
+ 'new_file': snap_file}
+
+ drv._nova.create_volume_snapshot(ctxt, self.VOLUME_UUID, create_info)
+
+ snap_ref['status'] = 'creating'
+ snap_ref['progress'] = '0%'
+
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ snap_ref['progress'] = '50%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ snap_ref['progress'] = '99%'
+ snap_ref['status'] = 'error'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ snap_info = {'active': snap_file,
+ self.SNAP_UUID: snap_file}
+
+ drv._write_info_file(info_path, snap_info)
+
+ mox.ReplayAll()
+
+ self.assertRaises(exception.GlusterfsException,
+ drv.create_snapshot,
+ snap_ref)
+
+ def test_delete_snapshot_online_1(self):
+ """Delete the newest snapshot."""
+ (mox, drv) = self._mox, self._driver
+
+ volume = self._simple_volume()
+ volume['status'] = 'in-use'
+
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ snap_ref = {'name': 'test snap to delete (online)',
+ 'volume_id': self.VOLUME_UUID,
+ 'volume': volume,
+ 'id': self.SNAP_UUID,
+ 'context': ctxt}
+
+ hashed = drv._get_hash_str(self.TEST_EXPORT1)
+ volume_file = 'volume-%s' % self.VOLUME_UUID
+ volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
+ hashed,
+ volume_file)
+ info_path = '%s.info' % volume_path
+
+ snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
+ snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
+
+ mox.StubOutWithMock(drv, '_execute')
+ mox.StubOutWithMock(drv, '_nova')
+ mox.StubOutWithMock(drv, '_read_info_file')
+ mox.StubOutWithMock(drv, '_write_info_file')
+ mox.StubOutWithMock(os.path, 'exists')
+ mox.StubOutWithMock(drv, '_get_backing_file_for_path')
+ mox.StubOutWithMock(db, 'snapshot_get')
+
+ snap_info = {'active': snap_file,
+ self.SNAP_UUID: snap_file}
+
+ drv._read_info_file(info_path).AndReturn(snap_info)
+
+ os.path.exists(snap_path).AndReturn(True)
+
+ drv._read_info_file(info_path, empty_if_missing=True).\
+ AndReturn(snap_info)
+
+ qemu_img_info_output = """image: %s
+ file format: qcow2
+ virtual size: 1.0G (1073741824 bytes)
+ disk size: 173K
+ backing file: %s
+ """ % (snap_file, volume_file)
+
+ delete_info = {
+ 'type': 'qcow2',
+ 'merge_target_file': None,
+ 'file_to_merge': volume_file,
+ 'volume_id': self.VOLUME_UUID
+ }
+
+ drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info)
+
+ drv._get_backing_file_for_path(snap_path).AndReturn(volume_file)
+
+ drv._read_info_file(info_path).AndReturn(snap_info)
+
+ drv._read_info_file(info_path).AndReturn(snap_info)
+
+ snap_ref['status'] = 'deleting'
+ snap_ref['progress'] = '0%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ snap_ref['progress'] = '50%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ snap_ref['progress'] = '90%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ drv._write_info_file(info_path, snap_info)
+
+ drv._execute('rm', '-f', volume_path, run_as_root=True)
+
+ mox.ReplayAll()
+
+ drv.delete_snapshot(snap_ref)
+
+ def test_delete_snapshot_online_2(self):
+ """Delete the middle snapshot."""
+ (mox, drv) = self._mox, self._driver
+
+ volume = self._simple_volume()
+ volume['status'] = 'in-use'
+
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ snap_ref = {'name': 'test snap to delete (online)',
+ 'volume_id': self.VOLUME_UUID,
+ 'volume': volume,
+ 'id': self.SNAP_UUID,
+ 'context': ctxt}
+
+ hashed = drv._get_hash_str(self.TEST_EXPORT1)
+ volume_file = 'volume-%s' % self.VOLUME_UUID
+ volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
+ hashed,
+ volume_file)
+ info_path = '%s.info' % volume_path
+
+ snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
+ snap_path_2 = '%s.%s' % (volume_path, self.SNAP_UUID_2)
+ snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
+ snap_file_2 = '%s.%s' % (volume_file, self.SNAP_UUID_2)
+
+ mox.StubOutWithMock(drv, '_execute')
+ mox.StubOutWithMock(drv, '_nova')
+ mox.StubOutWithMock(drv, '_read_info_file')
+ mox.StubOutWithMock(drv, '_write_info_file')
+ mox.StubOutWithMock(os.path, 'exists')
+ mox.StubOutWithMock(drv, '_get_backing_file_for_path')
+ mox.StubOutWithMock(db, 'snapshot_get')
+
+ snap_info = {'active': snap_file_2,
+ self.SNAP_UUID: snap_file,
+ self.SNAP_UUID_2: snap_file_2}
+
+ drv._read_info_file(info_path).AndReturn(snap_info)
+
+ os.path.exists(snap_path).AndReturn(True)
+
+ drv._read_info_file(info_path, empty_if_missing=True).\
+ AndReturn(snap_info)
+
+ drv._get_backing_file_for_path(snap_path).AndReturn(volume_file)
+
+ delete_info = {'type': 'qcow2',
+ 'merge_target_file': volume_file,
+ 'file_to_merge': snap_file,
+ 'volume_id': self.VOLUME_UUID}
+ drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info)
+
+ drv._read_info_file(info_path).AndReturn(snap_info)
+
+ drv._read_info_file(info_path).AndReturn(snap_info)
+
+ snap_ref['status'] = 'deleting'
+ snap_ref['progress'] = '0%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ snap_ref['progress'] = '50%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ snap_ref['progress'] = '90%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ drv._write_info_file(info_path, snap_info)
+
+ drv._execute('rm', '-f', snap_path, run_as_root=True)
+
+ mox.ReplayAll()
+
+ drv.delete_snapshot(snap_ref)
+
+ def test_delete_snapshot_online_novafailure(self):
+ """Delete the newest snapshot."""
+ (mox, drv) = self._mox, self._driver
+
+ volume = self._simple_volume()
+ volume['status'] = 'in-use'
+
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ snap_ref = {'name': 'test snap to delete (online)',
+ 'volume_id': self.VOLUME_UUID,
+ 'volume': volume,
+ 'id': self.SNAP_UUID,
+ 'context': ctxt}
+
+ hashed = drv._get_hash_str(self.TEST_EXPORT1)
+ volume_file = 'volume-%s' % self.VOLUME_UUID
+ volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
+ hashed,
+ volume_file)
+ info_path = '%s.info' % volume_path
+
+ snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
+ snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
+
+ mox.StubOutWithMock(drv, '_execute')
+ mox.StubOutWithMock(drv, '_nova')
+ mox.StubOutWithMock(drv, '_read_info_file')
+ mox.StubOutWithMock(drv, '_write_info_file')
+ mox.StubOutWithMock(os.path, 'exists')
+ mox.StubOutWithMock(drv, '_get_backing_file_for_path')
+ mox.StubOutWithMock(db, 'snapshot_get')
+
+ snap_info = {'active': snap_file,
+ self.SNAP_UUID: snap_file}
+
+ drv._read_info_file(info_path).AndReturn(snap_info)
+
+ os.path.exists(snap_path).AndReturn(True)
+
+ drv._read_info_file(info_path, empty_if_missing=True).\
+ AndReturn(snap_info)
+
+ qemu_img_info_output = """image: %s
+ file format: qcow2
+ virtual size: 1.0G (1073741824 bytes)
+ disk size: 173K
+ backing file: %s
+ """ % (snap_file, volume_file)
+
+ delete_info = {
+ 'type': 'qcow2',
+ 'merge_target_file': None,
+ 'file_to_merge': volume_file,
+ 'volume_id': self.VOLUME_UUID
+ }
+
+ drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info)
+
+ drv._get_backing_file_for_path(snap_path).AndReturn(volume_file)
+
+ drv._read_info_file(info_path).AndReturn(snap_info)
+
+ drv._read_info_file(info_path).AndReturn(snap_info)
+
+ snap_ref['status'] = 'deleting'
+ snap_ref['progress'] = '0%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ snap_ref['progress'] = '50%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ snap_ref['status'] = 'error_deleting'
+ snap_ref['progress'] = '90%'
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+ drv._write_info_file(info_path, snap_info)
+
+ drv._execute('rm', '-f', volume_path, run_as_root=True)
+
+ mox.ReplayAll()
+
+ self.assertRaises(exception.GlusterfsException,
+ drv.delete_snapshot,
+ snap_ref)
import json
import os
import re
+import time
from oslo.config import cfg
from cinder.brick.remotefs import remotefs
+from cinder import compute
from cinder import db
from cinder import exception
from cinder.image import image_utils
super(GlusterfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.configuration.append_config_values(remotefs.remotefs_client_opts)
+ self._nova = None
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(GlusterfsDriver, self).do_setup(context)
+ self._nova = compute.API()
+
config = self.configuration.glusterfs_shares_config
if not config:
msg = (_("There's no Gluster config file configured (%s)") %
volume_name = CONF.volume_name_template % src_vref['id']
- temp_id = src_vref['id']
volume_info = {'provider_location': src_vref['provider_location'],
'size': src_vref['size'],
'id': volume['id'],
'vol': volume['id'],
'size': volume_size})
- path1 = self._get_hash_str(snapshot['volume']['provider_location'])
path_to_disk = self._local_path_volume(snapshot['volume'])
path_to_new_vol = self._local_path_volume(volume)
them and handling live transfers of data between files as required.
"""
- # Check that volume is not attached (even for force):
- # Online snapshots must be done via Nova
- if snapshot['volume']['status'] != 'available':
- msg = _("Volume status must be 'available'.")
+ status = snapshot['volume']['status']
+ if status not in ['available', 'in-use']:
+ msg = _('Volume status must be "available" or "in-use"'
+ ' for snapshot. (is %s)') % status
raise exception.InvalidVolume(msg)
+ if status == 'in-use':
+ # Perform online snapshot via Nova
+ context = snapshot['context']
+
+ backing_filename = self.get_active_image_from_info(
+ snapshot['volume'])
+ path_to_disk = self._local_path_volume(snapshot['volume'])
+ new_snap_path = '%s.%s' % (
+ self._local_path_volume(snapshot['volume']),
+ snapshot['id'])
+
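+ # Create a new qcow2 overlay backed by the current active image; Nova
+ # then switches the attached instance to it via the assisted snapshot.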
+ self._create_qcow2_snap_file(snapshot,
+ backing_filename,
+ new_snap_path)
+
+ create_info = {
+ 'type': 'qcow2',
+ 'new_file': os.path.basename(new_snap_path),
+ 'snapshot_id': snapshot['id']
+ }
+
+ try:
+ result = self._nova.create_volume_snapshot(
+ context,
+ snapshot['volume_id'],
+ create_info)
+ LOG.debug(_('nova call result: %s') % result)
+ except Exception as e:
+ LOG.error(_('Call to Nova to create snapshot failed'))
+ LOG.exception(e)
+ raise
+
+ # Loop and wait for result
+ # Nova will call Cinderclient to update the status in the database
+ # An update of progress = '90%' means that Nova is done
+ seconds_elapsed = 0
+ increment = 1
+ timeout = 600
+ while True:
+ s = db.snapshot_get(context, snapshot['id'])
+
+ if s['status'] == 'creating':
+ if s['progress'] == '90%':
+ # Nova tasks completed successfully
+ break
+
+ time.sleep(increment)
+ seconds_elapsed += increment
+ elif s['status'] == 'error':
+
+ msg = _('Nova returned "error" status '
+ 'while creating snapshot.')
+ raise exception.GlusterfsException(msg)
+
+ LOG.debug(_('Status of snapshot %(id)s is now %(status)s') % {
+ 'id': snapshot['id'],
+ 'status': s['status']
+ })
+
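+ # Back off the polling interval as time passes to avoid hammering the
+ # database while Nova finishes the snapshot.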
+ if 10 < seconds_elapsed <= 20:
+ increment = 2
+ elif 20 < seconds_elapsed <= 60:
+ increment = 5
+ elif 60 < seconds_elapsed:
+ increment = 10
+
+ if seconds_elapsed > timeout:
+ msg = _('Timed out while waiting for Nova update '
+ 'for creation of snapshot %s.') % snapshot['id']
+ raise exception.GlusterfsException(msg)
+
+ info_path = self._local_path_volume(snapshot['volume']) + '.info'
+ snap_info = self._read_info_file(info_path, empty_if_missing=True)
+ snap_info['active'] = os.path.basename(new_snap_path)
+ snap_info[snapshot['id']] = os.path.basename(new_snap_path)
+ self._write_info_file(info_path, snap_info)
+
+ return
+
LOG.debug(_('create snapshot: %s') % snapshot)
LOG.debug(_('volume id: %s') % snapshot['volume_id'])
snap_info = self._read_info_file(info_path,
empty_if_missing=True)
- snap_info[snapshot['id']] = os.path.basename(new_snap_path)
snap_info['active'] = os.path.basename(new_snap_path)
+ snap_info[snapshot['id']] = os.path.basename(new_snap_path)
self._write_info_file(info_path, snap_info)
def _read_file(self, filename):
If volume status is 'available', delete snapshot here in Cinder
using qemu-img.
+
+ If volume status is 'in-use', calculate what qcow2 files need to
+ merge, and call to Nova to perform this operation.
+
"""
LOG.debug(_('deleting snapshot %s') % snapshot['id'])
- if snapshot['volume']['status'] != 'available':
- msg = _("Volume status must be 'available'.")
+ volume_status = snapshot['volume']['status']
+ if volume_status not in ['available', 'in-use']:
+ msg = _('Volume status must be "available" or "in-use".')
raise exception.InvalidVolume(msg)
# Determine the true snapshot file for this snapshot
msg = _('Snapshot file at %s does not exist.') % snapshot_path
raise exception.InvalidSnapshot(msg)
- base_file = self._get_backing_file_for_path(snapshot_path)
-
vol_path = self._local_volume_dir(snapshot['volume'])
- base_file_fmt = self._get_file_format_for_path('%s/%s' %
- (vol_path, base_file))
- if base_file_fmt not in ['qcow2', 'raw']:
- msg = _("Invalid snapshot backing file format: %s") % base_file_fmt
- raise exception.InvalidSnapshot(msg)
# Find what file has this as its backing file
active_file = self.get_active_image_from_info(snapshot['volume'])
active_file_path = '%s/%s' % (vol_path, active_file)
+ if volume_status == 'in-use':
+ # Online delete
+ context = snapshot['context']
+
+ base_file = self._get_backing_file_for_path(snapshot_path)
+ if base_file is None:
+ # There should always be at least the original volume
+ # file as base.
+ msg = _('No base file found for %s.') % snapshot_path
+ raise exception.GlusterfsException(msg)
+ base_id = None
+ info_path = self._local_path_volume(snapshot['volume']) + '.info'
+ snap_info = self._read_info_file(info_path)
+ for key, value in snap_info.iteritems():
+ if value == base_file and key != 'active':
+ base_id = key
+ break
+ if base_id is None:
+ # This means we are deleting the oldest snapshot
+ msg = _('No base_id found for %(file)s.') % {'file': snapshot_file}
+ LOG.debug(msg)
+
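+ # Collect the merge plan and hand it to _delete_snapshot_online, which
+ # asks Nova to commit/rebase the qcow2 files while the volume is attached.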
+ online_delete_info = {
+ 'active_file': active_file,
+ 'snapshot_file': snapshot_file,
+ 'base_file': base_file,
+ 'base_id': base_id
+ }
+
+ return self._delete_snapshot_online(context,
+ snapshot,
+ online_delete_info)
+
if snapshot_file == active_file:
# Need to merge snapshot_file into its backing file
# There is no top file
# Remove snapshot_file from info
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
+
del(snap_info[snapshot['id']])
# Active file has changed
snap_info['active'] = base_file
self._write_info_file(info_path, snap_info)
-
else:
# T0 | T1 | T2 | T3
# base | snapshot_file | higher_file | highest_file
# used here) | | committed down)| if so)
backing_chain = self._get_backing_chain_for_path(active_file_path)
-
# This file is guaranteed to exist since we aren't operating on
# the active file.
higher_file = next((os.path.basename(f['filename'])
# Remove snapshot_file from info
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
-
del(snap_info[snapshot['id']])
snap_info[higher_id] = snapshot_file
if higher_file == active_file:
snap_info['active'] = snapshot_file
self._write_info_file(info_path, snap_info)
+ def _delete_snapshot_online(self, context, snapshot, info):
+ # Update info over the course of this method
+ # active file never changes
+ info_path = self._local_path_volume(snapshot['volume']) + '.info'
+ snap_info = self._read_info_file(info_path)
+
+ if info['active_file'] == info['snapshot_file']:
+ # blockRebase/Pull base into active
+ # info['base_file'] => snapshot_file
+
+ file_to_delete = info['base_file']
+
+ delete_info = {'file_to_merge': info['base_file'],
+ 'merge_target_file': None, # current
+ 'type': 'qcow2',
+ 'volume_id': snapshot['volume']['id']}
+
+ del(snap_info[snapshot['id']])
+ else:
+ # blockCommit snapshot into base
+ # info['base_file'] <= snapshot_file
+ # delete record of snapshot
+ file_to_delete = info['snapshot_file']
+
+ delete_info = {'file_to_merge': info['snapshot_file'],
+ 'merge_target_file': info['base_file'],
+ 'type': 'qcow2',
+ 'volume_id': snapshot['volume']['id']}
+
+ del(snap_info[snapshot['id']])
+
+ try:
+ self._nova.delete_volume_snapshot(
+ context,
+ snapshot['id'],
+ delete_info)
+ except Exception as e:
+ LOG.error(_('Call to Nova delete snapshot failed'))
+ LOG.exception(e)
+ raise
+
+ # Loop and wait for result
+ # Nova will call Cinderclient to update the status in the database
+ # An update of progress = '90%' means that Nova is done
+ seconds_elapsed = 0
+ increment = 1
+ timeout = 600
+ while True:
+ s = db.snapshot_get(context, snapshot['id'])
+
+ if s['status'] == 'deleting':
+ if s['progress'] == '90%':
+ # Nova tasks completed successfully
+ break
+ else:
+ msg = _('status of snapshot %s is '
+ 'still "deleting"... waiting') % snapshot['id']
+ LOG.debug(msg)
+ time.sleep(increment)
+ seconds_elapsed += increment
+ else:
+ msg = _('Unable to delete snapshot %(id)s, '
+ 'status: %(status)s.') % {'id': snapshot['id'],
+ 'status': s['status']}
+ raise exception.GlusterfsException(msg)
+
+ if 10 < seconds_elapsed <= 20:
+ increment = 2
+ elif 20 < seconds_elapsed <= 60:
+ increment = 5
+ elif 60 < seconds_elapsed:
+ increment = 10
+
+ if seconds_elapsed > timeout:
+ msg = _('Timed out while waiting for Nova update '
+ 'for deletion of snapshot %(id)s.') %\
+ {'id': snapshot['id']}
+ raise exception.GlusterfsException(msg)
+
+ # Write info file updated above
+ self._write_info_file(info_path, snap_info)
+
+ # Delete stale file
+ path_to_delete = os.path.join(
+ self._local_volume_dir(snapshot['volume']), file_to_delete)
+ self._execute('rm', '-f', path_to_delete, run_as_root=True)
+
def _get_backing_file(self, output):
for line in output.split('\n'):
backing_file = None