QEMU-assisted-snapshots for GlusterFS volumes
author Eric Harney <eharney@redhat.com>
Mon, 19 Aug 2013 04:21:54 +0000 (00:21 -0400)
committer Eric Harney <eharney@redhat.com>
Wed, 4 Sep 2013 03:49:50 +0000 (23:49 -0400)
Coordinate with Nova to create and delete snapshots for
GlusterFS volumes that are attached to VMs.

Cinder is responsible for creating a QCOW2 file which Nova
will activate in the VM's snapshot chain when a snapshot is
created.

When a snapshot is deleted, Cinder will request that Nova
perform a block commit/rebase operation to logically delete
the snapshot from the QCOW2 chain.
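
For readers unfamiliar with QCOW2 overlay chains, the file-level operations being
coordinated here look roughly like the following. This is an illustrative Python
sketch using qemu-img via subprocess; the file names are hypothetical and the
driver's own helpers (e.g. _create_qcow2_snap_file) are not reproduced verbatim.

    import subprocess

    def create_overlay(backing_file, new_file):
        # Taking a snapshot adds a new qcow2 file on top of the chain; the
        # backing file becomes read-only and new writes land in the overlay.
        subprocess.check_call(['qemu-img', 'create', '-f', 'qcow2',
                               '-b', backing_file, new_file])

    def commit_overlay(top_file):
        # Deleting a snapshot merges an overlay back into its backing file,
        # after which the overlay can be dropped from the chain.
        subprocess.check_call(['qemu-img', 'commit', top_file])

    # Example chain: volume-X  <-  volume-X.snap-1 (active)
    # create_overlay('volume-X', 'volume-X.snap-1')   # take snapshot
    # commit_overlay('volume-X.snap-1')               # delete snapshot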

Implements blueprint qemu-assisted-snapshots

Change-Id: I4a7f0c1bc08d88b0f75d119168dd2077487a62a0

cinder/compute/nova.py
cinder/tests/compute/test_nova.py
cinder/tests/test_glusterfs.py
cinder/volume/drivers/glusterfs.py
cinder/volume/manager.py
etc/cinder/cinder.conf.sample

diff --git a/cinder/compute/nova.py b/cinder/compute/nova.py
index a25401f5f14b7f2c339de03a3fe0e29357415e1d..13c960063a86e1089b40962758dc06852ccf7204 100644
@@ -17,8 +17,13 @@ Handles all requests to Nova.
 """
 
 
+from novaclient import extension
 from novaclient import service_catalog
 from novaclient.v1_1 import client as nova_client
+try:
+    from novaclient.v1_1.contrib import assisted_volume_snapshots
+except ImportError:
+    assisted_volume_snapshots = None
 from oslo.config import cfg
 
 from cinder.db import base
@@ -30,10 +35,16 @@ nova_opts = [
                help='Info to match when looking for nova in the service '
                     'catalog. Format is : separated values of the form: '
                     '<service_type>:<service_name>:<endpoint_type>'),
+    cfg.StrOpt('nova_catalog_admin_info',
+               default='compute:nova:adminURL',
+               help='Same as nova_catalog_info, but for admin endpoint.'),
     cfg.StrOpt('nova_endpoint_template',
                default=None,
                help='Override service catalog lookup with template for nova '
                     'endpoint e.g. http://localhost:8774/v2/%(tenant_id)s'),
+    cfg.StrOpt('nova_endpoint_admin_template',
+               default=None,
+               help='Same as nova_endpoint_template, but for admin endpoint.'),
     cfg.StrOpt('os_region_name',
                default=None,
                help='region name of this node'),
@@ -52,8 +63,7 @@ CONF.register_opts(nova_opts)
 LOG = logging.getLogger(__name__)
 
 
-def novaclient(context):
-
+def novaclient(context, admin=False):
     # FIXME: the novaclient ServiceCatalog object is mis-named.
     #        It actually contains the entire access blob.
     # Only needed parts of the service catalog are passed in, see
@@ -62,10 +72,18 @@ def novaclient(context):
         'access': {'serviceCatalog': context.service_catalog or []}
     }
     sc = service_catalog.ServiceCatalog(compat_catalog)
-    if CONF.nova_endpoint_template:
-        url = CONF.nova_endpoint_template % context.to_dict()
+
+    nova_endpoint_template = CONF.nova_endpoint_template
+    nova_catalog_info = CONF.nova_catalog_info
+
+    if admin:
+        nova_endpoint_template = CONF.nova_endpoint_admin_template
+        nova_catalog_info = CONF.nova_catalog_admin_info
+
+    if nova_endpoint_template:
+        url = nova_endpoint_template % context.to_dict()
     else:
-        info = CONF.nova_catalog_info
+        info = nova_catalog_info
         service_type, service_name, endpoint_type = info.split(':')
         # extract the region if set in configuration
         if CONF.os_region_name:
@@ -82,12 +100,17 @@ def novaclient(context):
 
     LOG.debug(_('Novaclient connection created using URL: %s') % url)
 
+    extensions = []
+    if assisted_volume_snapshots:
+        extensions.append(assisted_volume_snapshots)
+
     c = nova_client.Client(context.user_id,
                            context.auth_token,
                            context.project_id,
                            auth_url=url,
                            insecure=CONF.nova_api_insecure,
-                           cacert=CONF.nova_ca_certificates_file)
+                           cacert=CONF.nova_ca_certificates_file,
+                           extensions=extensions)
     # noauth extracts user_id:project_id from auth_token
     c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
                                                            context.project_id)
@@ -103,3 +126,17 @@ class API(base.Base):
         novaclient(context).volumes.update_server_volume(server_id,
                                                          attachment_id,
                                                          new_volume_id)
+
+    def create_volume_snapshot(self, context, volume_id, create_info):
+        nova = novaclient(context, admin=True)
+
+        nova.assisted_volume_snapshots.create(
+            volume_id,
+            create_info=create_info)
+
+    def delete_volume_snapshot(self, context, snapshot_id, delete_info):
+        nova = novaclient(context, admin=True)
+
+        nova.assisted_volume_snapshots.delete(
+            snapshot_id,
+            delete_info=delete_info)
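
The driver-facing surface added above is small. A caller such as the GlusterFS
driver (further down in this commit) would use it roughly as follows; this is an
illustrative sketch only, and the volume/snapshot identifiers are placeholders.

    from cinder import compute
    from cinder import context

    ctxt = context.RequestContext('user-id', 'project-id')
    nova_api = compute.API()

    # Ask Nova to activate a freshly created qcow2 overlay for an attached volume.
    nova_api.create_volume_snapshot(
        ctxt, 'volume-uuid',
        {'snapshot_id': 'snap-uuid', 'type': 'qcow2',
         'new_file': 'volume-1234.snap-uuid'})

    # Later, ask Nova to block-commit/rebase the snapshot away again.
    nova_api.delete_volume_snapshot(
        ctxt, 'snap-uuid',
        {'type': 'qcow2', 'merge_target_file': None,
         'file_to_merge': 'volume-1234', 'volume_id': 'volume-uuid'})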
diff --git a/cinder/tests/compute/test_nova.py b/cinder/tests/compute/test_nova.py
index a02c1f5246b4c181147e228855d32e2e7cb21741..45ee334f4c07d3eb973fa07dfe7883f4320e22cb 100644
@@ -26,6 +26,12 @@ class FakeNovaClient(object):
     def __init__(self):
         self.volumes = self.Volumes()
 
+    def create_volume_snapshot(self, *args, **kwargs):
+        pass
+
+    def delete_volume_snapshot(self, *args, **kwargs):
+        pass
+
 
 class NovaApiTestCase(test.TestCase):
     def setUp(self):
diff --git a/cinder/tests/test_glusterfs.py b/cinder/tests/test_glusterfs.py
index cc143cd893d2f50e07656be031b6cad6511afc57..b4e7105613490d3758fe5d8377adbb6ce9d513b8 100644
@@ -25,10 +25,13 @@ from mox import IgnoreArg
 from mox import IsA
 from mox import stubout
 
+from cinder import compute
 from cinder import context
+from cinder import db
 from cinder import exception
 from cinder.openstack.common import processutils as putils
 from cinder import test
+from cinder.tests.compute import test_nova
 from cinder import units
 from cinder.volume import configuration as conf
 from cinder.volume.drivers import glusterfs
@@ -663,9 +666,6 @@ class GlusterFsDriverTestCase(test.TestCase):
         drv._read_info_file(info_path, empty_if_missing=True).\
             AndReturn(info_dict)
 
-        drv._read_info_file(info_path, empty_if_missing=True).\
-            AndReturn(info_dict)
-
         drv._create_qcow2_snap_file(snap_ref, vol_filename, snap_path)
 
         qemu_img_info_output = ("""image: volume-%s
@@ -674,6 +674,9 @@ class GlusterFsDriverTestCase(test.TestCase):
         disk size: 152K
         """ % self.VOLUME_UUID, '')
 
+        drv._read_info_file(info_path, empty_if_missing=True).\
+            AndReturn(info_dict)
+
         # SNAP_UUID_2 has been removed from dict.
         info_file_dict = {'active': 'volume-%s.%s' %
                           (self.VOLUME_UUID, self.SNAP_UUID),
@@ -738,10 +741,8 @@ class GlusterFsDriverTestCase(test.TestCase):
                'SNAP_UUID_2': self.SNAP_UUID_2,
                'VOLUME_UUID': self.VOLUME_UUID}
 
-        info_file_dict = {'active': 'volume-%s.%s' %
-                          (self.VOLUME_UUID, self.SNAP_UUID_2),
-                          self.SNAP_UUID_2: 'volume-%s.%s' %
-                          (self.VOLUME_UUID, self.SNAP_UUID_2),
+        info_file_dict = {'active': snap_file_2,
+                          self.SNAP_UUID_2: snap_file_2,
                           self.SNAP_UUID: snap_file}
 
         snap_ref = {'name': 'test snap',
@@ -773,13 +774,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         snap_path_chain = [{self.SNAP_UUID: snap_file},
                            {'active': snap_file}]
 
-        drv._read_info_file(mox_lib.IgnoreArg()).AndReturn(info_file_dict)
-
-        drv._execute('qemu-img', 'info', volume_path, run_as_root=True).\
-            AndReturn((qemu_img_info_output_2, ''))
-
-        drv._execute('qemu-img', 'info', snap_path_2, run_as_root=True).\
-            AndReturn((qemu_img_info_output_2, ''))
+        drv._read_info_file(info_path).AndReturn(info_file_dict)
 
         drv._execute('qemu-img', 'commit', snap_path_2, run_as_root=True)
 
@@ -874,10 +869,6 @@ class GlusterFsDriverTestCase(test.TestCase):
         disk size: 175K
         """ % self.VOLUME_UUID
 
-        drv._execute('qemu-img', 'info', mox_lib.IgnoreArg(),
-                     run_as_root=True).\
-            AndReturn((qemu_img_info_output_snap_2, ''))
-
         snap_path_chain = [{'filename': snap_file_2,
                             'backing-filename': snap_file},
                            {'filename': snap_file,
@@ -888,10 +879,6 @@ class GlusterFsDriverTestCase(test.TestCase):
 
         drv._read_info_file(info_path).AndReturn(info_file_dict)
 
-        drv._execute('qemu-img', 'info', snap_path_2,
-                     run_as_root=True).\
-            AndReturn((qemu_img_info_output_snap_1, ''))
-
         drv._execute('qemu-img', 'commit', snap_path_2, run_as_root=True)
 
         drv._execute('rm', '-f', snap_path_2, run_as_root=True)
@@ -1019,3 +1006,374 @@ class GlusterFsDriverTestCase(test.TestCase):
         drv.extend_volume(volume, 3)
 
         mox.VerifyAll()
+
+    def test_create_snapshot_online(self):
+        (mox, drv) = self._mox, self._driver
+
+        volume = self._simple_volume()
+        volume['status'] = 'in-use'
+
+        hashed = drv._get_hash_str(self.TEST_EXPORT1)
+        volume_file = 'volume-%s' % self.VOLUME_UUID
+        volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
+                                    hashed,
+                                    volume_file)
+        info_path = '%s.info' % volume_path
+
+        ctxt = context.RequestContext('fake_user', 'fake_project')
+
+        snap_ref = {'name': 'test snap (online)',
+                    'volume_id': self.VOLUME_UUID,
+                    'volume': volume,
+                    'id': self.SNAP_UUID,
+                    'context': ctxt,
+                    'status': 'asdf',
+                    'progress': 'asdf'}
+
+        snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
+        snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
+
+        mox.StubOutWithMock(drv, '_execute')
+        mox.StubOutWithMock(drv, '_create_qcow2_snap_file')
+        mox.StubOutWithMock(db, 'snapshot_get')
+        mox.StubOutWithMock(drv, '_write_info_file')
+        mox.StubOutWithMock(drv, '_nova')
+
+        drv._create_qcow2_snap_file(snap_ref, volume_file, snap_path)
+
+        create_info = {'snapshot_id': snap_ref['id'],
+                       'type': 'qcow2',
+                       'new_file': snap_file}
+
+        drv._nova.create_volume_snapshot(ctxt, self.VOLUME_UUID, create_info)
+
+        snap_ref['status'] = 'creating'
+        snap_ref['progress'] = '0%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        snap_ref['progress'] = '50%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        snap_ref['progress'] = '90%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        snap_info = {'active': snap_file,
+                     self.SNAP_UUID: snap_file}
+
+        drv._write_info_file(info_path, snap_info)
+
+        mox.ReplayAll()
+
+        drv.create_snapshot(snap_ref)
+
+    def test_create_snapshot_online_novafailure(self):
+        (mox, drv) = self._mox, self._driver
+
+        volume = self._simple_volume()
+        volume['status'] = 'in-use'
+
+        hashed = drv._get_hash_str(self.TEST_EXPORT1)
+        volume_file = 'volume-%s' % self.VOLUME_UUID
+        volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
+                                    hashed,
+                                    volume_file)
+        info_path = '%s.info' % volume_path
+
+        ctxt = context.RequestContext('fake_user', 'fake_project')
+
+        snap_ref = {'name': 'test snap (online)',
+                    'volume_id': self.VOLUME_UUID,
+                    'volume': volume,
+                    'id': self.SNAP_UUID,
+                    'context': ctxt}
+
+        snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
+        snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
+
+        mox.StubOutWithMock(drv, '_execute')
+        mox.StubOutWithMock(drv, '_create_qcow2_snap_file')
+        mox.StubOutWithMock(drv, '_nova')
+        mox.StubOutWithMock(db, 'snapshot_get')
+        mox.StubOutWithMock(drv, '_write_info_file')
+
+        drv._create_qcow2_snap_file(snap_ref, volume_file, snap_path)
+
+        create_info = {'snapshot_id': snap_ref['id'],
+                       'type': 'qcow2',
+                       'new_file': snap_file}
+
+        drv._nova.create_volume_snapshot(ctxt, self.VOLUME_UUID, create_info)
+
+        snap_ref['status'] = 'creating'
+        snap_ref['progress'] = '0%'
+
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        snap_ref['progress'] = '50%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        snap_ref['progress'] = '99%'
+        snap_ref['status'] = 'error'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        snap_info = {'active': snap_file,
+                     self.SNAP_UUID: snap_file}
+
+        drv._write_info_file(info_path, snap_info)
+
+        mox.ReplayAll()
+
+        self.assertRaises(exception.GlusterfsException,
+                          drv.create_snapshot,
+                          snap_ref)
+
+    def test_delete_snapshot_online_1(self):
+        """Delete the newest snapshot."""
+        (mox, drv) = self._mox, self._driver
+
+        volume = self._simple_volume()
+        volume['status'] = 'in-use'
+
+        ctxt = context.RequestContext('fake_user', 'fake_project')
+
+        snap_ref = {'name': 'test snap to delete (online)',
+                    'volume_id': self.VOLUME_UUID,
+                    'volume': volume,
+                    'id': self.SNAP_UUID,
+                    'context': ctxt}
+
+        hashed = drv._get_hash_str(self.TEST_EXPORT1)
+        volume_file = 'volume-%s' % self.VOLUME_UUID
+        volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
+                                    hashed,
+                                    volume_file)
+        info_path = '%s.info' % volume_path
+
+        snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
+        snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
+
+        mox.StubOutWithMock(drv, '_execute')
+        mox.StubOutWithMock(drv, '_nova')
+        mox.StubOutWithMock(drv, '_read_info_file')
+        mox.StubOutWithMock(drv, '_write_info_file')
+        mox.StubOutWithMock(os.path, 'exists')
+        mox.StubOutWithMock(drv, '_get_backing_file_for_path')
+        mox.StubOutWithMock(db, 'snapshot_get')
+
+        snap_info = {'active': snap_file,
+                     self.SNAP_UUID: snap_file}
+
+        drv._read_info_file(info_path).AndReturn(snap_info)
+
+        os.path.exists(snap_path).AndReturn(True)
+
+        drv._read_info_file(info_path, empty_if_missing=True).\
+            AndReturn(snap_info)
+
+        qemu_img_info_output = """image: %s
+        file format: qcow2
+        virtual size: 1.0G (1073741824 bytes)
+        disk size: 173K
+        backing file: %s
+        """ % (snap_file, volume_file)
+
+        delete_info = {
+            'type': 'qcow2',
+            'merge_target_file': None,
+            'file_to_merge': volume_file,
+            'volume_id': self.VOLUME_UUID
+        }
+
+        drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info)
+
+        drv._get_backing_file_for_path(snap_path).AndReturn(volume_file)
+
+        drv._read_info_file(info_path).AndReturn(snap_info)
+
+        drv._read_info_file(info_path).AndReturn(snap_info)
+
+        snap_ref['status'] = 'deleting'
+        snap_ref['progress'] = '0%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        snap_ref['progress'] = '50%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        snap_ref['progress'] = '90%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        drv._write_info_file(info_path, snap_info)
+
+        drv._execute('rm', '-f', volume_path, run_as_root=True)
+
+        mox.ReplayAll()
+
+        drv.delete_snapshot(snap_ref)
+
+    def test_delete_snapshot_online_2(self):
+        """Delete the middle snapshot."""
+        (mox, drv) = self._mox, self._driver
+
+        volume = self._simple_volume()
+        volume['status'] = 'in-use'
+
+        ctxt = context.RequestContext('fake_user', 'fake_project')
+
+        snap_ref = {'name': 'test snap to delete (online)',
+                    'volume_id': self.VOLUME_UUID,
+                    'volume': volume,
+                    'id': self.SNAP_UUID,
+                    'context': ctxt}
+
+        hashed = drv._get_hash_str(self.TEST_EXPORT1)
+        volume_file = 'volume-%s' % self.VOLUME_UUID
+        volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
+                                    hashed,
+                                    volume_file)
+        info_path = '%s.info' % volume_path
+
+        snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
+        snap_path_2 = '%s.%s' % (volume_path, self.SNAP_UUID_2)
+        snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
+        snap_file_2 = '%s.%s' % (volume_file, self.SNAP_UUID_2)
+
+        mox.StubOutWithMock(drv, '_execute')
+        mox.StubOutWithMock(drv, '_nova')
+        mox.StubOutWithMock(drv, '_read_info_file')
+        mox.StubOutWithMock(drv, '_write_info_file')
+        mox.StubOutWithMock(os.path, 'exists')
+        mox.StubOutWithMock(drv, '_get_backing_file_for_path')
+        mox.StubOutWithMock(db, 'snapshot_get')
+
+        snap_info = {'active': snap_file_2,
+                     self.SNAP_UUID: snap_file,
+                     self.SNAP_UUID_2: snap_file_2}
+
+        drv._read_info_file(info_path).AndReturn(snap_info)
+
+        os.path.exists(snap_path).AndReturn(True)
+
+        drv._read_info_file(info_path, empty_if_missing=True).\
+            AndReturn(snap_info)
+
+        drv._get_backing_file_for_path(snap_path).AndReturn(volume_file)
+
+        delete_info = {'type': 'qcow2',
+                       'merge_target_file': volume_file,
+                       'file_to_merge': snap_file,
+                       'volume_id': self.VOLUME_UUID}
+        drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info)
+
+        drv._read_info_file(info_path).AndReturn(snap_info)
+
+        drv._read_info_file(info_path).AndReturn(snap_info)
+
+        snap_ref['status'] = 'deleting'
+        snap_ref['progress'] = '0%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        snap_ref['progress'] = '50%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        snap_ref['progress'] = '90%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        drv._write_info_file(info_path, snap_info)
+
+        drv._execute('rm', '-f', snap_path, run_as_root=True)
+
+        mox.ReplayAll()
+
+        drv.delete_snapshot(snap_ref)
+
+    def test_delete_snapshot_online_novafailure(self):
+        """Delete the newest snapshot."""
+        (mox, drv) = self._mox, self._driver
+
+        volume = self._simple_volume()
+        volume['status'] = 'in-use'
+
+        ctxt = context.RequestContext('fake_user', 'fake_project')
+
+        snap_ref = {'name': 'test snap to delete (online)',
+                    'volume_id': self.VOLUME_UUID,
+                    'volume': volume,
+                    'id': self.SNAP_UUID,
+                    'context': ctxt}
+
+        hashed = drv._get_hash_str(self.TEST_EXPORT1)
+        volume_file = 'volume-%s' % self.VOLUME_UUID
+        volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
+                                    hashed,
+                                    volume_file)
+        info_path = '%s.info' % volume_path
+
+        snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
+        snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
+
+        mox.StubOutWithMock(drv, '_execute')
+        mox.StubOutWithMock(drv, '_nova')
+        mox.StubOutWithMock(drv, '_read_info_file')
+        mox.StubOutWithMock(drv, '_write_info_file')
+        mox.StubOutWithMock(os.path, 'exists')
+        mox.StubOutWithMock(drv, '_get_backing_file_for_path')
+        mox.StubOutWithMock(db, 'snapshot_get')
+
+        snap_info = {'active': snap_file,
+                     self.SNAP_UUID: snap_file}
+
+        drv._read_info_file(info_path).AndReturn(snap_info)
+
+        os.path.exists(snap_path).AndReturn(True)
+
+        drv._read_info_file(info_path, empty_if_missing=True).\
+            AndReturn(snap_info)
+
+        qemu_img_info_output = """image: %s
+        file format: qcow2
+        virtual size: 1.0G (1073741824 bytes)
+        disk size: 173K
+        backing file: %s
+        """ % (snap_file, volume_file)
+
+        delete_info = {
+            'type': 'qcow2',
+            'merge_target_file': None,
+            'file_to_merge': volume_file,
+            'volume_id': self.VOLUME_UUID
+        }
+
+        drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info)
+
+        drv._get_backing_file_for_path(snap_path).AndReturn(volume_file)
+
+        drv._read_info_file(info_path).AndReturn(snap_info)
+
+        drv._read_info_file(info_path).AndReturn(snap_info)
+
+        snap_ref['status'] = 'deleting'
+        snap_ref['progress'] = '0%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        snap_ref['progress'] = '50%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        snap_ref['status'] = 'error_deleting'
+        snap_ref['progress'] = '90%'
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
+
+        drv._write_info_file(info_path, snap_info)
+
+        drv._execute('rm', '-f', volume_path, run_as_root=True)
+
+        mox.ReplayAll()
+
+        self.assertRaises(exception.GlusterfsException,
+                          drv.delete_snapshot,
+                          snap_ref)
diff --git a/cinder/volume/drivers/glusterfs.py b/cinder/volume/drivers/glusterfs.py
index 5b3470c7e240995767ae2d9ed38d713236268ab3..6f46d2f06f15417c6591e5716f7bc9190ab17fe1 100644
@@ -20,10 +20,12 @@ import hashlib
 import json
 import os
 import re
+import time
 
 from oslo.config import cfg
 
 from cinder.brick.remotefs import remotefs
+from cinder import compute
 from cinder import db
 from cinder import exception
 from cinder.image import image_utils
@@ -68,11 +70,14 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
         super(GlusterfsDriver, self).__init__(*args, **kwargs)
         self.configuration.append_config_values(volume_opts)
         self.configuration.append_config_values(remotefs.remotefs_client_opts)
+        self._nova = None
 
     def do_setup(self, context):
         """Any initialization the volume driver does while starting."""
         super(GlusterfsDriver, self).do_setup(context)
 
+        self._nova = compute.API()
+
         config = self.configuration.glusterfs_shares_config
         if not config:
             msg = (_("There's no Gluster config file configured (%s)") %
@@ -143,7 +148,6 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
 
         volume_name = CONF.volume_name_template % src_vref['id']
 
-        temp_id = src_vref['id']
         volume_info = {'provider_location': src_vref['provider_location'],
                        'size': src_vref['size'],
                        'id': volume['id'],
@@ -215,7 +219,6 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
                      'vol': volume['id'],
                      'size': volume_size})
 
-        path1 = self._get_hash_str(snapshot['volume']['provider_location'])
         path_to_disk = self._local_path_volume(snapshot['volume'])
 
         path_to_new_vol = self._local_path_volume(volume)
@@ -324,12 +327,91 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
         them and handling live transfers of data between files as required.
         """
 
-        # Check that volume is not attached (even for force):
-        # Online snapshots must be done via Nova
-        if snapshot['volume']['status'] != 'available':
-            msg = _("Volume status must be 'available'.")
+        status = snapshot['volume']['status']
+        if status not in ['available', 'in-use']:
+            msg = _('Volume status must be "available" or "in-use"'
+                    ' for snapshot. (is %s)') % status
             raise exception.InvalidVolume(msg)
 
+        if status == 'in-use':
+            # Perform online snapshot via Nova
+            context = snapshot['context']
+
+            backing_filename = self.get_active_image_from_info(
+                snapshot['volume'])
+            path_to_disk = self._local_path_volume(snapshot['volume'])
+            new_snap_path = '%s.%s' % (
+                self._local_path_volume(snapshot['volume']),
+                snapshot['id'])
+
+            self._create_qcow2_snap_file(snapshot,
+                                         backing_filename,
+                                         new_snap_path)
+
+            connection_info = {
+                'type': 'qcow2',
+                'new_file': os.path.basename(new_snap_path),
+                'snapshot_id': snapshot['id']
+            }
+
+            try:
+                result = self._nova.create_volume_snapshot(
+                    context,
+                    snapshot['volume_id'],
+                    connection_info)
+                LOG.debug(_('nova call result: %s') % result)
+            except Exception as e:
+                LOG.error(_('Call to Nova to create snapshot failed'))
+                LOG.exception(e)
+                raise e
+
+            # Loop and wait for result
+            # Nova will call Cinderclient to update the status in the database
+            # An update of progress = '90%' means that Nova is done
+            seconds_elapsed = 0
+            increment = 1
+            timeout = 600
+            while True:
+                s = db.snapshot_get(context, snapshot['id'])
+
+                if s['status'] == 'creating':
+                    if s['progress'] == '90%':
+                        # Nova tasks completed successfully
+                        break
+
+                    time.sleep(increment)
+                    seconds_elapsed += increment
+                elif s['status'] == 'error':
+
+                    msg = _('Nova returned "error" status '
+                            'while creating snapshot.')
+                    raise exception.GlusterfsException(msg)
+
+                LOG.debug(_('Status of snapshot %(id)s is now %(status)s') % {
+                    'id': snapshot['id'],
+                    'status': s['status']
+                })
+
+                if 10 < seconds_elapsed <= 20:
+                    increment = 2
+                elif 20 < seconds_elapsed <= 60:
+                    increment = 5
+                elif 60 < seconds_elapsed:
+                    increment = 10
+
+                if seconds_elapsed > timeout:
+                    msg = _('Timed out while waiting for Nova update '
+                            'for creation of snapshot %s.') % snapshot['id']
+                    raise exception.GlusterfsException(msg)
+
+            info_path = self._local_path_volume(snapshot['volume']) + '.info'
+            snap_info = self._read_info_file(info_path, empty_if_missing=True)
+            snap_info['active'] = os.path.basename(new_snap_path)
+            snap_info[snapshot['id']] = os.path.basename(new_snap_path)
+            self._write_info_file(info_path, snap_info)
+
+            return
+
         LOG.debug(_('create snapshot: %s') % snapshot)
         LOG.debug(_('volume id: %s') % snapshot['volume_id'])
 
@@ -384,8 +466,8 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
         snap_info = self._read_info_file(info_path,
                                          empty_if_missing=True)
 
-        snap_info[snapshot['id']] = os.path.basename(new_snap_path)
         snap_info['active'] = os.path.basename(new_snap_path)
+        snap_info[snapshot['id']] = os.path.basename(new_snap_path)
         self._write_info_file(info_path, snap_info)
 
     def _read_file(self, filename):
@@ -423,12 +505,17 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
 
         If volume status is 'available', delete snapshot here in Cinder
         using qemu-img.
+
+        If volume status is 'in-use', calculate what qcow2 files need to
+        merge, and call to Nova to perform this operation.
+
         """
 
         LOG.debug(_('deleting snapshot %s') % snapshot['id'])
 
-        if snapshot['volume']['status'] != 'available':
-            msg = _("Volume status must be 'available'.")
+        volume_status = snapshot['volume']['status']
+        if volume_status not in ['available', 'in-use']:
+            msg = _('Volume status must be "available" or "in-use".')
             raise exception.InvalidVolume(msg)
 
         # Determine the true snapshot file for this snapshot
@@ -446,19 +533,47 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
             msg = _('Snapshot file at %s does not exist.') % snapshot_path
             raise exception.InvalidSnapshot(msg)
 
-        base_file = self._get_backing_file_for_path(snapshot_path)
-
         vol_path = self._local_volume_dir(snapshot['volume'])
-        base_file_fmt = self._get_file_format_for_path('%s/%s' %
-                                                       (vol_path, base_file))
-        if base_file_fmt not in ['qcow2', 'raw']:
-            msg = _("Invalid snapshot backing file format: %s") % base_file_fmt
-            raise exception.InvalidSnapshot(msg)
 
         # Find what file has this as its backing file
         active_file = self.get_active_image_from_info(snapshot['volume'])
         active_file_path = '%s/%s' % (vol_path, active_file)
 
+        if volume_status == 'in-use':
+            # Online delete
+            context = snapshot['context']
+
+            base_file = self._get_backing_file_for_path(snapshot_path)
+            if base_file is None:
+                # There should always be at least the original volume
+                # file as base.
+                msg = _('No base file found for %s.') % snapshot_path
+                raise exception.GlusterfsException(msg)
+            base_id = None
+            info_path = self._local_path_volume(snapshot['volume']) + '.info'
+            snap_info = self._read_info_file(info_path)
+            for key, value in snap_info.iteritems():
+                if value == base_file and key != 'active':
+                    base_id = key
+                    break
+            if base_id is None:
+                # This means we are deleting the oldest snapshot
+                msg = _('No %(base_id)s found for %(file)s') % {
+                    'base_id': 'base_id',
+                    'file': snapshot_file}
+                LOG.debug(msg)
+
+            online_delete_info = {
+                'active_file': active_file,
+                'snapshot_file': snapshot_file,
+                'base_file': base_file,
+                'base_id': base_id
+            }
+
+            return self._delete_snapshot_online(context,
+                                                snapshot,
+                                                online_delete_info)
+
         if snapshot_file == active_file:
             # Need to merge snapshot_file into its backing file
             # There is no top file
@@ -476,11 +591,11 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
             # Remove snapshot_file from info
             info_path = self._local_path_volume(snapshot['volume']) + '.info'
             snap_info = self._read_info_file(info_path)
+
             del(snap_info[snapshot['id']])
             # Active file has changed
             snap_info['active'] = base_file
             self._write_info_file(info_path, snap_info)
-
         else:
             #    T0         |      T1        |     T2         |       T3
             #    base       |  snapshot_file |  higher_file   |  highest_file
@@ -489,7 +604,6 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
             #  used here)   |                | committed down)|     if so)
 
             backing_chain = self._get_backing_chain_for_path(active_file_path)
-
             # This file is guaranteed to exist since we aren't operating on
             # the active file.
             higher_file = next((os.path.basename(f['filename'])
@@ -541,7 +655,6 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
             # Remove snapshot_file from info
             info_path = self._local_path_volume(snapshot['volume']) + '.info'
             snap_info = self._read_info_file(info_path)
-
             del(snap_info[snapshot['id']])
             snap_info[higher_id] = snapshot_file
             if higher_file == active_file:
@@ -553,6 +666,93 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
                 snap_info['active'] = snapshot_file
             self._write_info_file(info_path, snap_info)
 
+    def _delete_snapshot_online(self, context, snapshot, info):
+        # Update info over the course of this method
+        # active file never changes
+        info_path = self._local_path_volume(snapshot['volume']) + '.info'
+        snap_info = self._read_info_file(info_path)
+
+        if info['active_file'] == info['snapshot_file']:
+            # blockRebase/Pull base into active
+            # info['base'] => snapshot_file
+
+            file_to_delete = info['base_file']
+
+            delete_info = {'file_to_merge': info['base_file'],
+                           'merge_target_file': None,  # current
+                           'type': 'qcow2',
+                           'volume_id': snapshot['volume']['id']}
+
+            del(snap_info[snapshot['id']])
+        else:
+            # blockCommit snapshot into base
+            # info['base'] <= snapshot_file
+            # delete record of snapshot
+            file_to_delete = info['snapshot_file']
+
+            delete_info = {'file_to_merge': info['snapshot_file'],
+                           'merge_target_file': info['base_file'],
+                           'type': 'qcow2',
+                           'volume_id': snapshot['volume']['id']}
+
+            del(snap_info[snapshot['id']])
+
+        try:
+            self._nova.delete_volume_snapshot(
+                context,
+                snapshot['id'],
+                delete_info)
+        except Exception as e:
+            LOG.error(_('Call to Nova delete snapshot failed'))
+            LOG.exception(e)
+            raise e
+
+        # Loop and wait for result
+        # Nova will call Cinderclient to update the status in the database
+        # An update of progress = '90%' means that Nova is done
+        seconds_elapsed = 0
+        increment = 1
+        timeout = 600
+        while True:
+            s = db.snapshot_get(context, snapshot['id'])
+
+            if s['status'] == 'deleting':
+                if s['progress'] == '90%':
+                    # Nova tasks completed successfully
+                    break
+                else:
+                    msg = _('status of snapshot %s is '
+                            'still "deleting"... waiting') % snapshot['id']
+                    LOG.debug(msg)
+                    time.sleep(increment)
+                    seconds_elapsed += increment
+            else:
+                msg = _('Unable to delete snapshot %(id)s, '
+                        'status: %(status)s.') % {'id': snapshot['id'],
+                                                  'status': s['status']}
+                raise exception.GlusterfsException(msg)
+
+            if 10 < seconds_elapsed <= 20:
+                increment = 2
+            elif 20 < seconds_elapsed <= 60:
+                increment = 5
+            elif 60 < seconds_elapsed:
+                increment = 10
+
+            if seconds_elapsed > timeout:
+                msg = _('Timed out while waiting for Nova update '
+                        'for deletion of snapshot %(id)s.') %\
+                    {'id': snapshot['id']}
+                raise exception.GlusterfsException(msg)
+
+        # Write info file updated above
+        self._write_info_file(info_path, snap_info)
+
+        # Delete stale file
+        path_to_delete = os.path.join(
+            self._local_volume_dir(snapshot['volume']), file_to_delete)
+        self._execute('rm', '-f', path_to_delete, run_as_root=True)
+
     def _get_backing_file(self, output):
         for line in output.split('\n'):
             backing_file = None
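
Both the online create and delete paths above rely on the same wait loop: poll
db.snapshot_get() until Nova (reporting back via cinderclient) sets progress to
'90%', backing off from 1 to 10 seconds and giving up after 600 seconds. The
commit inlines this logic rather than using a helper; the sketch below distils
the pattern for clarity (the real code raises exception.GlusterfsException,
RuntimeError merely keeps the sketch self-contained).

    import time

    def wait_for_nova(db, context, snapshot_id, busy_status, timeout=600):
        # busy_status is 'creating' for snapshot creation, 'deleting' for
        # deletion; Nova signals completion by setting progress to '90%'.
        seconds_elapsed = 0
        increment = 1
        while True:
            s = db.snapshot_get(context, snapshot_id)
            if s['status'] == busy_status:
                if s['progress'] == '90%':
                    return  # Nova finished its part
            else:
                raise RuntimeError('Nova returned status %s' % s['status'])
            time.sleep(increment)
            seconds_elapsed += increment
            # Back off: 1s polls for the first 10s, then 2s, 5s, finally 10s.
            if 10 < seconds_elapsed <= 20:
                increment = 2
            elif 20 < seconds_elapsed <= 60:
                increment = 5
            elif seconds_elapsed > 60:
                increment = 10
            if seconds_elapsed > timeout:
                raise RuntimeError('Timed out waiting for Nova update')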
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index b93fd05c9f1e29f487a4083ba6cb87c0c75ca275..6bf6e796a41903a0e2d98675d8cad502caddb317 100644
@@ -299,6 +299,7 @@ class VolumeManager(manager.SchedulerDependentManager):
 
     def create_snapshot(self, context, volume_id, snapshot_id):
         """Creates and exports the snapshot."""
+        caller_context = context
         context = context.elevated()
         snapshot_ref = self.db.snapshot_get(context, snapshot_id)
         LOG.info(_("snapshot %s: creating"), snapshot_ref['id'])
@@ -308,6 +309,11 @@ class VolumeManager(manager.SchedulerDependentManager):
         try:
             LOG.debug(_("snapshot %(snap_id)s: creating"),
                       {'snap_id': snapshot_ref['id']})
+
+            # Pass context so that drivers that want to use it, can,
+            # but it is not a requirement for all drivers.
+            snapshot_ref['context'] = caller_context
+
             model_update = self.driver.create_snapshot(snapshot_ref)
             if model_update:
                 self.db.snapshot_update(context, snapshot_ref['id'],
@@ -341,6 +347,7 @@ class VolumeManager(manager.SchedulerDependentManager):
 
     def delete_snapshot(self, context, snapshot_id):
         """Deletes and unexports snapshot."""
+        caller_context = context
         context = context.elevated()
         snapshot_ref = self.db.snapshot_get(context, snapshot_id)
         project_id = snapshot_ref['project_id']
@@ -350,6 +357,11 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         try:
             LOG.debug(_("snapshot %s: deleting"), snapshot_ref['id'])
+
+            # Pass context so that drivers that want to use it, can,
+            # but it is not a requirement for all drivers.
+            snapshot_ref['context'] = caller_context
+
             self.driver.delete_snapshot(snapshot_ref)
         except exception.SnapshotIsBusy:
             LOG.error(_("Cannot delete snapshot %s: snapshot is busy"),
diff --git a/etc/cinder/cinder.conf.sample b/etc/cinder/cinder.conf.sample
index 2f10697145854a0e9ef0b2bb2a2cc7602c99cd04..c4d58806100f87ee5d31ba5b890d17a74b75807c 100644
 # <service_type>:<service_name>:<endpoint_type> (string value)
 #nova_catalog_info=compute:nova:publicURL
 
+# Same as nova_catalog_info, but for admin endpoint. (string
+# value)
+#nova_catalog_admin_info=compute:nova:adminURL
+
 # Override service catalog lookup with template for nova
 # endpoint e.g. http://localhost:8774/v2/%(tenant_id)s (string
 # value)
 #nova_endpoint_template=<None>
 
+# Same as nova_endpoint_template, but for admin endpoint.
+# (string value)
+#nova_endpoint_admin_template=<None>
+
 # region name of this node (string value)
 #os_region_name=<None>
 
 #volume_dd_blocksize=1M
 
 
-# Total option count: 362
+# Total option count: 364