review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Snapshot support for XenAPINFS
author Mate Lakat <mate.lakat@citrix.com>
Mon, 31 Dec 2012 15:52:18 +0000 (15:52 +0000)
committer Mate Lakat <mate.lakat@citrix.com>
Wed, 16 Jan 2013 12:31:03 +0000 (12:31 +0000)
Related to blueprint xenapinfs-snapshots

Add support for snapshots by implementing them as deep copies: each
snapshot is a copy of the volume. The snapshot object has a new
provider_location key to store the volume parameters.

Change-Id: Iea06246319ce82d5fcfe417419156ec83ef0a895
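
For reference, the provider_location value stored on volumes and snapshots is a
plain "sr_uuid/vdi_uuid" string: the driver builds it with string formatting and
parses it with split('/'), as shown in the cinder/volume/drivers/xenapi/sm.py
hunk below. A minimal standalone sketch of that convention (the helper names are
illustrative only, not part of this commit):

# Illustrative helpers only -- they mirror the "%(sr_uuid)s/%(vdi_uuid)s"
# formatting and the split('/') parsing used in sm.py below.

def make_provider_location(sr_uuid, vdi_uuid):
    # Compose the string stored in the provider_location column.
    return "%s/%s" % (sr_uuid, vdi_uuid)


def parse_provider_location(provider_location):
    # Recover the SR and VDI UUIDs from a stored location string.
    sr_uuid, vdi_uuid = provider_location.split('/')
    return sr_uuid, vdi_uuid


# Example:
# make_provider_location('sr-uuid', 'vdi-uuid') == 'sr-uuid/vdi-uuid'
# parse_provider_location('sr-uuid/vdi-uuid') == ('sr-uuid', 'vdi-uuid')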

cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py [new file with mode: 0644]
cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py [new file with mode: 0644]
cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql [new file with mode: 0644]
cinder/db/sqlalchemy/models.py
cinder/tests/test_migrations.py
cinder/tests/test_xenapi_sm.py
cinder/volume/drivers/xenapi/lib.py
cinder/volume/drivers/xenapi/sm.py

diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py b/cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py
new file mode 100644 (file)
index 0000000..3c2dae9
--- /dev/null
@@ -0,0 +1,35 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from sqlalchemy import Column
+from sqlalchemy import MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    snapshots = Table('snapshots', meta, autoload=True)
+    provider_location = Column('provider_location', String(255))
+    snapshots.create_column(provider_location)
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    snapshots = Table('snapshots', meta, autoload=True)
+    provider_location = snapshots.columns.provider_location
+    provider_location.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py b/cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py
new file mode 100644 (file)
index 0000000..90cd67c
--- /dev/null
@@ -0,0 +1,41 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from sqlalchemy import MetaData, Table
+from migrate.changeset.constraint import ForeignKeyConstraint
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    snapshots = Table('snapshots', meta, autoload=True)
+    volumes = Table('volumes', meta, autoload=True)
+
+    ForeignKeyConstraint(
+        columns=[snapshots.c.volume_id],
+        refcolumns=[volumes.c.id]).create()
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    snapshots = Table('snapshots', meta, autoload=True)
+    volumes = Table('volumes', meta, autoload=True)
+
+    ForeignKeyConstraint(
+        columns=[snapshots.c.volume_id],
+        refcolumns=[volumes.c.id]).drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql
new file mode 100644 (file)
index 0000000..d2fe9b6
--- /dev/null
@@ -0,0 +1,32 @@
+-- As SQLite does not support DROP FOREIGN KEY, we need to create a new
+-- table and move all the data to it.
+
+BEGIN TRANSACTION;
+
+CREATE TABLE snapshots_v6 (
+    created_at DATETIME,
+    updated_at DATETIME,
+    deleted_at DATETIME,
+    deleted BOOLEAN,
+    id VARCHAR(36) NOT NULL,
+    volume_id VARCHAR(36) NOT NULL,
+    user_id VARCHAR(255),
+    project_id VARCHAR(255),
+    status VARCHAR(255),
+    progress VARCHAR(255),
+    volume_size INTEGER,
+    scheduled_at DATETIME,
+    display_name VARCHAR(255),
+    display_description VARCHAR(255),
+    provider_location VARCHAR(255),
+    PRIMARY KEY (id),
+    CHECK (deleted IN (0, 1))
+);
+
+INSERT INTO snapshots_v6 SELECT * FROM snapshots;
+
+DROP TABLE snapshots;
+
+ALTER TABLE snapshots_v6 RENAME TO snapshots;
+
+COMMIT;
diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py
index 9389a405428e09604220d3764038512b5cc3f9d9..b37cbd8bbca5ae3a179d896b11f803a4834088d4 100644 (file)
@@ -316,6 +316,14 @@ class Snapshot(BASE, CinderBase):
     display_name = Column(String(255))
     display_description = Column(String(255))
 
+    provider_location = Column(String(255))
+
+    volume = relationship(Volume, backref="snapshots",
+                          foreign_keys=volume_id,
+                          primaryjoin='and_('
+                          'Snapshot.volume_id == Volume.id,'
+                          'Snapshot.deleted == False)')
+
 
 class IscsiTarget(BASE, CinderBase):
     """Represents an iscsi target for a given host."""
diff --git a/cinder/tests/test_migrations.py b/cinder/tests/test_migrations.py
index b669fb5c46e7cf2ba1ef611ac7fb6ef4f0a608cf..bc23fca7074e58ec09ad7868cca3854f9101b892 100644 (file)
@@ -350,3 +350,53 @@ class TestMigrations(test.TestCase):
                                        autoload=True)
             self.assertTrue(isinstance(volumes.c.source_volid.type,
                                        sqlalchemy.types.VARCHAR))
+
+    def _metadatas(self, upgrade_to, downgrade_to=None):
+        for (key, engine) in self.engines.items():
+            migration_api.version_control(engine,
+                                          TestMigrations.REPOSITORY,
+                                          migration.INIT_VERSION)
+            migration_api.upgrade(engine,
+                                  TestMigrations.REPOSITORY,
+                                  upgrade_to)
+
+            if downgrade_to is not None:
+                migration_api.downgrade(
+                    engine, TestMigrations.REPOSITORY, downgrade_to)
+
+            metadata = sqlalchemy.schema.MetaData()
+            metadata.bind = engine
+            yield metadata
+
+    def metadatas_upgraded_to(self, revision):
+        return self._metadatas(revision)
+
+    def metadatas_downgraded_from(self, revision):
+        return self._metadatas(revision, revision - 1)
+
+    def test_upgrade_006_adds_provider_location(self):
+        for metadata in self.metadatas_upgraded_to(6):
+            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
+            self.assertTrue(isinstance(snapshots.c.provider_location.type,
+                                       sqlalchemy.types.VARCHAR))
+
+    def test_downgrade_006_removes_provider_location(self):
+        for metadata in self.metadatas_downgraded_from(6):
+            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
+
+            self.assertTrue('provider_location' not in snapshots.c)
+
+    def test_upgrade_007_adds_fk(self):
+        for metadata in self.metadatas_upgraded_to(7):
+            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
+            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
+
+            fkey, = snapshots.c.volume_id.foreign_keys
+
+            self.assertEquals(volumes.c.id, fkey.column)
+
+    def test_downgrade_007_removes_fk(self):
+        for metadata in self.metadatas_downgraded_from(7):
+            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
+
+            self.assertEquals(0, len(snapshots.c.volume_id.foreign_keys))
diff --git a/cinder/tests/test_xenapi_sm.py b/cinder/tests/test_xenapi_sm.py
index b4cb11e122c03030ca45162e40237e5076620d28..bc81b76f9946cc3c7b4918e5cd03419dd00eab02 100644 (file)
@@ -16,6 +16,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from cinder.db import api as db_api
 from cinder.volume.drivers.xenapi import lib
 from cinder.volume.drivers.xenapi import sm as driver
 import mox
@@ -182,3 +183,72 @@ class DriverTestCase(unittest.TestCase):
             ),
             result
         )
+
+    def _setup_for_snapshots(self, server, serverpath):
+        mock = mox.Mox()
+
+        drv = driver.XenAPINFSDriver()
+        ops = mock.CreateMock(lib.NFSBasedVolumeOperations)
+        db = mock.CreateMock(db_api)
+        drv.nfs_ops = ops
+        drv.db = db
+
+        mock.StubOutWithMock(driver, 'FLAGS')
+        driver.FLAGS.xenapi_nfs_server = server
+        driver.FLAGS.xenapi_nfs_serverpath = serverpath
+
+        return mock, drv
+
+    def test_create_snapshot(self):
+        mock, drv = self._setup_for_snapshots('server', 'serverpath')
+
+        snapshot = dict(
+            volume_id="volume-id",
+            display_name="snapshot-name",
+            display_description="snapshot-desc",
+            volume=dict(provider_location="sr-uuid/vdi-uuid"))
+
+        drv.nfs_ops.copy_volume(
+            "server", "serverpath", "sr-uuid", "vdi-uuid",
+            "snapshot-name", "snapshot-desc"
+        ).AndReturn(dict(sr_uuid="copied-sr", vdi_uuid="copied-vdi"))
+
+        mock.ReplayAll()
+        result = drv.create_snapshot(snapshot)
+        mock.VerifyAll()
+        self.assertEquals(
+            dict(provider_location="copied-sr/copied-vdi"),
+            result)
+
+    def test_create_volume_from_snapshot(self):
+        mock, drv = self._setup_for_snapshots('server', 'serverpath')
+
+        snapshot = dict(
+            provider_location='src-sr-uuid/src-vdi-uuid')
+        volume = dict(
+            display_name='tgt-name', name_description='tgt-desc')
+
+        drv.nfs_ops.copy_volume(
+            "server", "serverpath", "src-sr-uuid", "src-vdi-uuid",
+            "tgt-name", "tgt-desc"
+        ).AndReturn(dict(sr_uuid="copied-sr", vdi_uuid="copied-vdi"))
+
+        mock.ReplayAll()
+        result = drv.create_volume_from_snapshot(volume, snapshot)
+        mock.VerifyAll()
+
+        self.assertEquals(
+            dict(provider_location='copied-sr/copied-vdi'), result)
+
+    def test_delete_snapshot(self):
+        mock, drv = self._setup_for_snapshots('server', 'serverpath')
+
+        snapshot = dict(
+            provider_location='src-sr-uuid/src-vdi-uuid')
+
+        drv.nfs_ops.delete_volume(
+            "server", "serverpath", "src-sr-uuid", "src-vdi-uuid")
+
+        mock.ReplayAll()
+        drv.delete_snapshot(snapshot)
+        mock.VerifyAll()
diff --git a/cinder/volume/drivers/xenapi/lib.py b/cinder/volume/drivers/xenapi/lib.py
index 2258a4b092b5208b4b34873bb3e8776485688cae..dc17682e6cc38049670ad2c8354066023e9e9c9d 100644 (file)
@@ -135,6 +135,9 @@ class VdiOperations(OperationsBase):
     def destroy(self, vdi_ref):
         self.call_xenapi('VDI.destroy', vdi_ref)
 
+    def copy(self, vdi_ref, sr_ref):
+        return self.call_xenapi('VDI.copy', vdi_ref, sr_ref)
+
 
 class HostOperations(OperationsBase):
     def get_record(self, host_ref):
@@ -255,6 +258,9 @@ class NFSOperationsMixIn(CompoundOperations):
         vdi_ref = self.VDI.get_by_uuid(vdi_uuid)
         return dict(sr_ref=sr_ref, vdi_ref=vdi_ref)
 
+    def copy_vdi_to_sr(self, vdi_ref, sr_ref):
+        return self.VDI.copy(vdi_ref, sr_ref)
+
 
 class ContextAwareSession(XenAPISession):
     def __enter__(self):
@@ -326,3 +332,26 @@ class NFSBasedVolumeOperations(object):
             vdi_rec = session.VDI.get_record(vdi_ref)
             sr_ref = vdi_rec['SR']
             session.unplug_pbds_and_forget_sr(sr_ref)
+
+    def copy_volume(self, server, serverpath, sr_uuid, vdi_uuid,
+                    name=None, description=None):
+        with self._session_factory.get_session() as session:
+            src_refs = session.connect_volume(
+                server, serverpath, sr_uuid, vdi_uuid)
+            try:
+                host_ref = session.get_this_host()
+
+                with session.new_sr_on_nfs(host_ref, server, serverpath,
+                                           name, description) as target_sr_ref:
+                    target_vdi_ref = session.copy_vdi_to_sr(
+                        src_refs['vdi_ref'], target_sr_ref)
+
+                    dst_refs = dict(
+                        sr_uuid=session.SR.get_uuid(target_sr_ref),
+                        vdi_uuid=session.VDI.get_uuid(target_vdi_ref)
+                    )
+
+            finally:
+                session.unplug_pbds_and_forget_sr(src_refs['sr_ref'])
+
+            return dst_refs
diff --git a/cinder/volume/drivers/xenapi/sm.py b/cinder/volume/drivers/xenapi/sm.py
index f1f7936752578d60c61e3a71abcd2b1dc6d4a38f..6c1709152f865804c5fd417ee41eb27127317b3a 100644 (file)
@@ -112,13 +112,31 @@ class XenAPINFSDriver(driver.VolumeDriver):
         """To override superclass' method"""
 
     def create_volume_from_snapshot(self, volume, snapshot):
-        raise NotImplementedError()
+        return self._copy_volume(
+            snapshot, volume['display_name'], volume['name_description'])
 
     def create_snapshot(self, snapshot):
-        raise NotImplementedError()
+        volume_id = snapshot['volume_id']
+        volume = snapshot['volume']
+        return self._copy_volume(
+            volume, snapshot['display_name'], snapshot['display_description'])
+
+    def _copy_volume(self, volume, target_name, target_desc):
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+
+        volume_details = self.nfs_ops.copy_volume(
+            FLAGS.xenapi_nfs_server,
+            FLAGS.xenapi_nfs_serverpath,
+            sr_uuid,
+            vdi_uuid,
+            target_name,
+            target_desc
+        )
+        location = "%(sr_uuid)s/%(vdi_uuid)s" % volume_details
+        return dict(provider_location=location)
 
     def delete_snapshot(self, snapshot):
-        raise NotImplementedError()
+        self.delete_volume(snapshot)
 
     def ensure_export(self, context, volume):
         pass