]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Non-disruptive backup
authorXing Yang <xing.yang@emc.com>
Mon, 22 Jun 2015 21:46:32 +0000 (17:46 -0400)
committerXing Yang <xing.yang@emc.com>
Wed, 22 Jul 2015 20:59:19 +0000 (16:59 -0400)
This patch adds support for non-disruptive backup for
volumes in 'in-use' status as follows:

Adds a force flag to the create backup API for backing up
an 'in-use' volume.

For the default implementation in volume/driver.py:
* Create a temporary volume from the original volume
* Backup the temporary volume
* Clean up the temporary volume

For the LVM driver:
* Create a temporary snapshot
* Obtain local_path for the temporary snapshot
* Backup the temporary snapshot
* Cleanup the temporary snapshot

Attach snapshot will be implemented in another patch.

Partial-implements blueprint non-disruptive-backup
Change-Id: I915c279b526e7268d68ab18ce01200ae22deabdd

21 files changed:
cinder/api/contrib/backups.py
cinder/backup/api.py
cinder/backup/manager.py
cinder/backup/rpcapi.py
cinder/db/sqlalchemy/migrate_repo/versions/049_add_temp_volume_snapshot_ids_to_backups.py [new file with mode: 0644]
cinder/db/sqlalchemy/migrate_repo/versions/050_add_previous_status_to_volumes.py [new file with mode: 0644]
cinder/db/sqlalchemy/models.py
cinder/objects/backup.py
cinder/objects/volume.py
cinder/tests/unit/api/contrib/test_backups.py
cinder/tests/unit/fake_driver.py
cinder/tests/unit/fake_volume.py
cinder/tests/unit/objects/test_backup.py
cinder/tests/unit/objects/test_volume.py
cinder/tests/unit/test_backup.py
cinder/tests/unit/test_db_api.py
cinder/tests/unit/test_migrations.py
cinder/tests/unit/test_volume.py
cinder/tests/unit/utils.py
cinder/volume/driver.py
cinder/volume/drivers/lvm.py

index d327fdb39bf59c61aa8bb18607db0616138f7a77..d04b2ae4dc7b8e1e081f0be92d80965e40dd451a 100644 (file)
@@ -255,7 +255,7 @@ class BackupsController(wsgi.Controller):
         name = backup.get('name', None)
         description = backup.get('description', None)
         incremental = backup.get('incremental', False)
-
+        force = backup.get('force', False)
         LOG.info(_LI("Creating backup of volume %(volume_id)s in container"
                      " %(container)s"),
                  {'volume_id': volume_id, 'container': container},
@@ -264,7 +264,7 @@ class BackupsController(wsgi.Controller):
         try:
             new_backup = self.backup_api.create(context, name, description,
                                                 volume_id, container,
-                                                incremental)
+                                                incremental, None, force)
         except exception.InvalidVolume as error:
             raise exc.HTTPBadRequest(explanation=error.msg)
         except exception.VolumeNotFound as error:
index afb0fcc2f29b21a86010ff21725e4191fa4aa2c4..272fea2ef4c4bbde3affbd0e950060197feefc39 100644 (file)
@@ -139,17 +139,23 @@ class API(base.Base):
         return [srv['host'] for srv in services if not srv['disabled']]
 
     def create(self, context, name, description, volume_id,
-               container, incremental=False, availability_zone=None):
+               container, incremental=False, availability_zone=None,
+               force=False):
         """Make the RPC call to create a volume backup."""
         check_policy(context, 'create')
         volume = self.volume_api.get(context, volume_id)
 
-        if volume['status'] != "available":
+        if volume['status'] not in ["available", "in-use"]:
             msg = (_('Volume to be backed up must be available '
-                     'but the current status is "%s".')
+                     'or in-use, but the current status is "%s".')
                    % volume['status'])
             raise exception.InvalidVolume(reason=msg)
+        elif volume['status'] in ["in-use"] and not force:
+            msg = _('Backing up an in-use volume must use '
+                    'the force flag.')
+            raise exception.InvalidVolume(reason=msg)
 
+        previous_status = volume['status']
         volume_host = volume_utils.extract_host(volume['host'], 'host')
         if not self._is_backup_service_enabled(volume, volume_host):
             raise exception.ServiceNotFound(service_id='cinder-backup')
@@ -212,7 +218,9 @@ class API(base.Base):
                         'incremental backup.')
                 raise exception.InvalidBackup(reason=msg)
 
-        self.db.volume_update(context, volume_id, {'status': 'backing-up'})
+        self.db.volume_update(context, volume_id,
+                              {'status': 'backing-up',
+                               'previous_status': previous_status})
         try:
             kwargs = {
                 'user_id': context.user_id,
index dd7fb6dc58ede4e084fd70f16255ef692dc51deb..f0a90f7b29141e56bfcd99f99649669b9422e7c7 100644 (file)
@@ -74,7 +74,7 @@ QUOTAS = quota.QUOTAS
 class BackupManager(manager.SchedulerDependentManager):
     """Manages backup of block storage devices."""
 
-    RPC_API_VERSION = '1.0'
+    RPC_API_VERSION = '1.2'
 
     target = messaging.Target(version=RPC_API_VERSION)
 
@@ -205,16 +205,27 @@ class BackupManager(manager.SchedulerDependentManager):
             backend = self._get_volume_backend(host=volume_host)
             attachments = volume['volume_attachment']
             if attachments:
-                if volume['status'] == 'backing-up':
-                    LOG.info(_LI('Resetting volume %s to available '
-                                 '(was backing-up).'), volume['id'])
+                if (volume['status'] == 'backing-up' and
+                        volume['previous_status'] == 'available'):
+                    LOG.info(_LI('Resetting volume %(vol_id)s to previous '
+                                 'status %(status)s (was backing-up).'),
+                             {'vol_id': volume['id'],
+                              'status': volume['previous_status']})
                     mgr = self._get_manager(backend)
                     for attachment in attachments:
                         if (attachment['attached_host'] == self.host and
                            attachment['instance_uuid'] is None):
                             mgr.detach_volume(ctxt, volume['id'],
                                               attachment['id'])
-                if volume['status'] == 'restoring-backup':
+                elif (volume['status'] == 'backing-up' and
+                        volume['previous_status'] == 'in-use'):
+                    LOG.info(_LI('Resetting volume %(vol_id)s to previous '
+                                 'status %(status)s (was backing-up).'),
+                             {'vol_id': volume['id'],
+                              'status': volume['previous_status']})
+                    self.db.volume_update(ctxt, volume['id'],
+                                          volume['previous_status'])
+                elif volume['status'] == 'restoring-backup':
                     LOG.info(_LI('setting volume %s to error_restoring '
                                  '(was restoring-backup).'), volume['id'])
                     mgr = self._get_manager(backend)
@@ -245,10 +256,42 @@ class BackupManager(manager.SchedulerDependentManager):
                 LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
                 self.delete_backup(ctxt, backup)
 
+        self._cleanup_temp_volumes_snapshots(backups)
+
+    def _cleanup_temp_volumes_snapshots(self, backups):
+        # NOTE(xyang): If the service crashes or gets restarted during the
+        # backup operation, there could be temporary volumes or snapshots
+        # that are not deleted. Make sure any temporary volumes or snapshots
+        # created by the backup job are deleted when the service is started.
+        ctxt = context.get_admin_context()
+        for backup in backups:
+            volume = self.db.volume_get(ctxt, backup.volume_id)
+            volume_host = volume_utils.extract_host(volume['host'], 'backend')
+            backend = self._get_volume_backend(host=volume_host)
+            mgr = self._get_manager(backend)
+            if backup.temp_volume_id and backup.status == 'error':
+                temp_volume = self.db.volume_get(ctxt,
+                                                 backup.temp_volume_id)
+                # The temp volume should be deleted directly thru the
+                # volume driver, not thru the volume manager.
+                mgr.driver.delete_volume(temp_volume)
+                self.db.volume_destroy(ctxt, temp_volume['id'])
+            if backup.temp_snapshot_id and backup.status == 'error':
+                temp_snapshot = objects.Snapshot.get_by_id(
+                    ctxt, backup.temp_snapshot_id)
+                # The temp snapshot should be deleted directly thru the
+                # volume driver, not thru the volume manager.
+                mgr.driver.delete_snapshot(temp_snapshot)
+                with temp_snapshot.obj_as_admin():
+                    self.db.volume_glance_metadata_delete_by_snapshot(
+                        ctxt, temp_snapshot.id)
+                    temp_snapshot.destroy()
+
     def create_backup(self, context, backup):
         """Create volume backups using configured backup service."""
         volume_id = backup.volume_id
         volume = self.db.volume_get(context, volume_id)
+        previous_status = volume.get('previous_status', None)
         LOG.info(_LI('Create backup started, backup: %(backup_id)s '
                      'volume: %(volume_id)s.'),
                  {'backup_id': backup.id, 'volume_id': volume_id})
@@ -297,10 +340,14 @@ class BackupManager(manager.SchedulerDependentManager):
         except Exception as err:
             with excutils.save_and_reraise_exception():
                 self.db.volume_update(context, volume_id,
-                                      {'status': 'available'})
+                                      {'status': previous_status,
+                                       'previous_status': 'error_backing-up'})
                 self._update_backup_error(backup, context, six.text_type(err))
 
-        self.db.volume_update(context, volume_id, {'status': 'available'})
+        # Restore the original status.
+        self.db.volume_update(context, volume_id,
+                              {'status': previous_status,
+                               'previous_status': 'backing-up'})
         backup.status = 'available'
         backup.size = volume['size']
         backup.availability_zone = self.az
index f6909cc12bfa1ceeabc520477cffe26729e44b30..a237119ec5eaa34cd014999d3dfc42e3b2c6622a 100644 (file)
@@ -37,7 +37,7 @@ class BackupAPI(object):
     API version history:
 
         1.0 - Initial version.
-        1.1 - Changed methods to accept backup objects instaed of IDs.
+        1.1 - Changed methods to accept backup objects instead of IDs.
     """
 
     BASE_RPC_API_VERSION = '1.0'
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/049_add_temp_volume_snapshot_ids_to_backups.py b/cinder/db/sqlalchemy/migrate_repo/versions/049_add_temp_volume_snapshot_ids_to_backups.py
new file mode 100644 (file)
index 0000000..11ed0c5
--- /dev/null
@@ -0,0 +1,43 @@
+# Copyright (c) 2015 EMC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from sqlalchemy import Column, MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    backups = Table('backups', meta, autoload=True)
+    temp_volume_id = Column('temp_volume_id', String(length=36))
+    temp_snapshot_id = Column('temp_snapshot_id', String(length=36))
+
+    backups.create_column(temp_volume_id)
+    backups.update().values(temp_volume_id=None).execute()
+
+    backups.create_column(temp_snapshot_id)
+    backups.update().values(temp_snapshot_id=None).execute()
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    backups = Table('backups', meta, autoload=True)
+    temp_volume_id = backups.columns.temp_volume_id
+    temp_snapshot_id = backups.columns.temp_snapshot_id
+
+    backups.drop_column(temp_volume_id)
+    backups.drop_column(temp_snapshot_id)
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/050_add_previous_status_to_volumes.py b/cinder/db/sqlalchemy/migrate_repo/versions/050_add_previous_status_to_volumes.py
new file mode 100644 (file)
index 0000000..ddf1d16
--- /dev/null
@@ -0,0 +1,37 @@
+# Copyright (c) 2015 EMC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from sqlalchemy import Column, MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volumes = Table('volumes', meta, autoload=True)
+    previous_status = Column('previous_status', String(length=255))
+
+    volumes.create_column(previous_status)
+    volumes.update().values(previous_status=None).execute()
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volumes = Table('volumes', meta, autoload=True)
+    previous_status = volumes.columns.previous_status
+
+    volumes.drop_column(previous_status)
index ac42008c5fec9d722b7d55a0b9cacf0b02000f00..0863b834816f6aad01cb69ce6e91aa10ba61a6b5 100644 (file)
@@ -164,6 +164,8 @@ class Volume(BASE, CinderBase):
     replication_extended_status = Column(String(255))
     replication_driver_data = Column(String(255))
 
+    previous_status = Column(String(255))
+
     consistencygroup = relationship(
         ConsistencyGroup,
         backref="volumes",
@@ -526,6 +528,8 @@ class Backup(BASE, CinderBase):
     service = Column(String(255))
     size = Column(Integer)
     object_count = Column(Integer)
+    temp_volume_id = Column(String(36))
+    temp_snapshot_id = Column(String(36))
 
     @validates('fail_reason')
     def validate_fail_reason(self, key, fail_reason):
index 3fdc0b04b824784883e4923719ae59ae07a81966..b6f17674dd293f8b8a465b8873eff96e5d19df3c 100644 (file)
@@ -56,6 +56,9 @@ class Backup(base.CinderPersistentObject, base.CinderObject,
         'service': fields.StringField(nullable=True),
 
         'object_count': fields.IntegerField(),
+
+        'temp_volume_id': fields.StringField(nullable=True),
+        'temp_snapshot_id': fields.StringField(nullable=True),
     }
 
     obj_extra_fields = ['name']
index 1dab3a234913cce5dbe7c8c2cafdf5cfe5922eda..69920c0b7a9cb183d7ef08b1ff6b93bb14f509a4 100644 (file)
@@ -74,6 +74,8 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
         'replication_status': fields.StringField(nullable=True),
         'replication_extended_status': fields.StringField(nullable=True),
         'replication_driver_data': fields.StringField(nullable=True),
+
+        'previous_status': fields.StringField(nullable=True),
     }
 
     # NOTE(thangp): obj_extra_fields is used to hold properties that are not
index f410997ecf1e6f03c7e7396b3f6290a23b2f4ca5..10caf3555cf1468bbf9dc4612232e65e2a53c522 100644 (file)
@@ -406,6 +406,69 @@ class BackupsAPITestCase(test.TestCase):
 
         db.volume_destroy(context.get_admin_context(), volume_id)
 
+    @mock.patch('cinder.db.service_get_all_by_topic')
+    def test_create_backup_inuse_no_force(self,
+                                          _mock_service_get_all_by_topic):
+        _mock_service_get_all_by_topic.return_value = [
+            {'availability_zone': "fake_az", 'host': 'test_host',
+             'disabled': 0, 'updated_at': timeutils.utcnow()}]
+
+        volume_id = utils.create_volume(self.context, size=5,
+                                        status='in-use')['id']
+
+        body = {"backup": {"display_name": "nightly001",
+                           "display_description":
+                           "Nightly Backup 03-Sep-2012",
+                           "volume_id": volume_id,
+                           "container": "nightlybackups",
+                           }
+                }
+        req = webob.Request.blank('/v2/fake/backups')
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(400, res.status_int)
+        self.assertEqual(400, res_dict['badRequest']['code'])
+        self.assertIsNotNone(res_dict['badRequest']['message'])
+
+        db.volume_destroy(context.get_admin_context(), volume_id)
+
+    @mock.patch('cinder.db.service_get_all_by_topic')
+    def test_create_backup_inuse_force(self, _mock_service_get_all_by_topic):
+        _mock_service_get_all_by_topic.return_value = [
+            {'availability_zone': "fake_az", 'host': 'test_host',
+             'disabled': 0, 'updated_at': timeutils.utcnow()}]
+
+        volume_id = utils.create_volume(self.context, size=5,
+                                        status='in-use')['id']
+        backup_id = self._create_backup(volume_id, status="available")
+        body = {"backup": {"display_name": "nightly001",
+                           "display_description":
+                           "Nightly Backup 03-Sep-2012",
+                           "volume_id": volume_id,
+                           "container": "nightlybackups",
+                           "force": True,
+                           }
+                }
+        req = webob.Request.blank('/v2/fake/backups')
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(202, res.status_int)
+        self.assertIn('id', res_dict['backup'])
+        self.assertTrue(_mock_service_get_all_by_topic.called)
+
+        db.backup_destroy(context.get_admin_context(), backup_id)
+        db.volume_destroy(context.get_admin_context(), volume_id)
+
     @mock.patch('cinder.db.service_get_all_by_topic')
     def test_create_backup_snapshot_json(self, _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
@@ -600,27 +663,6 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(res.status_int, 400)
         self.assertEqual(res_dict['badRequest']['code'], 400)
 
-    def test_create_backup_with_InvalidVolume2(self):
-        # need to create the volume referenced below first
-        volume_id = utils.create_volume(self.context, size=5,
-                                        status='in-use')['id']
-        body = {"backup": {"display_name": "nightly001",
-                           "display_description":
-                           "Nightly Backup 03-Sep-2012",
-                           "volume_id": volume_id,
-                           "container": "nightlybackups",
-                           }
-                }
-        req = webob.Request.blank('/v2/fake/backups')
-        req.method = 'POST'
-        req.headers['Content-Type'] = 'application/json'
-        req.body = json.dumps(body)
-        res = req.get_response(fakes.wsgi_app())
-        res_dict = json.loads(res.body)
-
-        self.assertEqual(res.status_int, 400)
-        self.assertEqual(res_dict['badRequest']['code'], 400)
-
     @mock.patch('cinder.db.service_get_all_by_topic')
     def test_create_backup_WithOUT_enabled_backup_service(
             self,
index d68bbb088285f22e18847e82bec8ce724aa2f54f..286c4689843539190116e1ac4e7987d608e12d91 100644 (file)
@@ -143,6 +143,9 @@ class LoggingVolumeDriver(driver.VolumeDriver):
     def terminate_connection(self, volume, connector):
         self.log_action('terminate_connection', volume)
 
+    def create_cloned_volume(self, volume, src_vol):
+        self.log_action('create_cloned_volume', volume)
+
     _LOGS = []
 
     @staticmethod
index 46af555095e3aaf2c23d457629ae34d3fd546e53..57b8b736fbfc44499489c6807ceb7b17f7190c58 100644 (file)
@@ -25,6 +25,7 @@ def fake_db_volume(**updates):
         'availability_zone': 'fake_availability_zone',
         'status': 'available',
         'attach_status': 'detached',
+        'previous_status': None
     }
 
     for name, field in objects.Volume.fields.items():
index 3db5aa0da5f9949087a5a7a07af29620bd02691a..cbc8fd6267b2f4d7eea00f51c15917a6f1bb276a 100644 (file)
@@ -29,6 +29,8 @@ fake_backup = {
     'display_description': 'fake_description',
     'user_id': 'fake_user',
     'project_id': 'fake_project',
+    'temp_volume_id': None,
+    'temp_snapshot_id': None,
 }
 
 
@@ -75,6 +77,13 @@ class TestBackup(test_objects.BaseObjectsTestCase):
         admin_context = backup_destroy.call_args[0][0]
         self.assertTrue(admin_context.is_admin)
 
+    def test_obj_field_temp_volume_snapshot_id(self):
+        backup = objects.Backup(context=self.context,
+                                temp_volume_id='2',
+                                temp_snapshot_id='3')
+        self.assertEqual('2', backup.temp_volume_id)
+        self.assertEqual('3', backup.temp_snapshot_id)
+
 
 class TestBackupList(test_objects.BaseObjectsTestCase):
     @mock.patch('cinder.db.backup_get_all', return_value=[fake_backup])
index a2f871293f9d250c640dc1ab7f729dd181414843..8f79b3855492990888cedb216376bb862e6bda17 100644 (file)
@@ -64,6 +64,11 @@ class TestVolume(test_objects.BaseObjectsTestCase):
         self.assertEqual('volume-2', volume.name)
         self.assertEqual('2', volume.name_id)
 
+    def test_obj_field_previous_status(self):
+        volume = objects.Volume(context=self.context,
+                                previous_status='backing-up')
+        self.assertEqual('backing-up', volume.previous_status)
+
 
 class TestVolumeList(test_objects.BaseObjectsTestCase):
     @mock.patch('cinder.db.volume_get_all')
index d49a6a075bd5af68491ac150903e8f8e1f89d7af..836d5dd56aa97c6ea956b56f5b95da3fc3179b85 100644 (file)
@@ -31,6 +31,7 @@ from cinder import exception
 from cinder import objects
 from cinder import test
 from cinder.tests.unit.backup import fake_service_with_verify as fake_service
+from cinder.volume.drivers import lvm
 
 
 CONF = cfg.CONF
@@ -57,7 +58,9 @@ class BaseBackupTest(test.TestCase):
                                 size=1,
                                 object_count=0,
                                 project_id='fake',
-                                service=None):
+                                service=None,
+                                temp_volume_id=None,
+                                temp_snapshot_id=None):
         """Create a backup entry in the DB.
 
         Return the entry ID
@@ -78,6 +81,8 @@ class BaseBackupTest(test.TestCase):
         kwargs['parent_id'] = None
         kwargs['size'] = size
         kwargs['object_count'] = object_count
+        kwargs['temp_volume_id'] = temp_volume_id
+        kwargs['temp_snapshot_id'] = temp_snapshot_id
         backup = objects.Backup(context=self.ctxt, **kwargs)
         backup.create()
         return backup
@@ -85,6 +90,7 @@ class BaseBackupTest(test.TestCase):
     def _create_volume_db_entry(self, display_name='test_volume',
                                 display_description='this is a test volume',
                                 status='backing-up',
+                                previous_status='available',
                                 size=1):
         """Create a volume entry in the DB.
 
@@ -99,8 +105,36 @@ class BaseBackupTest(test.TestCase):
         vol['display_name'] = display_name
         vol['display_description'] = display_description
         vol['attach_status'] = 'detached'
+        vol['availability_zone'] = '1'
+        vol['previous_status'] = previous_status
         return db.volume_create(self.ctxt, vol)['id']
 
+    def _create_snapshot_db_entry(self, display_name='test_snapshot',
+                                  display_description='test snapshot',
+                                  status='available',
+                                  size=1,
+                                  volume_id='1',
+                                  provider_location=None):
+        """Create a snapshot entry in the DB.
+
+        Return the entry ID.
+        """
+        kwargs = {}
+        kwargs['size'] = size
+        kwargs['host'] = 'testhost'
+        kwargs['user_id'] = 'fake'
+        kwargs['project_id'] = 'fake'
+        kwargs['status'] = status
+        kwargs['display_name'] = display_name
+        kwargs['display_description'] = display_description
+        kwargs['volume_id'] = volume_id
+        kwargs['cgsnapshot_id'] = None
+        kwargs['volume_size'] = size
+        kwargs['provider_location'] = provider_location
+        snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs)
+        snapshot_obj.create()
+        return snapshot_obj
+
     def _create_volume_attach(self, volume_id):
         values = {'volume_id': volume_id,
                   'attach_status': 'attached', }
@@ -139,7 +173,9 @@ class BaseBackupTest(test.TestCase):
 class BackupTestCase(BaseBackupTest):
     """Test Case for backups."""
 
-    def test_init_host(self):
+    @mock.patch.object(lvm.LVMVolumeDriver, 'delete_snapshot')
+    @mock.patch.object(lvm.LVMVolumeDriver, 'delete_volume')
+    def test_init_host(self, mock_delete_volume, mock_delete_snapshot):
         """Make sure stuck volumes and backups are reset to correct
         states when backup_manager.init_host() is called
         """
@@ -149,9 +185,29 @@ class BackupTestCase(BaseBackupTest):
         vol2_id = self._create_volume_db_entry()
         self._create_volume_attach(vol2_id)
         db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
-        backup1 = self._create_backup_db_entry(status='creating')
-        backup2 = self._create_backup_db_entry(status='restoring')
-        backup3 = self._create_backup_db_entry(status='deleting')
+        vol3_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, vol3_id, {'status': 'available'})
+        vol4_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'})
+        temp_vol_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'})
+        vol5_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'})
+        temp_snap = self._create_snapshot_db_entry()
+        temp_snap.status = 'available'
+        temp_snap.save()
+        backup1 = self._create_backup_db_entry(status='creating',
+                                               volume_id=vol1_id)
+        backup2 = self._create_backup_db_entry(status='restoring',
+                                               volume_id=vol2_id)
+        backup3 = self._create_backup_db_entry(status='deleting',
+                                               volume_id=vol3_id)
+        self._create_backup_db_entry(status='creating',
+                                     volume_id=vol4_id,
+                                     temp_volume_id=temp_vol_id)
+        self._create_backup_db_entry(status='creating',
+                                     volume_id=vol5_id,
+                                     temp_snapshot_id=temp_snap.id)
 
         self.backup_mgr.init_host()
         vol1 = db.volume_get(self.ctxt, vol1_id)
@@ -168,11 +224,12 @@ class BackupTestCase(BaseBackupTest):
                           self.ctxt,
                           backup3.id)
 
+        self.assertTrue(mock_delete_volume.called)
+        self.assertTrue(mock_delete_snapshot.called)
+
     def test_create_backup_with_bad_volume_status(self):
-        """Test error handling when creating a backup from a volume
-        with a bad status
-        """
-        vol_id = self._create_volume_db_entry(status='available', size=1)
+        """Test creating a backup from a volume with a bad status."""
+        vol_id = self._create_volume_db_entry(status='restoring', size=1)
         backup = self._create_backup_db_entry(volume_id=vol_id)
         self.assertRaises(exception.InvalidVolume,
                           self.backup_mgr.create_backup,
@@ -180,9 +237,7 @@ class BackupTestCase(BaseBackupTest):
                           backup)
 
     def test_create_backup_with_bad_backup_status(self):
-        """Test error handling when creating a backup with a backup
-        with a bad status
-        """
+        """Test creating a backup with a backup with a bad status."""
         vol_id = self._create_volume_db_entry(size=1)
         backup = self._create_backup_db_entry(status='available',
                                               volume_id=vol_id)
@@ -203,9 +258,10 @@ class BackupTestCase(BaseBackupTest):
                           self.ctxt,
                           backup)
         vol = db.volume_get(self.ctxt, vol_id)
-        self.assertEqual(vol['status'], 'available')
+        self.assertEqual('available', vol['status'])
+        self.assertEqual('error_backing-up', vol['previous_status'])
         backup = db.backup_get(self.ctxt, backup.id)
-        self.assertEqual(backup['status'], 'error')
+        self.assertEqual('error', backup['status'])
         self.assertTrue(_mock_volume_backup.called)
 
     @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
@@ -217,9 +273,10 @@ class BackupTestCase(BaseBackupTest):
 
         self.backup_mgr.create_backup(self.ctxt, backup)
         vol = db.volume_get(self.ctxt, vol_id)
-        self.assertEqual(vol['status'], 'available')
+        self.assertEqual('available', vol['status'])
+        self.assertEqual('backing-up', vol['previous_status'])
         backup = db.backup_get(self.ctxt, backup.id)
-        self.assertEqual(backup['status'], 'available')
+        self.assertEqual('available', backup['status'])
         self.assertEqual(backup['size'], vol_size)
         self.assertTrue(_mock_volume_backup.called)
 
index b8abe0cc139ee889d11196e42747e98665b3979f..38e24912c7b71c83631a4c4faaeb4355cd97b83f 100644 (file)
@@ -19,6 +19,7 @@ import datetime
 import enum
 from oslo_config import cfg
 from oslo_utils import uuidutils
+import six
 
 from cinder.api import common
 from cinder import context
@@ -82,7 +83,8 @@ class ModelsObjectComparatorMixin(object):
 
         self.assertEqual(
             len(obj1), len(obj2),
-            "Keys mismatch: %s" % str(set(obj1.keys()) ^ set(obj2.keys())))
+            "Keys mismatch: %s" % six.text_type(
+                set(obj1.keys()) ^ set(obj2.keys())))
         for key, value in obj1.items():
             self.assertEqual(value, obj2[key])
 
@@ -1669,7 +1671,9 @@ class DBAPIBackupTestCase(BaseTest):
             'service': 'service',
             'parent_id': "parent_id",
             'size': 1000,
-            'object_count': 100}
+            'object_count': 100,
+            'temp_volume_id': 'temp_volume_id',
+            'temp_snapshot_id': 'temp_snapshot_id', }
         if one:
             return base_values
 
index 57aae64829962828354fac18fdfe79e6bc03a005..90f6649427b4554d004e8c7016fbd79dc9cd929e 100644 (file)
@@ -826,6 +826,27 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         quotas = db_utils.get_table(engine, 'quotas')
         self.assertNotIn('allocated', quotas.c)
 
+    def _check_049(self, engine, data):
+        backups = db_utils.get_table(engine, 'backups')
+        self.assertIsInstance(backups.c.temp_volume_id.type,
+                              sqlalchemy.types.VARCHAR)
+        self.assertIsInstance(backups.c.temp_snapshot_id.type,
+                              sqlalchemy.types.VARCHAR)
+
+    def _post_downgrade_049(self, engine):
+        backups = db_utils.get_table(engine, 'backups')
+        self.assertNotIn('temp_volume_id', backups.c)
+        self.assertNotIn('temp_snapshot_id', backups.c)
+
+    def _check_050(self, engine, data):
+        volumes = db_utils.get_table(engine, 'volumes')
+        self.assertIsInstance(volumes.c.previous_status.type,
+                              sqlalchemy.types.VARCHAR)
+
+    def _post_downgrade_050(self, engine):
+        volumes = db_utils.get_table(engine, 'volumes')
+        self.assertNotIn('previous_status', volumes.c)
+
     def test_walk_versions(self):
         self.walk_versions(True, False)
 
index 4765edb636c69d064005d6cbaf883e3347e2b769..1b6de793bce8b9d3e14971b90943a0b503000c5f 100644 (file)
@@ -5591,7 +5591,11 @@ class GenericVolumeDriverTestCase(DriverTestCase):
 
     def test_backup_volume(self):
         vol = tests_utils.create_volume(self.context)
-        backup = {'volume_id': vol['id']}
+        self.context.user_id = 'fake'
+        self.context.project_id = 'fake'
+        backup = tests_utils.create_backup(self.context,
+                                           vol['id'])
+        backup_obj = objects.Backup.get_by_id(self.context, backup.id)
         properties = {}
         attach_info = {'device': {'path': '/dev/null'}}
         backup_service = self.mox.CreateMock(backup_driver.BackupDriver)
@@ -5616,14 +5620,54 @@ class GenericVolumeDriverTestCase(DriverTestCase):
         os.getuid()
         utils.execute('chown', None, '/dev/null', run_as_root=True)
         f = fileutils.file_open('/dev/null').AndReturn(file('/dev/null'))
-        backup_service.backup(backup, f)
+        backup_service.backup(backup_obj, f)
         utils.execute('chown', 0, '/dev/null', run_as_root=True)
         self.volume.driver._detach_volume(self.context, attach_info, vol,
                                           properties)
         self.mox.ReplayAll()
-        self.volume.driver.backup_volume(self.context, backup, backup_service)
+        self.volume.driver.backup_volume(self.context, backup_obj,
+                                         backup_service)
         self.mox.UnsetStubs()
 
+    @mock.patch.object(utils, 'temporary_chown')
+    @mock.patch.object(fileutils, 'file_open')
+    @mock.patch.object(os_brick.initiator.connector,
+                       'get_connector_properties')
+    @mock.patch.object(db, 'volume_get')
+    def test_backup_volume_inuse(self, mock_volume_get,
+                                 mock_get_connector_properties,
+                                 mock_file_open,
+                                 mock_temporary_chown):
+        vol = tests_utils.create_volume(self.context)
+        vol['status'] = 'in-use'
+        self.context.user_id = 'fake'
+        self.context.project_id = 'fake'
+        backup = tests_utils.create_backup(self.context,
+                                           vol['id'])
+        backup_obj = objects.Backup.get_by_id(self.context, backup.id)
+        properties = {}
+        attach_info = {'device': {'path': '/dev/null'}}
+        backup_service = mock.Mock()
+
+        self.volume.driver._attach_volume = mock.MagicMock()
+        self.volume.driver._detach_volume = mock.MagicMock()
+        self.volume.driver.terminate_connection = mock.MagicMock()
+        self.volume.driver.create_snapshot = mock.MagicMock()
+        self.volume.driver.delete_snapshot = mock.MagicMock()
+        self.volume.driver.create_volume_from_snapshot = mock.MagicMock()
+
+        mock_volume_get.return_value = vol
+        mock_get_connector_properties.return_value = properties
+        f = mock_file_open.return_value = file('/dev/null')
+
+        backup_service.backup(backup_obj, f, None)
+        self.volume.driver._attach_volume.return_value = attach_info, vol
+
+        self.volume.driver.backup_volume(self.context, backup_obj,
+                                         backup_service)
+
+        mock_volume_get.assert_called_with(self.context, vol['id'])
+
     def test_restore_backup(self):
         vol = tests_utils.create_volume(self.context)
         backup = {'volume_id': vol['id'],
@@ -6008,7 +6052,12 @@ class LVMVolumeDriverTestCase(DriverTestCase):
                            mock_file_open,
                            mock_temporary_chown):
         vol = tests_utils.create_volume(self.context)
-        backup = {'volume_id': vol['id']}
+        self.context.user_id = 'fake'
+        self.context.project_id = 'fake'
+        backup = tests_utils.create_backup(self.context,
+                                           vol['id'])
+        backup_obj = objects.Backup.get_by_id(self.context, backup.id)
+
         properties = {}
         attach_info = {'device': {'path': '/dev/null'}}
         backup_service = mock.Mock()
@@ -6021,10 +6070,10 @@ class LVMVolumeDriverTestCase(DriverTestCase):
         mock_get_connector_properties.return_value = properties
         f = mock_file_open.return_value = file('/dev/null')
 
-        backup_service.backup(backup, f, None)
+        backup_service.backup(backup_obj, f, None)
         self.volume.driver._attach_volume.return_value = attach_info
 
-        self.volume.driver.backup_volume(self.context, backup,
+        self.volume.driver.backup_volume(self.context, backup_obj,
                                          backup_service)
 
         mock_volume_get.assert_called_with(self.context, vol['id'])
@@ -6066,6 +6115,44 @@ class LVMVolumeDriverTestCase(DriverTestCase):
                               'provider_location': fake_provider},
                              update)
 
+    @mock.patch.object(utils, 'temporary_chown')
+    @mock.patch.object(fileutils, 'file_open')
+    @mock.patch.object(os_brick.initiator.connector,
+                       'get_connector_properties')
+    @mock.patch.object(db, 'volume_get')
+    def test_backup_volume_inuse(self, mock_volume_get,
+                                 mock_get_connector_properties,
+                                 mock_file_open,
+                                 mock_temporary_chown):
+        vol = tests_utils.create_volume(self.context)
+        vol['status'] = 'in-use'
+        self.context.user_id = 'fake'
+        self.context.project_id = 'fake'
+        backup = tests_utils.create_backup(self.context,
+                                           vol['id'])
+        backup_obj = objects.Backup.get_by_id(self.context, backup.id)
+        properties = {}
+        attach_info = {'device': {'path': '/dev/null'}}
+        backup_service = mock.Mock()
+
+        self.volume.driver._detach_volume = mock.MagicMock()
+        self.volume.driver._attach_volume = mock.MagicMock()
+        self.volume.driver.terminate_connection = mock.MagicMock()
+        self.volume.driver.create_snapshot = mock.MagicMock()
+        self.volume.driver.delete_snapshot = mock.MagicMock()
+
+        mock_volume_get.return_value = vol
+        mock_get_connector_properties.return_value = properties
+        f = mock_file_open.return_value = file('/dev/null')
+
+        backup_service.backup(backup_obj, f, None)
+        self.volume.driver._attach_volume.return_value = attach_info
+
+        self.volume.driver.backup_volume(self.context, backup_obj,
+                                         backup_service)
+
+        mock_volume_get.assert_called_with(self.context, vol['id'])
+
 
 class ISCSITestCase(DriverTestCase):
     """Test Case for ISCSIDriver"""
index 41fce98b715d5facb7cdad48c1a8045281831384..9333fef02f25ef612abe7883efb0fa9b273acc24 100644 (file)
@@ -13,6 +13,8 @@
 #    under the License.
 #
 
+import socket
+
 from oslo_service import loopingcall
 from oslo_utils import timeutils
 
@@ -141,6 +143,34 @@ def create_cgsnapshot(ctxt,
     return db.cgsnapshot_create(ctxt, cgsnap)
 
 
+def create_backup(ctxt,
+                  volume_id,
+                  display_name='test_backup',
+                  display_description='This is a test backup',
+                  status='creating',
+                  parent_id=None,
+                  temp_volume_id=None,
+                  temp_snapshot_id=None):
+    backup = {}
+    backup['volume_id'] = volume_id
+    backup['user_id'] = ctxt.user_id
+    backup['project_id'] = ctxt.project_id
+    backup['host'] = socket.gethostname()
+    backup['availability_zone'] = '1'
+    backup['display_name'] = display_name
+    backup['display_description'] = display_description
+    backup['container'] = 'fake'
+    backup['status'] = status
+    backup['fail_reason'] = ''
+    backup['service'] = 'fake'
+    backup['parent_id'] = parent_id
+    backup['size'] = 5 * 1024 * 1024
+    backup['object_count'] = 22
+    backup['temp_volume_id'] = temp_volume_id
+    backup['temp_snapshot_id'] = temp_snapshot_id
+    return db.backup_create(ctxt, backup)
+
+
 class ZeroIntervalLoopingCall(loopingcall.FixedIntervalLoopingCall):
     def start(self, interval, **kwargs):
         kwargs['initial_delay'] = 0
index 8873a84d2524ac56b02e4b72556ed4fcba7f4d4d..28ad334004915a611e054b0378175a130c23c5bf 100644 (file)
@@ -29,6 +29,7 @@ import six
 from cinder import exception
 from cinder.i18n import _, _LE, _LW
 from cinder.image import image_utils
+from cinder import objects
 from cinder.openstack.common import fileutils
 from cinder import utils
 from cinder.volume import rpcapi as volume_rpcapi
@@ -745,10 +746,23 @@ class BaseVD(object):
 
     def backup_volume(self, context, backup, backup_service):
         """Create a new backup from an existing volume."""
-        volume = self.db.volume_get(context, backup['volume_id'])
+        volume = self.db.volume_get(context, backup.volume_id)
 
         LOG.debug('Creating a new backup for volume %s.', volume['name'])
 
+        # NOTE(xyang): Check the volume's previous status; if it was
+        # 'in-use', create a temp volume from the volume, back up the
+        # temp volume, and then clean up the temp volume; otherwise
+        # just back up the volume directly.
+        previous_status = volume.get('previous_status', None)
+        temp_vol_ref = None
+        if previous_status == "in-use":
+            temp_vol_ref = self._create_temp_cloned_volume(
+                context, volume)
+            backup.temp_volume_id = temp_vol_ref['id']
+            backup.save()
+            volume = temp_vol_ref
+
         use_multipath = self.configuration.use_multipath_for_image_xfer
         enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
         properties = utils.brick_get_connector_properties(use_multipath,
@@ -769,6 +783,10 @@ class BaseVD(object):
 
         finally:
             self._detach_volume(context, attach_info, volume, properties)
+            if temp_vol_ref:
+                self._delete_volume(context, temp_vol_ref)
+                backup.temp_volume_id = None
+                backup.save()
 
     def restore_backup(self, context, backup, volume, backup_service):
         """Restore an existing backup to a new or existing volume."""
@@ -799,6 +817,67 @@ class BaseVD(object):
         finally:
             self._detach_volume(context, attach_info, volume, properties)
 
+    def _create_temp_snapshot(self, context, volume):
+        kwargs = {
+            'volume_id': volume['id'],
+            'cgsnapshot_id': None,
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'status': 'creating',
+            'progress': '0%',
+            'volume_size': volume['size'],
+            'display_name': 'backup-snap-%s' % volume['id'],
+            'display_description': None,
+            'volume_type_id': volume['volume_type_id'],
+            'encryption_key_id': volume['encryption_key_id'],
+            'metadata': {},
+        }
+        temp_snap_ref = objects.Snapshot(context=context, **kwargs)
+        temp_snap_ref.create()
+        try:
+            self.create_snapshot(temp_snap_ref)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                with temp_snap_ref.obj_as_admin():
+                    self.db.volume_glance_metadata_delete_by_snapshot(
+                        context, temp_snap_ref.id)
+                    temp_snap_ref.destroy()
+
+        temp_snap_ref.status = 'available'
+        temp_snap_ref.save()
+        return temp_snap_ref
+
+    def _create_temp_cloned_volume(self, context, volume):
+        temp_volume = {
+            'size': volume['size'],
+            'display_name': 'backup-vol-%s' % volume['id'],
+            'host': volume['host'],
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'status': 'creating',
+        }
+        temp_vol_ref = self.db.volume_create(context, temp_volume)
+        try:
+            self.create_cloned_volume(temp_vol_ref, volume)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self.db.volume_destroy(context, temp_vol_ref['id'])
+
+        self.db.volume_update(context, temp_vol_ref['id'],
+                              {'status': 'available'})
+        return temp_vol_ref
+
+    def _delete_snapshot(self, context, snapshot):
+        self.delete_snapshot(snapshot)
+        with snapshot.obj_as_admin():
+            self.db.volume_glance_metadata_delete_by_snapshot(
+                context, snapshot.id)
+            snapshot.destroy()
+
+    def _delete_volume(self, context, volume):
+        self.delete_volume(volume)
+        self.db.volume_destroy(context, volume['id'])
+
     def clear_download(self, context, volume):
         """Clean up after an interrupted image copy."""
         pass
index 3c57b632bd203b0de079ba84cf07c7fe67fe4f9f..56512205c324a563bfacb7c37b7d21abc0e393c5 100644 (file)
@@ -467,11 +467,26 @@ class LVMVolumeDriver(driver.VolumeDriver):
 
     def backup_volume(self, context, backup, backup_service):
         """Create a new backup from an existing volume."""
-        volume = self.db.volume_get(context, backup['volume_id'])
-        volume_path = self.local_path(volume)
-        with utils.temporary_chown(volume_path):
-            with fileutils.file_open(volume_path) as volume_file:
-                backup_service.backup(backup, volume_file)
+        volume = self.db.volume_get(context, backup.volume_id)
+        temp_snapshot = None
+        previous_status = volume['previous_status']
+        if previous_status == 'in-use':
+            temp_snapshot = self._create_temp_snapshot(context, volume)
+            backup.temp_snapshot_id = temp_snapshot.id
+            backup.save()
+            volume_path = self.local_path(temp_snapshot)
+        else:
+            volume_path = self.local_path(volume)
+
+        try:
+            with utils.temporary_chown(volume_path):
+                with fileutils.file_open(volume_path) as volume_file:
+                    backup_service.backup(backup, volume_file)
+        finally:
+            if temp_snapshot:
+                self._delete_snapshot(context, temp_snapshot)
+                backup.temp_snapshot_id = None
+                backup.save()
 
     def restore_backup(self, context, backup, volume, backup_service):
         """Restore an existing backup to a new or existing volume."""