name = backup.get('name', None)
description = backup.get('description', None)
incremental = backup.get('incremental', False)
-
+ force = backup.get('force', False)
LOG.info(_LI("Creating backup of volume %(volume_id)s in container"
" %(container)s"),
                 {'volume_id': volume_id, 'container': container})
try:
new_backup = self.backup_api.create(context, name, description,
volume_id, container,
- incremental)
+ incremental, None, force)
except exception.InvalidVolume as error:
raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.VolumeNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
return [srv['host'] for srv in services if not srv['disabled']]
def create(self, context, name, description, volume_id,
- container, incremental=False, availability_zone=None):
+ container, incremental=False, availability_zone=None,
+ force=False):
"""Make the RPC call to create a volume backup."""
check_policy(context, 'create')
volume = self.volume_api.get(context, volume_id)
- if volume['status'] != "available":
+ if volume['status'] not in ["available", "in-use"]:
msg = (_('Volume to be backed up must be available '
- 'but the current status is "%s".')
+ 'or in-use, but the current status is "%s".')
% volume['status'])
raise exception.InvalidVolume(reason=msg)
+        elif volume['status'] == "in-use" and not force:
+ msg = _('Backing up an in-use volume must use '
+ 'the force flag.')
+ raise exception.InvalidVolume(reason=msg)
+ previous_status = volume['status']
volume_host = volume_utils.extract_host(volume['host'], 'host')
if not self._is_backup_service_enabled(volume, volume_host):
raise exception.ServiceNotFound(service_id='cinder-backup')
        msg = _('No backups available to do an '
                'incremental backup.')
        raise exception.InvalidBackup(reason=msg)
- self.db.volume_update(context, volume_id, {'status': 'backing-up'})
+ self.db.volume_update(context, volume_id,
+ {'status': 'backing-up',
+ 'previous_status': previous_status})
try:
kwargs = {
'user_id': context.user_id,
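With the force flag plumbed through the API, a client opts into backing up an in-use volume by adding "force" to the request body. A minimal sketch, modeled on the API unit tests later in this change (the volume id is a placeholder):

    import json
    import webob

    # Body shape follows test_create_backup_inuse_force below.
    body = {"backup": {"display_name": "nightly001",
                       "volume_id": "<volume-uuid>",  # placeholder
                       "container": "nightlybackups",
                       "force": True}}
    req = webob.Request.blank('/v2/fake/backups')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    # res = req.get_response(fakes.wsgi_app())
    # Expect 202; the same request without "force" returns 400.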
class BackupManager(manager.SchedulerDependentManager):
"""Manages backup of block storage devices."""
- RPC_API_VERSION = '1.0'
+ RPC_API_VERSION = '1.2'
target = messaging.Target(version=RPC_API_VERSION)
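Bumping RPC_API_VERSION to '1.2' advertises the new create_backup behavior to RPC callers. A hedged sketch of a version-pinned call using the standard oslo.messaging client API (the client attribute here is an assumption, not code from this change):

    # Sketch only: pin the prepared client so older managers refuse
    # calls they cannot honor.
    cctxt = self.client.prepare(server=backup.host, version='1.2')
    cctxt.cast(ctxt, 'create_backup', backup=backup)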
backend = self._get_volume_backend(host=volume_host)
attachments = volume['volume_attachment']
if attachments:
- if volume['status'] == 'backing-up':
- LOG.info(_LI('Resetting volume %s to available '
- '(was backing-up).'), volume['id'])
+ if (volume['status'] == 'backing-up' and
+ volume['previous_status'] == 'available'):
+ LOG.info(_LI('Resetting volume %(vol_id)s to previous '
+ 'status %(status)s (was backing-up).'),
+ {'vol_id': volume['id'],
+ 'status': volume['previous_status']})
mgr = self._get_manager(backend)
for attachment in attachments:
if (attachment['attached_host'] == self.host and
attachment['instance_uuid'] is None):
mgr.detach_volume(ctxt, volume['id'],
attachment['id'])
- if volume['status'] == 'restoring-backup':
+ elif (volume['status'] == 'backing-up' and
+ volume['previous_status'] == 'in-use'):
+ LOG.info(_LI('Resetting volume %(vol_id)s to previous '
+ 'status %(status)s (was backing-up).'),
+ {'vol_id': volume['id'],
+ 'status': volume['previous_status']})
+ self.db.volume_update(ctxt, volume['id'],
+                                      {'status': volume['previous_status']})
+ elif volume['status'] == 'restoring-backup':
LOG.info(_LI('setting volume %s to error_restoring '
'(was restoring-backup).'), volume['id'])
mgr = self._get_manager(backend)
LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
self.delete_backup(ctxt, backup)
+ self._cleanup_temp_volumes_snapshots(backups)
+
+ def _cleanup_temp_volumes_snapshots(self, backups):
+ # NOTE(xyang): If the service crashes or gets restarted during the
+ # backup operation, there could be temporary volumes or snapshots
+ # that are not deleted. Make sure any temporary volumes or snapshots
+        # created by the backup job are deleted when the service is started.
+ ctxt = context.get_admin_context()
+ for backup in backups:
+ volume = self.db.volume_get(ctxt, backup.volume_id)
+ volume_host = volume_utils.extract_host(volume['host'], 'backend')
+ backend = self._get_volume_backend(host=volume_host)
+ mgr = self._get_manager(backend)
+ if backup.temp_volume_id and backup.status == 'error':
+ temp_volume = self.db.volume_get(ctxt,
+ backup.temp_volume_id)
+                # The temp volume should be deleted directly through
+                # the volume driver, not through the volume manager.
+ mgr.driver.delete_volume(temp_volume)
+ self.db.volume_destroy(ctxt, temp_volume['id'])
+ if backup.temp_snapshot_id and backup.status == 'error':
+ temp_snapshot = objects.Snapshot.get_by_id(
+ ctxt, backup.temp_snapshot_id)
+                # The temp snapshot should be deleted directly through the
+                # volume driver, not through the volume manager.
+ mgr.driver.delete_snapshot(temp_snapshot)
+ with temp_snapshot.obj_as_admin():
+ self.db.volume_glance_metadata_delete_by_snapshot(
+ ctxt, temp_snapshot.id)
+ temp_snapshot.destroy()
+
def create_backup(self, context, backup):
"""Create volume backups using configured backup service."""
volume_id = backup.volume_id
volume = self.db.volume_get(context, volume_id)
+ previous_status = volume.get('previous_status', None)
LOG.info(_LI('Create backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
except Exception as err:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
- {'status': 'available'})
+ {'status': previous_status,
+ 'previous_status': 'error_backing-up'})
self._update_backup_error(backup, context, six.text_type(err))
- self.db.volume_update(context, volume_id, {'status': 'available'})
+ # Restore the original status.
+ self.db.volume_update(context, volume_id,
+ {'status': previous_status,
+ 'previous_status': 'backing-up'})
backup.status = 'available'
backup.size = volume['size']
backup.availability_zone = self.az
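Taken together with the API-side update, the two status fields round-trip as follows for an in-use volume (an illustrative summary of the code in this change, not new behavior):

    # status / previous_status transitions for an in-use volume:
    #   before create():   status='in-use',     previous_status=None (e.g.)
    #   while backing up:  status='backing-up', previous_status='in-use'
    #   on success:        status='in-use',     previous_status='backing-up'
    #   on failure:        status='in-use',     previous_status='error_backing-up'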
API version history:
1.0 - Initial version.
- 1.1 - Changed methods to accept backup objects instaed of IDs.
+ 1.1 - Changed methods to accept backup objects instead of IDs.
"""
BASE_RPC_API_VERSION = '1.0'
--- /dev/null
+# Copyright (c) 2015 EMC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ backups = Table('backups', meta, autoload=True)
+ temp_volume_id = Column('temp_volume_id', String(length=36))
+ temp_snapshot_id = Column('temp_snapshot_id', String(length=36))
+
+ backups.create_column(temp_volume_id)
+ backups.update().values(temp_volume_id=None).execute()
+
+ backups.create_column(temp_snapshot_id)
+ backups.update().values(temp_snapshot_id=None).execute()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ backups = Table('backups', meta, autoload=True)
+ temp_volume_id = backups.columns.temp_volume_id
+ temp_snapshot_id = backups.columns.temp_snapshot_id
+
+ backups.drop_column(temp_volume_id)
+ backups.drop_column(temp_snapshot_id)
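Once the migration has been applied (e.g. with "cinder-manage db sync"), the new columns can be sanity-checked the same way the migration tests below do. A minimal standalone sketch; the SQLite URL is an assumed local test database:

    from sqlalchemy import MetaData, Table, create_engine

    engine = create_engine('sqlite:///cinder.sqlite')  # assumed test DB
    meta = MetaData()
    meta.bind = engine
    backups = Table('backups', meta, autoload=True)
    assert 'temp_volume_id' in backups.c
    assert 'temp_snapshot_id' in backups.c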
--- /dev/null
+# Copyright (c) 2015 EMC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ volumes = Table('volumes', meta, autoload=True)
+ previous_status = Column('previous_status', String(length=255))
+
+ volumes.create_column(previous_status)
+ volumes.update().values(previous_status=None).execute()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ volumes = Table('volumes', meta, autoload=True)
+ previous_status = volumes.columns.previous_status
+
+ volumes.drop_column(previous_status)
replication_extended_status = Column(String(255))
replication_driver_data = Column(String(255))
+ previous_status = Column(String(255))
+
consistencygroup = relationship(
ConsistencyGroup,
backref="volumes",
service = Column(String(255))
size = Column(Integer)
object_count = Column(Integer)
+ temp_volume_id = Column(String(36))
+ temp_snapshot_id = Column(String(36))
@validates('fail_reason')
def validate_fail_reason(self, key, fail_reason):
'service': fields.StringField(nullable=True),
'object_count': fields.IntegerField(),
+
+ 'temp_volume_id': fields.StringField(nullable=True),
+ 'temp_snapshot_id': fields.StringField(nullable=True),
}
obj_extra_fields = ['name']
'replication_status': fields.StringField(nullable=True),
'replication_extended_status': fields.StringField(nullable=True),
'replication_driver_data': fields.StringField(nullable=True),
+
+ 'previous_status': fields.StringField(nullable=True),
}
# NOTE(thangp): obj_extra_fields is used to hold properties that are not
db.volume_destroy(context.get_admin_context(), volume_id)
+ @mock.patch('cinder.db.service_get_all_by_topic')
+ def test_create_backup_inuse_no_force(self,
+ _mock_service_get_all_by_topic):
+ _mock_service_get_all_by_topic.return_value = [
+ {'availability_zone': "fake_az", 'host': 'test_host',
+ 'disabled': 0, 'updated_at': timeutils.utcnow()}]
+
+ volume_id = utils.create_volume(self.context, size=5,
+ status='in-use')['id']
+
+ body = {"backup": {"display_name": "nightly001",
+ "display_description":
+ "Nightly Backup 03-Sep-2012",
+ "volume_id": volume_id,
+ "container": "nightlybackups",
+ }
+ }
+ req = webob.Request.blank('/v2/fake/backups')
+ req.method = 'POST'
+ req.headers['Content-Type'] = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(400, res.status_int)
+ self.assertEqual(400, res_dict['badRequest']['code'])
+ self.assertIsNotNone(res_dict['badRequest']['message'])
+
+ db.volume_destroy(context.get_admin_context(), volume_id)
+
+ @mock.patch('cinder.db.service_get_all_by_topic')
+ def test_create_backup_inuse_force(self, _mock_service_get_all_by_topic):
+ _mock_service_get_all_by_topic.return_value = [
+ {'availability_zone': "fake_az", 'host': 'test_host',
+ 'disabled': 0, 'updated_at': timeutils.utcnow()}]
+
+ volume_id = utils.create_volume(self.context, size=5,
+ status='in-use')['id']
+ backup_id = self._create_backup(volume_id, status="available")
+ body = {"backup": {"display_name": "nightly001",
+ "display_description":
+ "Nightly Backup 03-Sep-2012",
+ "volume_id": volume_id,
+ "container": "nightlybackups",
+ "force": True,
+ }
+ }
+ req = webob.Request.blank('/v2/fake/backups')
+ req.method = 'POST'
+ req.headers['Content-Type'] = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(202, res.status_int)
+ self.assertIn('id', res_dict['backup'])
+ self.assertTrue(_mock_service_get_all_by_topic.called)
+
+ db.backup_destroy(context.get_admin_context(), backup_id)
+ db.volume_destroy(context.get_admin_context(), volume_id)
+
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_backup_snapshot_json(self, _mock_service_get_all_by_topic):
_mock_service_get_all_by_topic.return_value = [
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
- def test_create_backup_with_InvalidVolume2(self):
- # need to create the volume referenced below first
- volume_id = utils.create_volume(self.context, size=5,
- status='in-use')['id']
- body = {"backup": {"display_name": "nightly001",
- "display_description":
- "Nightly Backup 03-Sep-2012",
- "volume_id": volume_id,
- "container": "nightlybackups",
- }
- }
- req = webob.Request.blank('/v2/fake/backups')
- req.method = 'POST'
- req.headers['Content-Type'] = 'application/json'
- req.body = json.dumps(body)
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
-
- self.assertEqual(res.status_int, 400)
- self.assertEqual(res_dict['badRequest']['code'], 400)
-
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_backup_WithOUT_enabled_backup_service(
self,
def terminate_connection(self, volume, connector):
self.log_action('terminate_connection', volume)
+ def create_cloned_volume(self, volume, src_vol):
+ self.log_action('create_cloned_volume', volume)
+
_LOGS = []
@staticmethod
'availability_zone': 'fake_availability_zone',
'status': 'available',
'attach_status': 'detached',
+ 'previous_status': None
}
for name, field in objects.Volume.fields.items():
'display_description': 'fake_description',
'user_id': 'fake_user',
'project_id': 'fake_project',
+ 'temp_volume_id': None,
+ 'temp_snapshot_id': None,
}
admin_context = backup_destroy.call_args[0][0]
self.assertTrue(admin_context.is_admin)
+ def test_obj_field_temp_volume_snapshot_id(self):
+ backup = objects.Backup(context=self.context,
+ temp_volume_id='2',
+ temp_snapshot_id='3')
+ self.assertEqual('2', backup.temp_volume_id)
+ self.assertEqual('3', backup.temp_snapshot_id)
+
class TestBackupList(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.backup_get_all', return_value=[fake_backup])
self.assertEqual('volume-2', volume.name)
self.assertEqual('2', volume.name_id)
+ def test_obj_field_previous_status(self):
+ volume = objects.Volume(context=self.context,
+ previous_status='backing-up')
+ self.assertEqual('backing-up', volume.previous_status)
+
class TestVolumeList(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.volume_get_all')
from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_service_with_verify as fake_service
+from cinder.volume.drivers import lvm
CONF = cfg.CONF
size=1,
object_count=0,
project_id='fake',
- service=None):
+ service=None,
+ temp_volume_id=None,
+ temp_snapshot_id=None):
"""Create a backup entry in the DB.
Return the entry ID
kwargs['parent_id'] = None
kwargs['size'] = size
kwargs['object_count'] = object_count
+ kwargs['temp_volume_id'] = temp_volume_id
+ kwargs['temp_snapshot_id'] = temp_snapshot_id
backup = objects.Backup(context=self.ctxt, **kwargs)
backup.create()
return backup
def _create_volume_db_entry(self, display_name='test_volume',
display_description='this is a test volume',
status='backing-up',
+ previous_status='available',
size=1):
"""Create a volume entry in the DB.
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'
+ vol['availability_zone'] = '1'
+ vol['previous_status'] = previous_status
return db.volume_create(self.ctxt, vol)['id']
+ def _create_snapshot_db_entry(self, display_name='test_snapshot',
+ display_description='test snapshot',
+ status='available',
+ size=1,
+ volume_id='1',
+ provider_location=None):
+ """Create a snapshot entry in the DB.
+
+ Return the entry ID.
+ """
+ kwargs = {}
+ kwargs['size'] = size
+ kwargs['host'] = 'testhost'
+ kwargs['user_id'] = 'fake'
+ kwargs['project_id'] = 'fake'
+ kwargs['status'] = status
+ kwargs['display_name'] = display_name
+ kwargs['display_description'] = display_description
+ kwargs['volume_id'] = volume_id
+ kwargs['cgsnapshot_id'] = None
+ kwargs['volume_size'] = size
+ kwargs['provider_location'] = provider_location
+ snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs)
+ snapshot_obj.create()
+ return snapshot_obj
+
def _create_volume_attach(self, volume_id):
values = {'volume_id': volume_id,
'attach_status': 'attached', }
class BackupTestCase(BaseBackupTest):
"""Test Case for backups."""
- def test_init_host(self):
+ @mock.patch.object(lvm.LVMVolumeDriver, 'delete_snapshot')
+ @mock.patch.object(lvm.LVMVolumeDriver, 'delete_volume')
+ def test_init_host(self, mock_delete_volume, mock_delete_snapshot):
"""Make sure stuck volumes and backups are reset to correct
states when backup_manager.init_host() is called
"""
vol2_id = self._create_volume_db_entry()
self._create_volume_attach(vol2_id)
db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
- backup1 = self._create_backup_db_entry(status='creating')
- backup2 = self._create_backup_db_entry(status='restoring')
- backup3 = self._create_backup_db_entry(status='deleting')
+ vol3_id = self._create_volume_db_entry()
+ db.volume_update(self.ctxt, vol3_id, {'status': 'available'})
+ vol4_id = self._create_volume_db_entry()
+ db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'})
+ temp_vol_id = self._create_volume_db_entry()
+ db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'})
+ vol5_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'})
+ temp_snap = self._create_snapshot_db_entry()
+ temp_snap.status = 'available'
+ temp_snap.save()
+ backup1 = self._create_backup_db_entry(status='creating',
+ volume_id=vol1_id)
+ backup2 = self._create_backup_db_entry(status='restoring',
+ volume_id=vol2_id)
+ backup3 = self._create_backup_db_entry(status='deleting',
+ volume_id=vol3_id)
+ self._create_backup_db_entry(status='creating',
+ volume_id=vol4_id,
+ temp_volume_id=temp_vol_id)
+ self._create_backup_db_entry(status='creating',
+ volume_id=vol5_id,
+ temp_snapshot_id=temp_snap.id)
self.backup_mgr.init_host()
vol1 = db.volume_get(self.ctxt, vol1_id)
self.ctxt,
backup3.id)
+ self.assertTrue(mock_delete_volume.called)
+ self.assertTrue(mock_delete_snapshot.called)
+
def test_create_backup_with_bad_volume_status(self):
- """Test error handling when creating a backup from a volume
- with a bad status
- """
- vol_id = self._create_volume_db_entry(status='available', size=1)
+ """Test creating a backup from a volume with a bad status."""
+ vol_id = self._create_volume_db_entry(status='restoring', size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.create_backup,
backup)
def test_create_backup_with_bad_backup_status(self):
- """Test error handling when creating a backup with a backup
- with a bad status
- """
+ """Test creating a backup with a backup with a bad status."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id)
self.ctxt,
backup)
vol = db.volume_get(self.ctxt, vol_id)
- self.assertEqual(vol['status'], 'available')
+ self.assertEqual('available', vol['status'])
+ self.assertEqual('error_backing-up', vol['previous_status'])
backup = db.backup_get(self.ctxt, backup.id)
- self.assertEqual(backup['status'], 'error')
+ self.assertEqual('error', backup['status'])
self.assertTrue(_mock_volume_backup.called)
@mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
self.backup_mgr.create_backup(self.ctxt, backup)
vol = db.volume_get(self.ctxt, vol_id)
- self.assertEqual(vol['status'], 'available')
+ self.assertEqual('available', vol['status'])
+ self.assertEqual('backing-up', vol['previous_status'])
backup = db.backup_get(self.ctxt, backup.id)
- self.assertEqual(backup['status'], 'available')
+ self.assertEqual('available', backup['status'])
self.assertEqual(backup['size'], vol_size)
self.assertTrue(_mock_volume_backup.called)
import enum
from oslo_config import cfg
from oslo_utils import uuidutils
+import six
from cinder.api import common
from cinder import context
self.assertEqual(
len(obj1), len(obj2),
- "Keys mismatch: %s" % str(set(obj1.keys()) ^ set(obj2.keys())))
+ "Keys mismatch: %s" % six.text_type(
+ set(obj1.keys()) ^ set(obj2.keys())))
for key, value in obj1.items():
self.assertEqual(value, obj2[key])
'service': 'service',
'parent_id': "parent_id",
'size': 1000,
- 'object_count': 100}
+ 'object_count': 100,
+ 'temp_volume_id': 'temp_volume_id',
+ 'temp_snapshot_id': 'temp_snapshot_id', }
if one:
return base_values
quotas = db_utils.get_table(engine, 'quotas')
self.assertNotIn('allocated', quotas.c)
+ def _check_049(self, engine, data):
+ backups = db_utils.get_table(engine, 'backups')
+ self.assertIsInstance(backups.c.temp_volume_id.type,
+ sqlalchemy.types.VARCHAR)
+ self.assertIsInstance(backups.c.temp_snapshot_id.type,
+ sqlalchemy.types.VARCHAR)
+
+ def _post_downgrade_049(self, engine):
+ backups = db_utils.get_table(engine, 'backups')
+ self.assertNotIn('temp_volume_id', backups.c)
+ self.assertNotIn('temp_snapshot_id', backups.c)
+
+ def _check_050(self, engine, data):
+ volumes = db_utils.get_table(engine, 'volumes')
+ self.assertIsInstance(volumes.c.previous_status.type,
+ sqlalchemy.types.VARCHAR)
+
+ def _post_downgrade_050(self, engine):
+ volumes = db_utils.get_table(engine, 'volumes')
+ self.assertNotIn('previous_status', volumes.c)
+
def test_walk_versions(self):
self.walk_versions(True, False)
def test_backup_volume(self):
vol = tests_utils.create_volume(self.context)
- backup = {'volume_id': vol['id']}
+ self.context.user_id = 'fake'
+ self.context.project_id = 'fake'
+ backup = tests_utils.create_backup(self.context,
+ vol['id'])
+ backup_obj = objects.Backup.get_by_id(self.context, backup.id)
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = self.mox.CreateMock(backup_driver.BackupDriver)
os.getuid()
utils.execute('chown', None, '/dev/null', run_as_root=True)
f = fileutils.file_open('/dev/null').AndReturn(file('/dev/null'))
- backup_service.backup(backup, f)
+ backup_service.backup(backup_obj, f)
utils.execute('chown', 0, '/dev/null', run_as_root=True)
self.volume.driver._detach_volume(self.context, attach_info, vol,
properties)
self.mox.ReplayAll()
- self.volume.driver.backup_volume(self.context, backup, backup_service)
+ self.volume.driver.backup_volume(self.context, backup_obj,
+ backup_service)
self.mox.UnsetStubs()
+ @mock.patch.object(utils, 'temporary_chown')
+ @mock.patch.object(fileutils, 'file_open')
+ @mock.patch.object(os_brick.initiator.connector,
+ 'get_connector_properties')
+ @mock.patch.object(db, 'volume_get')
+ def test_backup_volume_inuse(self, mock_volume_get,
+ mock_get_connector_properties,
+ mock_file_open,
+ mock_temporary_chown):
+ vol = tests_utils.create_volume(self.context)
+ vol['status'] = 'in-use'
+ self.context.user_id = 'fake'
+ self.context.project_id = 'fake'
+ backup = tests_utils.create_backup(self.context,
+ vol['id'])
+ backup_obj = objects.Backup.get_by_id(self.context, backup.id)
+ properties = {}
+ attach_info = {'device': {'path': '/dev/null'}}
+ backup_service = mock.Mock()
+
+ self.volume.driver._attach_volume = mock.MagicMock()
+ self.volume.driver._detach_volume = mock.MagicMock()
+ self.volume.driver.terminate_connection = mock.MagicMock()
+ self.volume.driver.create_snapshot = mock.MagicMock()
+ self.volume.driver.delete_snapshot = mock.MagicMock()
+ self.volume.driver.create_volume_from_snapshot = mock.MagicMock()
+
+ mock_volume_get.return_value = vol
+ mock_get_connector_properties.return_value = properties
+ f = mock_file_open.return_value = file('/dev/null')
+
+ backup_service.backup(backup_obj, f, None)
+ self.volume.driver._attach_volume.return_value = attach_info, vol
+
+ self.volume.driver.backup_volume(self.context, backup_obj,
+ backup_service)
+
+ mock_volume_get.assert_called_with(self.context, vol['id'])
+
def test_restore_backup(self):
vol = tests_utils.create_volume(self.context)
backup = {'volume_id': vol['id'],
mock_file_open,
mock_temporary_chown):
vol = tests_utils.create_volume(self.context)
- backup = {'volume_id': vol['id']}
+ self.context.user_id = 'fake'
+ self.context.project_id = 'fake'
+ backup = tests_utils.create_backup(self.context,
+ vol['id'])
+ backup_obj = objects.Backup.get_by_id(self.context, backup.id)
+
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = file('/dev/null')
- backup_service.backup(backup, f, None)
+ backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info
- self.volume.driver.backup_volume(self.context, backup,
+ self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
'provider_location': fake_provider},
update)
+ @mock.patch.object(utils, 'temporary_chown')
+ @mock.patch.object(fileutils, 'file_open')
+ @mock.patch.object(os_brick.initiator.connector,
+ 'get_connector_properties')
+ @mock.patch.object(db, 'volume_get')
+ def test_backup_volume_inuse(self, mock_volume_get,
+ mock_get_connector_properties,
+ mock_file_open,
+ mock_temporary_chown):
+ vol = tests_utils.create_volume(self.context)
+ vol['status'] = 'in-use'
+ self.context.user_id = 'fake'
+ self.context.project_id = 'fake'
+ backup = tests_utils.create_backup(self.context,
+ vol['id'])
+ backup_obj = objects.Backup.get_by_id(self.context, backup.id)
+ properties = {}
+ attach_info = {'device': {'path': '/dev/null'}}
+ backup_service = mock.Mock()
+
+ self.volume.driver._detach_volume = mock.MagicMock()
+ self.volume.driver._attach_volume = mock.MagicMock()
+ self.volume.driver.terminate_connection = mock.MagicMock()
+ self.volume.driver.create_snapshot = mock.MagicMock()
+ self.volume.driver.delete_snapshot = mock.MagicMock()
+
+ mock_volume_get.return_value = vol
+ mock_get_connector_properties.return_value = properties
+ f = mock_file_open.return_value = file('/dev/null')
+
+ backup_service.backup(backup_obj, f, None)
+ self.volume.driver._attach_volume.return_value = attach_info
+
+ self.volume.driver.backup_volume(self.context, backup_obj,
+ backup_service)
+
+ mock_volume_get.assert_called_with(self.context, vol['id'])
+
class ISCSITestCase(DriverTestCase):
"""Test Case for ISCSIDriver"""
# under the License.
#
+import socket
+
from oslo_service import loopingcall
from oslo_utils import timeutils
return db.cgsnapshot_create(ctxt, cgsnap)
+def create_backup(ctxt,
+ volume_id,
+ display_name='test_backup',
+ display_description='This is a test backup',
+ status='creating',
+ parent_id=None,
+ temp_volume_id=None,
+ temp_snapshot_id=None):
+ backup = {}
+ backup['volume_id'] = volume_id
+ backup['user_id'] = ctxt.user_id
+ backup['project_id'] = ctxt.project_id
+ backup['host'] = socket.gethostname()
+ backup['availability_zone'] = '1'
+ backup['display_name'] = display_name
+ backup['display_description'] = display_description
+ backup['container'] = 'fake'
+ backup['status'] = status
+ backup['fail_reason'] = ''
+ backup['service'] = 'fake'
+ backup['parent_id'] = parent_id
+ backup['size'] = 5 * 1024 * 1024
+ backup['object_count'] = 22
+ backup['temp_volume_id'] = temp_volume_id
+ backup['temp_snapshot_id'] = temp_snapshot_id
+ return db.backup_create(ctxt, backup)
+
+
class ZeroIntervalLoopingCall(loopingcall.FixedIntervalLoopingCall):
def start(self, interval, **kwargs):
kwargs['initial_delay'] = 0
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.image import image_utils
+from cinder import objects
from cinder.openstack.common import fileutils
from cinder import utils
from cinder.volume import rpcapi as volume_rpcapi
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
- volume = self.db.volume_get(context, backup['volume_id'])
+ volume = self.db.volume_get(context, backup.volume_id)
LOG.debug('Creating a new backup for volume %s.', volume['name'])
+        # NOTE(xyang): Check the volume's previous status; if it was
+        # 'in-use', create a temp volume from the source volume, back up
+        # the temp volume, and then clean up the temp volume; if it was
+        # 'available', just back up the volume directly.
+ previous_status = volume.get('previous_status', None)
+ temp_vol_ref = None
+        if previous_status == "in-use":
+ temp_vol_ref = self._create_temp_cloned_volume(
+ context, volume)
+ backup.temp_volume_id = temp_vol_ref['id']
+ backup.save()
+ volume = temp_vol_ref
+
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
properties = utils.brick_get_connector_properties(use_multipath,
finally:
self._detach_volume(context, attach_info, volume, properties)
+ if temp_vol_ref:
+ self._delete_volume(context, temp_vol_ref)
+ backup.temp_volume_id = None
+ backup.save()
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
finally:
self._detach_volume(context, attach_info, volume, properties)
+ def _create_temp_snapshot(self, context, volume):
+ kwargs = {
+ 'volume_id': volume['id'],
+ 'cgsnapshot_id': None,
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'status': 'creating',
+ 'progress': '0%',
+ 'volume_size': volume['size'],
+ 'display_name': 'backup-snap-%s' % volume['id'],
+ 'display_description': None,
+ 'volume_type_id': volume['volume_type_id'],
+ 'encryption_key_id': volume['encryption_key_id'],
+ 'metadata': {},
+ }
+ temp_snap_ref = objects.Snapshot(context=context, **kwargs)
+ temp_snap_ref.create()
+ try:
+ self.create_snapshot(temp_snap_ref)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ with temp_snap_ref.obj_as_admin():
+ self.db.volume_glance_metadata_delete_by_snapshot(
+ context, temp_snap_ref.id)
+ temp_snap_ref.destroy()
+
+ temp_snap_ref.status = 'available'
+ temp_snap_ref.save()
+ return temp_snap_ref
+
+ def _create_temp_cloned_volume(self, context, volume):
+ temp_volume = {
+ 'size': volume['size'],
+ 'display_name': 'backup-vol-%s' % volume['id'],
+ 'host': volume['host'],
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'status': 'creating',
+ }
+ temp_vol_ref = self.db.volume_create(context, temp_volume)
+ try:
+ self.create_cloned_volume(temp_vol_ref, volume)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.db.volume_destroy(context, temp_vol_ref['id'])
+
+ self.db.volume_update(context, temp_vol_ref['id'],
+ {'status': 'available'})
+ return temp_vol_ref
+
+ def _delete_snapshot(self, context, snapshot):
+ self.delete_snapshot(snapshot)
+ with snapshot.obj_as_admin():
+ self.db.volume_glance_metadata_delete_by_snapshot(
+ context, snapshot.id)
+ snapshot.destroy()
+
+ def _delete_volume(self, context, volume):
+ self.delete_volume(volume)
+ self.db.volume_destroy(context, volume['id'])
+
def clear_download(self, context, volume):
"""Clean up after an interrupted image copy."""
pass
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
- volume = self.db.volume_get(context, backup['volume_id'])
- volume_path = self.local_path(volume)
- with utils.temporary_chown(volume_path):
- with fileutils.file_open(volume_path) as volume_file:
- backup_service.backup(backup, volume_file)
+ volume = self.db.volume_get(context, backup.volume_id)
+ temp_snapshot = None
+ previous_status = volume['previous_status']
+ if previous_status == 'in-use':
+ temp_snapshot = self._create_temp_snapshot(context, volume)
+ backup.temp_snapshot_id = temp_snapshot.id
+ backup.save()
+ volume_path = self.local_path(temp_snapshot)
+ else:
+ volume_path = self.local_path(volume)
+
+ try:
+ with utils.temporary_chown(volume_path):
+ with fileutils.file_open(volume_path) as volume_file:
+ backup_service.backup(backup, volume_file)
+ finally:
+ if temp_snapshot:
+ self._delete_snapshot(context, temp_snapshot)
+ backup.temp_snapshot_id = None
+ backup.save()
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""