'description': backup.get('display_description'),
'fail_reason': backup.get('fail_reason'),
'volume_id': backup.get('volume_id'),
- 'links': self._get_links(request, backup['id'])
+ 'links': self._get_links(request, backup['id']),
+ 'is_incremental': backup.is_incremental,
+ 'has_dependent_backups': backup.has_dependent_backups,
}
}
backup.size = volume['size']
backup.availability_zone = self.az
backup.save()
+        # Increment the parent backup's num_dependent_backups once the
+        # child backup has been created successfully.
+ if backup.parent_id:
+ parent_backup = objects.Backup.get_by_id(context,
+ backup.parent_id)
+ parent_backup.num_dependent_backups += 1
+ parent_backup.save()
LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
self._notify_about_backup_usage(context, backup, "create.end")
LOG.exception(_LE("Failed to update usages deleting backup"))
backup.destroy()
-
+        # If this is an incremental backup, decrement the parent
+        # backup's num_dependent_backups.
+ if backup.parent_id:
+ parent_backup = objects.Backup.get_by_id(context,
+ backup.parent_id)
+ if parent_backup.has_dependent_backups:
+ parent_backup.num_dependent_backups -= 1
+ parent_backup.save()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations,
--- /dev/null
+# Copyright (c) 2015 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, Table
+
+
+def upgrade(migrate_engine):
+    """Add num_dependent_backups column to backups.
+
+    The new integer column defaults to 0 and is backfilled for existing
+    rows by counting the backups whose parent_id references each row.
+    """
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    backups = Table('backups', meta, autoload=True)
+    num_dependent_backups = Column('num_dependent_backups', Integer, default=0)
+    backups.create_column(num_dependent_backups)
+    # Backfill existing rows: a backup's dependent count is the number of
+    # backups that name it as their parent.  This runs one child-lookup
+    # query per existing backup row.
+    backups_list = list(backups.select().execute())
+    for backup in backups_list:
+        dep_bks_list = list(backups.select().where(backups.columns.parent_id ==
+                                                   backup.id).execute())
+        if dep_bks_list:
+            backups.update().where(backups.columns.id == backup.id).values(
+                num_dependent_backups=len(dep_bks_list)).execute()
+
+
+def downgrade(migrate_engine):
+    """Remove the num_dependent_backups column from backups."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    backups = Table('backups', meta, autoload=True)
+    num_dependent_backups = backups.columns.num_dependent_backups
+
+    backups.drop_column(num_dependent_backups)
object_count = Column(Integer)
temp_volume_id = Column(String(36))
temp_snapshot_id = Column(String(36))
+ num_dependent_backups = Column(Integer)
@validates('fail_reason')
def validate_fail_reason(self, key, fail_reason):
class Backup(base.CinderPersistentObject, base.CinderObject,
base.CinderObjectDictCompat):
# Version 1.0: Initial version
- VERSION = '1.0'
+ # Version 1.1: Add new field num_dependent_backups and extra fields
+ # is_incremental and has_dependent_backups.
+ VERSION = '1.1'
fields = {
'id': fields.UUIDField(),
'temp_volume_id': fields.StringField(nullable=True),
'temp_snapshot_id': fields.StringField(nullable=True),
+ 'num_dependent_backups': fields.IntegerField(),
}
- obj_extra_fields = ['name']
+ obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups']
@property
def name(self):
return CONF.backup_name_template % self.id
+ @property
+ def is_incremental(self):
+ return bool(self.parent_id)
+
+ @property
+ def has_dependent_backups(self):
+ return bool(self.num_dependent_backups)
+
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version."""
super(Backup, self).obj_make_compatible(primitive, target_version)
snapshot=False,
incremental=False,
parent_id=None,
- size=0, object_count=0, host='testhost'):
+ size=0, object_count=0, host='testhost',
+ num_dependent_backups=0):
"""Create a backup object."""
backup = {}
backup['volume_id'] = volume_id
backup['snapshot'] = snapshot
backup['incremental'] = incremental
backup['parent_id'] = parent_id
+ backup['num_dependent_backups'] = num_dependent_backups
return db.backup_create(context.get_admin_context(), backup)['id']
@staticmethod
self.assertEqual(0, res_dict['backup']['size'])
self.assertEqual('creating', res_dict['backup']['status'])
self.assertEqual(volume_id, res_dict['backup']['volume_id'])
+ self.assertFalse(res_dict['backup']['is_incremental'])
+ self.assertFalse(res_dict['backup']['has_dependent_backups'])
db.backup_destroy(context.get_admin_context(), backup_id)
db.volume_destroy(context.get_admin_context(), volume_id)
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
- self.assertEqual(12, len(res_dict['backups'][0]))
+ self.assertEqual(14, len(res_dict['backups'][0]))
self.assertEqual('az1', res_dict['backups'][0]['availability_zone'])
self.assertEqual('volumebackups',
res_dict['backups'][0]['container'])
self.assertEqual('creating', res_dict['backups'][0]['status'])
self.assertEqual('1', res_dict['backups'][0]['volume_id'])
- self.assertEqual(12, len(res_dict['backups'][1]))
+ self.assertEqual(14, len(res_dict['backups'][1]))
self.assertEqual('az1', res_dict['backups'][1]['availability_zone'])
self.assertEqual('volumebackups',
res_dict['backups'][1]['container'])
self.assertEqual('creating', res_dict['backups'][1]['status'])
self.assertEqual('1', res_dict['backups'][1]['volume_id'])
- self.assertEqual(12, len(res_dict['backups'][2]))
+ self.assertEqual(14, len(res_dict['backups'][2]))
self.assertEqual('az1', res_dict['backups'][2]['availability_zone'])
self.assertEqual('volumebackups',
res_dict['backups'][2]['container'])
backup = self.backup_api.get(self.context, backup_id)
self.assertRaises(exception.NotSupportedOperation,
self.backup_api.delete, self.context, backup, True)
+
+    def test_show_incremental_backup(self):
+        """GET /backups/<id> exposes is_incremental/has_dependent_backups.
+
+        Builds a three-level chain (parent -> backup -> child) and checks
+        the two derived fields for each link of the chain.
+        """
+        volume_id = utils.create_volume(self.context, size=5)['id']
+        parent_backup_id = self._create_backup(volume_id, status="available",
+                                               num_dependent_backups=1)
+        backup_id = self._create_backup(volume_id, status="available",
+                                        incremental=True,
+                                        parent_id=parent_backup_id,
+                                        num_dependent_backups=1)
+        child_backup_id = self._create_backup(volume_id, status="available",
+                                              incremental=True,
+                                              parent_id=backup_id)
+
+        # Middle backup: incremental (has a parent) and has a dependent child.
+        req = webob.Request.blank('/v2/fake/backups/%s' %
+                                  backup_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(200, res.status_int)
+        self.assertTrue(res_dict['backup']['is_incremental'])
+        self.assertTrue(res_dict['backup']['has_dependent_backups'])
+
+        # Root backup: full (no parent) but has a dependent child.
+        req = webob.Request.blank('/v2/fake/backups/%s' %
+                                  parent_backup_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(200, res.status_int)
+        self.assertFalse(res_dict['backup']['is_incremental'])
+        self.assertTrue(res_dict['backup']['has_dependent_backups'])
+
+        # Leaf backup: incremental, with no dependents of its own.
+        req = webob.Request.blank('/v2/fake/backups/%s' %
+                                  child_backup_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(200, res.status_int)
+        self.assertTrue(res_dict['backup']['is_incremental'])
+        self.assertFalse(res_dict['backup']['has_dependent_backups'])
+
+        # NOTE(review): parent_backup_id is not destroyed here — presumably
+        # acceptable for the fake DB fixture; confirm no cross-test leakage.
+        db.backup_destroy(context.get_admin_context(), child_backup_id)
+        db.backup_destroy(context.get_admin_context(), backup_id)
+        db.volume_destroy(context.get_admin_context(), volume_id)
'display_description': 'fake_description',
'service_metadata': 'fake_metadata',
'service': 'fake_service',
- 'object_count': 5
+ 'object_count': 5,
+ 'num_dependent_backups': 0,
}
for name, field in objects.Backup.fields.items():
self.assertEqual('3', backup.temp_snapshot_id)
def test_import_record(self):
- backup = objects.Backup(context=self.context, id=1)
+ backup = objects.Backup(context=self.context, id=1, parent_id=None,
+ num_dependent_backups=0)
export_string = backup.encode_record()
imported_backup = objects.Backup.decode_record(export_string)
self.assertDictEqual(dict(backup), imported_backup)
def test_import_record_additional_info(self):
- backup = objects.Backup(context=self.context, id=1)
+ backup = objects.Backup(context=self.context, id=1, parent_id=None,
+ num_dependent_backups=0)
extra_info = {'driver': {'key1': 'value1', 'key2': 'value2'}}
extra_info_copy = extra_info.copy()
export_string = backup.encode_record(extra_info=extra_info)
self.assertDictEqual(expected, imported_backup)
def test_import_record_additional_info_cant_overwrite(self):
- backup = objects.Backup(context=self.context, id=1)
+ backup = objects.Backup(context=self.context, id=1, parent_id=None,
+ num_dependent_backups=0)
export_string = backup.encode_record(id='fake_id')
imported_backup = objects.Backup.decode_record(export_string)
result = self.backup_mgr.check_support_to_force_delete(self.ctxt)
self.assertTrue(result)
+    def test_backup_has_dependent_backups(self):
+        """Test the has_dependent_backups property of a backup object.
+
+        A freshly created backup has no children, so the property must
+        report False.
+        """
+        vol_size = 1
+        vol_id = self._create_volume_db_entry(size=vol_size)
+        backup = self._create_backup_db_entry(volume_id=vol_id)
+        self.assertFalse(backup.has_dependent_backups)
+
class BackupTestCaseWithVerify(BaseBackupTest):
"""Test Case for backups."""
'size': 1000,
'object_count': 100,
'temp_volume_id': 'temp_volume_id',
- 'temp_snapshot_id': 'temp_snapshot_id', }
+ 'temp_snapshot_id': 'temp_snapshot_id',
+ 'num_dependent_backups': 0, }
if one:
return base_values
self.assertNotIn('object_current_version', services.c)
self.assertNotIn('object_available_version', services.c)
+    def _check_054(self, engine, data):
+        # Migration 054 must add the integer num_dependent_backups column
+        # to the backups table.
+        backups = db_utils.get_table(engine, 'backups')
+        self.assertIsInstance(backups.c.num_dependent_backups.type,
+                              sqlalchemy.types.INTEGER)
+
+    def _post_downgrade_054(self, engine):
+        # Downgrading migration 054 must drop the column again.
+        backups = db_utils.get_table(engine, 'backups')
+        self.assertNotIn('num_dependent_backups', backups.c)
+
def test_walk_versions(self):
self.walk_versions(True, False)
'cgsnapshot.test_suffix',
mock_usage.return_value)
+    def test_usage_from_backup(self):
+        """_usage_from_backup maps backup fields into notification keys."""
+        raw_backup = {
+            'project_id': '12b0330ec2584a',
+            'user_id': '158cba1b8c2bb6008e',
+            'availability_zone': 'nova',
+            'id': 'fake_id',
+            'host': 'fake_host',
+            'display_name': 'test_backup',
+            'created_at': '2014-12-11T10:10:00',
+            'status': 'available',
+            'volume_id': 'fake_volume_id',
+            'size': 1,
+            'service_metadata': None,
+            'service': 'cinder.backup.drivers.swift',
+            'fail_reason': None,
+            'parent_id': 'fake_parent_id',
+            'num_dependent_backups': 0,
+        }
+
+        # Make it easier to find out differences between raw and expected.
+        expected_backup = raw_backup.copy()
+        # The payload renames exactly two keys; every other field is
+        # passed through unchanged.
+        expected_backup['tenant_id'] = expected_backup.pop('project_id')
+        expected_backup['backup_id'] = expected_backup.pop('id')
+
+        usage_info = volume_utils._usage_from_backup(raw_backup)
+        self.assertEqual(expected_backup, usage_info)
+
class LVMVolumeDriverTestCase(test.TestCase):
def test_convert_blocksize_option(self):
def _usage_from_backup(backup_ref, **kw):
+    # Carry the dependent-backup count into the notification payload.
+    num_dependent_backups = backup_ref['num_dependent_backups']
    usage_info = dict(tenant_id=backup_ref['project_id'],
                      user_id=backup_ref['user_id'],
                      availability_zone=backup_ref['availability_zone'],
                      size=backup_ref['size'],
                      service_metadata=backup_ref['service_metadata'],
                      service=backup_ref['service'],
-                     fail_reason=backup_ref['fail_reason'])
+                     fail_reason=backup_ref['fail_reason'],
+                     parent_id=backup_ref['parent_id'],
+                     num_dependent_backups=num_dependent_backups,
+                     )
    usage_info.update(kw)
    return usage_info