From: Michal Dulko
Date: Tue, 21 Jul 2015 08:32:01 +0000 (+0200)
Subject: ConsistencyGroup Object
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=08ee6c4893359ccce9eaef45034d292c32b93529;p=openstack-build%2Fcinder-build.git

ConsistencyGroup Object

This patch adds a VersionedObjects abstraction layer to consistency
groups.

Change-Id: I3febb95bb2b3dd8f10da4f01ffa705e39cb20520
Partial-Implements: blueprint cinder-objects
Co-Authored-by: Daniel Tadrzak
Co-Authored-by: Michal Dulko
---

diff --git a/cinder/api/contrib/consistencygroups.py b/cinder/api/contrib/consistencygroups.py
index 284e6234c..54199b0f0 100644
--- a/cinder/api/contrib/consistencygroups.py
+++ b/cinder/api/contrib/consistencygroups.py
@@ -240,9 +240,7 @@ class ConsistencyGroupsController(wsgi.Controller):
         except exception.ConsistencyGroupNotFound as error:
             raise exc.HTTPNotFound(explanation=error.msg)
 
-        retval = self._view_builder.summary(
-            req,
-            dict(new_consistencygroup))
+        retval = self._view_builder.summary(req, new_consistencygroup)
         return retval
 
     @wsgi.response(202)
@@ -299,9 +297,7 @@ class ConsistencyGroupsController(wsgi.Controller):
         except exception.CinderException as error:
             raise exc.HTTPBadRequest(explanation=error.msg)
 
-        retval = self._view_builder.summary(
-            req,
-            dict(new_consistencygroup))
+        retval = self._view_builder.summary(req, new_consistencygroup)
         return retval
 
     @wsgi.serializers(xml=ConsistencyGroupTemplate)
diff --git a/cinder/api/views/consistencygroups.py b/cinder/api/views/consistencygroups.py
index 50a2b1297..4337c1c8e 100644
--- a/cinder/api/views/consistencygroups.py
+++ b/cinder/api/views/consistencygroups.py
@@ -42,8 +42,8 @@ class ViewBuilder(common.ViewBuilder):
         """Generic, non-detailed view of a consistency group."""
         return {
             'consistencygroup': {
-                'id': consistencygroup['id'],
-                'name': consistencygroup['name']
+                'id': consistencygroup.id,
+                'name': consistencygroup.name
             }
         }
 
@@ -51,12 +51,12 @@ class ViewBuilder(common.ViewBuilder):
         """Detailed view of a single consistency group."""
         return {
             'consistencygroup': {
-                'id': consistencygroup.get('id'),
-                'status': consistencygroup.get('status'),
-                'availability_zone': consistencygroup.get('availability_zone'),
-                'created_at': consistencygroup.get('created_at'),
-                'name': consistencygroup.get('name'),
-                'description': consistencygroup.get('description')
+                'id': consistencygroup.id,
+                'status': consistencygroup.status,
+                'availability_zone': consistencygroup.availability_zone,
+                'created_at': consistencygroup.created_at,
+                'name': consistencygroup.name,
+                'description': consistencygroup.description
             }
         }
 
diff --git a/cinder/cmd/scheduler.py b/cinder/cmd/scheduler.py
index 861e3578f..bea47e2b6 100644
--- a/cinder/cmd/scheduler.py
+++ b/cinder/cmd/scheduler.py
@@ -31,6 +31,7 @@ i18n.enable_lazy()
 
 # Need to register global_opts
 from cinder.common import config  # noqa
+from cinder import objects
 from cinder import service
 from cinder import utils
 from cinder import version
@@ -40,6 +41,7 @@ CONF = cfg.CONF
 
 
 def main():
+    objects.register_all()
     CONF(sys.argv[1:], project='cinder',
          version=version.version_string())
     logging.setup(CONF, "cinder")
diff --git a/cinder/consistencygroup/api.py b/cinder/consistencygroup/api.py
index f09eef7b5..bed1cd9a8 100644
--- a/cinder/consistencygroup/api.py
+++ b/cinder/consistencygroup/api.py
@@ -64,7 +64,8 @@ def check_policy(context, action, target_obj=None):
         'project_id': context.project_id,
         'user_id': context.user_id,
     }
-    target.update(target_obj or {})
+    target_obj = target_obj.fields if target_obj else {}
+    target.update(target_obj)
     _action = 'consistencygroup:%s' % action
     cinder.policy.enforce(context, _action, target)
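Note: check_policy() now builds its policy target from a VersionedObject
rather than a plain dict, so callers can hand over the object itself; the
reworked API.get() further down in this file does exactly that. A minimal
usage sketch, with the same names as in this patch:

    group = objects.ConsistencyGroup.get_by_id(context, group_id)
    check_policy(context, 'get', group)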
@@ -108,7 +109,6 @@ class API(base.Base):
 
     def create(self, context, name, description, cg_volume_types,
                availability_zone=None):
-
         check_policy(context, 'create')
 
         volume_type_list = None
@@ -126,18 +126,17 @@ class API(base.Base):
             req_volume_type_ids = None
 
         availability_zone = self._extract_availability_zone(availability_zone)
-
-        options = {'user_id': context.user_id,
-                   'project_id': context.project_id,
-                   'availability_zone': availability_zone,
-                   'status': "creating",
-                   'name': name,
-                   'description': description,
-                   'volume_type_id': req_volume_type_ids}
-
+        kwargs = {'user_id': context.user_id,
+                  'project_id': context.project_id,
+                  'availability_zone': availability_zone,
+                  'status': "creating",
+                  'name': name,
+                  'description': description,
+                  'volume_type_id': req_volume_type_ids}
         group = None
         try:
-            group = self.db.consistencygroup_create(context, options)
+            group = objects.ConsistencyGroup(context=context, **kwargs)
+            group.create()
         except Exception:
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE("Error occurred when creating consistency group"
@@ -147,25 +146,23 @@ class API(base.Base):
         filter_properties_list = []
         for req_volume_type in req_volume_types:
             request_spec = {'volume_type': req_volume_type.copy(),
-                            'consistencygroup_id': group['id']}
+                            'consistencygroup_id': group.id}
             filter_properties = {}
             request_spec_list.append(request_spec)
             filter_properties_list.append(filter_properties)
 
         # Update quota for consistencygroups
-        self.update_quota(context, group['id'], 1)
+        self.update_quota(context, group, 1)
 
-        self._cast_create_consistencygroup(context, group['id'],
+        self._cast_create_consistencygroup(context, group,
                                            request_spec_list,
                                            filter_properties_list)
         return group
 
     def create_from_src(self, context, name, description=None,
-                        cgsnapshot_id=None,
-                        source_cgid=None):
+                        cgsnapshot_id=None, source_cgid=None):
         check_policy(context, 'create')
-
         cgsnapshot = None
         orig_cg = None
         if cgsnapshot_id:
@@ -177,15 +174,14 @@ class API(base.Base):
                               "creating consistency group %(cg)s from "
                               "source."),
                           {'cg': name, 'cgsnap': cgsnapshot_id})
-            orig_cg = self.db.consistencygroup_get(
-                context,
-                cgsnapshot['consistencygroup_id'])
+            orig_cg = objects.ConsistencyGroup.get_by_id(
+                context, cgsnapshot['consistencygroup_id'])
 
         source_cg = None
         if source_cgid:
             try:
-                source_cg = self.db.consistencygroup_get(
-                    context, source_cgid)
+                source_cg = objects.ConsistencyGroup.get_by_id(context,
+                                                               source_cgid)
             except exception.ConsistencyGroupNotFound:
                 with excutils.save_and_reraise_exception():
                     LOG.error(_LE("Source CG %(source_cg) not found when "
@@ -193,27 +189,30 @@ class API(base.Base):
                                   "source."),
                               {'cg': name, 'source_cg': source_cgid})
 
-        options = {'user_id': context.user_id,
-                   'project_id': context.project_id,
-                   'status': "creating",
-                   'name': name,
-                   'description': description,
-                   'cgsnapshot_id': cgsnapshot_id,
-                   'source_cgid': source_cgid}
+        kwargs = {
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'status': "creating",
+            'name': name,
+            'description': description,
+            'cgsnapshot_id': cgsnapshot_id,
+            'source_cgid': source_cgid,
+        }
 
         if orig_cg:
-            options['volume_type_id'] = orig_cg.get('volume_type_id')
-            options['availability_zone'] = orig_cg.get('availability_zone')
-            options['host'] = orig_cg.get('host')
+            kwargs['volume_type_id'] = orig_cg.volume_type_id
+            kwargs['availability_zone'] = orig_cg.availability_zone
+            kwargs['host'] = orig_cg.host
         if source_cg:
-            options['volume_type_id'] = source_cg.get('volume_type_id')
-            options['availability_zone'] = source_cg.get('availability_zone')
-            options['host'] = source_cg.get('host')
+            kwargs['volume_type_id'] = source_cg.get('volume_type_id')
+            kwargs['availability_zone'] = source_cg.get('availability_zone')
+            kwargs['host'] = source_cg.get('host')
 
         group = None
         try:
-            group = self.db.consistencygroup_create(context, options)
+            group = objects.ConsistencyGroup(context=context, **kwargs)
+            group.create()
         except Exception:
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE("Error occurred when creating consistency group"
@@ -221,10 +220,10 @@ class API(base.Base):
                           {'cg': name, 'cgsnap': cgsnapshot_id})
 
         # Update quota for consistencygroups
-        self.update_quota(context, group['id'], 1)
+        self.update_quota(context, group, 1)
 
-        if not group['host']:
-            msg = _("No host to create consistency group %s.") % group['id']
+        if not group.host:
+            msg = _("No host to create consistency group %s.") % group.id
             LOG.error(msg)
             raise exception.InvalidConsistencyGroup(reason=msg)
 
@@ -247,7 +246,7 @@ class API(base.Base):
 
             for snapshot in snapshots:
                 kwargs = {}
-                kwargs['availability_zone'] = group.get('availability_zone')
+                kwargs['availability_zone'] = group.availability_zone
                 kwargs['cgsnapshot'] = cgsnapshot
                 kwargs['consistencygroup'] = group
                 kwargs['snapshot'] = snapshot
@@ -273,34 +272,33 @@ class API(base.Base):
                                       "entry from snapshot in the process of "
                                       "creating consistency group %(group)s "
                                       "from cgsnapshot %(cgsnap)s."),
-                                  {'group': group['id'],
+                                  {'group': group.id,
                                    'cgsnap': cgsnapshot['id']})
         except Exception:
             with excutils.save_and_reraise_exception():
                 try:
-                    self.db.consistencygroup_destroy(context.elevated(),
-                                                     group['id'])
+                    group.destroy()
                 finally:
                     LOG.error(_LE("Error occurred when creating consistency "
                                   "group %(group)s from cgsnapshot "
                                   "%(cgsnap)s."),
-                              {'group': group['id'],
+                              {'group': group.id,
                                'cgsnap': cgsnapshot['id']})
 
         volumes = self.db.volume_get_all_by_group(context,
-                                                  group['id'])
+                                                  group.id)
         for vol in volumes:
             # Update the host field for the volume.
             self.db.volume_update(context, vol['id'],
                                   {'host': group.get('host')})
 
         self.volume_rpcapi.create_consistencygroup_from_src(
-            context, group, group['host'], cgsnapshot)
+            context, group, cgsnapshot)
 
     def _create_cg_from_source_cg(self, context, group, source_cg):
         try:
             source_vols = self.db.volume_get_all_by_group(context,
-                                                          source_cg['id'])
+                                                          source_cg.id)
 
             if not source_vols:
                 msg = _("Source CG is empty. No consistency group "
No consistency group " @@ -309,7 +307,7 @@ class API(base.Base): for source_vol in source_vols: kwargs = {} - kwargs['availability_zone'] = group.get('availability_zone') + kwargs['availability_zone'] = group.availability_zone kwargs['source_cg'] = source_cg kwargs['consistencygroup'] = group kwargs['source_volume'] = source_vol @@ -335,31 +333,30 @@ class API(base.Base): "volume in the process of creating " "consistency group %(group)s from " "source CG %(source_cg)s."), - {'group': group['id'], - 'source_cg': source_cg['id']}) + {'group': group.id, + 'source_cg': source_cg.id}) except Exception: with excutils.save_and_reraise_exception(): try: - self.db.consistencygroup_destroy(context.elevated(), - group['id']) + group.destroy() finally: LOG.error(_LE("Error occurred when creating consistency " "group %(group)s from source CG " "%(source_cg)s."), - {'group': group['id'], - 'source_cg': source_cg['id']}) + {'group': group.id, + 'source_cg': source_cg.id}) volumes = self.db.volume_get_all_by_group(context, - group['id']) + group.id) for vol in volumes: # Update the host field for the volume. self.db.volume_update(context, vol['id'], - {'host': group.get('host')}) + {'host': group.host}) - self.volume_rpcapi.create_consistencygroup_from_src( - context, group, group['host'], None, source_cg) + self.volume_rpcapi.create_consistencygroup_from_src(context, group, + None, source_cg) - def _cast_create_consistencygroup(self, context, group_id, + def _cast_create_consistencygroup(self, context, group, request_spec_list, filter_properties_list): @@ -399,22 +396,22 @@ class API(base.Base): except Exception: with excutils.save_and_reraise_exception(): try: - self.db.consistencygroup_destroy(context, group_id) + group.destroy() finally: LOG.error(_LE("Error occurred when building " "request spec list for consistency group " - "%s."), group_id) + "%s."), group.id) # Cast to the scheduler and let it handle whatever is needed # to select the target host for this group. self.scheduler_rpcapi.create_consistencygroup( context, CONF.volume_topic, - group_id, + group, request_spec_list=request_spec_list, filter_properties_list=filter_properties_list) - def update_quota(self, context, group_id, num, project_id=None): + def update_quota(self, context, group, num, project_id=None): reserve_opts = {'consistencygroups': num} try: reservations = CGQUOTAS.reserve(context, @@ -425,50 +422,49 @@ class API(base.Base): except Exception: with excutils.save_and_reraise_exception(): try: - self.db.consistencygroup_destroy(context.elevated(), - group_id) + group.destroy() finally: LOG.error(_LE("Failed to update quota for " - "consistency group %s."), group_id) + "consistency group %s."), group.id) @wrap_check_policy def delete(self, context, group, force=False): - if not group['host']: - self.update_quota(context, group['id'], -1, group['project_id']) + if not group.host: + self.update_quota(context, group, -1, group.project_id) LOG.debug("No host for consistency group %s. 
Deleting from " - "the database.", group['id']) - self.db.consistencygroup_destroy(context.elevated(), group['id']) + "the database.", group.id) + group.destroy() return - if not force and group['status'] not in ["available", "error"]: + if not force and group.status not in ["available", "error"]: msg = _("Consistency group status must be available or error, " - "but current status is: %s") % group['status'] + "but current status is: %s") % group.status raise exception.InvalidConsistencyGroup(reason=msg) cgsnaps = self.db.cgsnapshot_get_all_by_group( context.elevated(), - group['id']) + group.id) if cgsnaps: msg = _("Consistency group %s still has dependent " - "cgsnapshots.") % group['id'] + "cgsnapshots.") % group.id LOG.error(msg) raise exception.InvalidConsistencyGroup(reason=msg) volumes = self.db.volume_get_all_by_group(context.elevated(), - group['id']) + group.id) if volumes and not force: msg = _("Consistency group %s still contains volumes. " - "The force flag is required to delete it.") % group['id'] + "The force flag is required to delete it.") % group.id LOG.error(msg) raise exception.InvalidConsistencyGroup(reason=msg) for volume in volumes: if volume['attach_status'] == "attached": msg = _("Volume in consistency group %s is attached. " - "Need to detach first.") % group['id'] + "Need to detach first.") % group.id LOG.error(msg) raise exception.InvalidConsistencyGroup(reason=msg) @@ -480,19 +476,18 @@ class API(base.Base): LOG.error(msg) raise exception.InvalidConsistencyGroup(reason=msg) - now = timeutils.utcnow() - self.db.consistencygroup_update(context, group['id'], - {'status': 'deleting', - 'terminated_at': now}) + group.status = 'deleting' + group.terminated_at = timeutils.utcnow() + group.save() self.volume_rpcapi.delete_consistencygroup(context, group) def update(self, context, group, name, description, add_volumes, remove_volumes): """Update consistency group.""" - if group['status'] not in ["available"]: + if group.status != 'available': msg = _("Consistency group status must be available, " - "but current status is: %s.") % group['status'] + "but current status is: %s.") % group.status raise exception.InvalidConsistencyGroup(reason=msg) add_volumes_list = [] @@ -513,14 +508,14 @@ class API(base.Base): "list.") % invalid_uuids raise exception.InvalidVolume(reason=msg) - volumes = self.db.volume_get_all_by_group(context, group['id']) + volumes = self.db.volume_get_all_by_group(context, group.id) # Validate name. - if not name or name == group['name']: + if not name or name == group.name: name = None # Validate description. - if not description or description == group['description']: + if not description or description == group.description: description = None # Validate volumes in add_volumes and remove_volumes. @@ -538,11 +533,10 @@ class API(base.Base): msg = (_("Cannot update consistency group %(group_id)s " "because no valid name, description, add_volumes, " "or remove_volumes were provided.") % - {'group_id': group['id']}) + {'group_id': group.id}) raise exception.InvalidConsistencyGroup(reason=msg) - now = timeutils.utcnow() - fields = {'updated_at': now} + fields = {'updated_at': timeutils.utcnow()} # Update name and description in db now. No need to # to send them over through an RPC call. @@ -556,7 +550,8 @@ class API(base.Base): else: fields['status'] = 'updating' - self.db.consistencygroup_update(context, group['id'], fields) + group.update(fields) + group.save() # Do an RPC call only if the update request includes # adding/removing volumes. 
@@ -579,7 +574,7 @@ class API(base.Base):
                          "is in an invalid state: %(status)s. Valid "
                          "states are: %(valid)s.") %
                        {'volume_id': volume['id'],
-                        'group_id': group['id'],
+                        'group_id': group.id,
                         'status': volume['status'],
                         'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
                 raise exception.InvalidVolume(reason=msg)
@@ -594,7 +589,7 @@ class API(base.Base):
                          "consistency group %(group_id)s because it "
                          "is not in the group.") %
                        {'volume_id': rem_vol,
-                        'group_id': group['id']})
+                        'group_id': group.id})
                 raise exception.InvalidVolume(reason=msg)
 
         return remove_volumes_new
@@ -614,7 +609,7 @@ class API(base.Base):
                          "group %(group_id)s because volume cannot be "
                          "found.") %
                        {'volume_id': add_vol,
-                        'group_id': group['id']})
+                        'group_id': group.id})
                 raise exception.InvalidVolume(reason=msg)
             if add_vol_ref:
                 add_vol_type_id = add_vol_ref.get('volume_type_id', None)
@@ -623,15 +618,15 @@ class API(base.Base):
                              "group %(group_id)s because it has no volume "
                              "type.") %
                            {'volume_id': add_vol_ref['id'],
-                            'group_id': group['id']})
+                            'group_id': group.id})
                     raise exception.InvalidVolume(reason=msg)
-                if add_vol_type_id not in group['volume_type_id']:
+                if add_vol_type_id not in group.volume_type_id:
                     msg = (_("Cannot add volume %(volume_id)s to consistency "
                              "group %(group_id)s because volume type "
                              "%(volume_type)s is not supported by the "
                              "group.") %
                            {'volume_id': add_vol_ref['id'],
-                            'group_id': group['id'],
+                            'group_id': group.id,
                             'volume_type': add_vol_type_id})
                     raise exception.InvalidVolume(reason=msg)
                 if (add_vol_ref['status'] not in
@@ -641,16 +636,16 @@ class API(base.Base):
                              "invalid state: %(status)s. Valid states are: "
                              "%(valid)s.") %
                            {'volume_id': add_vol_ref['id'],
-                            'group_id': group['id'],
+                            'group_id': group.id,
                             'status': add_vol_ref['status'],
                             'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
                     raise exception.InvalidVolume(reason=msg)
 
-                # group['host'] and add_vol_ref['host'] are in this format:
+                # group.host and add_vol_ref['host'] are in this format:
                 # 'host@backend#pool'. Extract host (host@backend) before
                 # doing comparison.
                 vol_host = vol_utils.extract_host(add_vol_ref['host'])
-                group_host = vol_utils.extract_host(group['host'])
+                group_host = vol_utils.extract_host(group.host)
                 if group_host != vol_host:
                     raise exception.InvalidVolume(
                         reason=_("Volume is not local to this node."))
@@ -664,14 +659,13 @@ class API(base.Base):
                 msg = (_("Cannot add volume %(volume_id)s to consistency "
                          "group %(group_id)s because volume does not exist.") %
                        {'volume_id': add_vol_ref['id'],
-                        'group_id': group['id']})
+                        'group_id': group.id})
                 raise exception.InvalidVolume(reason=msg)
 
         return add_volumes_new
 
     def get(self, context, group_id):
-        rv = self.db.consistencygroup_get(context, group_id)
-        group = dict(rv)
+        group = objects.ConsistencyGroup.get_by_id(context, group_id)
         check_policy(context, 'get', group)
         return group
 
@@ -695,14 +689,10 @@ class API(base.Base):
         LOG.debug("Searching by: %s", filters)
 
         if (context.is_admin and 'all_tenants' in filters):
-            # Need to remove all_tenants to pass the filtering below.
-            del filters['all_tenants']
-            groups = self.db.consistencygroup_get_all(context)
+            groups = objects.ConsistencyGroupList.get_all(context)
         else:
-            groups = self.db.consistencygroup_get_all_by_project(
-                context,
-                context.project_id)
-
+            groups = objects.ConsistencyGroupList.get_all_by_project(
+                context, context.project_id)
         return groups
 
     def create_cgsnapshot(self, context,
@@ -755,11 +745,10 @@ class API(base.Base):
             raise exception.InvalidCgSnapshot(reason=msg)
         self.db.cgsnapshot_update(context, cgsnapshot['id'],
                                   {'status': 'deleting'})
-        group = self.db.consistencygroup_get(
-            context,
-            cgsnapshot['consistencygroup_id'])
+        group = objects.ConsistencyGroup.get_by_id(context, cgsnapshot[
+            'consistencygroup_id'])
         self.volume_rpcapi.delete_cgsnapshot(context.elevated(), cgsnapshot,
-                                             group['host'])
+                                             group.host)
 
     def update_cgsnapshot(self, context, cgsnapshot, fields):
         self.db.cgsnapshot_update(context, cgsnapshot['id'], fields)
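Note: listing now goes through ConsistencyGroupList instead of raw db rows;
the list class mixes in ObjectListBase, so callers can keep iterating and
indexing as before. A hedged consumer sketch (hypothetical loop, not part of
the patch):

    groups = objects.ConsistencyGroupList.get_all_by_project(
        context, context.project_id)
    for group in groups:
        # each element is a full ConsistencyGroup object
        LOG.debug("cg %(id)s is %(status)s",
                  {'id': group.id, 'status': group.status})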
diff --git a/cinder/objects/__init__.py b/cinder/objects/__init__.py
index 1efca3358..2cdec1b55 100644
--- a/cinder/objects/__init__.py
+++ b/cinder/objects/__init__.py
@@ -27,3 +27,4 @@ def register_all():
     __import__('cinder.objects.volume')
     __import__('cinder.objects.snapshot')
     __import__('cinder.objects.backup')
+    __import__('cinder.objects.consistencygroup')
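Note: register_all() imports each object module so the classes land in the
object registry before any RPC payload is deserialized; that is why
cmd/scheduler.py above gains an objects.register_all() call. Sketch of the
startup pattern this implies:

    from cinder import objects

    def main():
        # Register ConsistencyGroup (and every other versioned object)
        # before any RPC client/server using CinderObjectSerializer exists.
        objects.register_all()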
diff --git a/cinder/objects/consistencygroup.py b/cinder/objects/consistencygroup.py
new file mode 100644
index 000000000..16f3d6830
--- /dev/null
+++ b/cinder/objects/consistencygroup.py
@@ -0,0 +1,105 @@
+# Copyright 2015 Yahoo Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinder import db
+from cinder import exception
+from cinder.i18n import _
+from cinder import objects
+from cinder.objects import base
+from oslo_versionedobjects import fields
+
+
+@base.CinderObjectRegistry.register
+class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject,
+                       base.CinderObjectDictCompat):
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.UUIDField(),
+        'user_id': fields.UUIDField(),
+        'project_id': fields.UUIDField(),
+        'host': fields.StringField(nullable=True),
+        'availability_zone': fields.StringField(nullable=True),
+        'name': fields.StringField(nullable=True),
+        'description': fields.StringField(nullable=True),
+        'volume_type_id': fields.UUIDField(nullable=True),
+        'status': fields.StringField(nullable=True),
+        'cgsnapshot_id': fields.UUIDField(nullable=True),
+        'source_cgid': fields.UUIDField(nullable=True),
+    }
+
+    @staticmethod
+    def _from_db_object(context, consistencygroup, db_consistencygroup):
+        for name, field in consistencygroup.fields.items():
+            value = db_consistencygroup.get(name)
+            setattr(consistencygroup, name, value)
+
+        consistencygroup._context = context
+        consistencygroup.obj_reset_changes()
+        return consistencygroup
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_consistencygroup = db.consistencygroup_get(context, id)
+        return cls._from_db_object(context, cls(context),
+                                   db_consistencygroup)
+
+    @base.remotable
+    def create(self):
+        if self.obj_attr_is_set('id'):
+            raise exception.ObjectActionError(action='create',
+                                              reason=_('already_created'))
+        updates = self.cinder_obj_get_changes()
+        db_consistencygroups = db.consistencygroup_create(self._context,
+                                                          updates)
+        self._from_db_object(self._context, self, db_consistencygroups)
+
+    @base.remotable
+    def save(self):
+        updates = self.cinder_obj_get_changes()
+        if updates:
+            db.consistencygroup_update(self._context, self.id, updates)
+            self.obj_reset_changes()
+
+    @base.remotable
+    def destroy(self):
+        with self.obj_as_admin():
+            db.consistencygroup_destroy(self._context, self.id)
+
+
+@base.CinderObjectRegistry.register
+class ConsistencyGroupList(base.ObjectListBase, base.CinderObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('ConsistencyGroup')
+    }
+    child_version = {
+        '1.0': '1.0'
+    }
+
+    @base.remotable_classmethod
+    def get_all(cls, context):
+        consistencygroups = db.consistencygroup_get_all(context)
+        return base.obj_make_list(context, cls(context),
+                                  objects.ConsistencyGroup,
+                                  consistencygroups)
+
+    @base.remotable_classmethod
+    def get_all_by_project(cls, context, project_id):
+        consistencygroups = db.consistencygroup_get_all_by_project(
+            context, project_id)
+        return base.obj_make_list(context, cls(context),
+                                  objects.ConsistencyGroup,
+                                  consistencygroups)
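Note: the object wraps the consistencygroup_* db API one-to-one: create()
maps to db.consistencygroup_create, save() to db.consistencygroup_update
(sending only the changed fields), and destroy() to
db.consistencygroup_destroy under an elevated admin context. A minimal
lifecycle sketch under those assumptions:

    group = objects.ConsistencyGroup(context=ctxt, name='cg1',
                                     status='creating')
    group.create()              # INSERT; ObjectActionError if 'id' is set
    group.status = 'available'
    group.save()                # UPDATE carries just {'status': 'available'}
    group.destroy()             # soft-delete, done with admin privileges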
""" - now = timeutils.utcnow() - values = {'host': host, 'updated_at': now} - return db.consistencygroup_update(context, group_id, values) + group.update({'host': host, 'updated_at': timeutils.utcnow()}) + group.save() + return group class Scheduler(object): @@ -101,7 +100,7 @@ class Scheduler(object): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement schedule_create_volume")) - def schedule_create_consistencygroup(self, context, group_id, + def schedule_create_consistencygroup(self, context, group, request_spec_list, filter_properties_list): """Must override schedule method for scheduler to work.""" diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py index 45e2cb86c..3e1a81caf 100644 --- a/cinder/scheduler/filter_scheduler.py +++ b/cinder/scheduler/filter_scheduler.py @@ -61,7 +61,7 @@ class FilterScheduler(driver.Scheduler): filter_properties['metadata'] = vol.get('metadata') filter_properties['qos_specs'] = vol.get('qos_specs') - def schedule_create_consistencygroup(self, context, group_id, + def schedule_create_consistencygroup(self, context, group, request_spec_list, filter_properties_list): @@ -75,7 +75,7 @@ class FilterScheduler(driver.Scheduler): host = weighed_host.obj.host - updated_group = driver.group_update_db(context, group_id, host) + updated_group = driver.group_update_db(context, group, host) self.volume_rpcapi.create_consistencygroup(context, updated_group, host) diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py index 87ab67a44..352db2376 100644 --- a/cinder/scheduler/manager.py +++ b/cinder/scheduler/manager.py @@ -55,7 +55,7 @@ LOG = logging.getLogger(__name__) class SchedulerManager(manager.Manager): """Chooses a host to create volumes.""" - RPC_API_VERSION = '1.7' + RPC_API_VERSION = '1.8' target = messaging.Target(version=RPC_API_VERSION) @@ -90,29 +90,29 @@ class SchedulerManager(manager.Manager): eventlet.sleep(1) def create_consistencygroup(self, context, topic, - group_id, + group, request_spec_list=None, filter_properties_list=None): self._wait_for_scheduler() try: self.driver.schedule_create_consistencygroup( - context, group_id, + context, group, request_spec_list, filter_properties_list) except exception.NoValidHost: LOG.error(_LE("Could not find a host for consistency group " "%(group_id)s."), - {'group_id': group_id}) - db.consistencygroup_update(context, group_id, - {'status': 'error'}) + {'group_id': group.id}) + group.status = 'error' + group.save() except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Failed to create consistency group " "%(group_id)s."), - {'group_id': group_id}) - db.consistencygroup_update(context, group_id, - {'status': 'error'}) + {'group_id': group.id}) + group.status = 'error' + group.save() def create_volume(self, context, topic, volume_id, snapshot_id=None, image_id=None, request_spec=None, diff --git a/cinder/scheduler/rpcapi.py b/cinder/scheduler/rpcapi.py index 12dfb0d7a..d21080102 100644 --- a/cinder/scheduler/rpcapi.py +++ b/cinder/scheduler/rpcapi.py @@ -41,6 +41,7 @@ class SchedulerAPI(object): 1.5 - Add manage_existing method 1.6 - Add create_consistencygroup method 1.7 - Add get_active_pools method + 1.8 - Add sending object over RPC in create_consistencygroup method """ RPC_API_VERSION = '1.0' @@ -50,14 +51,14 @@ class SchedulerAPI(object): target = messaging.Target(topic=CONF.scheduler_topic, version=self.RPC_API_VERSION) serializer = objects_base.CinderObjectSerializer() - 
diff --git a/cinder/scheduler/rpcapi.py b/cinder/scheduler/rpcapi.py
index 12dfb0d7a..d21080102 100644
--- a/cinder/scheduler/rpcapi.py
+++ b/cinder/scheduler/rpcapi.py
@@ -41,6 +41,7 @@ class SchedulerAPI(object):
         1.5 - Add manage_existing method
         1.6 - Add create_consistencygroup method
         1.7 - Add get_active_pools method
+        1.8 - Add sending object over RPC in create_consistencygroup method
     """
 
     RPC_API_VERSION = '1.0'
@@ -50,14 +51,14 @@ class SchedulerAPI(object):
         target = messaging.Target(topic=CONF.scheduler_topic,
                                   version=self.RPC_API_VERSION)
         serializer = objects_base.CinderObjectSerializer()
-        self.client = rpc.get_client(target, version_cap='1.7',
+        self.client = rpc.get_client(target, version_cap='1.8',
                                      serializer=serializer)
 
-    def create_consistencygroup(self, ctxt, topic, group_id,
+    def create_consistencygroup(self, ctxt, topic, group,
                                 request_spec_list=None,
                                 filter_properties_list=None):
 
-        cctxt = self.client.prepare(version='1.6')
+        cctxt = self.client.prepare(version='1.8')
         request_spec_p_list = []
         for request_spec in request_spec_list:
             request_spec_p = jsonutils.to_primitive(request_spec)
@@ -65,7 +66,7 @@ class SchedulerAPI(object):
 
         return cctxt.cast(ctxt, 'create_consistencygroup',
                           topic=topic,
-                          group_id=group_id,
+                          group=group,
                           request_spec_list=request_spec_p_list,
                          filter_properties_list=filter_properties_list)
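Note: the payload change (group_id -> group) is what forces the 1.7 -> 1.8
bump; CinderObjectSerializer can only deliver the object to schedulers that
speak 1.8. The patch pins version_cap='1.8' outright; a rolling-upgrade
friendly variant would look roughly like this (hypothetical, not in this
patch):

    if self.client.can_send_version('1.8'):
        cctxt = self.client.prepare(version='1.8')
        msg_kwargs = {'group': group}
    else:
        cctxt = self.client.prepare(version='1.6')
        msg_kwargs = {'group_id': group.id}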
diff --git a/cinder/tests/unit/api/contrib/test_consistencygroups.py b/cinder/tests/unit/api/contrib/test_consistencygroups.py
index 4abba1a88..c3c396b5c 100644
--- a/cinder/tests/unit/api/contrib/test_consistencygroups.py
+++ b/cinder/tests/unit/api/contrib/test_consistencygroups.py
@@ -28,6 +28,7 @@ from cinder import context
 from cinder import db
 from cinder import exception
 from cinder.i18n import _
+from cinder import objects
 from cinder import test
 from cinder.tests.unit.api import fakes
 from cinder.tests.unit.api.v2 import stubs
@@ -411,8 +412,8 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
         cg = db.consistencygroup_get(
             context.get_admin_context(read_deleted='yes'),
             consistencygroup_id)
-        self.assertEqual(cg['status'], 'deleted')
-        self.assertEqual(cg['host'], None)
+        self.assertEqual('deleted', cg['status'])
+        self.assertIsNone(cg['host'])
 
     def test_create_delete_consistencygroup_update_quota(self):
         ctxt = context.RequestContext('fake', 'fake', auth_token=True)
@@ -429,19 +430,18 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
         cg = self.cg_api.create(ctxt, name, description, fake_type['name'])
         self.cg_api.update_quota.assert_called_once_with(
-            ctxt, cg['id'], 1)
-        self.assertEqual(cg['status'], 'creating')
-        self.assertEqual(cg['host'], None)
+            ctxt, cg, 1)
+        self.assertEqual('creating', cg['status'])
+        self.assertIsNone(cg['host'])
 
         self.cg_api.update_quota.reset_mock()
-
-        cg['status'] = 'error'
+        cg.status = 'error'
         self.cg_api.delete(ctxt, cg)
         self.cg_api.update_quota.assert_called_once_with(
-            ctxt, cg['id'], -1, ctxt.project_id)
-        cg = db.consistencygroup_get(
+            ctxt, cg, -1, ctxt.project_id)
+        cg = objects.ConsistencyGroup.get_by_id(
             context.get_admin_context(read_deleted='yes'),
-            cg['id'])
-        self.assertEqual(cg['status'], 'deleted')
+            cg.id)
+        self.assertEqual('deleted', cg['status'])
 
     def test_delete_consistencygroup_with_invalid_body(self):
         consistencygroup_id = self._create_consistencygroup(status='available')
diff --git a/cinder/tests/unit/fake_consistencygroup.py b/cinder/tests/unit/fake_consistencygroup.py
new file mode 100644
index 000000000..ed52e63fc
--- /dev/null
+++ b/cinder/tests/unit/fake_consistencygroup.py
@@ -0,0 +1,48 @@
+# Copyright 2015 SimpliVity Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_versionedobjects import fields
+
+from cinder import objects
+
+
+def fake_db_consistencygroup(**updates):
+    db_values = {
+        'id': 1,
+        'user_id': 2,
+        'project_id': 3,
+        'host': 'FakeHost',
+    }
+    for name, field in objects.ConsistencyGroup.fields.items():
+        if name in db_values:
+            continue
+        if field.nullable:
+            db_values[name] = None
+        elif field.default != fields.UnspecifiedDefault:
+            db_values[name] = field.default
+        else:
+            raise Exception('fake_db_consistencygroup needs help with %s' %
+                            name)
+
+    if updates:
+        db_values.update(updates)
+
+    return db_values
+
+
+def fake_consistencyobject_obj(context, **updates):
+    return objects.ConsistencyGroup._from_db_object(context,
+                                                    objects.ConsistencyGroup(),
+                                                    fake_db_consistencygroup(
+                                                        **updates))
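Note: fake_consistencyobject_obj() builds a fully populated in-memory object
without touching the database, which the scheduler and volume-flow tests
below rely on, e.g.:

    cg = fake_consistencygroup.fake_consistencyobject_obj(
        self.context, host=None)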
diff --git a/cinder/tests/unit/objects/test_consistencygroup.py b/cinder/tests/unit/objects/test_consistencygroup.py
new file mode 100644
index 000000000..0f24a30a2
--- /dev/null
+++ b/cinder/tests/unit/objects/test_consistencygroup.py
@@ -0,0 +1,121 @@
+# Copyright 2015 Yahoo Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from cinder import context
+from cinder import exception
+from cinder import objects
+from cinder.tests.unit import objects as test_objects
+
+fake_consistencygroup = {
+    'id': '1',
+    'user_id': 'fake_user_id',
+    'project_id': 'fake_project_id',
+    'host': 'fake_host',
+    'availability_zone': 'fake_az',
+    'name': 'fake_name',
+    'description': 'fake_description',
+    'volume_type_id': 'fake_volume_type_id',
+    'status': 'creating',
+    'cgsnapshot_id': 'fake_id',
+    'source_cgid': None,
+}
+
+
+class TestConsistencyGroup(test_objects.BaseObjectsTestCase):
+    def setUp(self):
+        super(TestConsistencyGroup, self).setUp()
+        # NOTE (e0ne): base tests contain the original RequestContext from
+        # oslo_context. We change it to our RequestContext implementation
+        # to have the 'elevated' method.
+        self.user_id = 123
+        self.project_id = 456
+        self.context = context.RequestContext(self.user_id, self.project_id,
+                                              is_admin=False)
+
+    @staticmethod
+    def _compare(test, db, obj):
+        for field, value in db.items():
+            test.assertEqual(db[field], getattr(obj, field))
+
+    @mock.patch('cinder.db.consistencygroup_get',
+                return_value=fake_consistencygroup)
+    def test_get_by_id(self, consistencygroup_get):
+        consistencygroup = objects.ConsistencyGroup.get_by_id(self.context, 1)
+        self._compare(self, fake_consistencygroup, consistencygroup)
+
+    @mock.patch('cinder.db.sqlalchemy.api.model_query')
+    def test_get_by_id_no_existing_id(self, model_query):
+        query = mock.Mock()
+        filter_by = mock.Mock()
+        filter_by.first.return_value = None
+        query.filter_by.return_value = filter_by
+        model_query.return_value = query
+        self.assertRaises(exception.ConsistencyGroupNotFound,
+                          objects.ConsistencyGroup.get_by_id, self.context,
+                          123)
+
+    @mock.patch('cinder.db.consistencygroup_create',
+                return_value=fake_consistencygroup)
+    def test_create(self, consistencygroup_create):
+        fake_cg = fake_consistencygroup.copy()
+        del fake_cg['id']
+        consistencygroup = objects.ConsistencyGroup(context=self.context,
+                                                    **fake_cg)
+        consistencygroup.create()
+        self._compare(self, fake_consistencygroup, consistencygroup)
+
+    def test_create_with_id_except_exception(self):
+        consistencygroup = objects.ConsistencyGroup(context=self.context,
+                                                    **{'id': 1})
+        self.assertRaises(exception.ObjectActionError, consistencygroup.create)
+
+    @mock.patch('cinder.db.consistencygroup_update')
+    def test_save(self, consistencygroup_update):
+        consistencygroup = objects.ConsistencyGroup._from_db_object(
+            self.context, objects.ConsistencyGroup(), fake_consistencygroup)
+        consistencygroup.status = 'active'
+        consistencygroup.save()
+        consistencygroup_update.assert_called_once_with(self.context,
+                                                        consistencygroup.id,
+                                                        {'status': 'active'})
+
+    @mock.patch('cinder.db.consistencygroup_destroy')
+    def test_destroy(self, consistencygroup_destroy):
+        consistencygroup = objects.ConsistencyGroup(context=self.context,
+                                                    id='1')
+        consistencygroup.destroy()
+        self.assertTrue(consistencygroup_destroy.called)
+        admin_context = consistencygroup_destroy.call_args[0][0]
+        self.assertTrue(admin_context.is_admin)
+
+
+class TestConsistencyGroupList(test_objects.BaseObjectsTestCase):
+    @mock.patch('cinder.db.consistencygroup_get_all',
+                return_value=[fake_consistencygroup])
+    def test_get_all(self, consistencygroup_get_all):
+        consistencygroups = objects.ConsistencyGroupList.get_all(self.context)
+        self.assertEqual(1, len(consistencygroups))
+        TestConsistencyGroup._compare(self, fake_consistencygroup,
+                                      consistencygroups[0])
+
+    @mock.patch('cinder.db.consistencygroup_get_all_by_project',
+                return_value=[fake_consistencygroup])
+    def test_get_all_by_project(self, consistencygroup_get_all_by_project):
+        consistencygroups = objects.ConsistencyGroupList.get_all_by_project(
+            self.context, self.project_id)
+        self.assertEqual(1, len(consistencygroups))
+        TestConsistencyGroup._compare(self, fake_consistencygroup,
+                                      consistencygroups[0])
diff --git a/cinder/tests/unit/scheduler/test_scheduler.py b/cinder/tests/unit/scheduler/test_scheduler.py
index 7e8f4bc84..563f434a3 100644
--- a/cinder/tests/unit/scheduler/test_scheduler.py
+++ b/cinder/tests/unit/scheduler/test_scheduler.py
@@ -27,7 +27,7 @@ from cinder.scheduler import driver
 from cinder.scheduler import filter_scheduler
 from cinder.scheduler import manager
 from cinder import test
-
+from cinder.tests.unit import fake_consistencygroup
 CONF = cfg.CONF
 
@@ -231,6 +231,8 @@ class SchedulerManagerTestCase(test.TestCase):
         with mock.patch.object(filter_scheduler.FilterScheduler,
                                'schedule_create_consistencygroup') as mock_cg:
             original_driver = self.manager.driver
+            consistencygroup_obj = \
+                fake_consistencygroup.fake_consistencyobject_obj(self.context)
             self.manager.driver = filter_scheduler.FilterScheduler
             LOG = self.mock_object(manager, 'LOG')
             self.stubs.Set(db, 'consistencygroup_update', mock.Mock())
@@ -242,7 +244,7 @@ class SchedulerManagerTestCase(test.TestCase):
                               self.manager.create_consistencygroup,
                               self.context,
                               'volume',
-                              group_id)
+                              consistencygroup_obj)
             self.assertTrue(LOG.exception.call_count > 0)
             db.consistencygroup_update.assert_called_once_with(
                 self.context, group_id, {'status': 'error'})
@@ -254,7 +256,7 @@ class SchedulerManagerTestCase(test.TestCase):
             mock_cg.side_effect = exception.NoValidHost(
                 reason="No weighed hosts available")
             self.manager.create_consistencygroup(
-                self.context, 'volume', group_id)
+                self.context, 'volume', consistencygroup_obj)
             self.assertTrue(LOG.error.call_count > 0)
             db.consistencygroup_update.assert_called_once_with(
                 self.context, group_id, {'status': 'error'})
diff --git a/cinder/tests/unit/test_volume.py b/cinder/tests/unit/test_volume.py
index d977907ac..bc6d5a52a 100644
--- a/cinder/tests/unit/test_volume.py
+++ b/cinder/tests/unit/test_volume.py
@@ -4630,7 +4630,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         def fake_driver_create_cg(context, group):
             """Make sure that the pool is part of the host."""
             self.assertIn('host', group)
-            host = group['host']
+            host = group.host
             pool = volutils.extract_host(host, level='pool')
             self.assertEqual(pool, 'fakepool')
             return {'status': 'available'}
@@ -4643,10 +4643,10 @@ class VolumeTestCase(BaseVolumeTestCase):
             availability_zone=CONF.storage_availability_zone,
             volume_type='type1,type2',
             host='fakehost@fakedrv#fakepool')
-        group_id = group['id']
+        group = objects.ConsistencyGroup.get_by_id(self.context, group.id)
         self.assertEqual(0, len(self.notifier.notifications),
                          self.notifier.notifications)
-        self.volume.create_consistencygroup(self.context, group_id)
+        self.volume.create_consistencygroup(self.context, group)
         self.assertEqual(2, len(self.notifier.notifications),
                          self.notifier.notifications)
         msg = self.notifier.notifications[0]
@@ -4658,22 +4658,21 @@ class VolumeTestCase(BaseVolumeTestCase):
             'tenant_id': self.context.project_id,
             'created_at': 'DONTCARE',
             'user_id': 'fake',
-            'consistencygroup_id': group_id
+            'consistencygroup_id': group.id
         }
         self.assertDictMatch(expected, msg['payload'])
         msg = self.notifier.notifications[1]
         self.assertEqual(msg['event_type'], 'consistencygroup.create.end')
-        expected['status'] = 'available'
         self.assertDictMatch(expected, msg['payload'])
         self.assertEqual(
-            group_id,
+            group.id,
             db.consistencygroup_get(context.get_admin_context(),
-                                    group_id).id)
+                                    group.id).id)
 
-        self.volume.delete_consistencygroup(self.context, group_id)
+        self.volume.delete_consistencygroup(self.context, group)
         cg = db.consistencygroup_get(
             context.get_admin_context(read_deleted='yes'),
-            group_id)
+            group.id)
         self.assertEqual('deleted', cg['status'])
         self.assertEqual(4, len(self.notifier.notifications),
                          self.notifier.notifications)
@@ -4682,11 +4681,12 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertDictMatch(expected, msg['payload'])
         msg = self.notifier.notifications[3]
         self.assertEqual('consistencygroup.delete.end', msg['event_type'])
+        expected['status'] = 'deleted'
         self.assertDictMatch(expected, msg['payload'])
 
         self.assertRaises(exception.NotFound,
                           db.consistencygroup_get,
                           self.context,
-                          group_id)
+                          group.id)
 
     @mock.patch.object(CGQUOTAS, "reserve",
                        return_value=["RESERVATION"])
@@ -4705,12 +4705,12 @@ class VolumeTestCase(BaseVolumeTestCase):
             self.context,
             availability_zone=CONF.storage_availability_zone,
             volume_type='type1,type2')
-        group_id = group['id']
-        self.volume.create_consistencygroup(self.context, group_id)
+        group = objects.ConsistencyGroup.get_by_id(self.context, group['id'])
+        self.volume.create_consistencygroup(self.context, group)
 
         volume = tests_utils.create_volume(
             self.context,
-            consistencygroup_id=group_id,
+            consistencygroup_id=group.id,
             **self.volume_params)
         volume_id = volume['id']
         self.volume.create_volume(self.context, volume_id)
@@ -4727,12 +4727,10 @@ class VolumeTestCase(BaseVolumeTestCase):
             [{'id': volume_id2, 'status': 'available'}],
             [{'id': volume_id, 'status': 'available'}])
 
-        self.volume.update_consistencygroup(self.context, group_id,
+        self.volume.update_consistencygroup(self.context, group,
                                             add_volumes=volume_id2,
                                             remove_volumes=volume_id)
-        cg = db.consistencygroup_get(
-            self.context,
-            group_id)
+        cg = objects.ConsistencyGroup.get_by_id(self.context, group.id)
         expected = {
             'status': 'available',
             'name': 'test_cg',
@@ -4740,9 +4738,9 @@ class VolumeTestCase(BaseVolumeTestCase):
             'tenant_id': self.context.project_id,
             'created_at': 'DONTCARE',
             'user_id': 'fake',
-            'consistencygroup_id': group_id
+            'consistencygroup_id': group.id
         }
-        self.assertEqual('available', cg['status'])
+        self.assertEqual('available', cg.status)
         self.assertEqual(10, len(self.notifier.notifications),
                          self.notifier.notifications)
         msg = self.notifier.notifications[6]
@@ -4751,7 +4749,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         msg = self.notifier.notifications[8]
         self.assertEqual('consistencygroup.update.end', msg['event_type'])
         self.assertDictMatch(expected, msg['payload'])
-        cgvolumes = db.volume_get_all_by_group(self.context, group_id)
+        cgvolumes = db.volume_get_all_by_group(self.context, group.id)
         cgvol_ids = [cgvol['id'] for cgvol in cgvolumes]
         # Verify volume is removed.
         self.assertNotIn(volume_id, cgvol_ids)
@@ -4773,7 +4771,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertRaises(exception.InvalidVolume,
                           self.volume.update_consistencygroup,
                           self.context,
-                          group_id,
+                          group,
                           add_volumes=volume_id3,
                           remove_volumes=None)
         self.volume.db.volume_get.reset_mock()
@@ -4819,8 +4817,9 @@ class VolumeTestCase(BaseVolumeTestCase):
             status='available',
             host=CONF.host,
             size=1)
+        group = objects.ConsistencyGroup.get_by_id(self.context, group['id'])
         volume_id = volume['id']
-        cgsnapshot_returns = self._create_cgsnapshot(group_id, volume_id)
+        cgsnapshot_returns = self._create_cgsnapshot(group.id, volume_id)
         cgsnapshot_id = cgsnapshot_returns[0]['id']
         snapshot_id = cgsnapshot_returns[1]['id']
 
@@ -4830,20 +4829,17 @@ class VolumeTestCase(BaseVolumeTestCase):
             availability_zone=CONF.storage_availability_zone,
             volume_type='type1,type2',
             cgsnapshot_id=cgsnapshot_id)
-        group2_id = group2['id']
+        group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
         volume2 = tests_utils.create_volume(
             self.context,
-            consistencygroup_id=group2_id,
+            consistencygroup_id=group2.id,
             snapshot_id=snapshot_id,
             **self.volume_params)
         volume2_id = volume2['id']
         self.volume.create_volume(self.context, volume2_id)
         self.volume.create_consistencygroup_from_src(
-            self.context, group2_id, cgsnapshot_id=cgsnapshot_id)
-
-        cg2 = db.consistencygroup_get(
-            self.context,
-            group2_id)
+            self.context, group2, cgsnapshot_id=cgsnapshot_id)
+        cg2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
         expected = {
             'status': 'available',
             'name': 'test_cg',
@@ -4851,10 +4847,10 @@ class VolumeTestCase(BaseVolumeTestCase):
             'tenant_id': self.context.project_id,
             'created_at': 'DONTCARE',
             'user_id': 'fake',
-            'consistencygroup_id': group2_id
+            'consistencygroup_id': group2.id,
         }
-        self.assertEqual('available', cg2['status'])
-        self.assertEqual(group2_id, cg2['id'])
+        self.assertEqual('available', cg2.status)
+        self.assertEqual(group2.id, cg2['id'])
         self.assertEqual(cgsnapshot_id, cg2['cgsnapshot_id'])
         self.assertIsNone(cg2['source_cgid'])
 
@@ -4870,7 +4866,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertEqual(6, len(self.notifier.notifications),
                          self.notifier.notifications)
 
-        self.volume.delete_consistencygroup(self.context, group2_id)
+        self.volume.delete_consistencygroup(self.context, group2)
 
         if len(self.notifier.notifications) > 10:
             self.assertFalse(self.notifier.notifications[10])
@@ -4883,16 +4879,16 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertDictMatch(expected, msg['payload'])
         msg = self.notifier.notifications[8]
         self.assertEqual('consistencygroup.delete.end', msg['event_type'])
+        expected['status'] = 'deleted'
         self.assertDictMatch(expected, msg['payload'])
 
-        cg2 = db.consistencygroup_get(
-            context.get_admin_context(read_deleted='yes'),
-            group2_id)
-        self.assertEqual('deleted', cg2['status'])
+        cg2 = objects.ConsistencyGroup.get_by_id(
+            context.get_admin_context(read_deleted='yes'), group2.id)
+        self.assertEqual('deleted', cg2.status)
         self.assertRaises(exception.NotFound,
                           db.consistencygroup_get,
                           self.context,
-                          group2_id)
+                          group2.id)
 
         # Create CG from source CG.
        group3 = tests_utils.create_consistencygroup(
@@ -4900,28 +4896,27 @@ class VolumeTestCase(BaseVolumeTestCase):
             availability_zone=CONF.storage_availability_zone,
             volume_type='type1,type2',
             source_cgid=group_id)
-        group3_id = group3['id']
+        group3 = objects.ConsistencyGroup.get_by_id(self.context, group3.id)
         volume3 = tests_utils.create_volume(
             self.context,
-            consistencygroup_id=group3_id,
+            consistencygroup_id=group3.id,
             source_volid=volume_id,
             **self.volume_params)
         volume3_id = volume3['id']
         self.volume.create_volume(self.context, volume3_id)
         self.volume.create_consistencygroup_from_src(
-            self.context, group3_id, source_cgid=group_id)
+            self.context, group3, source_cg=group)
 
-        cg3 = db.consistencygroup_get(
-            self.context,
-            group3_id)
-        self.assertEqual('available', cg3['status'])
-        self.assertEqual(group3_id, cg3['id'])
-        self.assertEqual(group_id, cg3['source_cgid'])
-        self.assertIsNone(cg3['cgsnapshot_id'])
+        cg3 = objects.ConsistencyGroup.get_by_id(self.context, group3.id)
+
+        self.assertEqual('available', cg3.status)
+        self.assertEqual(group3.id, cg3.id)
+        self.assertEqual(group_id, cg3.source_cgid)
+        self.assertIsNone(cg3.cgsnapshot_id)
 
         self.volume.delete_cgsnapshot(self.context, cgsnapshot_id)
-        self.volume.delete_consistencygroup(self.context, group_id)
-        self.volume.delete_consistencygroup(self.context, group3_id)
+        self.volume.delete_consistencygroup(self.context, group)
+        self.volume.delete_consistencygroup(self.context, group3)
 
     def test_sort_snapshots(self):
         vol1 = {'id': '1', 'name': 'volume 1',
@@ -5056,16 +5051,16 @@ class VolumeTestCase(BaseVolumeTestCase):
             self.context,
             availability_zone=CONF.storage_availability_zone,
             volume_type='type1,type2')
-        group_id = group['id']
+        group = objects.ConsistencyGroup.get_by_id(self.context, group['id'])
         volume = tests_utils.create_volume(
             self.context,
-            consistencygroup_id=group_id,
+            consistencygroup_id=group.id,
             **self.volume_params)
         volume_id = volume['id']
         self.volume.create_volume(self.context, volume_id)
         cgsnapshot = tests_utils.create_cgsnapshot(
             self.context,
-            consistencygroup_id=group_id)
+            consistencygroup_id=group.id)
         cgsnapshot_id = cgsnapshot['id']
 
         if len(self.notifier.notifications) > 2:
@@ -5073,9 +5068,9 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertEqual(2, len(self.notifier.notifications),
                          self.notifier.notifications)
 
-        cgsnapshot_returns = self._create_cgsnapshot(group_id, volume_id)
+        cgsnapshot_returns = self._create_cgsnapshot(group.id, volume_id)
         cgsnapshot_id = cgsnapshot_returns[0]['id']
-        self.volume.create_cgsnapshot(self.context, group_id, cgsnapshot_id)
+        self.volume.create_cgsnapshot(self.context, group.id, cgsnapshot_id)
         self.assertEqual(cgsnapshot_id,
                          db.cgsnapshot_get(context.get_admin_context(),
                                            cgsnapshot_id).id)
@@ -5092,7 +5087,7 @@ class VolumeTestCase(BaseVolumeTestCase):
             'status': 'creating',
             'tenant_id': 'fake',
             'user_id': 'fake',
-            'consistencygroup_id': group_id
+            'consistencygroup_id': group.id
         }
         self.assertDictMatch(expected, msg['payload'])
         msg = self.notifier.notifications[3]
@@ -5131,7 +5126,7 @@ class VolumeTestCase(BaseVolumeTestCase):
             self.context,
             cgsnapshot_id)
 
-        self.volume.delete_consistencygroup(self.context, group_id)
+        self.volume.delete_consistencygroup(self.context, group)
         self.assertTrue(mock_create_cgsnap.called)
         self.assertTrue(mock_del_cgsnap.called)
 
@@ -5155,10 +5150,10 @@ class VolumeTestCase(BaseVolumeTestCase):
             availability_zone=CONF.storage_availability_zone,
             volume_type='type1,type2')
 
-        group_id = group['id']
+        group = objects.ConsistencyGroup.get_by_id(self.context, group['id'])
         volume = tests_utils.create_volume(
             self.context,
-            consistencygroup_id=group_id,
+            consistencygroup_id=group.id,
             host='host1@backend1#pool1',
             status='creating',
             size=1)
@@ -5166,15 +5161,15 @@ class VolumeTestCase(BaseVolumeTestCase):
         volume_id = volume['id']
         self.volume.create_volume(self.context, volume_id)
 
-        self.volume.delete_consistencygroup(self.context, group_id)
+        self.volume.delete_consistencygroup(self.context, group)
         cg = db.consistencygroup_get(
             context.get_admin_context(read_deleted='yes'),
-            group_id)
+            group.id)
         self.assertEqual('deleted', cg['status'])
         self.assertRaises(exception.NotFound,
                           db.consistencygroup_get,
                           self.context,
-                          group_id)
+                          group.id)
         self.assertTrue(mock_del_cg.called)
 
@@ -5192,10 +5187,10 @@ class VolumeTestCase(BaseVolumeTestCase):
             availability_zone=CONF.storage_availability_zone,
             volume_type='type1,type2')
 
-        group_id = group['id']
+        group = objects.ConsistencyGroup.get_by_id(self.context, group['id'])
         volume = tests_utils.create_volume(
             self.context,
-            consistencygroup_id=group_id,
+            consistencygroup_id=group.id,
             host='host1@backend1#pool1',
             status='creating',
             size=1)
@@ -5206,9 +5201,9 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertRaises(exception.InvalidVolume,
                           self.volume.delete_consistencygroup,
                           self.context,
-                          group_id)
+                          group)
         cg = db.consistencygroup_get(self.context,
-                                     group_id)
+                                     group.id)
         # Group is not deleted
         self.assertEqual('available', cg['status'])
diff --git a/cinder/tests/unit/test_volume_rpcapi.py b/cinder/tests/unit/test_volume_rpcapi.py
index 68e6de200..546ca5dc3 100644
--- a/cinder/tests/unit/test_volume_rpcapi.py
+++ b/cinder/tests/unit/test_volume_rpcapi.py
@@ -27,6 +27,7 @@ from cinder import test
 from cinder.tests.unit import fake_snapshot
 from cinder.tests.unit import utils as tests_utils
 from cinder.volume import rpcapi as volume_rpcapi
+from cinder.volume import utils
 
 CONF = cfg.CONF
 
@@ -79,14 +80,16 @@ class VolumeRpcAPITestCase(test.TestCase):
             host='fakehost@fakedrv#fakepool',
             source_cgid=source_group['id'])
 
+        group = objects.ConsistencyGroup.get_by_id(self.context, group.id)
+        group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
         self.fake_volume = jsonutils.to_primitive(volume)
         self.fake_volume_metadata = volume["volume_metadata"]
         self.fake_snapshot = jsonutils.to_primitive(snapshot)
         self.fake_snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
                                                                  **snpshot)
         self.fake_reservations = ["RESERVATION"]
-        self.fake_cg = jsonutils.to_primitive(group)
-        self.fake_cg2 = jsonutils.to_primitive(group2)
+        self.fake_cg = group
+        self.fake_cg2 = group2
         self.fake_src_cg = jsonutils.to_primitive(source_group)
         self.fake_cgsnap = jsonutils.to_primitive(cgsnapshot)
 
@@ -135,11 +138,6 @@ class VolumeRpcAPITestCase(test.TestCase):
             del expected_msg['new_volume']
             expected_msg['new_volume_id'] = volume['id']
 
-        if 'group' in expected_msg:
-            group = expected_msg['group']
-            del expected_msg['group']
-            expected_msg['group_id'] = group['id']
-
         if 'cgsnapshot' in expected_msg:
             cgsnapshot = expected_msg['cgsnapshot']
             if cgsnapshot:
@@ -148,20 +146,14 @@ class VolumeRpcAPITestCase(test.TestCase):
             else:
                 expected_msg['cgsnapshot_id'] = None
 
-        if 'source_cg' in expected_msg:
-            source_cg = expected_msg['source_cg']
-            if source_cg:
-                del expected_msg['source_cg']
-                expected_msg['source_cgid'] = source_cg['id']
-            else:
-                expected_msg['source_cgid'] = None
-
         if 'host' in kwargs:
             host = kwargs['host']
+        elif 'group' in kwargs:
+            host = kwargs['group']['host']
         else:
             host = kwargs['volume']['host']
 
-        target['server'] = host
+        target['server'] = utils.extract_host(host)
         target['topic'] = '%s.%s' % (CONF.volume_topic, host)
 
         self.fake_args = None
@@ -194,6 +186,10 @@ class VolumeRpcAPITestCase(test.TestCase):
                 expected_snapshot = expected_msg[kwarg].obj_to_primitive()
                 snapshot = value.obj_to_primitive()
                 self.assertEqual(expected_snapshot, snapshot)
+            elif isinstance(value, objects.ConsistencyGroup):
+                expected_cg = expected_msg[kwarg].obj_to_primitive()
+                cg = value.obj_to_primitive()
+                self.assertEqual(expected_cg, cg)
             else:
                 self.assertEqual(expected_msg[kwarg], value)
 
@@ -363,16 +359,14 @@ class VolumeRpcAPITestCase(test.TestCase):
         self._test_volume_api('create_consistencygroup_from_src',
                               rpc_method='cast',
                               group=self.fake_cg,
-                              host='fakehost',
                               cgsnapshot=self.fake_cgsnap,
                               source_cg=None,
-                              version='1.25')
+                              version='1.26')
 
     def test_create_consistencygroup_from_src_cg(self):
         self._test_volume_api('create_consistencygroup_from_src',
                               rpc_method='cast',
                               group=self.fake_cg2,
-                              host='fakehost',
                               cgsnapshot=None,
                               source_cg=self.fake_src_cg,
-                              version='1.25')
+                              version='1.26')
diff --git a/cinder/tests/unit/utils.py b/cinder/tests/unit/utils.py
index d244d150d..0f46fdec6 100644
--- a/cinder/tests/unit/utils.py
+++ b/cinder/tests/unit/utils.py
@@ -112,8 +112,8 @@ def create_consistencygroup(ctxt,
     """Create a consistencygroup object in the DB."""
     cg = {}
     cg['host'] = host
-    cg['user_id'] = ctxt.user_id
-    cg['project_id'] = ctxt.project_id
+    cg['user_id'] = ctxt.user_id or 'fake_user_id'
+    cg['project_id'] = ctxt.project_id or 'fake_project_id'
    cg['status'] = status
    cg['name'] = name
    cg['description'] = description
diff --git a/cinder/tests/unit/volume/flows/test_create_volume_flow.py b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
index 5ec263d0b..a55cce2f4 100644
--- a/cinder/tests/unit/volume/flows/test_create_volume_flow.py
+++ b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
@@ -19,6 +19,7 @@ import mock
 from cinder import context
 from cinder import exception
 from cinder import test
+from cinder.tests.unit import fake_consistencygroup
 from cinder.tests.unit import fake_snapshot
 from cinder.tests.unit import fake_volume
 from cinder.tests.unit.volume.flows import fake_volume_api
@@ -42,9 +43,13 @@ class CreateVolumeFlowTestCase(test.TestCase):
         self.counter = float(0)
 
     @mock.patch('time.time', side_effect=time_inc)
-    def test_cast_create_volume(self, mock_time):
-
+    @mock.patch('cinder.objects.ConsistencyGroup.get_by_id')
+    def test_cast_create_volume(self, consistencygroup_get_by_id, mock_time):
         props = {}
+        consistencygroup_obj = \
+            fake_consistencygroup.fake_consistencyobject_obj(
+                self.ctxt, consistencygroup_id=1, host=None)
+        consistencygroup_get_by_id.return_value = consistencygroup_obj
         spec = {'volume_id': None,
                 'source_volid': None,
                 'snapshot_id': None,
@@ -76,6 +81,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
                                               fake_volume_api.FakeDb())
 
         task._cast_create_volume(self.ctxt, spec, props)
+        consistencygroup_get_by_id.assert_called_once_with(self.ctxt, 5)
 
 
 class CreateVolumeFlowManagerTestCase(test.TestCase):
diff --git a/cinder/volume/flows/api/create_volume.py b/cinder/volume/flows/api/create_volume.py
index 5f1a4adfc..4bd95a8a0 100644
--- a/cinder/volume/flows/api/create_volume.py
+++ b/cinder/volume/flows/api/create_volume.py
@@ -671,9 +671,8 @@ class VolumeCastTask(flow_utils.CinderTask):
         cgsnapshot_id = request_spec['cgsnapshot_id']
 
         if cgroup_id:
-            cgroup = self.db.consistencygroup_get(context, cgroup_id)
-            if cgroup:
-                host = cgroup.get('host', None)
diff --git a/cinder/volume/flows/api/create_volume.py b/cinder/volume/flows/api/create_volume.py
index 5f1a4adfc..4bd95a8a0 100644
--- a/cinder/volume/flows/api/create_volume.py
+++ b/cinder/volume/flows/api/create_volume.py
@@ -671,9 +671,8 @@ class VolumeCastTask(flow_utils.CinderTask):
         cgsnapshot_id = request_spec['cgsnapshot_id']
 
         if cgroup_id:
-            cgroup = self.db.consistencygroup_get(context, cgroup_id)
-            if cgroup:
-                host = cgroup.get('host', None)
+            cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
+            host = cgroup.host
         elif snapshot_id and CONF.snapshot_same_host:
             # NOTE(Rongze Zhu): A simple solution for bug 1008866.
             #
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 3b937939f..76cd056f5 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -189,7 +189,7 @@ def locked_snapshot_operation(f):
 class VolumeManager(manager.SchedulerDependentManager):
     """Manages attachable block storage devices."""
 
-    RPC_API_VERSION = '1.25'
+    RPC_API_VERSION = '1.26'
 
     target = messaging.Target(version=RPC_API_VERSION)
 
@@ -1563,7 +1563,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             extra_usage_info=extra_usage_info, host=self.host)
 
         if not volumes:
-            volumes = self.db.volume_get_all_by_group(context, group['id'])
+            volumes = self.db.volume_get_all_by_group(context, group.id)
         if volumes:
             for volume in volumes:
                 vol_utils.notify_about_volume_usage(
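In the flow change above, the dict-returning self.db.consistencygroup_get() call is replaced by the object loader, so a missing group now surfaces as ConsistencyGroupNotFound instead of silently leaving host unset. A rough sketch of that loader pattern, assuming only a toy in-memory store (names are illustrative, not the actual Cinder code):

    _DB = {'cg-1': {'id': 'cg-1', 'host': 'node1@lvm#pool'}}

    class NotFound(Exception):
        pass

    class ConsistencyGroup(object):
        def __init__(self, **fields):
            self.__dict__.update(fields)

        @classmethod
        def get_by_id(cls, context, cg_id):
            # raise instead of returning None, as the object API does
            try:
                return cls(**_DB[cg_id])
            except KeyError:
                raise NotFound(cg_id)

    cgroup = ConsistencyGroup.get_by_id(None, 'cg-1')
    print(cgroup.host)      # attribute access instead of .get('host')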
""" - group_ref = self.db.consistencygroup_get(context, group_id) - try: - volumes = self.db.volume_get_all_by_group( - context, group_id) + volumes = self.db.volume_get_all_by_group(context, group.id) cgsnapshot = None snapshots = None @@ -1980,7 +1970,7 @@ class VolumeManager(manager.SchedulerDependentManager): "SnapshotNotFound."), {'snap': cgsnapshot_id}, resource={'type': 'consistency_group', - 'id': group_ref['id']}) + 'id': group.id}) raise if cgsnapshot: snapshots = objects.SnapshotList.get_all_for_cgsnapshot( @@ -1992,28 +1982,26 @@ class VolumeManager(manager.SchedulerDependentManager): "%(group)s because snapshot %(snap)s is " "not in a valid state. Valid states are: " "%(valid)s.") % - {'group': group_id, + {'group': group.id, 'snap': snap['id'], 'valid': VALID_CREATE_CG_SRC_SNAP_STATUS}) raise exception.InvalidConsistencyGroup(reason=msg) - source_cg = None - source_vols = None - if source_cgid: + if source_cg: try: - source_cg = self.db.consistencygroup_get( - context, source_cgid) + source_cg = objects.ConsistencyGroup.get_by_id( + context, source_cg.id) except exception.ConsistencyGroupNotFound: LOG.error(_LE("Create consistency group " "from source cg-%(cg)s failed: " "ConsistencyGroupNotFound."), - {'cg': source_cgid}, + {'cg': source_cg.id}, resource={'type': 'consistency_group', - 'id': group_ref['id']}) + 'id': group.id}) raise if source_cg: source_vols = self.db.volume_get_all_by_group( - context, source_cgid) + context, source_cg.id) for source_vol in source_vols: if (source_vol['status'] not in VALID_CREATE_CG_SRC_CG_STATUS): @@ -2022,7 +2010,7 @@ class VolumeManager(manager.SchedulerDependentManager): "%(source_vol)s is not in a valid " "state. Valid states are: " "%(valid)s.") % - {'group': group_id, + {'group': group.id, 'source_vol': source_vol['id'], 'valid': VALID_CREATE_CG_SRC_CG_STATUS}) raise exception.InvalidConsistencyGroup(reason=msg) @@ -2041,13 +2029,13 @@ class VolumeManager(manager.SchedulerDependentManager): source_vols) self._notify_about_consistencygroup_usage( - context, group_ref, "create.start") + context, group, "create.start") utils.require_driver_initialized(self.driver) model_update, volumes_model_update = ( self.driver.create_consistencygroup_from_src( - context, group_ref, volumes, cgsnapshot, + context, group, volumes, cgsnapshot, sorted_snapshots, source_cg, sorted_source_vols)) if volumes_model_update: @@ -2055,26 +2043,24 @@ class VolumeManager(manager.SchedulerDependentManager): self.db.volume_update(context, update['id'], update) if model_update: - group_ref = self.db.consistencygroup_update( - context, group_id, model_update) + group.update(model_update) + group.save() except Exception: with excutils.save_and_reraise_exception(): - self.db.consistencygroup_update( - context, - group_id, - {'status': 'error'}) + group.status = 'error' + group.save() if cgsnapshot_id: source = _("snapshot-%s") % cgsnapshot_id - elif source_cgid: - source = _("cg-%s") % source_cgid + elif source_cg: + source = _("cg-%s") % source_cg.id else: source = None LOG.error(_LE("Create consistency group " "from source %(source)s failed."), {'source': source}, resource={'type': 'consistency_group', - 'id': group_ref['id']}) + 'id': group.id}) # Update volume status to 'error' as well. 
@@ -2084,24 +2070,22 @@ class VolumeManager(manager.SchedulerDependentManager):
         status = 'available'
         for vol in volumes:
             update = {'status': status, 'created_at': now}
-            self._update_volume_from_src(context, vol, update,
-                                         group_id=group_id)
+            self._update_volume_from_src(context, vol, update, group=group)
             self._update_allocated_capacity(vol)
 
-        self.db.consistencygroup_update(context,
-                                        group_id,
-                                        {'status': status,
-                                         'created_at': now})
+        group.status = status
+        group.created_at = now
+        group.save()
 
         self._notify_about_consistencygroup_usage(
-            context, group_ref, "create.end")
+            context, group, "create.end")
         LOG.info(_LI("Create consistency group "
                      "from snapshot-%(snap)s completed successfully."),
                  {'snap': cgsnapshot_id},
                  resource={'type': 'consistency_group',
-                           'id': group_ref['id']})
-        return group_ref['id']
+                           'id': group.id})
+        return group
 
     def _sort_snapshots(self, volumes, snapshots):
         # Sort source snapshots so that they are in the same order as their
@@ -2150,7 +2134,7 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         return sorted_source_vols
 
-    def _update_volume_from_src(self, context, vol, update, group_id=None):
+    def _update_volume_from_src(self, context, vol, update, group=None):
         try:
             snapshot_id = vol.get('snapshot_id')
             if snapshot_id:
@@ -2166,9 +2150,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                           {'snapshot_id': vol['snapshot_id']})
                 self.db.volume_update(context, vol['id'],
                                       {'status': 'error'})
-                if group_id:
-                    self.db.consistencygroup_update(
-                        context, group_id, {'status': 'error'})
+                if group:
+                    group.status = 'error'
+                    group.save()
                 raise
         except exception.VolumeNotFound:
             LOG.error(_LE("The source volume %(volume_id)s "
@@ -2176,9 +2160,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                       {'volume_id': snapshot.volume_id})
             self.db.volume_update(context, vol['id'],
                                   {'status': 'error'})
-            if group_id:
-                self.db.consistencygroup_update(
-                    context, group_id, {'status': 'error'})
+            if group:
+                group.status = 'error'
+                group.save()
             raise
         except exception.CinderException as ex:
             LOG.error(_LE("Failed to update %(volume_id)s"
@@ -2188,9 +2172,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                        'snapshot_id': vol['snapshot_id']})
             self.db.volume_update(context, vol['id'],
                                   {'status': 'error'})
-            if group_id:
-                self.db.consistencygroup_update(
-                    context, group_id, {'status': 'error'})
+            if group:
+                group.status = 'error'
+                group.save()
             raise exception.MetadataCopyFailure(reason=six.text_type(ex))
 
         self.db.volume_update(context, vol['id'], update)
@@ -2211,18 +2195,17 @@ class VolumeManager(manager.SchedulerDependentManager):
                 self.stats['pools'][pool] = dict(
                     allocated_capacity_gb=vol['size'])
 
-    def delete_consistencygroup(self, context, group_id):
+    def delete_consistencygroup(self, context, group):
         """Deletes consistency group and the volumes in the group."""
         context = context.elevated()
-        group_ref = self.db.consistencygroup_get(context, group_id)
-        project_id = group_ref['project_id']
+        project_id = group.project_id
 
-        if context.project_id != group_ref['project_id']:
-            project_id = group_ref['project_id']
+        if context.project_id != group.project_id:
+            project_id = group.project_id
         else:
             project_id = context.project_id
 
-        volumes = self.db.volume_get_all_by_group(context, group_id)
+        volumes = self.db.volume_get_all_by_group(context, group.id)
 
         for volume_ref in volumes:
             if volume_ref['attach_status'] == "attached":
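Every error path in _update_volume_from_src() now follows the same shape: flip group.status to 'error', save(), and let the original exception keep propagating via excutils.save_and_reraise_exception(). Roughly equivalent plain Python, as a sketch (Group and do_work are stand-ins, not Cinder code):

    class Group(object):
        status = 'creating'

        def save(self):
            print('saved status=%s' % self.status)

    def do_work():
        raise RuntimeError('driver failed')

    group = Group()
    try:
        try:
            do_work()
        except Exception:
            # what save_and_reraise_exception() does here: record the
            # failure, then re-raise the original exception unchanged
            group.status = 'error'
            group.save()
            raise
    except RuntimeError:
        pass                      # the caller still sees the failure

    assert group.status == 'error'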
reason=_("Volume is not local to this node")) self._notify_about_consistencygroup_usage( - context, group_ref, "delete.start") + context, group, "delete.start") try: utils.require_driver_initialized(self.driver) model_update, volumes = self.driver.delete_consistencygroup( - context, group_ref) + context, group) if volumes: for volume in volumes: @@ -2262,18 +2245,16 @@ class VolumeManager(manager.SchedulerDependentManager): msg = (_('Delete consistency group failed.')) LOG.exception(msg, resource={'type': 'consistency_group', - 'id': group_ref['id']}) + 'id': group.id}) raise exception.VolumeDriverException(message=msg) else: - self.db.consistencygroup_update(context, group_ref['id'], - model_update) + group.update(model_update) + group.save() except Exception: with excutils.save_and_reraise_exception(): - self.db.consistencygroup_update( - context, - group_ref['id'], - {'status': 'error_deleting'}) + group.status = 'error_deleting' + group.save() # Get reservations for group try: @@ -2286,7 +2267,7 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.exception(_LE("Delete consistency group " "failed to update usages."), resource={'type': 'consistency_group', - 'id': group_id}) + 'id': group.id}) for volume_ref in volumes: # Get reservations for volume @@ -2305,7 +2286,7 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.exception(_LE("Delete consistency group " "failed to update usages."), resource={'type': 'consistency_group', - 'id': group_id}) + 'id': group.id}) # Delete glance metadata if it exists self.db.volume_glance_metadata_delete_by_volume(context, volume_id) @@ -2322,25 +2303,24 @@ class VolumeManager(manager.SchedulerDependentManager): CGQUOTAS.commit(context, cgreservations, project_id=project_id) - self.db.consistencygroup_destroy(context, group_id) + group.destroy() self._notify_about_consistencygroup_usage( - context, group_ref, "delete.end", volumes) + context, group, "delete.end", volumes) self.publish_service_capabilities(context) LOG.info(_LI("Delete consistency group " "completed successfully."), resource={'type': 'consistency_group', - 'id': group_id}) + 'id': group.id}) return True - def update_consistencygroup(self, context, group_id, + def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates consistency group. Update consistency group by adding volumes to the group, or removing volumes from the group. """ - group = self.db.consistencygroup_get(context, group_id) add_volumes_ref = [] remove_volumes_ref = [] @@ -2359,14 +2339,14 @@ class VolumeManager(manager.SchedulerDependentManager): "VolumeNotFound."), {'volume_id': add_vol_ref['id']}, resource={'type': 'consistency_group', - 'id': group_id}) + 'id': group.id}) raise if add_vol_ref['status'] not in ['in-use', 'available']: msg = (_("Cannot add volume %(volume_id)s to consistency " "group %(group_id)s because volume is in an invalid " "state: %(status)s. 
@@ -2359,14 +2339,14 @@ class VolumeManager(manager.SchedulerDependentManager):
                                   "VolumeNotFound."),
                               {'volume_id': add_vol_ref['id']},
                               resource={'type': 'consistency_group',
-                                        'id': group_id})
+                                        'id': group.id})
                     raise
                 if add_vol_ref['status'] not in ['in-use', 'available']:
                     msg = (_("Cannot add volume %(volume_id)s to consistency "
                              "group %(group_id)s because volume is in an invalid "
                              "state: %(status)s. Valid states are: %(valid)s.") %
                            {'volume_id': add_vol_ref['id'],
-                            'group_id': group_id,
+                            'group_id': group.id,
                             'status': add_vol_ref['status'],
                             'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
                     raise exception.InvalidVolume(reason=msg)
@@ -2388,7 +2368,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                                   "VolumeNotFound."),
                               {'volume_id': remove_vol_ref['id']},
                               resource={'type': 'consistency_group',
-                                        'id': group_id})
+                                        'id': group.id})
                     raise
                 remove_volumes_ref.append(remove_vol_ref)
 
@@ -2415,19 +2395,19 @@ class VolumeManager(manager.SchedulerDependentManager):
             if model_update:
                 if model_update['status'] in ['error']:
                     msg = (_('Error occurred when updating consistency group '
-                             '%s.') % group_id)
+                             '%s.') % group.id)
                     LOG.exception(msg)
                     raise exception.VolumeDriverException(message=msg)
-                self.db.consistencygroup_update(context, group_id,
-                                                model_update)
+                group.update(model_update)
+                group.save()
 
         except exception.VolumeDriverException:
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE("Error occurred in the volume driver when "
                               "updating consistency group %(group_id)s."),
-                          {'group_id': group_id})
-                self.db.consistencygroup_update(context, group_id,
-                                                {'status': 'error'})
+                          {'group_id': group.id})
+                group.status = 'error'
+                group.save()
                 for add_vol in add_volumes_ref:
                     self.db.volume_update(context, add_vol['id'],
                                           {'status': 'error'})
@@ -2438,9 +2418,9 @@ class VolumeManager(manager.SchedulerDependentManager):
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE("Error occurred when updating consistency "
                               "group %(group_id)s."),
-                          {'group_id': group['id']})
-                self.db.consistencygroup_update(context, group_id,
-                                                {'status': 'error'})
+                          {'group_id': group.id})
+                group.status = 'error'
+                group.save()
                 for add_vol in add_volumes_ref:
                     self.db.volume_update(context, add_vol['id'],
                                           {'status': 'error'})
@@ -2449,12 +2429,12 @@ class VolumeManager(manager.SchedulerDependentManager):
                                           {'status': 'error'})
 
         now = timeutils.utcnow()
-        self.db.consistencygroup_update(context, group_id,
-                                        {'status': 'available',
-                                         'updated_at': now})
+        group.status = 'available'
+        group.updated_at = now
+        group.save()
         for add_vol in add_volumes_ref:
             self.db.volume_update(context, add_vol['id'],
-                                  {'consistencygroup_id': group_id,
+                                  {'consistencygroup_id': group.id,
                                    'updated_at': now})
         for rem_vol in remove_volumes_ref:
             self.db.volume_update(context, rem_vol['id'],
@@ -2466,11 +2446,11 @@ class VolumeManager(manager.SchedulerDependentManager):
         LOG.info(_LI("Delete consistency group "
                      "completed successfully."),
                  resource={'type': 'consistency_group',
-                           'id': group_id})
+                           'id': group.id})
 
         return True
 
-    def create_cgsnapshot(self, context, group_id, cgsnapshot_id):
+    def create_cgsnapshot(self, context, group, cgsnapshot_id):
         """Creates the cgsnapshot."""
         caller_context = context
         context = context.elevated()
diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py
index 4952ecceb..e980b46a4 100644
--- a/cinder/volume/rpcapi.py
+++ b/cinder/volume/rpcapi.py
@@ -69,6 +69,9 @@ class VolumeAPI(object):
                cgsnapshot_id from create_volume. All off them are already
                passed either in request_spec or available in the DB.
         1.25 - Add source_cg to create_consistencygroup_from_src.
+        1.26 - Adds support for sending objects over RPC in
+               create_consistencygroup(), create_consistencygroup_from_src(),
+               update_consistencygroup() and delete_consistencygroup().
     """
 
     BASE_RPC_API_VERSION = '1.0'
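With the history entry above, the client is capped at 1.26 and each cast prepares its target at '1.26', so a manager still running 1.25 will refuse the object-carrying messages until it is upgraded. The compatibility rule is plain version comparison; a sketch of the gate (illustrative, not oslo.messaging internals):

    def can_send(client_cap, method_version):
        # a method can be sent only if the peer's cap is at least
        # the version the method requires
        cap = tuple(int(p) for p in client_cap.split('.'))
        need = tuple(int(p) for p in method_version.split('.'))
        return need <= cap

    assert can_send('1.26', '1.26')
    assert not can_send('1.25', '1.26')   # older server: incompatible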
""" BASE_RPC_API_VERSION = '1.0' @@ -78,45 +81,44 @@ class VolumeAPI(object): target = messaging.Target(topic=CONF.volume_topic, version=self.BASE_RPC_API_VERSION) serializer = objects_base.CinderObjectSerializer() - self.client = rpc.get_client(target, '1.25', serializer=serializer) + self.client = rpc.get_client(target, '1.26', serializer=serializer) def create_consistencygroup(self, ctxt, group, host): new_host = utils.extract_host(host) - cctxt = self.client.prepare(server=new_host, version='1.18') + cctxt = self.client.prepare(server=new_host, version='1.26') cctxt.cast(ctxt, 'create_consistencygroup', - group_id=group['id']) + group=group) def delete_consistencygroup(self, ctxt, group): - host = utils.extract_host(group['host']) - cctxt = self.client.prepare(server=host, version='1.18') + host = utils.extract_host(group.host) + cctxt = self.client.prepare(server=host, version='1.26') cctxt.cast(ctxt, 'delete_consistencygroup', - group_id=group['id']) + group=group) def update_consistencygroup(self, ctxt, group, add_volumes=None, remove_volumes=None): - host = utils.extract_host(group['host']) - cctxt = self.client.prepare(server=host, version='1.21') + host = utils.extract_host(group.host) + cctxt = self.client.prepare(server=host, version='1.26') cctxt.cast(ctxt, 'update_consistencygroup', - group_id=group['id'], + group=group, add_volumes=add_volumes, remove_volumes=remove_volumes) - def create_consistencygroup_from_src(self, ctxt, group, host, - cgsnapshot=None, + def create_consistencygroup_from_src(self, ctxt, group, cgsnapshot=None, source_cg=None): - new_host = utils.extract_host(host) - cctxt = self.client.prepare(server=new_host, version='1.25') + new_host = utils.extract_host(group.host) + cctxt = self.client.prepare(server=new_host, version='1.26') cctxt.cast(ctxt, 'create_consistencygroup_from_src', - group_id=group['id'], + group=group, cgsnapshot_id=cgsnapshot['id'] if cgsnapshot else None, - source_cgid=source_cg['id'] if source_cg else None) + source_cg=source_cg) def create_cgsnapshot(self, ctxt, group, cgsnapshot): host = utils.extract_host(group['host']) - cctxt = self.client.prepare(server=host, version='1.18') + cctxt = self.client.prepare(server=host, version='1.26') cctxt.cast(ctxt, 'create_cgsnapshot', - group_id=group['id'], + group=group, cgsnapshot_id=cgsnapshot['id']) def delete_cgsnapshot(self, ctxt, cgsnapshot, host): diff --git a/cinder/volume/utils.py b/cinder/volume/utils.py index b1095cba0..c2d297bbd 100644 --- a/cinder/volume/utils.py +++ b/cinder/volume/utils.py @@ -201,13 +201,13 @@ def notify_about_replication_error(context, volume, suffix, def _usage_from_consistencygroup(group_ref, **kw): - usage_info = dict(tenant_id=group_ref['project_id'], - user_id=group_ref['user_id'], - availability_zone=group_ref['availability_zone'], - consistencygroup_id=group_ref['id'], - name=group_ref['name'], - created_at=group_ref['created_at'].isoformat(), - status=group_ref['status']) + usage_info = dict(tenant_id=group_ref.project_id, + user_id=group_ref.user_id, + availability_zone=group_ref.availability_zone, + consistencygroup_id=group_ref.id, + name=group_ref.name, + created_at=group_ref.created_at.isoformat(), + status=group_ref.status) usage_info.update(kw) return usage_info