Consistency Groups
author    Xing Yang <xing.yang@emc.com>
Wed, 27 Aug 2014 03:53:34 +0000 (23:53 -0400)
committer Xing Yang <xing.yang@emc.com>
Wed, 27 Aug 2014 05:47:31 +0000 (01:47 -0400)
This patch enables Consistency Groups (CG) support in Cinder.
Phase 1 implements snapshots of CGs.

Design
------------------------------------------------
The workflow is as follows:

1) Create a CG, specifying all volume types that this CG must support.
The scheduler chooses a backend that supports all of the specified volume
types. The CG is empty when it is first created. The backend needs to report
consistencygroup_support = True. A volume type can have the following in its
extra specs: {'capabilities:consistencygroup_support': '<is> True'}.
If consistencygroup_support is not in the volume type's extra specs, the
scheduler adds it to filter_properties to make sure that it selects a
backend which reports the consistency group support capability.
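
Set volume type extra spec CLI (illustrative example only, not part of this
patch; type1 is a placeholder):
cinder type-key type1 set capabilities:consistencygroup_support='<is> True'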

Create CG CLI:
cinder consisgroup-create --volume-type type1,type2 mycg1

This will add a CG entry in the new consistencygroups table.
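
For reference, a roughly equivalent request through the new
consistencygroups API extension (sketch; the path and values are
placeholders):
POST /v2/{tenant_id}/consistencygroups
{"consistencygroup": {"name": "mycg1",
                      "description": "my cg",
                      "volume_types": "type1,type2"}}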

2) After the CG is created, create a new volume and add it to the CG.
Repeat until all volumes have been created for the CG.

Create volume CLI (with CG):
cinder create --volume-type type1 --consisgroup-id <CG uuid> 10

This will add a consistencygroup_id foreign key to the new volume
entry in the db.
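
For reference, a roughly equivalent request through the v2 volumes API,
which now accepts a consistencygroup_id attribute (sketch; values are
placeholders):
POST /v2/{tenant_id}/volumes
{"volume": {"size": 10,
            "volume_type": "type1",
            "consistencygroup_id": "<CG uuid>"}}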

3) Create a snapshot of the CG (cgsnapshot).

Create cgsnapshot CLI:
cinder cgsnapshot-create <CG uuid>

This will add a cgsnapshot entry to the new cgsnapshots table, create a
snapshot for each volume in the CG, and add a cgsnapshot_id foreign key
to each newly created snapshot entry in the db.
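
For reference, a roughly equivalent request through the new cgsnapshots
API extension (sketch; values are placeholders):
POST /v2/{tenant_id}/cgsnapshots
{"cgsnapshot": {"consistencygroup_id": "<CG uuid>",
                "name": "mycgsnap1",
                "description": "my cg snapshot"}}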

DocImpact
Implements: blueprint consistency-groups

Change-Id: Ic105698aaad86ee30ef57ecf5107c224fdadf724

39 files changed:
cinder/api/contrib/cgsnapshots.py [new file with mode: 0644]
cinder/api/contrib/consistencygroups.py [new file with mode: 0644]
cinder/api/v2/views/volumes.py
cinder/api/v2/volumes.py
cinder/api/views/cgsnapshots.py [new file with mode: 0644]
cinder/api/views/consistencygroups.py [new file with mode: 0644]
cinder/common/config.py
cinder/consistencygroup/__init__.py [new file with mode: 0644]
cinder/consistencygroup/api.py [new file with mode: 0644]
cinder/db/api.py
cinder/db/sqlalchemy/api.py
cinder/db/sqlalchemy/migrate_repo/versions/025_add_consistencygroup.py [new file with mode: 0644]
cinder/db/sqlalchemy/migrate_repo/versions/026_add_consistencygroup_quota_class.py [new file with mode: 0644]
cinder/db/sqlalchemy/models.py
cinder/exception.py
cinder/quota.py
cinder/scheduler/driver.py
cinder/scheduler/filter_scheduler.py
cinder/scheduler/manager.py
cinder/scheduler/rpcapi.py
cinder/tests/api/contrib/test_cgsnapshots.py [new file with mode: 0644]
cinder/tests/api/contrib/test_consistencygroups.py [new file with mode: 0644]
cinder/tests/api/v2/test_volumes.py
cinder/tests/policy.json
cinder/tests/scheduler/fakes.py
cinder/tests/scheduler/test_filter_scheduler.py
cinder/tests/test_create_volume_flow.py
cinder/tests/test_volume.py
cinder/tests/test_volume_rpcapi.py
cinder/tests/utils.py
cinder/volume/api.py
cinder/volume/driver.py
cinder/volume/flows/api/create_volume.py
cinder/volume/flows/manager/create_volume.py
cinder/volume/manager.py
cinder/volume/rpcapi.py
cinder/volume/utils.py
etc/cinder/cinder.conf.sample
etc/cinder/policy.json

diff --git a/cinder/api/contrib/cgsnapshots.py b/cinder/api/contrib/cgsnapshots.py
new file mode 100644 (file)
index 0000000..4eb728e
--- /dev/null
@@ -0,0 +1,219 @@
+# Copyright (C) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""The cgsnapshots api."""
+
+
+import webob
+from webob import exc
+
+from cinder.api import common
+from cinder.api import extensions
+from cinder.api.openstack import wsgi
+from cinder.api.views import cgsnapshots as cgsnapshot_views
+from cinder.api import xmlutil
+from cinder import consistencygroup as consistencygroupAPI
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder import utils
+
+LOG = logging.getLogger(__name__)
+
+
+def make_cgsnapshot(elem):
+    elem.set('id')
+    elem.set('consistencygroup_id')
+    elem.set('status')
+    elem.set('created_at')
+    elem.set('name')
+    elem.set('description')
+
+
+class CgsnapshotTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('cgsnapshot', selector='cgsnapshot')
+        make_cgsnapshot(root)
+        alias = Cgsnapshots.alias
+        namespace = Cgsnapshots.namespace
+        return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
+
+
+class CgsnapshotsTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('cgsnapshots')
+        elem = xmlutil.SubTemplateElement(root, 'cgsnapshot',
+                                          selector='cgsnapshots')
+        make_cgsnapshot(elem)
+        alias = Cgsnapshots.alias
+        namespace = Cgsnapshots.namespace
+        return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
+
+
+class CreateDeserializer(wsgi.MetadataXMLDeserializer):
+    def default(self, string):
+        dom = utils.safe_minidom_parse_string(string)
+        cgsnapshot = self._extract_cgsnapshot(dom)
+        return {'body': {'cgsnapshot': cgsnapshot}}
+
+    def _extract_cgsnapshot(self, node):
+        cgsnapshot = {}
+        cgsnapshot_node = self.find_first_child_named(node, 'cgsnapshot')
+
+        attributes = ['name',
+                      'description']
+
+        for attr in attributes:
+            if cgsnapshot_node.getAttribute(attr):
+                cgsnapshot[attr] = cgsnapshot_node.getAttribute(attr)
+        return cgsnapshot
+
+
+class CgsnapshotsController(wsgi.Controller):
+    """The cgsnapshots API controller for the OpenStack API."""
+
+    _view_builder_class = cgsnapshot_views.ViewBuilder
+
+    def __init__(self):
+        self.cgsnapshot_api = consistencygroupAPI.API()
+        super(CgsnapshotsController, self).__init__()
+
+    @wsgi.serializers(xml=CgsnapshotTemplate)
+    def show(self, req, id):
+        """Return data about the given cgsnapshot."""
+        LOG.debug('show called for member %s', id)
+        context = req.environ['cinder.context']
+
+        try:
+            cgsnapshot = self.cgsnapshot_api.get_cgsnapshot(
+                context,
+                cgsnapshot_id=id)
+        except exception.CgSnapshotNotFound as error:
+            raise exc.HTTPNotFound(explanation=error.msg)
+
+        return self._view_builder.detail(req, cgsnapshot)
+
+    def delete(self, req, id):
+        """Delete a cgsnapshot."""
+        LOG.debug('delete called for member %s', id)
+        context = req.environ['cinder.context']
+
+        LOG.info(_('Delete cgsnapshot with id: %s'), id, context=context)
+
+        try:
+            cgsnapshot = self.cgsnapshot_api.get_cgsnapshot(
+                context,
+                cgsnapshot_id=id)
+            self.cgsnapshot_api.delete_cgsnapshot(context, cgsnapshot)
+        except exception.CgSnapshotNotFound:
+            msg = _("Cgsnapshot could not be found")
+            raise exc.HTTPNotFound(explanation=msg)
+        except exception.InvalidCgSnapshot:
+            msg = _("Invalid cgsnapshot")
+            raise exc.HTTPBadRequest(explanation=msg)
+        except Exception:
+            msg = _("Failed cgsnapshot")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        return webob.Response(status_int=202)
+
+    @wsgi.serializers(xml=CgsnapshotsTemplate)
+    def index(self, req):
+        """Returns a summary list of cgsnapshots."""
+        return self._get_cgsnapshots(req, is_detail=False)
+
+    @wsgi.serializers(xml=CgsnapshotsTemplate)
+    def detail(self, req):
+        """Returns a detailed list of cgsnapshots."""
+        return self._get_cgsnapshots(req, is_detail=True)
+
+    def _get_cgsnapshots(self, req, is_detail):
+        """Returns a list of cgsnapshots, transformed through view builder."""
+        context = req.environ['cinder.context']
+        cgsnapshots = self.cgsnapshot_api.get_all_cgsnapshots(context)
+        limited_list = common.limited(cgsnapshots, req)
+
+        if is_detail:
+            cgsnapshots = self._view_builder.detail_list(req, limited_list)
+        else:
+            cgsnapshots = self._view_builder.summary_list(req, limited_list)
+        return cgsnapshots
+
+    @wsgi.response(202)
+    @wsgi.serializers(xml=CgsnapshotTemplate)
+    @wsgi.deserializers(xml=CreateDeserializer)
+    def create(self, req, body):
+        """Create a new cgsnapshot."""
+        LOG.debug('Creating new cgsnapshot %s', body)
+        if not self.is_valid_body(body, 'cgsnapshot'):
+            raise exc.HTTPBadRequest()
+
+        context = req.environ['cinder.context']
+
+        try:
+            cgsnapshot = body['cgsnapshot']
+        except KeyError:
+            msg = _("Incorrect request body format")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        try:
+            group_id = cgsnapshot['consistencygroup_id']
+        except KeyError:
+            msg = _("'consistencygroup_id' must be specified")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        try:
+            group = self.cgsnapshot_api.get(context, group_id)
+        except exception.NotFound:
+            msg = _("Consistency group could not be found")
+            raise exc.HTTPNotFound(explanation=msg)
+
+        name = cgsnapshot.get('name', None)
+        description = cgsnapshot.get('description', None)
+
+        LOG.info(_("Creating cgsnapshot %(name)s."),
+                 {'name': name},
+                 context=context)
+
+        try:
+            new_cgsnapshot = self.cgsnapshot_api.create_cgsnapshot(
+                context, group, name, description)
+        except exception.InvalidCgSnapshot as error:
+            raise exc.HTTPBadRequest(explanation=error.msg)
+        except exception.CgSnapshotNotFound as error:
+            raise exc.HTTPNotFound(explanation=error.msg)
+
+        retval = self._view_builder.summary(
+            req,
+            dict(new_cgsnapshot.iteritems()))
+
+        return retval
+
+
+class Cgsnapshots(extensions.ExtensionDescriptor):
+    """cgsnapshots support."""
+
+    name = 'Cgsnapshots'
+    alias = 'cgsnapshots'
+    namespace = 'http://docs.openstack.org/volume/ext/cgsnapshots/api/v1'
+    updated = '2014-08-18T00:00:00+00:00'
+
+    def get_resources(self):
+        resources = []
+        res = extensions.ResourceExtension(
+            Cgsnapshots.alias, CgsnapshotsController(),
+            collection_actions={'detail': 'GET'})
+        resources.append(res)
+        return resources
diff --git a/cinder/api/contrib/consistencygroups.py b/cinder/api/contrib/consistencygroups.py
new file mode 100644 (file)
index 0000000..9ae8863
--- /dev/null
@@ -0,0 +1,217 @@
+# Copyright (C) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""The consistencygroups api."""
+
+
+import webob
+from webob import exc
+
+from cinder.api import common
+from cinder.api import extensions
+from cinder.api.openstack import wsgi
+from cinder.api.views import consistencygroups as consistencygroup_views
+from cinder.api import xmlutil
+from cinder import consistencygroup as consistencygroupAPI
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder import utils
+
+LOG = logging.getLogger(__name__)
+
+
+def make_consistencygroup(elem):
+    elem.set('id')
+    elem.set('status')
+    elem.set('availability_zone')
+    elem.set('created_at')
+    elem.set('name')
+    elem.set('description')
+
+
+class ConsistencyGroupTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('consistencygroup',
+                                       selector='consistencygroup')
+        make_consistencygroup(root)
+        alias = Consistencygroups.alias
+        namespace = Consistencygroups.namespace
+        return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
+
+
+class ConsistencyGroupsTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('consistencygroups')
+        elem = xmlutil.SubTemplateElement(root, 'consistencygroup',
+                                          selector='consistencygroups')
+        make_consistencygroup(elem)
+        alias = Consistencygroups.alias
+        namespace = Consistencygroups.namespace
+        return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
+
+
+class CreateDeserializer(wsgi.MetadataXMLDeserializer):
+    def default(self, string):
+        dom = utils.safe_minidom_parse_string(string)
+        consistencygroup = self._extract_consistencygroup(dom)
+        return {'body': {'consistencygroup': consistencygroup}}
+
+    def _extract_consistencygroup(self, node):
+        consistencygroup = {}
+        consistencygroup_node = self.find_first_child_named(
+            node,
+            'consistencygroup')
+
+        attributes = ['name',
+                      'description']
+
+        for attr in attributes:
+            if consistencygroup_node.getAttribute(attr):
+                consistencygroup[attr] = consistencygroup_node.\
+                    getAttribute(attr)
+        return consistencygroup
+
+
+class ConsistencyGroupsController(wsgi.Controller):
+    """The ConsistencyGroups API controller for the OpenStack API."""
+
+    _view_builder_class = consistencygroup_views.ViewBuilder
+
+    def __init__(self):
+        self.consistencygroup_api = consistencygroupAPI.API()
+        super(ConsistencyGroupsController, self).__init__()
+
+    @wsgi.serializers(xml=ConsistencyGroupTemplate)
+    def show(self, req, id):
+        """Return data about the given consistency group."""
+        LOG.debug('show called for member %s', id)
+        context = req.environ['cinder.context']
+
+        try:
+            consistencygroup = self.consistencygroup_api.get(
+                context,
+                group_id=id)
+        except exception.ConsistencyGroupNotFound as error:
+            raise exc.HTTPNotFound(explanation=error.msg)
+
+        return self._view_builder.detail(req, consistencygroup)
+
+    def delete(self, req, id, body):
+        """Delete a consistency group."""
+        LOG.debug('delete called for member %s', id)
+        context = req.environ['cinder.context']
+        force = False
+        if body:
+            cg_body = body['consistencygroup']
+            force = cg_body.get('force', False)
+
+        LOG.info(_('Delete consistency group with id: %s'), id,
+                 context=context)
+
+        try:
+            group = self.consistencygroup_api.get(context, id)
+            self.consistencygroup_api.delete(context, group, force)
+        except exception.ConsistencyGroupNotFound:
+            msg = _("Consistency group could not be found")
+            raise exc.HTTPNotFound(explanation=msg)
+        except exception.InvalidConsistencyGroup:
+            msg = _("Invalid consistency group")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        return webob.Response(status_int=202)
+
+    @wsgi.serializers(xml=ConsistencyGroupsTemplate)
+    def index(self, req):
+        """Returns a summary list of consistency groups."""
+        return self._get_consistencygroups(req, is_detail=False)
+
+    @wsgi.serializers(xml=ConsistencyGroupsTemplate)
+    def detail(self, req):
+        """Returns a detailed list of consistency groups."""
+        return self._get_consistencygroups(req, is_detail=True)
+
+    def _get_consistencygroups(self, req, is_detail):
+        """Returns a list of consistency groups through view builder."""
+        context = req.environ['cinder.context']
+        consistencygroups = self.consistencygroup_api.get_all(context)
+        limited_list = common.limited(consistencygroups, req)
+
+        if is_detail:
+            consistencygroups = self._view_builder.detail_list(req,
+                                                               limited_list)
+        else:
+            consistencygroups = self._view_builder.summary_list(req,
+                                                                limited_list)
+        return consistencygroups
+
+    @wsgi.response(202)
+    @wsgi.serializers(xml=ConsistencyGroupTemplate)
+    @wsgi.deserializers(xml=CreateDeserializer)
+    def create(self, req, body):
+        """Create a new consistency group."""
+        LOG.debug('Creating new consistency group %s', body)
+        if not self.is_valid_body(body, 'consistencygroup'):
+            raise exc.HTTPBadRequest()
+
+        context = req.environ['cinder.context']
+
+        try:
+            consistencygroup = body['consistencygroup']
+        except KeyError:
+            msg = _("Incorrect request body format")
+            raise exc.HTTPBadRequest(explanation=msg)
+        name = consistencygroup.get('name', None)
+        description = consistencygroup.get('description', None)
+        volume_types = consistencygroup.get('volume_types', None)
+        availability_zone = consistencygroup.get('availability_zone', None)
+
+        LOG.info(_("Creating consistency group %(name)s."),
+                 {'name': name},
+                 context=context)
+
+        try:
+            new_consistencygroup = self.consistencygroup_api.create(
+                context, name, description, cg_volume_types=volume_types,
+                availability_zone=availability_zone)
+        except exception.InvalidConsistencyGroup as error:
+            raise exc.HTTPBadRequest(explanation=error.msg)
+        except exception.InvalidVolumeType as error:
+            raise exc.HTTPBadRequest(explanation=error.msg)
+        except exception.ConsistencyGroupNotFound as error:
+            raise exc.HTTPNotFound(explanation=error.msg)
+
+        retval = self._view_builder.summary(
+            req,
+            dict(new_consistencygroup.iteritems()))
+        return retval
+
+
+class Consistencygroups(extensions.ExtensionDescriptor):
+    """consistency groups support."""
+
+    name = 'Consistencygroups'
+    alias = 'consistencygroups'
+    namespace = 'http://docs.openstack.org/volume/ext/consistencygroups/api/v1'
+    updated = '2014-08-18T00:00:00+00:00'
+
+    def get_resources(self):
+        resources = []
+        res = extensions.ResourceExtension(
+            Consistencygroups.alias, ConsistencyGroupsController(),
+            collection_actions={'detail': 'GET'},
+            member_actions={'delete': 'POST'})
+        resources.append(res)
+        return resources
index d19eb04dda8163037911da568ce6728cae320b4b..c67748daf184cb3825cd3f431c7fcf70ed484f59 100644 (file)
@@ -69,7 +69,8 @@ class ViewBuilder(common.ViewBuilder):
                 'user_id': volume.get('user_id'),
                 'bootable': str(volume.get('bootable')).lower(),
                 'encrypted': self._is_volume_encrypted(volume),
-                'replication_status': volume.get('replication_status')
+                'replication_status': volume.get('replication_status'),
+                'consistencygroup_id': volume.get('consistencygroup_id')
             }
         }
 
index 9cb980ba911e167d77509a6bfb3289c0aa8c5cdd..7e841806a59a4aac726a3e69b67477006ff1f82f 100644 (file)
@@ -25,6 +25,7 @@ from cinder.api import common
 from cinder.api.openstack import wsgi
 from cinder.api.v2.views import volumes as volume_views
 from cinder.api import xmlutil
+from cinder import consistencygroup as consistencygroupAPI
 from cinder import exception
 from cinder.i18n import _
 from cinder.openstack.common import log as logging
@@ -60,6 +61,7 @@ def make_volume(elem):
     elem.set('volume_type')
     elem.set('snapshot_id')
     elem.set('source_volid')
+    elem.set('consistencygroup_id')
 
     attachments = xmlutil.SubTemplateElement(elem, 'attachments')
     attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
@@ -120,7 +122,7 @@ class CommonDeserializer(wsgi.MetadataXMLDeserializer):
 
         attributes = ['name', 'description', 'size',
                       'volume_type', 'availability_zone', 'imageRef',
-                      'snapshot_id', 'source_volid']
+                      'snapshot_id', 'source_volid', 'consistencygroup_id']
         for attr in attributes:
             if volume_node.getAttribute(attr):
                 volume[attr] = volume_node.getAttribute(attr)
@@ -157,6 +159,7 @@ class VolumeController(wsgi.Controller):
 
     def __init__(self, ext_mgr):
         self.volume_api = cinder_volume.API()
+        self.consistencygroup_api = consistencygroupAPI.API()
         self.ext_mgr = ext_mgr
         super(VolumeController, self).__init__()
 
@@ -343,6 +346,19 @@ class VolumeController(wsgi.Controller):
         else:
             kwargs['source_replica'] = None
 
+        consistencygroup_id = volume.get('consistencygroup_id')
+        if consistencygroup_id is not None:
+            try:
+                kwargs['consistencygroup'] = \
+                    self.consistencygroup_api.get(context,
+                                                  consistencygroup_id)
+            except exception.NotFound:
+                explanation = _('Consistency group id:%s not found') % \
+                    consistencygroup_id
+                raise exc.HTTPNotFound(explanation=explanation)
+        else:
+            kwargs['consistencygroup'] = None
+
         size = volume.get('size', None)
         if size is None and kwargs['snapshot'] is not None:
             size = kwargs['snapshot']['volume_size']
diff --git a/cinder/api/views/cgsnapshots.py b/cinder/api/views/cgsnapshots.py
new file mode 100644 (file)
index 0000000..5d9186a
--- /dev/null
@@ -0,0 +1,68 @@
+# Copyright (C) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from cinder.api import common
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ViewBuilder(common.ViewBuilder):
+    """Model cgsnapshot API responses as a python dictionary."""
+
+    _collection_name = "cgsnapshots"
+
+    def __init__(self):
+        """Initialize view builder."""
+        super(ViewBuilder, self).__init__()
+
+    def summary_list(self, request, cgsnapshots):
+        """Show a list of cgsnapshots without many details."""
+        return self._list_view(self.summary, request, cgsnapshots)
+
+    def detail_list(self, request, cgsnapshots):
+        """Detailed view of a list of cgsnapshots ."""
+        return self._list_view(self.detail, request, cgsnapshots)
+
+    def summary(self, request, cgsnapshot):
+        """Generic, non-detailed view of a cgsnapshot."""
+        return {
+            'cgsnapshot': {
+                'id': cgsnapshot['id'],
+                'name': cgsnapshot['name']
+            }
+        }
+
+    def detail(self, request, cgsnapshot):
+        """Detailed view of a single cgsnapshot."""
+        return {
+            'cgsnapshot': {
+                'id': cgsnapshot.get('id'),
+                'consistencygroup_id': cgsnapshot.get('consistencygroup_id'),
+                'status': cgsnapshot.get('status'),
+                'created_at': cgsnapshot.get('created_at'),
+                'name': cgsnapshot.get('name'),
+                'description': cgsnapshot.get('description')
+            }
+        }
+
+    def _list_view(self, func, request, cgsnapshots):
+        """Provide a view for a list of cgsnapshots."""
+        cgsnapshots_list = [func(request, cgsnapshot)['cgsnapshot']
+                            for cgsnapshot in cgsnapshots]
+        cgsnapshots_dict = dict(cgsnapshots=cgsnapshots_list)
+
+        return cgsnapshots_dict
diff --git a/cinder/api/views/consistencygroups.py b/cinder/api/views/consistencygroups.py
new file mode 100644 (file)
index 0000000..b2458a0
--- /dev/null
@@ -0,0 +1,69 @@
+# Copyright (C) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from cinder.api import common
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ViewBuilder(common.ViewBuilder):
+    """Model consistencygroup API responses as a python dictionary."""
+
+    _collection_name = "consistencygroups"
+
+    def __init__(self):
+        """Initialize view builder."""
+        super(ViewBuilder, self).__init__()
+
+    def summary_list(self, request, consistencygroups):
+        """Show a list of consistency groups without many details."""
+        return self._list_view(self.summary, request, consistencygroups)
+
+    def detail_list(self, request, consistencygroups):
+        """Detailed view of a list of consistency groups ."""
+        return self._list_view(self.detail, request, consistencygroups)
+
+    def summary(self, request, consistencygroup):
+        """Generic, non-detailed view of a consistency group."""
+        return {
+            'consistencygroup': {
+                'id': consistencygroup['id'],
+                'name': consistencygroup['name']
+            }
+        }
+
+    def detail(self, request, consistencygroup):
+        """Detailed view of a single consistency group."""
+        return {
+            'consistencygroup': {
+                'id': consistencygroup.get('id'),
+                'status': consistencygroup.get('status'),
+                'availability_zone': consistencygroup.get('availability_zone'),
+                'created_at': consistencygroup.get('created_at'),
+                'name': consistencygroup.get('name'),
+                'description': consistencygroup.get('description')
+            }
+        }
+
+    def _list_view(self, func, request, consistencygroups):
+        """Provide a view for a list of consistency groups."""
+        consistencygroups_list = [
+            func(request, consistencygroup)['consistencygroup']
+            for consistencygroup in consistencygroups]
+        consistencygroups_dict = dict(consistencygroups=consistencygroups_list)
+
+        return consistencygroups_dict
index 2affb30eac63cbe2b30c2a5134ea4f84f3109b22..36d88a569ef54dc26a3b1f15b789140a2e781086 100644 (file)
@@ -197,6 +197,9 @@ global_opts = [
                help='The full class name of the volume transfer API class'),
     cfg.StrOpt('replication_api_class',
                default='cinder.replication.api.API',
-               help='The full class name of the volume replication API class'), ]
+               help='The full class name of the volume replication API class'),
+    cfg.StrOpt('consistencygroup_api_class',
+               default='cinder.consistencygroup.api.API',
+               help='The full class name of the consistencygroup API class'), ]
 
 CONF.register_opts(global_opts)
diff --git a/cinder/consistencygroup/__init__.py b/cinder/consistencygroup/__init__.py
new file mode 100644 (file)
index 0000000..f89791d
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Importing full names to not pollute the namespace and cause possible
+# collisions with use of 'from cinder.transfer import <foo>' elsewhere.
+
+
+from cinder.common import config
+import cinder.openstack.common.importutils
+
+
+CONF = config.CONF
+
+API = cinder.openstack.common.importutils.import_class(
+    CONF.consistencygroup_api_class)
diff --git a/cinder/consistencygroup/api.py b/cinder/consistencygroup/api.py
new file mode 100644 (file)
index 0000000..b1ae8e4
--- /dev/null
@@ -0,0 +1,416 @@
+# Copyright (C) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Handles all requests relating to consistency groups.
+"""
+
+
+import functools
+
+from oslo.config import cfg
+
+from cinder.db import base
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import excutils
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import timeutils
+import cinder.policy
+from cinder import quota
+from cinder.scheduler import rpcapi as scheduler_rpcapi
+from cinder.volume import api as volume_api
+from cinder.volume import rpcapi as volume_rpcapi
+from cinder.volume import volume_types
+
+
+CONF = cfg.CONF
+CONF.import_opt('storage_availability_zone', 'cinder.volume.manager')
+
+LOG = logging.getLogger(__name__)
+CGQUOTAS = quota.CGQUOTAS
+
+
+def wrap_check_policy(func):
+    """Check policy corresponding to the wrapped methods prior to execution.
+
+    This decorator requires the first 3 args of the wrapped function
+    to be (self, context, consistencygroup)
+    """
+    @functools.wraps(func)
+    def wrapped(self, context, target_obj, *args, **kwargs):
+        check_policy(context, func.__name__, target_obj)
+        return func(self, context, target_obj, *args, **kwargs)
+
+    return wrapped
+
+
+def check_policy(context, action, target_obj=None):
+    target = {
+        'project_id': context.project_id,
+        'user_id': context.user_id,
+    }
+    target.update(target_obj or {})
+    _action = 'consistencygroup:%s' % action
+    cinder.policy.enforce(context, _action, target)
+
+
+class API(base.Base):
+    """API for interacting with the volume manager for consistency groups."""
+
+    def __init__(self, db_driver=None):
+        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
+        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
+        self.availability_zone_names = ()
+        self.volume_api = volume_api.API()
+
+        super(API, self).__init__(db_driver)
+
+    def _valid_availability_zone(self, availability_zone):
+        if availability_zone in self.availability_zone_names:
+            return True
+        if CONF.storage_availability_zone == availability_zone:
+            return True
+        azs = self.volume_api.list_availability_zones()
+        self.availability_zone_names = [az['name'] for az in azs]
+        return availability_zone in self.availability_zone_names
+
+    def _extract_availability_zone(self, availability_zone):
+        if availability_zone is None:
+            if CONF.default_availability_zone:
+                availability_zone = CONF.default_availability_zone
+            else:
+                # For backwards compatibility use the storage_availability_zone
+                availability_zone = CONF.storage_availability_zone
+
+        valid = self._valid_availability_zone(availability_zone)
+        if not valid:
+            msg = _("Availability zone '%s' is invalid") % (availability_zone)
+            LOG.warn(msg)
+            raise exception.InvalidInput(reason=msg)
+
+        return availability_zone
+
+    def create(self, context, name, description,
+               cg_volume_types=None, availability_zone=None):
+
+        check_policy(context, 'create')
+        volume_type_list = None
+        if cg_volume_types:
+            volume_type_list = cg_volume_types.split(',')
+
+        req_volume_types = []
+        if volume_type_list:
+            req_volume_types = (self.db.volume_types_get_by_name_or_id(
+                context, volume_type_list))
+
+        if not req_volume_types:
+            volume_type = volume_types.get_default_volume_type()
+            req_volume_types.append(volume_type)
+
+        req_volume_type_ids = ""
+        for voltype in req_volume_types:
+            if voltype:
+                req_volume_type_ids = (
+                    req_volume_type_ids + voltype.get('id') + ",")
+        if len(req_volume_type_ids) == 0:
+            req_volume_type_ids = None
+
+        availability_zone = self._extract_availability_zone(availability_zone)
+
+        options = {'user_id': context.user_id,
+                   'project_id': context.project_id,
+                   'availability_zone': availability_zone,
+                   'status': "creating",
+                   'name': name,
+                   'description': description,
+                   'volume_type_id': req_volume_type_ids}
+
+        group = None
+        try:
+            group = self.db.consistencygroup_create(context, options)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_("Error occurred when creating consistency group"
+                            " %s."), name)
+
+        request_spec_list = []
+        filter_properties_list = []
+        for req_volume_type in req_volume_types:
+            request_spec = {'volume_type': req_volume_type.copy(),
+                            'consistencygroup_id': group['id']}
+            filter_properties = {}
+            request_spec_list.append(request_spec)
+            filter_properties_list.append(filter_properties)
+
+        # Update quota for consistencygroups
+        self.update_quota(context, group['id'])
+
+        self._cast_create_consistencygroup(context, group['id'],
+                                           request_spec_list,
+                                           filter_properties_list)
+
+        return group
+
+    def _cast_create_consistencygroup(self, context, group_id,
+                                      request_spec_list,
+                                      filter_properties_list):
+
+        try:
+            for request_spec in request_spec_list:
+                volume_type = request_spec.get('volume_type', None)
+                volume_type_id = None
+                if volume_type:
+                    volume_type_id = volume_type.get('id', None)
+
+                specs = {}
+                if volume_type_id:
+                    qos_specs = volume_types.get_volume_type_qos_specs(
+                        volume_type_id)
+                    specs = qos_specs['qos_specs']
+                if not specs:
+                    # to make sure we don't pass empty dict
+                    specs = None
+
+                volume_properties = {
+                    'size': 0,  # Need to populate size for the scheduler
+                    'user_id': context.user_id,
+                    'project_id': context.project_id,
+                    'status': 'creating',
+                    'attach_status': 'detached',
+                    'encryption_key_id': request_spec.get('encryption_key_id',
+                                                          None),
+                    'display_description': request_spec.get('description',
+                                                            None),
+                    'display_name': request_spec.get('name', None),
+                    'volume_type_id': volume_type_id,
+                }
+
+                request_spec['volume_properties'] = volume_properties
+                request_spec['qos_specs'] = specs
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                try:
+                    self.db.consistencygroup_destroy(context, group_id)
+                finally:
+                    LOG.error(_("Error occurred when building "
+                                "request spec list for consistency group "
+                                "%s."), group_id)
+
+        # Cast to the scheduler and let it handle whatever is needed
+        # to select the target host for this group.
+        self.scheduler_rpcapi.create_consistencygroup(
+            context,
+            CONF.volume_topic,
+            group_id,
+            request_spec_list=request_spec_list,
+            filter_properties_list=filter_properties_list)
+
+    def update_quota(self, context, group_id):
+        reserve_opts = {'consistencygroups': 1}
+        try:
+            reservations = CGQUOTAS.reserve(context, **reserve_opts)
+            CGQUOTAS.commit(context, reservations)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                try:
+                    self.db.consistencygroup_destroy(context, group_id)
+                finally:
+                    LOG.error(_("Failed to update quota for creating"
+                                "consistency group %s."), group_id)
+
+    @wrap_check_policy
+    def delete(self, context, group, force=False):
+        if not force and group['status'] not in ["available", "error"]:
+            msg = _("Consistency group status must be available or error, "
+                    "but current status is: %s") % group['status']
+            raise exception.InvalidConsistencyGroup(reason=msg)
+
+        cgsnaps = self.db.cgsnapshot_get_all_by_group(
+            context.elevated(),
+            group['id'])
+        if cgsnaps:
+            msg = _("Consistency group %s still has dependent "
+                    "cgsnapshots.") % group['id']
+            LOG.error(msg)
+            raise exception.InvalidConsistencyGroup(reason=msg)
+
+        volumes = self.db.volume_get_all_by_group(context.elevated(),
+                                                  group['id'])
+
+        if volumes and not force:
+            msg = _("Consistency group %s still contains volumes. "
+                    "The force flag is required to delete it.") % group['id']
+            LOG.error(msg)
+            raise exception.InvalidConsistencyGroup(reason=msg)
+
+        for volume in volumes:
+            if volume['attach_status'] == "attached":
+                msg = _("Volume in consistency group %s is attached. "
+                        "Need to detach first.") % group['id']
+                LOG.error(msg)
+                raise exception.InvalidConsistencyGroup(reason=msg)
+
+            snapshots = self.db.snapshot_get_all_for_volume(context,
+                                                            volume['id'])
+            if snapshots:
+                msg = _("Volume in consistency group still has "
+                        "dependent snapshots.")
+                LOG.error(msg)
+                raise exception.InvalidConsistencyGroup(reason=msg)
+
+        now = timeutils.utcnow()
+        self.db.consistencygroup_update(context, group['id'],
+                                        {'status': 'deleting',
+                                         'terminated_at': now})
+
+        self.volume_rpcapi.delete_consistencygroup(context, group)
+
+    @wrap_check_policy
+    def update(self, context, group, fields):
+        self.db.consistencygroup_update(context, group['id'], fields)
+
+    def get(self, context, group_id):
+        rv = self.db.consistencygroup_get(context, group_id)
+        group = dict(rv.iteritems())
+        check_policy(context, 'get', group)
+        return group
+
+    def get_all(self, context, marker=None, limit=None, sort_key='created_at',
+                sort_dir='desc', filters=None):
+        check_policy(context, 'get_all')
+        if filters is None:
+            filters = {}
+
+        try:
+            if limit is not None:
+                limit = int(limit)
+                if limit < 0:
+                    msg = _('limit param must be positive')
+                    raise exception.InvalidInput(reason=msg)
+        except ValueError:
+            msg = _('limit param must be an integer')
+            raise exception.InvalidInput(reason=msg)
+
+        if filters:
+            LOG.debug("Searching by: %s" % str(filters))
+
+        if (context.is_admin and 'all_tenants' in filters):
+            # Need to remove all_tenants to pass the filtering below.
+            del filters['all_tenants']
+            groups = self.db.consistencygroup_get_all(context)
+        else:
+            groups = self.db.consistencygroup_get_all_by_project(
+                context,
+                context.project_id)
+
+        return groups
+
+    def get_group(self, context, group_id):
+        check_policy(context, 'get_group')
+        rv = self.db.consistencygroup_get(context, group_id)
+        return dict(rv.iteritems())
+
+    def create_cgsnapshot(self, context,
+                          group, name,
+                          description):
+        return self._create_cgsnapshot(context, group, name, description)
+
+    def _create_cgsnapshot(self, context,
+                           group, name, description):
+        options = {'consistencygroup_id': group['id'],
+                   'user_id': context.user_id,
+                   'project_id': context.project_id,
+                   'status': "creating",
+                   'name': name,
+                   'description': description}
+
+        try:
+            cgsnapshot = self.db.cgsnapshot_create(context, options)
+            cgsnapshot_id = cgsnapshot['id']
+
+            volumes = self.db.volume_get_all_by_group(
+                context.elevated(),
+                cgsnapshot['consistencygroup_id'])
+
+            if not volumes:
+                msg = _("Consistency group is empty. No cgsnapshot "
+                        "will be created.")
+                raise exception.InvalidConsistencyGroup(reason=msg)
+
+            snap_name = cgsnapshot['name']
+            snap_desc = cgsnapshot['description']
+            self.volume_api.create_snapshots_in_db(
+                context, volumes, snap_name, snap_desc, True, cgsnapshot_id)
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                try:
+                    self.db.cgsnapshot_destroy(context, cgsnapshot_id)
+                finally:
+                    LOG.error(_("Error occurred when creating cgsnapshot"
+                                " %s."), cgsnapshot_id)
+
+        self.volume_rpcapi.create_cgsnapshot(context, group, cgsnapshot)
+
+        return cgsnapshot
+
+    def delete_cgsnapshot(self, context, cgsnapshot, force=False):
+        if cgsnapshot['status'] not in ["available", "error"]:
+            msg = _("Cgsnapshot status must be available or error")
+            raise exception.InvalidCgSnapshot(reason=msg)
+        self.db.cgsnapshot_update(context, cgsnapshot['id'],
+                                  {'status': 'deleting'})
+        group = self.db.consistencygroup_get(
+            context,
+            cgsnapshot['consistencygroup_id'])
+        self.volume_rpcapi.delete_cgsnapshot(context.elevated(), cgsnapshot,
+                                             group['host'])
+
+    def update_cgsnapshot(self, context, cgsnapshot, fields):
+        self.db.cgsnapshot_update(context, cgsnapshot['id'], fields)
+
+    def get_cgsnapshot(self, context, cgsnapshot_id):
+        check_policy(context, 'get_cgsnapshot')
+        rv = self.db.cgsnapshot_get(context, cgsnapshot_id)
+        return dict(rv.iteritems())
+
+    def get_all_cgsnapshots(self, context, search_opts=None):
+        check_policy(context, 'get_all_cgsnapshots')
+
+        search_opts = search_opts or {}
+
+        if (context.is_admin and 'all_tenants' in search_opts):
+            # Need to remove all_tenants to pass the filtering below.
+            del search_opts['all_tenants']
+            cgsnapshots = self.db.cgsnapshot_get_all(context)
+        else:
+            cgsnapshots = self.db.cgsnapshot_get_all_by_project(
+                context.elevated(), context.project_id)
+
+        if search_opts:
+            LOG.debug("Searching by: %s" % search_opts)
+
+            results = []
+            not_found = object()
+            for cgsnapshot in cgsnapshots:
+                for opt, value in search_opts.iteritems():
+                    if cgsnapshot.get(opt, not_found) != value:
+                        break
+                else:
+                    results.append(cgsnapshot)
+            cgsnapshots = results
+        return cgsnapshots
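
For illustration, the new consistencygroup API added in this file can be
driven from other Cinder code roughly as follows (sketch only; ctxt stands
in for a request context and the names and ids are placeholders):

    from cinder import consistencygroup

    # consistencygroup.API resolves to the class named by the new
    # consistencygroup_api_class option (cinder.consistencygroup.api.API
    # by default).
    cg_api = consistencygroup.API()

    # Create an empty CG for the given volume types, then snapshot it once
    # volumes have been added to it.
    group = cg_api.create(ctxt, 'mycg1', 'my cg',
                          cg_volume_types='type1,type2')
    cgsnapshot = cg_api.create_cgsnapshot(ctxt, group, 'mycgsnap1',
                                          'my cg snapshot')
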
index 2e5b82f258feb9236dcb456feefc36a0f15c3881..034504717656613a66c6a9f4afedd32d1860535c 100644 (file)
@@ -222,6 +222,11 @@ def volume_get_all_by_host(context, host):
     return IMPL.volume_get_all_by_host(context, host)
 
 
+def volume_get_all_by_group(context, group_id):
+    """Get all volumes belonging to a consistency group."""
+    return IMPL.volume_get_all_by_group(context, group_id)
+
+
 def volume_get_all_by_project(context, project_id, marker, limit, sort_key,
                               sort_dir, filters=None):
     """Get all volumes belonging to a project."""
@@ -271,6 +276,11 @@ def snapshot_get_all_by_project(context, project_id):
     return IMPL.snapshot_get_all_by_project(context, project_id)
 
 
+def snapshot_get_all_for_cgsnapshot(context, project_id):
+    """Get all snapshots belonging to a cgsnapshot."""
+    return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id)
+
+
 def snapshot_get_all_for_volume(context, volume_id):
     """Get all snapshots for a volume."""
     return IMPL.snapshot_get_all_for_volume(context, volume_id)
@@ -379,6 +389,11 @@ def volume_type_get_by_name(context, name):
     return IMPL.volume_type_get_by_name(context, name)
 
 
+def volume_types_get_by_name_or_id(context, volume_type_list):
+    """Get volume types by name or id."""
+    return IMPL.volume_types_get_by_name_or_id(context, volume_type_list)
+
+
 def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
     """Get volume types that are associated with specific qos specs."""
     return IMPL.volume_type_qos_associations_get(context,
@@ -792,3 +807,90 @@ def transfer_destroy(context, transfer_id):
 def transfer_accept(context, transfer_id, user_id, project_id):
     """Accept a volume transfer."""
     return IMPL.transfer_accept(context, transfer_id, user_id, project_id)
+
+
+###################
+
+
+def consistencygroup_get(context, consistencygroup_id):
+    """Get a consistencygroup or raise if it does not exist."""
+    return IMPL.consistencygroup_get(context, consistencygroup_id)
+
+
+def consistencygroup_get_all(context):
+    """Get all consistencygroups."""
+    return IMPL.consistencygroup_get_all(context)
+
+
+def consistencygroup_get_all_by_host(context, host):
+    """Get all consistencygroups belonging to a host."""
+    return IMPL.consistencygroup_get_all_by_host(context, host)
+
+
+def consistencygroup_create(context, values):
+    """Create a consistencygroup from the values dictionary."""
+    return IMPL.consistencygroup_create(context, values)
+
+
+def consistencygroup_get_all_by_project(context, project_id):
+    """Get all consistencygroups belonging to a project."""
+    return IMPL.consistencygroup_get_all_by_project(context, project_id)
+
+
+def consistencygroup_update(context, consistencygroup_id, values):
+    """Set the given properties on a consistencygroup and update it.
+
+    Raises NotFound if consistencygroup does not exist.
+    """
+    return IMPL.consistencygroup_update(context, consistencygroup_id, values)
+
+
+def consistencygroup_destroy(context, consistencygroup_id):
+    """Destroy the consistencygroup or raise if it does not exist."""
+    return IMPL.consistencygroup_destroy(context, consistencygroup_id)
+
+
+###################
+
+
+def cgsnapshot_get(context, cgsnapshot_id):
+    """Get a cgsnapshot or raise if it does not exist."""
+    return IMPL.cgsnapshot_get(context, cgsnapshot_id)
+
+
+def cgsnapshot_get_all(context):
+    """Get all cgsnapshots."""
+    return IMPL.cgsnapshot_get_all(context)
+
+
+def cgsnapshot_get_all_by_host(context, host):
+    """Get all cgsnapshots belonging to a host."""
+    return IMPL.cgsnapshot_get_all_by_host(context, host)
+
+
+def cgsnapshot_create(context, values):
+    """Create a cgsnapshot from the values dictionary."""
+    return IMPL.cgsnapshot_create(context, values)
+
+
+def cgsnapshot_get_all_by_group(context, group_id):
+    """Get all cgsnapshots belonging to a consistency group."""
+    return IMPL.cgsnapshot_get_all_by_group(context, group_id)
+
+
+def cgsnapshot_get_all_by_project(context, project_id):
+    """Get all cgsnapshots belonging to a project."""
+    return IMPL.cgsnapshot_get_all_by_project(context, project_id)
+
+
+def cgsnapshot_update(context, cgsnapshot_id, values):
+    """Set the given properties on a cgsnapshot and update it.
+
+    Raises NotFound if cgsnapshot does not exist.
+    """
+    return IMPL.cgsnapshot_update(context, cgsnapshot_id, values)
+
+
+def cgsnapshot_destroy(context, cgsnapshot_id):
+    """Destroy the cgsnapshot or raise if it does not exist."""
+    return IMPL.cgsnapshot_destroy(context, cgsnapshot_id)
index 1db228df21a5641e8a4e64b42c3766479b85922b..252a43f12102d13938f147844e241b6046a10071 100644 (file)
@@ -284,10 +284,19 @@ def _sync_gigabytes(context, project_id, session, volume_type_id=None,
     return {key: vol_gigs + snap_gigs}
 
 
+def _sync_consistencygroups(context, project_id, session,
+                            volume_type_id=None,
+                            volume_type_name=None):
+    (_junk, groups) = _consistencygroup_data_get_for_project(
+        context, project_id, session=session)
+    key = 'consistencygroups'
+    return {key: groups}
+
 QUOTA_SYNC_FUNCTIONS = {
     '_sync_volumes': _sync_volumes,
     '_sync_snapshots': _sync_snapshots,
     '_sync_gigabytes': _sync_gigabytes,
+    '_sync_consistencygroups': _sync_consistencygroups,
 }
 
 
@@ -1153,12 +1162,14 @@ def _volume_get_query(context, session=None, project_only=False):
                            project_only=project_only).\
             options(joinedload('volume_metadata')).\
             options(joinedload('volume_admin_metadata')).\
-            options(joinedload('volume_type'))
+            options(joinedload('volume_type')).\
+            options(joinedload('consistencygroup'))
     else:
         return model_query(context, models.Volume, session=session,
                            project_only=project_only).\
             options(joinedload('volume_metadata')).\
-            options(joinedload('volume_type'))
+            options(joinedload('volume_type')).\
+            options(joinedload('consistencygroup'))
 
 
 @require_context
@@ -1211,6 +1222,12 @@ def volume_get_all_by_host(context, host):
     return _volume_get_query(context).filter_by(host=host).all()
 
 
+@require_admin_context
+def volume_get_all_by_group(context, group_id):
+    return _volume_get_query(context).filter_by(consistencygroup_id=group_id).\
+        all()
+
+
 @require_context
 def volume_get_all_by_project(context, project_id, marker, limit, sort_key,
                               sort_dir, filters=None):
@@ -1638,6 +1655,16 @@ def snapshot_get_all_for_volume(context, volume_id):
         all()
 
 
+@require_context
+def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id):
+    return model_query(context, models.Snapshot, read_deleted='no',
+                       project_only=True).\
+        filter_by(cgsnapshot_id=cgsnapshot_id).\
+        options(joinedload('volume')).\
+        options(joinedload('snapshot_metadata')).\
+        all()
+
+
 @require_context
 def snapshot_get_all_by_project(context, project_id):
     authorize_project_context(context, project_id)
@@ -1886,6 +1913,19 @@ def volume_type_get_by_name(context, name):
     return _volume_type_get_by_name(context, name)
 
 
+@require_context
+def volume_types_get_by_name_or_id(context, volume_type_list):
+    """Return a dict describing specific volume_type."""
+    req_volume_types = []
+    for vol_t in volume_type_list:
+        if not uuidutils.is_uuid_like(vol_t):
+            vol_type = _volume_type_get_by_name(context, vol_t)
+        else:
+            vol_type = _volume_type_get(context, vol_t)
+        req_volume_types.append(vol_type)
+    return req_volume_types
+
+
 @require_admin_context
 def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
     read_deleted = "yes" if inactive else "no"
@@ -2852,3 +2892,194 @@ def transfer_accept(context, transfer_id, user_id, project_id):
             update({'deleted': True,
                     'deleted_at': timeutils.utcnow(),
                     'updated_at': literal_column('updated_at')})
+
+
+###############################
+
+
+@require_admin_context
+def _consistencygroup_data_get_for_project(context, project_id,
+                                           session=None):
+    query = model_query(context,
+                        func.count(models.ConsistencyGroup.id),
+                        read_deleted="no",
+                        session=session).\
+        filter_by(project_id=project_id)
+
+    result = query.first()
+
+    return (0, result[0] or 0)
+
+
+@require_admin_context
+def consistencygroup_data_get_for_project(context, project_id):
+    return _consistencygroup_data_get_for_project(context, project_id)
+
+
+@require_context
+def _consistencygroup_get(context, consistencygroup_id, session=None):
+    result = model_query(context, models.ConsistencyGroup, session=session,
+                         project_only=True).\
+        filter_by(id=consistencygroup_id).\
+        first()
+
+    if not result:
+        raise exception.ConsistencyGroupNotFound(
+            consistencygroup_id=consistencygroup_id)
+
+    return result
+
+
+@require_context
+def consistencygroup_get(context, consistencygroup_id):
+    return _consistencygroup_get(context, consistencygroup_id)
+
+
+@require_admin_context
+def consistencygroup_get_all(context):
+    return model_query(context, models.ConsistencyGroup).all()
+
+
+@require_admin_context
+def consistencygroup_get_all_by_host(context, host):
+    return model_query(context, models.ConsistencyGroup).\
+        filter_by(host=host).all()
+
+
+@require_context
+def consistencygroup_get_all_by_project(context, project_id):
+    authorize_project_context(context, project_id)
+
+    return model_query(context, models.ConsistencyGroup).\
+        filter_by(project_id=project_id).all()
+
+
+@require_context
+def consistencygroup_create(context, values):
+    consistencygroup = models.ConsistencyGroup()
+    if not values.get('id'):
+        values['id'] = str(uuid.uuid4())
+
+    session = get_session()
+    with session.begin():
+        consistencygroup.update(values)
+        session.add(consistencygroup)
+
+        return _consistencygroup_get(context, values['id'], session=session)
+
+
+@require_context
+def consistencygroup_update(context, consistencygroup_id, values):
+    session = get_session()
+    with session.begin():
+        result = model_query(context, models.ConsistencyGroup,
+                             project_only=True).\
+            filter_by(id=consistencygroup_id).\
+            first()
+
+        if not result:
+            raise exception.ConsistencyGroupNotFound(
+                _("No consistency group with id %s") % consistencygroup_id)
+
+        result.update(values)
+        result.save(session=session)
+    return result
+
+
+@require_admin_context
+def consistencygroup_destroy(context, consistencygroup_id):
+    session = get_session()
+    with session.begin():
+        model_query(context, models.ConsistencyGroup, session=session).\
+            filter_by(id=consistencygroup_id).\
+            update({'status': 'deleted',
+                    'deleted': True,
+                    'deleted_at': timeutils.utcnow(),
+                    'updated_at': literal_column('updated_at')})
+
+
+###############################
+
+
+@require_context
+def _cgsnapshot_get(context, cgsnapshot_id, session=None):
+    result = model_query(context, models.Cgsnapshot, session=session,
+                         project_only=True).\
+        filter_by(id=cgsnapshot_id).\
+        first()
+
+    if not result:
+        raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id)
+
+    return result
+
+
+@require_context
+def cgsnapshot_get(context, cgsnapshot_id):
+    return _cgsnapshot_get(context, cgsnapshot_id)
+
+
+@require_admin_context
+def cgsnapshot_get_all(context):
+    return model_query(context, models.Cgsnapshot).all()
+
+
+@require_admin_context
+def cgsnapshot_get_all_by_host(context, host):
+    return model_query(context, models.Cgsnapshot).filter_by(host=host).all()
+
+
+@require_admin_context
+def cgsnapshot_get_all_by_group(context, group_id):
+    return model_query(context, models.Cgsnapshot).\
+        filter_by(consistencygroup_id=group_id).all()
+
+
+@require_context
+def cgsnapshot_get_all_by_project(context, project_id):
+    authorize_project_context(context, project_id)
+
+    return model_query(context, models.Cgsnapshot).\
+        filter_by(project_id=project_id).all()
+
+
+@require_context
+def cgsnapshot_create(context, values):
+    cgsnapshot = models.Cgsnapshot()
+    if not values.get('id'):
+        values['id'] = str(uuid.uuid4())
+
+    session = get_session()
+    with session.begin():
+        cgsnapshot.update(values)
+        session.add(cgsnapshot)
+
+        return _cgsnapshot_get(context, values['id'], session=session)
+
+
+@require_context
+def cgsnapshot_update(context, cgsnapshot_id, values):
+    session = get_session()
+    with session.begin():
+        result = model_query(context, models.Cgsnapshot, project_only=True).\
+            filter_by(id=cgsnapshot_id).\
+            first()
+
+        if not result:
+            raise exception.CgSnapshotNotFound(
+                _("No cgsnapshot with id %s") % cgsnapshot_id)
+
+        result.update(values)
+        result.save(session=session)
+    return result
+
+
+@require_admin_context
+def cgsnapshot_destroy(context, cgsnapshot_id):
+    session = get_session()
+    with session.begin():
+        model_query(context, models.Cgsnapshot, session=session).\
+            filter_by(id=cgsnapshot_id).\
+            update({'status': 'deleted',
+                    'deleted': True,
+                    'deleted_at': timeutils.utcnow(),
+                    'updated_at': literal_column('updated_at')})
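
Note (reviewer sketch, not part of the patch): the new helpers above follow the
existing db API conventions, so exercising them looks roughly like the
following, assuming `ctxt` is an admin RequestContext and that the db facade in
cinder/db/api.py exposes the same names:

    from cinder import db

    group = db.consistencygroup_create(
        ctxt, {'user_id': 'fake', 'project_id': 'fake',
               'name': 'cg1', 'status': 'creating'})
    cgsnap = db.cgsnapshot_create(
        ctxt, {'user_id': 'fake', 'project_id': 'fake',
               'consistencygroup_id': group['id'], 'status': 'creating'})

    # _consistencygroup_get/_cgsnapshot_get raise *NotFound for missing or
    # soft-deleted rows, so lookups after destroy fail loudly.
    assert db.cgsnapshot_get(ctxt, cgsnap['id'])['status'] == 'creating'
    db.cgsnapshot_destroy(ctxt, cgsnap['id'])
    db.consistencygroup_destroy(ctxt, group['id'])
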
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/025_add_consistencygroup.py b/cinder/db/sqlalchemy/migrate_repo/versions/025_add_consistencygroup.py
new file mode 100644 (file)
index 0000000..2be6c51
--- /dev/null
@@ -0,0 +1,135 @@
+# Copyright (C) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from sqlalchemy import Boolean, Column, DateTime
+from sqlalchemy import ForeignKey, MetaData, String, Table
+
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # New table
+    consistencygroups = Table(
+        'consistencygroups', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', String(36), primary_key=True, nullable=False),
+        Column('user_id', String(length=255)),
+        Column('project_id', String(length=255)),
+        Column('host', String(length=255)),
+        Column('availability_zone', String(length=255)),
+        Column('name', String(length=255)),
+        Column('description', String(length=255)),
+        Column('volume_type_id', String(length=255)),
+        Column('status', String(length=255)),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8',
+    )
+
+    try:
+        consistencygroups.create()
+    except Exception:
+        LOG.error(_("Table |%s| not created!"), repr(consistencygroups))
+        raise
+
+    # New table
+    cgsnapshots = Table(
+        'cgsnapshots', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', String(36), primary_key=True, nullable=False),
+        Column('consistencygroup_id', String(36),
+               ForeignKey('consistencygroups.id'),
+               nullable=False),
+        Column('user_id', String(length=255)),
+        Column('project_id', String(length=255)),
+        Column('name', String(length=255)),
+        Column('description', String(length=255)),
+        Column('status', String(length=255)),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8',
+    )
+
+    try:
+        cgsnapshots.create()
+    except Exception:
+        LOG.error(_("Table |%s| not created!"), repr(cgsnapshots))
+        raise
+
+    # Add column to volumes table
+    volumes = Table('volumes', meta, autoload=True)
+    consistencygroup_id = Column('consistencygroup_id', String(36),
+                                 ForeignKey('consistencygroups.id'))
+    try:
+        volumes.create_column(consistencygroup_id)
+        volumes.update().values(consistencygroup_id=None).execute()
+    except Exception:
+        LOG.error(_("Adding consistencygroup_id column to volumes table"
+                  " failed."))
+        raise
+
+    # Add column to snapshots table
+    snapshots = Table('snapshots', meta, autoload=True)
+    cgsnapshot_id = Column('cgsnapshot_id', String(36),
+                           ForeignKey('cgsnapshots.id'))
+
+    try:
+        snapshots.create_column(cgsnapshot_id)
+        snapshots.update().values(cgsnapshot_id=None).execute()
+    except Exception:
+        LOG.error(_("Adding cgsnapshot_id column to snapshots table"
+                  " failed."))
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # Drop column from snapshots table
+    snapshots = Table('snapshots', meta, autoload=True)
+    cgsnapshot_id = snapshots.columns.cgsnapshot_id
+    snapshots.drop_column(cgsnapshot_id)
+
+    # Drop column from volumes table
+    volumes = Table('volumes', meta, autoload=True)
+    consistencygroup_id = volumes.columns.consistencygroup_id
+    volumes.drop_column(consistencygroup_id)
+
+    # Drop table
+    cgsnapshots = Table('cgsnapshots', meta, autoload=True)
+    try:
+        cgsnapshots.drop()
+    except Exception:
+        LOG.error(_("cgsnapshots table not dropped"))
+        raise
+
+    # Drop table
+    consistencygroups = Table('consistencygroups', meta, autoload=True)
+    try:
+        consistencygroups.drop()
+    except Exception:
+        LOG.error(_("consistencygroups table not dropped"))
+        raise
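
For reference (not part of the patch): this migration is picked up by the
normal cinder-manage workflow; the version numbers below assume a deployment
currently at migration 024:

    cinder-manage db version   # 24 before the upgrade
    cinder-manage db sync      # applies 025 (and 026 below)
    cinder-manage db version   # 26 afterwards
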
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/026_add_consistencygroup_quota_class.py b/cinder/db/sqlalchemy/migrate_repo/versions/026_add_consistencygroup_quota_class.py
new file mode 100644 (file)
index 0000000..a682f00
--- /dev/null
@@ -0,0 +1,75 @@
+# Copyright (C) 2012 - 2014 EMC Corporation.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import datetime
+
+from oslo.config import cfg
+from sqlalchemy import MetaData, Table
+
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+
+# Get default values via config.  The defaults will either
+# come from the default values set in the quota option
+# configuration or via cinder.conf if the user has configured
+# default values for quotas there.
+CONF = cfg.CONF
+CONF.import_opt('quota_consistencygroups', 'cinder.quota')
+LOG = logging.getLogger(__name__)
+
+CLASS_NAME = 'default'
+CREATED_AT = datetime.datetime.now()
+
+
+def upgrade(migrate_engine):
+    """Add default quota class data into DB."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    quota_classes = Table('quota_classes', meta, autoload=True)
+
+    rows = quota_classes.count().\
+        where(quota_classes.c.resource == 'consistencygroups').\
+        execute().scalar()
+
+    # Do not add entries if there are already 'consistencygroups' entries.
+    if rows:
+        LOG.info(_("Found existing 'consistencygroups' entries in the"
+                 "quota_classes table.  Skipping insertion."))
+        return
+
+    try:
+        # Set consistencygroups
+        qci = quota_classes.insert()
+        qci.execute({'created_at': CREATED_AT,
+                     'class_name': CLASS_NAME,
+                     'resource': 'consistencygroups',
+                     'hard_limit': CONF.quota_consistencygroups,
+                     'deleted': False, })
+        LOG.info(_("Added default consistencygroups quota class data into "
+                 "the DB."))
+    except Exception:
+        LOG.error(_("Default consistencygroups quota class data not inserted "
+                  "into the DB."))
+        raise
+
+
+def downgrade(migrate_engine):
+    """Don't delete the 'default' entries at downgrade time.
+
+    We don't know if the user had default entries when we started.
+    If they did, we wouldn't want to remove them.  So, the safest
+    thing to do is just leave the 'default' entries at downgrade time.
+    """
+    pass
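
A quick sanity check of the seeded row (illustrative only; the connection URL
is an assumption, the table and column names follow the migration above):

    from sqlalchemy import MetaData, Table, create_engine

    engine = create_engine('mysql://cinder:secret@localhost/cinder')
    meta = MetaData(bind=engine)
    quota_classes = Table('quota_classes', meta, autoload=True)

    row = quota_classes.select().\
        where(quota_classes.c.resource == 'consistencygroups').\
        execute().first()
    # Expect ('default', CONF.quota_consistencygroups): 10 unless overridden
    # in cinder.conf before the migration ran.
    print(row.class_name, row.hard_limit)
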
diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py
index cc16784d0324c6adfd08e63101c01cc2468b4dca..e5b26c8ca87d8685d15411df38720de60e501228 100644 (file)
@@ -66,6 +66,42 @@ class Service(BASE, CinderBase):
     disabled_reason = Column(String(255))
 
 
+class ConsistencyGroup(BASE, CinderBase):
+    """Represents a consistencygroup."""
+    __tablename__ = 'consistencygroups'
+    id = Column(String(36), primary_key=True)
+
+    user_id = Column(String(255), nullable=False)
+    project_id = Column(String(255), nullable=False)
+
+    host = Column(String(255))
+    availability_zone = Column(String(255))
+    name = Column(String(255))
+    description = Column(String(255))
+    volume_type_id = Column(String(255))
+    status = Column(String(255))
+
+
+class Cgsnapshot(BASE, CinderBase):
+    """Represents a cgsnapshot."""
+    __tablename__ = 'cgsnapshots'
+    id = Column(String(36), primary_key=True)
+
+    consistencygroup_id = Column(String(36))
+    user_id = Column(String(255), nullable=False)
+    project_id = Column(String(255), nullable=False)
+
+    name = Column(String(255))
+    description = Column(String(255))
+    status = Column(String(255))
+
+    consistencygroup = relationship(
+        ConsistencyGroup,
+        backref="cgsnapshots",
+        foreign_keys=consistencygroup_id,
+        primaryjoin='Cgsnapshot.consistencygroup_id == ConsistencyGroup.id')
+
+
 class Volume(BASE, CinderBase):
     """Represents a block storage device that can be attached to a vm."""
     __tablename__ = 'volumes'
@@ -116,6 +152,8 @@ class Volume(BASE, CinderBase):
     source_volid = Column(String(36))
     encryption_key_id = Column(String(36))
 
+    consistencygroup_id = Column(String(36))
+
     deleted = Column(Boolean, default=False)
     bootable = Column(Boolean, default=False)
 
@@ -123,6 +161,12 @@ class Volume(BASE, CinderBase):
     replication_extended_status = Column(String(255))
     replication_driver_data = Column(String(255))
 
+    consistencygroup = relationship(
+        ConsistencyGroup,
+        backref="volumes",
+        foreign_keys=consistencygroup_id,
+        primaryjoin='Volume.consistencygroup_id == ConsistencyGroup.id')
+
 
 class VolumeMetadata(BASE, CinderBase):
     """Represents a metadata key/value pair for a volume."""
@@ -353,6 +397,7 @@ class Snapshot(BASE, CinderBase):
     project_id = Column(String(255))
 
     volume_id = Column(String(36))
+    cgsnapshot_id = Column(String(36))
     status = Column(String(255))
     progress = Column(String(255))
     volume_size = Column(Integer)
@@ -369,6 +414,12 @@ class Snapshot(BASE, CinderBase):
                           foreign_keys=volume_id,
                           primaryjoin='Snapshot.volume_id == Volume.id')
 
+    cgsnapshot = relationship(
+        Cgsnapshot,
+        backref="snapshots",
+        foreign_keys=cgsnapshot_id,
+        primaryjoin='Snapshot.cgsnapshot_id == Cgsnapshot.id')
+
 
 class SnapshotMetadata(BASE, CinderBase):
     """Represents a metadata key/value pair for a snapshot."""
@@ -487,6 +538,8 @@ def register_models():
               VolumeTypeExtraSpecs,
               VolumeTypes,
               VolumeGlanceMetadata,
+              ConsistencyGroup,
+              Cgsnapshot
               )
     engine = create_engine(CONF.database.connection, echo=False)
     for model in models:
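
With these relationships in place, callers holding an ORM row can walk to the
group without a hand-written join (sketch only; `ctxt` is a RequestContext,
and whether the related rows are eager-loaded depends on the joinedload
options used in sqlalchemy/api.py):

    volume = db.volume_get(ctxt, volume_id)
    if volume['consistencygroup_id']:
        group = volume['consistencygroup']      # ConsistencyGroup row
        siblings = group.volumes                # backref; may lazy-load

    snapshot = db.snapshot_get(ctxt, snapshot_id)
    cgsnap = snapshot['cgsnapshot']             # None unless in a cgsnapshot
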
diff --git a/cinder/exception.py b/cinder/exception.py
index 4721a4df286186a6164b0147cd27f4bd7bfedd5c..65e6922ffa591326db9d80c54a63045ada51c56b 100644 (file)
@@ -782,3 +782,21 @@ class EMCVnxCLICmdError(VolumeBackendAPIException):
             LOG.error(msg)
         else:
             LOG.warn(msg)
+
+
+# ConsistencyGroup
+class ConsistencyGroupNotFound(NotFound):
+    message = _("ConsistencyGroup %(consistencygroup_id)s could not be found.")
+
+
+class InvalidConsistencyGroup(Invalid):
+    message = _("Invalid ConsistencyGroup: %(reason)s")
+
+
+# CgSnapshot
+class CgSnapshotNotFound(NotFound):
+    message = _("CgSnapshot %(cgsnapshot_id)s could not be found.")
+
+
+class InvalidCgSnapshot(Invalid):
+    message = _("Invalid CgSnapshot: %(reason)s")
diff --git a/cinder/quota.py b/cinder/quota.py
index 8a234d7983df33accbc3eba5c6b6bed2e46617d4..1c0573ff00c8eaae69d7085a5fa4f0d03aaba2c6 100644 (file)
@@ -39,6 +39,9 @@ quota_opts = [
     cfg.IntOpt('quota_snapshots',
                default=10,
                help='Number of volume snapshots allowed per project'),
+    cfg.IntOpt('quota_consistencygroups',
+               default=10,
+               help='Number of consistencygroups allowed per project'),
     cfg.IntOpt('quota_gigabytes',
                default=1000,
                help='Total amount of storage, in gigabytes, allowed '
@@ -878,4 +881,29 @@ class VolumeTypeQuotaEngine(QuotaEngine):
     def register_resources(self, resources):
         raise NotImplementedError(_("Cannot register resources"))
 
+
+class CGQuotaEngine(QuotaEngine):
+    """Represent the consistencygroup quotas."""
+
+    @property
+    def resources(self):
+        """Fetches all possible quota resources."""
+
+        result = {}
+        # Global quotas.
+        argses = [('consistencygroups', '_sync_consistencygroups',
+                   'quota_consistencygroups'), ]
+        for args in argses:
+            resource = ReservableResource(*args)
+            result[resource.name] = resource
+
+        return result
+
+    def register_resource(self, resource):
+        raise NotImplementedError(_("Cannot register resource"))
+
+    def register_resources(self, resources):
+        raise NotImplementedError(_("Cannot register resources"))
+
 QUOTAS = VolumeTypeQuotaEngine()
+CGQUOTAS = CGQuotaEngine()
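
CGQUOTAS is a second QuotaEngine instance that tracks only the
consistencygroups resource. A hedged sketch of the reserve/commit/rollback
flow a caller (for example the consistency group API) would use; the
surrounding error handling is illustrative, not the patch's actual code:

    from cinder import db
    from cinder.quota import CGQUOTAS

    reservations = CGQUOTAS.reserve(ctxt, consistencygroups=1)
    try:
        group = db.consistencygroup_create(ctxt, values)
    except Exception:
        CGQUOTAS.rollback(ctxt, reservations)
        raise
    else:
        CGQUOTAS.commit(ctxt, reservations)
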
diff --git a/cinder/scheduler/driver.py b/cinder/scheduler/driver.py
index 25df391de0efb458c43baf9e2a7f09d175017eb2..d3b2a8dd2d5b7be8b8a11c6d585ff1ae954f721f 100644 (file)
@@ -51,6 +51,16 @@ def volume_update_db(context, volume_id, host):
     return db.volume_update(context, volume_id, values)
 
 
+def group_update_db(context, group_id, host):
+    """Set the host and the scheduled_at field of a consistencygroup.
+
+    :returns: A Consistencygroup with the updated fields set properly.
+    """
+    now = timeutils.utcnow()
+    values = {'host': host, 'updated_at': now}
+    return db.consistencygroup_update(context, group_id, values)
+
+
 class Scheduler(object):
     """The base class that all Scheduler classes should inherit from."""
 
@@ -81,3 +91,10 @@ class Scheduler(object):
     def schedule_create_volume(self, context, request_spec, filter_properties):
         """Must override schedule method for scheduler to work."""
         raise NotImplementedError(_("Must implement schedule_create_volume"))
+
+    def schedule_create_consistencygroup(self, context, group_id,
+                                         request_spec_list,
+                                         filter_properties_list):
+        """Must override schedule method for scheduler to work."""
+        raise NotImplementedError(_(
+            "Must implement schedule_create_consistencygroup"))
diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py
index b32c4d39ff858f81e64b2375659ebe3e05b93ab9..74fca224b0a4e53215824f8844b0d845cc49d2d0 100644 (file)
@@ -61,6 +61,25 @@ class FilterScheduler(driver.Scheduler):
         filter_properties['metadata'] = vol.get('metadata')
         filter_properties['qos_specs'] = vol.get('qos_specs')
 
+    def schedule_create_consistencygroup(self, context, group_id,
+                                         request_spec_list,
+                                         filter_properties_list):
+
+        weighed_host = self._schedule_group(
+            context,
+            request_spec_list,
+            filter_properties_list)
+
+        if not weighed_host:
+            raise exception.NoValidHost(reason="No weighed hosts available")
+
+        host = weighed_host.obj.host
+
+        updated_group = driver.group_update_db(context, group_id, host)
+
+        self.volume_rpcapi.create_consistencygroup(context,
+                                                   updated_group, host)
+
     def schedule_create_volume(self, context, request_spec, filter_properties):
         weighed_host = self._schedule(context, request_spec,
                                       filter_properties)
@@ -265,6 +284,95 @@ class FilterScheduler(driver.Scheduler):
                                                             filter_properties)
         return weighed_hosts
 
+    def _get_weighted_candidates_group(self, context, request_spec_list,
+                                       filter_properties_list=None):
+        """Finds hosts that supports the consistencygroup.
+
+        Returns a list of hosts that meet the required specs,
+        ordered by their fitness.
+        """
+        elevated = context.elevated()
+
+        weighed_hosts = []
+        index = 0
+        for request_spec in request_spec_list:
+            volume_properties = request_spec['volume_properties']
+            # Since Cinder mixes filters from Oslo and its own, which take
+            # 'resource_XX' and 'volume_XX' as input respectively, copying
+            # 'volume_XX' to 'resource_XX' keeps both sets of filters happy.
+            resource_properties = volume_properties.copy()
+            volume_type = request_spec.get("volume_type", None)
+            resource_type = request_spec.get("volume_type", None)
+            request_spec.update({'resource_properties': resource_properties})
+
+            config_options = self._get_configuration_options()
+
+            filter_properties = {}
+            if filter_properties_list:
+                filter_properties = filter_properties_list[index]
+                if filter_properties is None:
+                    filter_properties = {}
+            self._populate_retry(filter_properties, resource_properties)
+
+            # Add consistencygroup_support in extra_specs if it is not there.
+            # Make sure it is populated in filter_properties
+            if 'consistencygroup_support' not in resource_type.get(
+                    'extra_specs', {}):
+                resource_type['extra_specs'].update(
+                    consistencygroup_support='<is> True')
+
+            filter_properties.update({'context': context,
+                                      'request_spec': request_spec,
+                                      'config_options': config_options,
+                                      'volume_type': volume_type,
+                                      'resource_type': resource_type})
+
+            self.populate_filter_properties(request_spec,
+                                            filter_properties)
+
+            # Find our local list of acceptable hosts by filtering and
+            # weighing our options. we virtually consume resources on
+            # it so subsequent selections can adjust accordingly.
+
+            # Note: remember, we are using an iterator here. So only
+            # traverse this list once.
+            all_hosts = self.host_manager.get_all_host_states(elevated)
+            if not all_hosts:
+                return []
+
+            # Filter local hosts based on requirements ...
+            hosts = self.host_manager.get_filtered_hosts(all_hosts,
+                                                         filter_properties)
+
+            if not hosts:
+                return []
+
+            LOG.debug("Filtered %s" % hosts)
+
+            # weighted_host = WeightedHost() ... the best
+            # host for the job.
+            temp_weighed_hosts = self.host_manager.get_weighed_hosts(
+                hosts,
+                filter_properties)
+            if not temp_weighed_hosts:
+                return []
+            if index == 0:
+                weighed_hosts = temp_weighed_hosts
+            else:
+                new_weighed_hosts = []
+                for host1 in weighed_hosts:
+                    for host2 in temp_weighed_hosts:
+                        if host1.obj.host == host2.obj.host:
+                            new_weighed_hosts.append(host1)
+                weighed_hosts = new_weighed_hosts
+                if not weighed_hosts:
+                    return []
+
+            index += 1
+
+        return weighed_hosts
+
     def _schedule(self, context, request_spec, filter_properties=None):
         weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                       filter_properties)
@@ -275,6 +383,16 @@ class FilterScheduler(driver.Scheduler):
             return None
         return self._choose_top_host(weighed_hosts, request_spec)
 
+    def _schedule_group(self, context, request_spec_list,
+                        filter_properties_list=None):
+        weighed_hosts = self._get_weighted_candidates_group(
+            context,
+            request_spec_list,
+            filter_properties_list)
+        if not weighed_hosts:
+            return None
+        return self._choose_top_host_group(weighed_hosts, request_spec_list)
+
     def _choose_top_host(self, weighed_hosts, request_spec):
         top_host = weighed_hosts[0]
         host_state = top_host.obj
@@ -282,3 +400,9 @@ class FilterScheduler(driver.Scheduler):
         volume_properties = request_spec['volume_properties']
         host_state.consume_from_volume(volume_properties)
         return top_host
+
+    def _choose_top_host_group(self, weighed_hosts, request_spec_list):
+        top_host = weighed_hosts[0]
+        host_state = top_host.obj
+        LOG.debug("Choosing %s" % host_state.host)
+        return top_host
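
The group scheduling above only keeps hosts that survive filtering and
weighing for every request spec, matching on host name. A standalone
illustration of that intersection step (not the scheduler code itself):

    def intersect_weighed_hosts(per_spec_lists):
        """Keep hosts present in every per-volume-type weighed list."""
        if not per_spec_lists:
            return []
        result = per_spec_lists[0]
        for candidates in per_spec_lists[1:]:
            names = set(h.obj.host for h in candidates)
            result = [h for h in result if h.obj.host in names]
            if not result:
                return []
        return result
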
diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py
index a6993c1c5c1bb604609785cf57540d5aded7a835..68a022e2475f844df4aa0f31dd3e6eff3dd961b1 100644 (file)
@@ -53,7 +53,7 @@ LOG = logging.getLogger(__name__)
 class SchedulerManager(manager.Manager):
     """Chooses a host to create volumes."""
 
-    RPC_API_VERSION = '1.5'
+    RPC_API_VERSION = '1.6'
 
     target = messaging.Target(version=RPC_API_VERSION)
 
@@ -87,6 +87,30 @@ class SchedulerManager(manager.Manager):
                                                 host,
                                                 capabilities)
 
+    def create_consistencygroup(self, context, topic,
+                                group_id,
+                                request_spec_list=None,
+                                filter_properties_list=None):
+        try:
+            self.driver.schedule_create_consistencygroup(
+                context, group_id,
+                request_spec_list,
+                filter_properties_list)
+        except exception.NoValidHost as ex:
+            msg = (_("Could not find a host for consistency group "
+                     "%(group_id)s.") %
+                   {'group_id': group_id})
+            LOG.error(msg)
+            db.consistencygroup_update(context, group_id,
+                                       {'status': 'error'})
+        except Exception as ex:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_("Failed to create consistency group "
+                            "%(group_id)s."))
+                LOG.exception(ex)
+                db.consistencygroup_update(context, group_id,
+                                           {'status': 'error'})
+
     def create_volume(self, context, topic, volume_id, snapshot_id=None,
                       image_id=None, request_spec=None,
                       filter_properties=None):
diff --git a/cinder/scheduler/rpcapi.py b/cinder/scheduler/rpcapi.py
index fb5ab1e13b9ce21620acbd620e432df958ce2e07..b6e7b2b2ac96e44f6562cc0b82a5241b547f5d17 100644 (file)
@@ -38,6 +38,7 @@ class SchedulerAPI(object):
         1.3 - Add migrate_volume_to_host() method
         1.4 - Add retype method
         1.5 - Add manage_existing method
+        1.6 - Add create_consistencygroup method
     '''
 
     RPC_API_VERSION = '1.0'
@@ -46,7 +47,23 @@ class SchedulerAPI(object):
         super(SchedulerAPI, self).__init__()
         target = messaging.Target(topic=CONF.scheduler_topic,
                                   version=self.RPC_API_VERSION)
-        self.client = rpc.get_client(target, version_cap='1.5')
+        self.client = rpc.get_client(target, version_cap='1.6')
+
+    def create_consistencygroup(self, ctxt, topic, group_id,
+                                request_spec_list=None,
+                                filter_properties_list=None):
+
+        cctxt = self.client.prepare(version='1.6')
+        request_spec_p_list = []
+        for request_spec in request_spec_list:
+            request_spec_p = jsonutils.to_primitive(request_spec)
+            request_spec_p_list.append(request_spec_p)
+
+        return cctxt.cast(ctxt, 'create_consistencygroup',
+                          topic=topic,
+                          group_id=group_id,
+                          request_spec_list=request_spec_p_list,
+                          filter_properties_list=filter_properties_list)
 
     def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
                       image_id=None, request_spec=None,
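
On the caller side (cinder/consistencygroup/api.py in this patch), the cast
would look roughly like the sketch below; only the RPC signature comes from
the method above, the request_spec contents here are assumptions:

    request_spec_list = []
    filter_properties_list = []
    for volume_type in req_volume_types:
        request_spec_list.append({'volume_type': volume_type,
                                  'consistencygroup_id': group['id'],
                                  'volume_properties': {}})   # contents assumed
        filter_properties_list.append({})

    self.scheduler_rpcapi.create_consistencygroup(
        context,
        CONF.volume_topic,
        group['id'],
        request_spec_list=request_spec_list,
        filter_properties_list=filter_properties_list)
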
diff --git a/cinder/tests/api/contrib/test_cgsnapshots.py b/cinder/tests/api/contrib/test_cgsnapshots.py
new file mode 100644 (file)
index 0000000..695b54c
--- /dev/null
@@ -0,0 +1,450 @@
+# Copyright (C) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Tests for cgsnapshot code.
+"""
+
+import json
+from xml.dom import minidom
+
+import webob
+
+from cinder import context
+from cinder import db
+from cinder.openstack.common import log as logging
+from cinder import test
+from cinder.tests.api import fakes
+from cinder.tests import utils
+import cinder.volume
+
+
+LOG = logging.getLogger(__name__)
+
+
+class CgsnapshotsAPITestCase(test.TestCase):
+    """Test Case for cgsnapshots API."""
+
+    def setUp(self):
+        super(CgsnapshotsAPITestCase, self).setUp()
+        self.volume_api = cinder.volume.API()
+        self.context = context.get_admin_context()
+        self.context.project_id = 'fake'
+        self.context.user_id = 'fake'
+
+    @staticmethod
+    def _create_cgsnapshot(
+            name='test_cgsnapshot',
+            description='this is a test cgsnapshot',
+            consistencygroup_id='1',
+            status='creating'):
+        """Create a cgsnapshot object."""
+        cgsnapshot = {}
+        cgsnapshot['user_id'] = 'fake'
+        cgsnapshot['project_id'] = 'fake'
+        cgsnapshot['consistencygroup_id'] = consistencygroup_id
+        cgsnapshot['name'] = name
+        cgsnapshot['description'] = description
+        cgsnapshot['status'] = status
+        return db.cgsnapshot_create(context.get_admin_context(),
+                                    cgsnapshot)['id']
+
+    @staticmethod
+    def _get_cgsnapshot_attrib(cgsnapshot_id, attrib_name):
+        return db.cgsnapshot_get(context.get_admin_context(),
+                                 cgsnapshot_id)[attrib_name]
+
+    def test_show_cgsnapshot(self):
+        consistencygroup_id = utils.create_consistencygroup(self.context)['id']
+        volume_id = utils.create_volume(self.context,
+                                        consistencygroup_id=
+                                        consistencygroup_id)['id']
+        cgsnapshot_id = self._create_cgsnapshot(
+            consistencygroup_id=consistencygroup_id)
+        LOG.debug('Created cgsnapshot with id %s' % cgsnapshot_id)
+        req = webob.Request.blank('/v2/fake/cgsnapshots/%s' %
+                                  cgsnapshot_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 200)
+        self.assertEqual(res_dict['cgsnapshot']['description'],
+                         'this is a test cgsnapshot')
+        self.assertEqual(res_dict['cgsnapshot']['name'],
+                         'test_cgsnapshot')
+        self.assertEqual(res_dict['cgsnapshot']['status'], 'creating')
+
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id)
+        db.volume_destroy(context.get_admin_context(),
+                          volume_id)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id)
+
+    def test_show_cgsnapshot_xml_content_type(self):
+        consistencygroup_id = utils.create_consistencygroup(self.context)['id']
+        volume_id = utils.create_volume(self.context,
+                                        consistencygroup_id=
+                                        consistencygroup_id)['id']
+        cgsnapshot_id = self._create_cgsnapshot(
+            consistencygroup_id=consistencygroup_id)
+        req = webob.Request.blank('/v2/fake/cgsnapshots/%s' %
+                                  cgsnapshot_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/xml'
+        req.headers['Accept'] = 'application/xml'
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 200)
+        dom = minidom.parseString(res.body)
+        cgsnapshot = dom.getElementsByTagName('cgsnapshot')
+        name = cgsnapshot.item(0).getAttribute('name')
+        self.assertEqual(name.strip(), "test_cgsnapshot")
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id)
+        db.volume_destroy(context.get_admin_context(),
+                          volume_id)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id)
+
+    def test_show_cgsnapshot_with_cgsnapshot_NotFound(self):
+        req = webob.Request.blank('/v2/fake/cgsnapshots/9999')
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 404)
+        self.assertEqual(res_dict['itemNotFound']['code'], 404)
+        self.assertEqual(res_dict['itemNotFound']['message'],
+                         'CgSnapshot 9999 could not be found.')
+
+    def test_list_cgsnapshots_json(self):
+        consistencygroup_id = utils.create_consistencygroup(self.context)['id']
+        volume_id = utils.create_volume(self.context,
+                                        consistencygroup_id=
+                                        consistencygroup_id)['id']
+        cgsnapshot_id1 = self._create_cgsnapshot(
+            consistencygroup_id=consistencygroup_id)
+        cgsnapshot_id2 = self._create_cgsnapshot(
+            consistencygroup_id=consistencygroup_id)
+        cgsnapshot_id3 = self._create_cgsnapshot(
+            consistencygroup_id=consistencygroup_id)
+
+        req = webob.Request.blank('/v2/fake/cgsnapshots')
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 200)
+        self.assertEqual(res_dict['cgsnapshots'][0]['id'],
+                         cgsnapshot_id1)
+        self.assertEqual(res_dict['cgsnapshots'][0]['name'],
+                         'test_cgsnapshot')
+        self.assertEqual(res_dict['cgsnapshots'][1]['id'],
+                         cgsnapshot_id2)
+        self.assertEqual(res_dict['cgsnapshots'][1]['name'],
+                         'test_cgsnapshot')
+        self.assertEqual(res_dict['cgsnapshots'][2]['id'],
+                         cgsnapshot_id3)
+        self.assertEqual(res_dict['cgsnapshots'][2]['name'],
+                         'test_cgsnapshot')
+
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id3)
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id2)
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id1)
+        db.volume_destroy(context.get_admin_context(),
+                          volume_id)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id)
+
+    def test_list_cgsnapshots_xml(self):
+        consistencygroup_id = utils.create_consistencygroup(self.context)['id']
+        volume_id = utils.create_volume(self.context,
+                                        consistencygroup_id=
+                                        consistencygroup_id)['id']
+        cgsnapshot_id1 = self._create_cgsnapshot(consistencygroup_id=
+                                                 consistencygroup_id)
+        cgsnapshot_id2 = self._create_cgsnapshot(consistencygroup_id=
+                                                 consistencygroup_id)
+        cgsnapshot_id3 = self._create_cgsnapshot(consistencygroup_id=
+                                                 consistencygroup_id)
+
+        req = webob.Request.blank('/v2/fake/cgsnapshots')
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/xml'
+        req.headers['Accept'] = 'application/xml'
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 200)
+        dom = minidom.parseString(res.body)
+        cgsnapshot_list = dom.getElementsByTagName('cgsnapshot')
+
+        self.assertEqual(cgsnapshot_list.item(0).getAttribute('id'),
+                         cgsnapshot_id1)
+        self.assertEqual(cgsnapshot_list.item(1).getAttribute('id'),
+                         cgsnapshot_id2)
+        self.assertEqual(cgsnapshot_list.item(2).getAttribute('id'),
+                         cgsnapshot_id3)
+
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id3)
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id2)
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id1)
+        db.volume_destroy(context.get_admin_context(),
+                          volume_id)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id)
+
+    def test_list_cgsnapshots_detail_json(self):
+        consistencygroup_id = utils.create_consistencygroup(self.context)['id']
+        volume_id = utils.create_volume(self.context,
+                                        consistencygroup_id=
+                                        consistencygroup_id)['id']
+        cgsnapshot_id1 = self._create_cgsnapshot(consistencygroup_id=
+                                                 consistencygroup_id)
+        cgsnapshot_id2 = self._create_cgsnapshot(consistencygroup_id=
+                                                 consistencygroup_id)
+        cgsnapshot_id3 = self._create_cgsnapshot(consistencygroup_id=
+                                                 consistencygroup_id)
+
+        req = webob.Request.blank('/v2/fake/cgsnapshots/detail')
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['Accept'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 200)
+        self.assertEqual(res_dict['cgsnapshots'][0]['description'],
+                         'this is a test cgsnapshot')
+        self.assertEqual(res_dict['cgsnapshots'][0]['name'],
+                         'test_cgsnapshot')
+        self.assertEqual(res_dict['cgsnapshots'][0]['id'],
+                         cgsnapshot_id1)
+        self.assertEqual(res_dict['cgsnapshots'][0]['status'],
+                         'creating')
+
+        self.assertEqual(res_dict['cgsnapshots'][1]['description'],
+                         'this is a test cgsnapshot')
+        self.assertEqual(res_dict['cgsnapshots'][1]['name'],
+                         'test_cgsnapshot')
+        self.assertEqual(res_dict['cgsnapshots'][1]['id'],
+                         cgsnapshot_id2)
+        self.assertEqual(res_dict['cgsnapshots'][1]['status'],
+                         'creating')
+
+        self.assertEqual(res_dict['cgsnapshots'][2]['description'],
+                         'this is a test cgsnapshot')
+        self.assertEqual(res_dict['cgsnapshots'][2]['name'],
+                         'test_cgsnapshot')
+        self.assertEqual(res_dict['cgsnapshots'][2]['id'],
+                         cgsnapshot_id3)
+        self.assertEqual(res_dict['cgsnapshots'][2]['status'],
+                         'creating')
+
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id3)
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id2)
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id1)
+        db.volume_destroy(context.get_admin_context(),
+                          volume_id)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id)
+
+    def test_list_cgsnapshots_detail_xml(self):
+        consistencygroup_id = utils.create_consistencygroup(self.context)['id']
+        volume_id = utils.create_volume(self.context,
+                                        consistencygroup_id=
+                                        consistencygroup_id)['id']
+        cgsnapshot_id1 = self._create_cgsnapshot(consistencygroup_id=
+                                                 consistencygroup_id)
+        cgsnapshot_id2 = self._create_cgsnapshot(consistencygroup_id=
+                                                 consistencygroup_id)
+        cgsnapshot_id3 = self._create_cgsnapshot(consistencygroup_id=
+                                                 consistencygroup_id)
+
+        req = webob.Request.blank('/v2/fake/cgsnapshots/detail')
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/xml'
+        req.headers['Accept'] = 'application/xml'
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 200)
+        dom = minidom.parseString(res.body)
+        cgsnapshot_detail = dom.getElementsByTagName('cgsnapshot')
+
+        self.assertEqual(
+            cgsnapshot_detail.item(0).getAttribute('description'),
+            'this is a test cgsnapshot')
+        self.assertEqual(
+            cgsnapshot_detail.item(0).getAttribute('name'),
+            'test_cgsnapshot')
+        self.assertEqual(
+            cgsnapshot_detail.item(0).getAttribute('id'),
+            cgsnapshot_id1)
+        self.assertEqual(
+            cgsnapshot_detail.item(0).getAttribute('status'), 'creating')
+
+        self.assertEqual(
+            cgsnapshot_detail.item(1).getAttribute('description'),
+            'this is a test cgsnapshot')
+        self.assertEqual(
+            cgsnapshot_detail.item(1).getAttribute('name'),
+            'test_cgsnapshot')
+        self.assertEqual(
+            cgsnapshot_detail.item(1).getAttribute('id'),
+            cgsnapshot_id2)
+        self.assertEqual(
+            cgsnapshot_detail.item(1).getAttribute('status'), 'creating')
+
+        self.assertEqual(
+            cgsnapshot_detail.item(2).getAttribute('description'),
+            'this is a test cgsnapshot')
+        self.assertEqual(
+            cgsnapshot_detail.item(2).getAttribute('name'),
+            'test_cgsnapshot')
+        self.assertEqual(
+            cgsnapshot_detail.item(2).getAttribute('id'),
+            cgsnapshot_id3)
+        self.assertEqual(
+            cgsnapshot_detail.item(2).getAttribute('status'), 'creating')
+
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id3)
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id2)
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id1)
+        db.volume_destroy(context.get_admin_context(),
+                          volume_id)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id)
+
+    def test_create_cgsnapshot_json(self):
+        cgsnapshot_id = "1"
+
+        consistencygroup_id = utils.create_consistencygroup(self.context)['id']
+        utils.create_volume(
+            self.context,
+            consistencygroup_id=consistencygroup_id)['id']
+
+        body = {"cgsnapshot": {"name": "cg1",
+                               "description":
+                               "CG Snapshot 1",
+                               "consistencygroup_id": consistencygroup_id}}
+        req = webob.Request.blank('/v2/fake/cgsnapshots')
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+
+        res_dict = json.loads(res.body)
+        LOG.info(res_dict)
+
+        self.assertEqual(res.status_int, 202)
+        self.assertIn('id', res_dict['cgsnapshot'])
+
+        db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id)
+
+    def test_create_cgsnapshot_with_no_body(self):
+        # omit body from the request
+        req = webob.Request.blank('/v2/fake/cgsnapshots')
+        req.body = json.dumps(None)
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['Accept'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 400)
+        self.assertEqual(res_dict['badRequest']['code'], 400)
+        self.assertEqual(res_dict['badRequest']['message'],
+                         'The server could not comply with the request since'
+                         ' it is either malformed or otherwise incorrect.')
+
+    def test_delete_cgsnapshot_available(self):
+        consistencygroup_id = utils.create_consistencygroup(self.context)['id']
+        volume_id = utils.create_volume(
+            self.context,
+            consistencygroup_id=consistencygroup_id)['id']
+        cgsnapshot_id = self._create_cgsnapshot(
+            consistencygroup_id=consistencygroup_id,
+            status='available')
+        req = webob.Request.blank('/v2/fake/cgsnapshots/%s' %
+                                  cgsnapshot_id)
+        req.method = 'DELETE'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 202)
+        self.assertEqual(self._get_cgsnapshot_attrib(cgsnapshot_id,
+                         'status'),
+                         'deleting')
+
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id)
+        db.volume_destroy(context.get_admin_context(),
+                          volume_id)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id)
+
+    def test_delete_cgsnapshot_with_cgsnapshot_NotFound(self):
+        req = webob.Request.blank('/v2/fake/cgsnapshots/9999')
+        req.method = 'DELETE'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 404)
+        self.assertEqual(res_dict['itemNotFound']['code'], 404)
+        self.assertEqual(res_dict['itemNotFound']['message'],
+                         'Cgsnapshot could not be found')
+
+    def test_delete_cgsnapshot_with_Invalidcgsnapshot(self):
+        consistencygroup_id = utils.create_consistencygroup(self.context)['id']
+        volume_id = utils.create_volume(
+            self.context,
+            consistencygroup_id=consistencygroup_id)['id']
+        cgsnapshot_id = self._create_cgsnapshot(
+            consistencygroup_id=consistencygroup_id,
+            status='invalid')
+        req = webob.Request.blank('/v2/fake/cgsnapshots/%s' %
+                                  cgsnapshot_id)
+        req.method = 'DELETE'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 400)
+        self.assertEqual(res_dict['badRequest']['code'], 400)
+        self.assertEqual(res_dict['badRequest']['message'],
+                         'Invalid cgsnapshot')
+
+        db.cgsnapshot_destroy(context.get_admin_context(),
+                              cgsnapshot_id)
+        db.volume_destroy(context.get_admin_context(),
+                          volume_id)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id)
diff --git a/cinder/tests/api/contrib/test_consistencygroups.py b/cinder/tests/api/contrib/test_consistencygroups.py
new file mode 100644 (file)
index 0000000..f4bee26
--- /dev/null
@@ -0,0 +1,379 @@
+# Copyright (C) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Tests for consistency group code.
+"""
+
+import json
+from xml.dom import minidom
+
+import webob
+
+from cinder import context
+from cinder import db
+from cinder import test
+from cinder.tests.api import fakes
+import cinder.volume
+
+
+class ConsistencyGroupsAPITestCase(test.TestCase):
+    """Test Case for consistency groups API."""
+
+    def setUp(self):
+        super(ConsistencyGroupsAPITestCase, self).setUp()
+        self.volume_api = cinder.volume.API()
+        self.context = context.get_admin_context()
+        self.context.project_id = 'fake'
+        self.context.user_id = 'fake'
+
+    @staticmethod
+    def _create_consistencygroup(
+            name='test_consistencygroup',
+            description='this is a test consistency group',
+            volume_type_id='123456',
+            availability_zone='az1',
+            status='creating'):
+        """Create a consistency group object."""
+        consistencygroup = {}
+        consistencygroup['user_id'] = 'fake'
+        consistencygroup['project_id'] = 'fake'
+        consistencygroup['availability_zone'] = availability_zone
+        consistencygroup['name'] = name
+        consistencygroup['description'] = description
+        consistencygroup['volume_type_id'] = volume_type_id
+        consistencygroup['status'] = status
+        return db.consistencygroup_create(
+            context.get_admin_context(),
+            consistencygroup)['id']
+
+    @staticmethod
+    def _get_consistencygroup_attrib(consistencygroup_id, attrib_name):
+        return db.consistencygroup_get(context.get_admin_context(),
+                                       consistencygroup_id)[attrib_name]
+
+    def test_show_consistencygroup(self):
+        consistencygroup_id = self._create_consistencygroup()
+        req = webob.Request.blank('/v2/fake/consistencygroups/%s' %
+                                  consistencygroup_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 200)
+        self.assertEqual(res_dict['consistencygroup']['availability_zone'],
+                         'az1')
+        self.assertEqual(res_dict['consistencygroup']['description'],
+                         'this is a test consistency group')
+        self.assertEqual(res_dict['consistencygroup']['name'],
+                         'test_consistencygroup')
+        self.assertEqual(res_dict['consistencygroup']['status'], 'creating')
+
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id)
+
+    def test_show_consistencygroup_xml_content_type(self):
+        consistencygroup_id = self._create_consistencygroup()
+        req = webob.Request.blank('/v2/fake/consistencygroups/%s' %
+                                  consistencygroup_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/xml'
+        req.headers['Accept'] = 'application/xml'
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 200)
+        dom = minidom.parseString(res.body)
+        consistencygroup = dom.getElementsByTagName('consistencygroup')
+        name = consistencygroup.item(0).getAttribute('name')
+        self.assertEqual(name.strip(), "test_consistencygroup")
+        db.consistencygroup_destroy(
+            context.get_admin_context(),
+            consistencygroup_id)
+
+    def test_show_consistencygroup_with_consistencygroup_NotFound(self):
+        req = webob.Request.blank('/v2/fake/consistencygroups/9999')
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 404)
+        self.assertEqual(res_dict['itemNotFound']['code'], 404)
+        self.assertEqual(res_dict['itemNotFound']['message'],
+                         'ConsistencyGroup 9999 could not be found.')
+
+    def test_list_consistencygroups_json(self):
+        consistencygroup_id1 = self._create_consistencygroup()
+        consistencygroup_id2 = self._create_consistencygroup()
+        consistencygroup_id3 = self._create_consistencygroup()
+
+        req = webob.Request.blank('/v2/fake/consistencygroups')
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 200)
+        self.assertEqual(res_dict['consistencygroups'][0]['id'],
+                         consistencygroup_id1)
+        self.assertEqual(res_dict['consistencygroups'][0]['name'],
+                         'test_consistencygroup')
+        self.assertEqual(res_dict['consistencygroups'][1]['id'],
+                         consistencygroup_id2)
+        self.assertEqual(res_dict['consistencygroups'][1]['name'],
+                         'test_consistencygroup')
+        self.assertEqual(res_dict['consistencygroups'][2]['id'],
+                         consistencygroup_id3)
+        self.assertEqual(res_dict['consistencygroups'][2]['name'],
+                         'test_consistencygroup')
+
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id3)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id2)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id1)
+
+    def test_list_consistencygroups_xml(self):
+        consistencygroup_id1 = self._create_consistencygroup()
+        consistencygroup_id2 = self._create_consistencygroup()
+        consistencygroup_id3 = self._create_consistencygroup()
+
+        req = webob.Request.blank('/v2/fake/consistencygroups')
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/xml'
+        req.headers['Accept'] = 'application/xml'
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 200)
+        dom = minidom.parseString(res.body)
+        consistencygroup_list = dom.getElementsByTagName('consistencygroup')
+
+        self.assertEqual(consistencygroup_list.item(0).getAttribute('id'),
+                         consistencygroup_id1)
+        self.assertEqual(consistencygroup_list.item(1).getAttribute('id'),
+                         consistencygroup_id2)
+        self.assertEqual(consistencygroup_list.item(2).getAttribute('id'),
+                         consistencygroup_id3)
+
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id3)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id2)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id1)
+
+    def test_list_consistencygroups_detail_json(self):
+        consistencygroup_id1 = self._create_consistencygroup()
+        consistencygroup_id2 = self._create_consistencygroup()
+        consistencygroup_id3 = self._create_consistencygroup()
+
+        req = webob.Request.blank('/v2/fake/consistencygroups/detail')
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['Accept'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 200)
+        self.assertEqual(res_dict['consistencygroups'][0]['availability_zone'],
+                         'az1')
+        self.assertEqual(res_dict['consistencygroups'][0]['description'],
+                         'this is a test consistency group')
+        self.assertEqual(res_dict['consistencygroups'][0]['name'],
+                         'test_consistencygroup')
+        self.assertEqual(res_dict['consistencygroups'][0]['id'],
+                         consistencygroup_id1)
+        self.assertEqual(res_dict['consistencygroups'][0]['status'],
+                         'creating')
+
+        self.assertEqual(res_dict['consistencygroups'][1]['availability_zone'],
+                         'az1')
+        self.assertEqual(res_dict['consistencygroups'][1]['description'],
+                         'this is a test consistency group')
+        self.assertEqual(res_dict['consistencygroups'][1]['name'],
+                         'test_consistencygroup')
+        self.assertEqual(res_dict['consistencygroups'][1]['id'],
+                         consistencygroup_id2)
+        self.assertEqual(res_dict['consistencygroups'][1]['status'],
+                         'creating')
+
+        self.assertEqual(res_dict['consistencygroups'][2]['availability_zone'],
+                         'az1')
+        self.assertEqual(res_dict['consistencygroups'][2]['description'],
+                         'this is a test consistency group')
+        self.assertEqual(res_dict['consistencygroups'][2]['name'],
+                         'test_consistencygroup')
+        self.assertEqual(res_dict['consistencygroups'][2]['id'],
+                         consistencygroup_id3)
+        self.assertEqual(res_dict['consistencygroups'][2]['status'],
+                         'creating')
+
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id3)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id2)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id1)
+
+    def test_list_consistencygroups_detail_xml(self):
+        consistencygroup_id1 = self._create_consistencygroup()
+        consistencygroup_id2 = self._create_consistencygroup()
+        consistencygroup_id3 = self._create_consistencygroup()
+
+        req = webob.Request.blank('/v2/fake/consistencygroups/detail')
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/xml'
+        req.headers['Accept'] = 'application/xml'
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 200)
+        dom = minidom.parseString(res.body)
+        consistencygroup_detail = dom.getElementsByTagName('consistencygroup')
+
+        self.assertEqual(
+            consistencygroup_detail.item(0).getAttribute('availability_zone'),
+            'az1')
+        self.assertEqual(
+            consistencygroup_detail.item(0).getAttribute('description'),
+            'this is a test consistency group')
+        self.assertEqual(
+            consistencygroup_detail.item(0).getAttribute('name'),
+            'test_consistencygroup')
+        self.assertEqual(
+            consistencygroup_detail.item(0).getAttribute('id'),
+            consistencygroup_id1)
+        self.assertEqual(
+            consistencygroup_detail.item(0).getAttribute('status'), 'creating')
+
+        self.assertEqual(
+            consistencygroup_detail.item(1).getAttribute('availability_zone'),
+            'az1')
+        self.assertEqual(
+            consistencygroup_detail.item(1).getAttribute('description'),
+            'this is a test consistency group')
+        self.assertEqual(
+            consistencygroup_detail.item(1).getAttribute('name'),
+            'test_consistencygroup')
+        self.assertEqual(
+            consistencygroup_detail.item(1).getAttribute('id'),
+            consistencygroup_id2)
+        self.assertEqual(
+            consistencygroup_detail.item(1).getAttribute('status'), 'creating')
+
+        self.assertEqual(
+            consistencygroup_detail.item(2).getAttribute('availability_zone'),
+            'az1')
+        self.assertEqual(
+            consistencygroup_detail.item(2).getAttribute('description'),
+            'this is a test consistency group')
+        self.assertEqual(
+            consistencygroup_detail.item(2).getAttribute('name'),
+            'test_consistencygroup')
+        self.assertEqual(
+            consistencygroup_detail.item(2).getAttribute('id'),
+            consistencygroup_id3)
+        self.assertEqual(
+            consistencygroup_detail.item(2).getAttribute('status'), 'creating')
+
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id3)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id2)
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id1)
+
+    def test_create_consistencygroup_json(self):
+        group_id = "1"
+        body = {"consistencygroup": {"name": "cg1",
+                                     "description":
+                                     "Consistency Group 1", }}
+        req = webob.Request.blank('/v2/fake/consistencygroups')
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 202)
+        self.assertIn('id', res_dict['consistencygroup'])
+
+        db.consistencygroup_destroy(context.get_admin_context(), group_id)
+
+    def test_create_consistencygroup_with_no_body(self):
+        # omit body from the request
+        req = webob.Request.blank('/v2/fake/consistencygroups')
+        req.body = json.dumps(None)
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['Accept'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 400)
+        self.assertEqual(res_dict['badRequest']['code'], 400)
+        self.assertEqual(res_dict['badRequest']['message'],
+                         'The server could not comply with the request since'
+                         ' it is either malformed or otherwise incorrect.')
+
+    def test_delete_consistencygroup_available(self):
+        consistencygroup_id = self._create_consistencygroup(status='available')
+        req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
+                                  consistencygroup_id)
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        body = {"consistencygroup": {"force": True}}
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 202)
+        self.assertEqual(
+            self._get_consistencygroup_attrib(consistencygroup_id, 'status'),
+            'deleting')
+
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id)
+
+    def test_delete_consistencygroup_with_consistencygroup_NotFound(self):
+        req = webob.Request.blank('/v2/fake/consistencygroups/9999/delete')
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.body = json.dumps(None)
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 404)
+        self.assertEqual(res_dict['itemNotFound']['code'], 404)
+        self.assertEqual(res_dict['itemNotFound']['message'],
+                         'Consistency group could not be found')
+
+    def test_delete_consistencygroup_with_Invalidconsistencygroup(self):
+        consistencygroup_id = self._create_consistencygroup(status='invalid')
+        req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
+                                  consistencygroup_id)
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        body = {"consistencygroup": {"force": False}}
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 400)
+        self.assertEqual(res_dict['badRequest']['code'], 400)
+        self.assertEqual(res_dict['badRequest']['message'],
+                         'Invalid consistency group')
+
+        db.consistencygroup_destroy(context.get_admin_context(),
+                                    consistencygroup_id)
index 708a2e034fb00f87f64f9451370ca7f39f586471..4369324686cc4c69e15378572264b9a67c45e876 100644 (file)
@@ -94,6 +94,7 @@ class VolumeApiTest(test.TestCase):
                            'volume_id': '1'}],
                          'availability_zone': 'zone1:host1',
                          'bootable': 'false',
+                         'consistencygroup_id': None,
                          'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                          'description': 'Volume Test Desc',
                          'id': '1',
@@ -197,6 +198,7 @@ class VolumeApiTest(test.TestCase):
                                           'volume_id': '1'}],
                          'availability_zone': 'nova',
                          'bootable': 'false',
+                         'consistencygroup_id': None,
                          'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                          'description': 'Volume Test Desc',
                          'encrypted': False,
@@ -287,6 +289,7 @@ class VolumeApiTest(test.TestCase):
                 'encrypted': False,
                 'availability_zone': 'fakeaz',
                 'bootable': 'false',
+                'consistencygroup_id': None,
                 'name': 'Updated Test Name',
                 'replication_status': 'disabled',
                 'attachments': [
@@ -340,6 +343,7 @@ class VolumeApiTest(test.TestCase):
                 'encrypted': False,
                 'availability_zone': 'fakeaz',
                 'bootable': 'false',
+                'consistencygroup_id': None,
                 'name': 'Updated Test Name',
                 'replication_status': 'disabled',
                 'attachments': [
@@ -396,6 +400,7 @@ class VolumeApiTest(test.TestCase):
                 'encrypted': False,
                 'availability_zone': 'fakeaz',
                 'bootable': 'false',
+                'consistencygroup_id': None,
                 'name': 'New Name',
                 'replication_status': 'disabled',
                 'attachments': [
@@ -447,6 +452,7 @@ class VolumeApiTest(test.TestCase):
             'encrypted': False,
             'availability_zone': 'fakeaz',
             'bootable': 'false',
+            'consistencygroup_id': None,
             'name': 'displayname',
             'replication_status': 'disabled',
             'attachments': [{
@@ -509,6 +515,7 @@ class VolumeApiTest(test.TestCase):
             'encrypted': False,
             'availability_zone': 'fakeaz',
             'bootable': 'false',
+            'consistencygroup_id': None,
             'name': 'Updated Test Name',
             'replication_status': 'disabled',
             'attachments': [{
@@ -613,6 +620,7 @@ class VolumeApiTest(test.TestCase):
                     'encrypted': False,
                     'availability_zone': 'fakeaz',
                     'bootable': 'false',
+                    'consistencygroup_id': None,
                     'name': 'displayname',
                     'replication_status': 'disabled',
                     'attachments': [
@@ -674,6 +682,7 @@ class VolumeApiTest(test.TestCase):
                     'encrypted': False,
                     'availability_zone': 'fakeaz',
                     'bootable': 'false',
+                    'consistencygroup_id': None,
                     'name': 'displayname',
                     'replication_status': 'disabled',
                     'attachments': [
@@ -1074,6 +1083,7 @@ class VolumeApiTest(test.TestCase):
                 'encrypted': False,
                 'availability_zone': 'fakeaz',
                 'bootable': 'false',
+                'consistencygroup_id': None,
                 'name': 'displayname',
                 'replication_status': 'disabled',
                 'attachments': [
@@ -1124,6 +1134,7 @@ class VolumeApiTest(test.TestCase):
                 'encrypted': False,
                 'availability_zone': 'fakeaz',
                 'bootable': 'false',
+                'consistencygroup_id': None,
                 'name': 'displayname',
                 'replication_status': 'disabled',
                 'attachments': [],
@@ -1182,6 +1193,7 @@ class VolumeApiTest(test.TestCase):
                 'encrypted': False,
                 'availability_zone': 'fakeaz',
                 'bootable': 'false',
+                'consistencygroup_id': None,
                 'name': 'displayname',
                 'replication_status': 'disabled',
                 'attachments': [
index 2c0b0ceaabf9c4f20a77709741439c788e5a291b..a966be7ed978dc6d9aa9ec430247fd3e755833c0 100644 (file)
     "backup:backup-export": [["rule:admin_api"]],
 
     "volume_extension:replication:promote": [["rule:admin_api"]],
-    "volume_extension:replication:reenable": [["rule:admin_api"]]
+    "volume_extension:replication:reenable": [["rule:admin_api"]],
 
+    "consistencygroup:create" : [],
+    "consistencygroup:delete": [],
+    "consistencygroup:get": [],
+    "consistencygroup:get_all": [],
+
+    "consistencygroup:create_cgsnapshot" : [],
+    "consistencygroup:delete_cgsnapshot": [],
+    "consistencygroup:get_cgsnapshot": [],
+    "consistencygroup:get_all_cgsnapshots": []
 }
index 61bafa7610d1ff4cbd2e4d020fa402d67a3f6d5f..7a2403f578e93d918464f0692a1efc0dfcae4ceb 100644 (file)
@@ -55,7 +55,8 @@ class FakeHostManager(host_manager.HostManager):
                       'allocated_capacity_gb': 1848,
                       'reserved_percentage': 5,
                       'volume_backend_name': 'lvm4',
-                      'timestamp': None},
+                      'timestamp': None,
+                      'consistencygroup_support': True},
         }
 
 
index 532c20922ea51188afa438618f05635154793044..48f88e5e4a07fed51bce439ab5f2bdc133c18fa2 100644 (file)
@@ -31,6 +31,80 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
 
     driver_cls = filter_scheduler.FilterScheduler
 
+    def test_create_consistencygroup_no_hosts(self):
+        # Ensure empty hosts result in a NoValidHost exception.
+        sched = fakes.FakeFilterScheduler()
+
+        fake_context = context.RequestContext('user', 'project')
+        request_spec = {'volume_properties': {'project_id': 1,
+                                              'size': 0},
+                        'volume_type': {'name': 'Type1',
+                                        'extra_specs': {}}}
+        request_spec2 = {'volume_properties': {'project_id': 1,
+                                               'size': 0},
+                         'volume_type': {'name': 'Type2',
+                                         'extra_specs': {}}}
+        request_spec_list = [request_spec, request_spec2]
+        self.assertRaises(exception.NoValidHost,
+                          sched.schedule_create_consistencygroup,
+                          fake_context, 'fake-id1', request_spec_list, {})
+
+    @mock.patch('cinder.db.service_get_all_by_topic')
+    def test_schedule_consistencygroup(self,
+                                       _mock_service_get_all_by_topic):
+        # Make sure _schedule_group() can find a host successfully.
+        sched = fakes.FakeFilterScheduler()
+        sched.host_manager = fakes.FakeHostManager()
+        fake_context = context.RequestContext('user', 'project',
+                                              is_admin=True)
+
+        fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
+
+        specs = {'capabilities:consistencygroup_support': '<is> True'}
+        request_spec = {'volume_properties': {'project_id': 1,
+                                              'size': 0},
+                        'volume_type': {'name': 'Type1',
+                                        'extra_specs': specs}}
+        request_spec2 = {'volume_properties': {'project_id': 1,
+                                               'size': 0},
+                         'volume_type': {'name': 'Type2',
+                                         'extra_specs': specs}}
+        request_spec_list = [request_spec, request_spec2]
+        weighed_host = sched._schedule_group(fake_context,
+                                             request_spec_list,
+                                             {})
+        self.assertIsNotNone(weighed_host.obj)
+        self.assertTrue(_mock_service_get_all_by_topic.called)
+
+    @mock.patch('cinder.db.service_get_all_by_topic')
+    def test_schedule_consistencygroup_no_cg_support_in_extra_specs(
+            self,
+            _mock_service_get_all_by_topic):
+        # Make sure _schedule_group() can find a host successfully even
+        # when consistencygroup_support is not specified in the volume
+        # type's extra specs.
+        sched = fakes.FakeFilterScheduler()
+        sched.host_manager = fakes.FakeHostManager()
+        fake_context = context.RequestContext('user', 'project',
+                                              is_admin=True)
+
+        fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
+
+        request_spec = {'volume_properties': {'project_id': 1,
+                                              'size': 0},
+                        'volume_type': {'name': 'Type1',
+                                        'extra_specs': {}}}
+        request_spec2 = {'volume_properties': {'project_id': 1,
+                                               'size': 0},
+                         'volume_type': {'name': 'Type2',
+                                         'extra_specs': {}}}
+        request_spec_list = [request_spec, request_spec2]
+        weighed_host = sched._schedule_group(fake_context,
+                                             request_spec_list,
+                                             {})
+        self.assertIsNotNone(weighed_host.obj)
+        self.assertTrue(_mock_service_get_all_by_topic.called)
+
     def test_create_volume_no_hosts(self):
         # Ensure empty hosts/child_zones result in NoValidHosts exception.
         sched = fakes.FakeFilterScheduler()
index 112fbe55e4bb9219f431eea3c585f48536185420..9f35ec8ce5255d48edefe4752276c3bbcf6532d6 100644 (file)
@@ -64,6 +64,9 @@ class fake_db(object):
     def snapshot_get(self, *args, **kwargs):
         return {'volume_id': 1}
 
+    def consistencygroup_get(self, *args, **kwargs):
+        return {'consistencygroup_id': 1}
+
 
 class CreateVolumeFlowTestCase(test.TestCase):
 
@@ -88,7 +91,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                 'source_volid': None,
                 'snapshot_id': None,
                 'image_id': None,
-                'source_replicaid': None}
+                'source_replicaid': None,
+                'consistencygroup_id': None}
 
         task = create_volume.VolumeCastTask(
             fake_scheduler_rpc_api(spec, self),
@@ -101,7 +105,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                 'source_volid': 2,
                 'snapshot_id': 3,
                 'image_id': 4,
-                'source_replicaid': 5}
+                'source_replicaid': 5,
+                'consistencygroup_id': 5}
 
         task = create_volume.VolumeCastTask(
             fake_scheduler_rpc_api(spec, self),
index f1b63c95f4f1b8f4502fd63bb0363ac48eea20b2..d30738244280e7b6983577e112e957429dfce5ba 100644 (file)
@@ -69,6 +69,7 @@ from cinder.volume import volume_types
 
 
 QUOTAS = quota.QUOTAS
+CGQUOTAS = quota.CGQUOTAS
 
 CONF = cfg.CONF
 
@@ -2893,6 +2894,180 @@ class VolumeTestCase(BaseVolumeTestCase):
         # clean up
         self.volume.delete_volume(self.context, volume['id'])
 
+    def test_create_delete_consistencygroup(self):
+        """Test consistencygroup can be created and deleted."""
+        # Need to stub out reserve, commit, and rollback
+        def fake_reserve(context, expire=None, project_id=None, **deltas):
+            return ["RESERVATION"]
+
+        def fake_commit(context, reservations, project_id=None):
+            pass
+
+        def fake_rollback(context, reservations, project_id=None):
+            pass
+
+        self.stubs.Set(CGQUOTAS, "reserve", fake_reserve)
+        self.stubs.Set(CGQUOTAS, "commit", fake_commit)
+        self.stubs.Set(CGQUOTAS, "rollback", fake_rollback)
+
+        rval = {'status': 'available'}
+        driver.VolumeDriver.create_consistencygroup = \
+            mock.Mock(return_value=rval)
+
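+        # The driver's delete_consistencygroup is expected to return a
+        # (model_update, volumes) tuple, hence the two-element mock value.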
+        rval = {'status': 'deleted'}, []
+        driver.VolumeDriver.delete_consistencygroup = \
+            mock.Mock(return_value=rval)
+
+        group = tests_utils.create_consistencygroup(
+            self.context,
+            availability_zone=CONF.storage_availability_zone,
+            volume_type='type1,type2')
+        group_id = group['id']
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
+        self.volume.create_consistencygroup(self.context, group_id)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+        msg = fake_notifier.NOTIFICATIONS[0]
+        self.assertEqual(msg['event_type'], 'consistencygroup.create.start')
+        expected = {
+            'status': 'available',
+            'name': 'test_cg',
+            'availability_zone': 'nova',
+            'tenant_id': 'fake',
+            'created_at': 'DONTCARE',
+            'user_id': 'fake',
+            'consistencygroup_id': group_id
+        }
+        self.assertDictMatch(msg['payload'], expected)
+        msg = fake_notifier.NOTIFICATIONS[1]
+        self.assertEqual(msg['event_type'], 'consistencygroup.create.end')
+        expected['status'] = 'available'
+        self.assertDictMatch(msg['payload'], expected)
+        self.assertEqual(
+            group_id,
+            db.consistencygroup_get(context.get_admin_context(),
+                                    group_id).id)
+
+        self.volume.delete_consistencygroup(self.context, group_id)
+        cg = db.consistencygroup_get(
+            context.get_admin_context(read_deleted='yes'),
+            group_id)
+        self.assertEqual(cg['status'], 'deleted')
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4)
+        msg = fake_notifier.NOTIFICATIONS[2]
+        self.assertEqual(msg['event_type'], 'consistencygroup.delete.start')
+        self.assertDictMatch(msg['payload'], expected)
+        msg = fake_notifier.NOTIFICATIONS[3]
+        self.assertEqual(msg['event_type'], 'consistencygroup.delete.end')
+        self.assertDictMatch(msg['payload'], expected)
+        self.assertRaises(exception.NotFound,
+                          db.consistencygroup_get,
+                          self.context,
+                          group_id)
+
+    @staticmethod
+    def _create_cgsnapshot(group_id, volume_id, size='0'):
+        """Create a cgsnapshot object."""
+        cgsnap = {}
+        cgsnap['user_id'] = 'fake'
+        cgsnap['project_id'] = 'fake'
+        cgsnap['consistencygroup_id'] = group_id
+        cgsnap['status'] = "creating"
+        cgsnapshot = db.cgsnapshot_create(context.get_admin_context(), cgsnap)
+
+        # Create a snapshot object
+        snap = {}
+        snap['volume_size'] = size
+        snap['user_id'] = 'fake'
+        snap['project_id'] = 'fake'
+        snap['volume_id'] = volume_id
+        snap['status'] = "available"
+        snap['cgsnapshot_id'] = cgsnapshot['id']
+        snapshot = db.snapshot_create(context.get_admin_context(), snap)
+
+        return cgsnapshot, snapshot
+
+    def test_create_delete_cgsnapshot(self):
+        """Test cgsnapshot can be created and deleted."""
+
+        rval = {'status': 'available'}
+        driver.VolumeDriver.create_consistencygroup = \
+            mock.Mock(return_value=rval)
+
+        rval = {'status': 'deleted'}, []
+        driver.VolumeDriver.delete_consistencygroup = \
+            mock.Mock(return_value=rval)
+
+        rval = {'status': 'available'}, []
+        driver.VolumeDriver.create_cgsnapshot = \
+            mock.Mock(return_value=rval)
+
+        rval = {'status': 'deleted'}, []
+        driver.VolumeDriver.delete_cgsnapshot = \
+            mock.Mock(return_value=rval)
+
+        group = tests_utils.create_consistencygroup(
+            self.context,
+            availability_zone=CONF.storage_availability_zone,
+            volume_type='type1,type2')
+        group_id = group['id']
+        volume = tests_utils.create_volume(
+            self.context,
+            consistencygroup_id=group_id,
+            **self.volume_params)
+        volume_id = volume['id']
+        self.volume.create_volume(self.context, volume_id)
+        cgsnapshot = tests_utils.create_cgsnapshot(
+            self.context,
+            consistencygroup_id=group_id)
+        cgsnapshot_id = cgsnapshot['id']
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+        cgsnapshot_returns = self._create_cgsnapshot(group_id, volume_id)
+        cgsnapshot_id = cgsnapshot_returns[0]['id']
+        self.volume.create_cgsnapshot(self.context, group_id, cgsnapshot_id)
+        self.assertEqual(cgsnapshot_id,
+                         db.cgsnapshot_get(context.get_admin_context(),
+                                           cgsnapshot_id).id)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 6)
+        msg = fake_notifier.NOTIFICATIONS[2]
+        self.assertEqual(msg['event_type'], 'cgsnapshot.create.start')
+        expected = {
+            'created_at': 'DONTCARE',
+            'name': None,
+            'cgsnapshot_id': cgsnapshot_id,
+            'status': 'creating',
+            'tenant_id': 'fake',
+            'user_id': 'fake',
+            'consistencygroup_id': group_id
+        }
+        self.assertDictMatch(msg['payload'], expected)
+        msg = fake_notifier.NOTIFICATIONS[3]
+        self.assertEqual(msg['event_type'], 'snapshot.create.start')
+        msg = fake_notifier.NOTIFICATIONS[4]
+        self.assertEqual(msg['event_type'], 'cgsnapshot.create.end')
+        self.assertDictMatch(msg['payload'], expected)
+        msg = fake_notifier.NOTIFICATIONS[5]
+        self.assertEqual(msg['event_type'], 'snapshot.create.end')
+
+        self.volume.delete_cgsnapshot(self.context, cgsnapshot_id)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 10)
+        msg = fake_notifier.NOTIFICATIONS[6]
+        self.assertEqual(msg['event_type'], 'cgsnapshot.delete.start')
+        expected['status'] = 'available'
+        self.assertDictMatch(msg['payload'], expected)
+        msg = fake_notifier.NOTIFICATIONS[8]
+        self.assertEqual(msg['event_type'], 'cgsnapshot.delete.end')
+        self.assertDictMatch(msg['payload'], expected)
+
+        cgsnap = db.cgsnapshot_get(
+            context.get_admin_context(read_deleted='yes'),
+            cgsnapshot_id)
+        self.assertEqual(cgsnap['status'], 'deleted')
+        self.assertRaises(exception.NotFound,
+                          db.cgsnapshot_get,
+                          self.context,
+                          cgsnapshot_id)
+        self.volume.delete_consistencygroup(self.context, group_id)
+
 
 class CopyVolumeToImageTestCase(BaseVolumeTestCase):
     def fake_local_path(self, volume):
index ae15189983c1ae77d0cbb4f54ac925922be79d4e..b5460cd161a305795a6e1020f373ff1115b731b5 100644 (file)
@@ -147,6 +147,7 @@ class VolumeRpcAPITestCase(test.TestCase):
                               image_id='fake_image_id',
                               source_volid='fake_src_id',
                               source_replicaid='fake_replica_id',
+                              consistencygroup_id='fake_cg_id',
                               version='1.4')
 
     def test_create_volume_serialization(self):
@@ -162,6 +163,7 @@ class VolumeRpcAPITestCase(test.TestCase):
                               image_id='fake_image_id',
                               source_volid='fake_src_id',
                               source_replicaid='fake_replica_id',
+                              consistencygroup_id='fake_cg_id',
                               version='1.4')
 
     def test_delete_volume(self):
index a11f9d8238fdcd9d5979bc0a6d64bacc8de8f3c4..4b23b0f3b4d13599766305c32aac83770b0974e4 100644 (file)
@@ -34,6 +34,7 @@ def create_volume(ctxt,
                   replication_status='disabled',
                   replication_extended_status=None,
                   replication_driver_data=None,
+                  consistencygroup_id=None,
                   **kwargs):
     """Create a volume object in the DB."""
     vol = {}
@@ -47,6 +48,8 @@ def create_volume(ctxt,
     vol['display_description'] = display_description
     vol['attach_status'] = 'detached'
     vol['availability_zone'] = availability_zone
+    if consistencygroup_id:
+        vol['consistencygroup_id'] = consistencygroup_id
     if volume_type_id:
         vol['volume_type_id'] = volume_type_id
     for key in kwargs:
@@ -73,3 +76,46 @@ def create_snapshot(ctxt,
     snap['display_name'] = display_name
     snap['display_description'] = display_description
     return db.snapshot_create(ctxt, snap)
+
+
+def create_consistencygroup(ctxt,
+                            host='test_host',
+                            name='test_cg',
+                            description='this is a test cg',
+                            status='available',
+                            availability_zone='fake_az',
+                            volume_type_id=None,
+                            **kwargs):
+    """Create a consistencygroup object in the DB."""
+    cg = {}
+    cg['host'] = host
+    cg['user_id'] = ctxt.user_id
+    cg['project_id'] = ctxt.project_id
+    cg['status'] = status
+    cg['name'] = name
+    cg['description'] = description
+    cg['availability_zone'] = availability_zone
+    if volume_type_id:
+        cg['volume_type_id'] = volume_type_id
+    for key in kwargs:
+        cg[key] = kwargs[key]
+    return db.consistencygroup_create(ctxt, cg)
+
+
+def create_cgsnapshot(ctxt,
+                      name='test_cgsnap',
+                      description='this is a test cgsnap',
+                      status='available',
+                      consistencygroup_id=None,
+                      **kwargs):
+    """Create a cgsnapshot object in the DB."""
+    cgsnap = {}
+    cgsnap['user_id'] = ctxt.user_id
+    cgsnap['project_id'] = ctxt.project_id
+    cgsnap['status'] = status
+    cgsnap['name'] = name
+    cgsnap['description'] = description
+    cgsnap['consistencygroup_id'] = consistencygroup_id
+    for key in kwargs:
+        cgsnap[key] = kwargs[key]
+    return db.cgsnapshot_create(ctxt, cgsnap)
index 06cc222177e53e49304375cc03309b0255849651..c086ae3be7ece332c4c4ae4adc95f4db44309fd7 100644 (file)
@@ -153,7 +153,14 @@ class API(base.Base):
                image_id=None, volume_type=None, metadata=None,
                availability_zone=None, source_volume=None,
                scheduler_hints=None, backup_source_volume=None,
-               source_replica=None):
+               source_replica=None, consistencygroup=None):
+
+        if volume_type and consistencygroup:
+            cg_voltypeids = consistencygroup.get('volume_type_id')
+            if volume_type.get('id') not in cg_voltypeids:
+                msg = _("Invalid volume_type provided (requested type "
+                        "must be supported by this consistency group.")
+                raise exception.InvalidInput(reason=msg)
 
         if source_volume and volume_type:
             if volume_type['id'] != source_volume['volume_type_id']:
@@ -198,7 +205,8 @@ class API(base.Base):
             'key_manager': self.key_manager,
             'backup_source_volume': backup_source_volume,
             'source_replica': source_replica,
-            'optional_args': {'is_quota_committed': False}
+            'optional_args': {'is_quota_committed': False},
+            'consistencygroup': consistencygroup
         }
         try:
             flow_engine = create_volume.get_flow(self.scheduler_rpcapi,
@@ -475,7 +483,19 @@ class API(base.Base):
 
     def _create_snapshot(self, context,
                          volume, name, description,
-                         force=False, metadata=None):
+                         force=False, metadata=None,
+                         cgsnapshot_id=None):
+        snapshot = self.create_snapshot_in_db(
+            context, volume, name,
+            description, force, metadata, cgsnapshot_id)
+        self.volume_rpcapi.create_snapshot(context, volume, snapshot)
+
+        return snapshot
+
+    def create_snapshot_in_db(self, context,
+                              volume, name, description,
+                              force, metadata,
+                              cgsnapshot_id):
         check_policy(context, 'create_snapshot', volume)
 
         if volume['migration_status'] is not None:
@@ -534,6 +554,7 @@ class API(base.Base):
 
         self._check_metadata_properties(metadata)
         options = {'volume_id': volume['id'],
+                   'cgsnapshot_id': cgsnapshot_id,
                    'user_id': context.user_id,
                    'project_id': context.project_id,
                    'status': "creating",
@@ -557,15 +578,131 @@ class API(base.Base):
                 finally:
                     QUOTAS.rollback(context, reservations)
 
-        self.volume_rpcapi.create_snapshot(context, volume, snapshot)
-
         return snapshot
 
+    def create_snapshots_in_db(self, context,
+                               volume_list,
+                               name, description,
+                               force, cgsnapshot_id):
+        snapshot_list = []
+        for volume in volume_list:
+            self._create_snapshot_in_db_validate(context, volume, force)
+
+        reservations = self._create_snapshots_in_db_reserve(
+            context, volume_list)
+
+        options_list = []
+        for volume in volume_list:
+            options = self._create_snapshot_in_db_options(
+                context, volume, name, description, cgsnapshot_id)
+            options_list.append(options)
+
+        try:
+            for options in options_list:
+                snapshot = self.db.snapshot_create(context, options)
+                snapshot_list.append(snapshot)
+
+            QUOTAS.commit(context, reservations)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                try:
+                    for snap in snapshot_list:
+                        self.db.snapshot_destroy(context, snap['id'])
+                finally:
+                    QUOTAS.rollback(context, reservations)
+
+        return snapshot_list
+
+    def _create_snapshot_in_db_validate(self, context, volume, force):
+        check_policy(context, 'create_snapshot', volume)
+
+        if volume['migration_status'] is not None:
+            # Volume is migrating, wait until done
+            msg = _("Snapshot cannot be created while volume is migrating")
+            raise exception.InvalidVolume(reason=msg)
+
+        if ((not force) and (volume['status'] != "available")):
+            msg = _("Snapshot cannot be created because volume '%s' is not "
+                    "available.") % volume['id']
+            raise exception.InvalidVolume(reason=msg)
+
+    def _create_snapshots_in_db_reserve(self, context, volume_list):
+        reserve_opts_list = []
+        total_reserve_opts = {}
+        try:
+            for volume in volume_list:
+                if CONF.no_snapshot_gb_quota:
+                    reserve_opts = {'snapshots': 1}
+                else:
+                    reserve_opts = {'snapshots': 1,
+                                    'gigabytes': volume['size']}
+                QUOTAS.add_volume_type_opts(context,
+                                            reserve_opts,
+                                            volume.get('volume_type_id'))
+                reserve_opts_list.append(reserve_opts)
+
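+            # Sum the per-volume reservation options so quota for all the
+            # snapshots in the group is reserved in a single call.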
+            for reserve_opts in reserve_opts_list:
+                for (key, value) in reserve_opts.items():
+                    if key not in total_reserve_opts.keys():
+                        total_reserve_opts[key] = value
+                    else:
+                        total_reserve_opts[key] = \
+                            total_reserve_opts[key] + value
+            reservations = QUOTAS.reserve(context, **total_reserve_opts)
+        except exception.OverQuota as e:
+            overs = e.kwargs['overs']
+            usages = e.kwargs['usages']
+            quotas = e.kwargs['quotas']
+
+            def _consumed(name):
+                return (usages[name]['reserved'] + usages[name]['in_use'])
+
+            for over in overs:
+                if 'gigabytes' in over:
+                    msg = _("Quota exceeded for %(s_pid)s, tried to create "
+                            "%(s_size)sG snapshot (%(d_consumed)dG of "
+                            "%(d_quota)dG already consumed)")
+                    LOG.warning(msg % {'s_pid': context.project_id,
+                                       's_size': volume['size'],
+                                       'd_consumed': _consumed(over),
+                                       'd_quota': quotas[over]})
+                    raise exception.VolumeSizeExceedsAvailableQuota(
+                        requested=volume['size'],
+                        consumed=_consumed('gigabytes'),
+                        quota=quotas['gigabytes'])
+                elif 'snapshots' in over:
+                    msg = _("Quota exceeded for %(s_pid)s, tried to create "
+                            "snapshot (%(d_consumed)d snapshots "
+                            "already consumed)")
+
+                    LOG.warning(msg % {'s_pid': context.project_id,
+                                       'd_consumed': _consumed(over)})
+                    raise exception.SnapshotLimitExceeded(
+                        allowed=quotas[over])
+
+        return reservations
+
+    def _create_snapshot_in_db_options(self, context, volume,
+                                       name, description,
+                                       cgsnapshot_id):
+        options = {'volume_id': volume['id'],
+                   'cgsnapshot_id': cgsnapshot_id,
+                   'user_id': context.user_id,
+                   'project_id': context.project_id,
+                   'status': "creating",
+                   'progress': '0%',
+                   'volume_size': volume['size'],
+                   'display_name': name,
+                   'display_description': description,
+                   'volume_type_id': volume['volume_type_id'],
+                   'encryption_key_id': volume['encryption_key_id']}
+        return options
+
     def create_snapshot(self, context,
-                        volume, name,
-                        description, metadata=None):
+                        volume, name, description,
+                        metadata=None, cgsnapshot_id=None):
         return self._create_snapshot(context, volume, name, description,
-                                     False, metadata)
+                                     False, metadata, cgsnapshot_id)
 
     def create_snapshot_force(self, context,
                               volume, name,
@@ -578,6 +715,12 @@ class API(base.Base):
         if not force and snapshot['status'] not in ["available", "error"]:
             msg = _("Volume Snapshot status must be available or error")
             raise exception.InvalidSnapshot(reason=msg)
+        cgsnapshot_id = snapshot.get('cgsnapshot_id', None)
+        if cgsnapshot_id:
+            msg = _("Snapshot %s is part of a cgsnapshot and has to be "
+                    "deleted together with the cgsnapshot.") % snapshot['id']
+            LOG.error(msg)
+            raise exception.InvalidSnapshot(reason=msg)
         self.db.snapshot_update(context, snapshot['id'],
                                 {'status': 'deleting'})
         volume = self.db.volume_get(context, snapshot['volume_id'])
@@ -859,6 +1002,12 @@ class API(base.Base):
             LOG.error(msg)
             raise exception.InvalidVolume(reason=msg)
 
+        cg_id = volume.get('consistencygroup_id', None)
+        if cg_id:
+            msg = _("Volume must not be part of a consistency group.")
+            LOG.error(msg)
+            raise exception.InvalidVolume(reason=msg)
+
         # Make sure the host is in the list of available hosts
         elevated = context.elevated()
         topic = CONF.volume_topic
@@ -952,6 +1101,12 @@ class API(base.Base):
             LOG.error(msg)
             raise exception.InvalidInput(reason=msg)
 
+        cg_id = volume.get('consistencygroup_id', None)
+        if cg_id:
+            msg = _("Volume must not be part of a consistency group.")
+            LOG.error(msg)
+            raise exception.InvalidVolume(reason=msg)
+
         # Support specifying volume type by ID or name
         try:
             if uuidutils.is_uuid_like(new_type):
index 64374d361dc0252655f83cbb44d484c24f57cafb..8078b2167ffc44f1ece4c84929407700063a495a 100644 (file)
@@ -794,6 +794,22 @@ class VolumeDriver(object):
     def terminate_connection(self, volume, connector, **kwargs):
         """Disallow connection from connector"""
 
+    def create_consistencygroup(self, context, group):
+        """Creates a consistencygroup."""
+        raise NotImplementedError()
+
+    def delete_consistencygroup(self, context, group):
+        """Deletes a consistency group."""
+        raise NotImplementedError()
+
+    def create_cgsnapshot(self, context, cgsnapshot):
+        """Creates a cgsnapshot."""
+        raise NotImplementedError()
+
+    def delete_cgsnapshot(self, context, cgsnapshot):
+        """Deletes a cgsnapshot."""
+        raise NotImplementedError()
+
 
 class ISCSIDriver(VolumeDriver):
     """Executes commands relating to ISCSI volumes.
index 639d60b22c004fb3c2c058c813b724c1d2015372..cd9432181de042ab802908693e84377fe7079c85 100644 (file)
@@ -41,6 +41,7 @@ QUOTAS = quota.QUOTAS
 SNAPSHOT_PROCEED_STATUS = ('available',)
 SRC_VOL_PROCEED_STATUS = ('available', 'in-use',)
 REPLICA_PROCEED_STATUS = ('active', 'active-stopped')
+CG_PROCEED_STATUS = ('available',)
 
 
 class ExtractVolumeRequestTask(flow_utils.CinderTask):
@@ -59,7 +60,8 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
     # reconstructed elsewhere and continued).
     default_provides = set(['availability_zone', 'size', 'snapshot_id',
                             'source_volid', 'volume_type', 'volume_type_id',
-                            'encryption_key_id', 'source_replicaid'])
+                            'encryption_key_id', 'source_replicaid',
+                            'consistencygroup_id'])
 
     def __init__(self, image_service, availability_zones, **kwargs):
         super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION],
@@ -67,6 +69,24 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
         self.image_service = image_service
         self.availability_zones = availability_zones
 
+    @staticmethod
+    def _extract_consistencygroup(consistencygroup):
+        """Extracts the consistencygroup id from the provided consistencygroup.
+
+        This function validates the input consistencygroup dict and checks that
+        the status of that consistencygroup is valid for creating a volume in.
+        """
+
+        consistencygroup_id = None
+        if consistencygroup is not None:
+            if consistencygroup['status'] not in CG_PROCEED_STATUS:
+                msg = _("Originating consistencygroup status must be one"
+                        " of '%s' values")
+                msg = msg % (", ".join(CG_PROCEED_STATUS))
+                raise exception.InvalidConsistencyGroup(reason=msg)
+            consistencygroup_id = consistencygroup['id']
+        return consistencygroup_id
+
     @staticmethod
     def _extract_snapshot(snapshot):
         """Extracts the snapshot id from the provided snapshot (if provided).
@@ -363,7 +383,8 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
 
     def execute(self, context, size, snapshot, image_id, source_volume,
                 availability_zone, volume_type, metadata,
-                key_manager, backup_source_volume, source_replica):
+                key_manager, backup_source_volume, source_replica,
+                consistencygroup):
 
         utils.check_exclusive_options(snapshot=snapshot,
                                       imageRef=image_id,
@@ -376,6 +397,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
         source_volid = self._extract_source_volume(source_volume)
         source_replicaid = self._extract_source_replica(source_replica)
         size = self._extract_size(size, source_volume, snapshot)
+        consistencygroup_id = self._extract_consistencygroup(consistencygroup)
 
         self._check_image_metadata(context, image_id, size)
 
@@ -429,6 +451,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
             'encryption_key_id': encryption_key_id,
             'qos_specs': specs,
             'source_replicaid': source_replicaid,
+            'consistencygroup_id': consistencygroup_id,
         }
 
 
@@ -444,7 +467,7 @@ class EntryCreateTask(flow_utils.CinderTask):
         requires = ['availability_zone', 'description', 'metadata',
                     'name', 'reservations', 'size', 'snapshot_id',
                     'source_volid', 'volume_type_id', 'encryption_key_id',
-                    'source_replicaid']
+                    'source_replicaid', 'consistencygroup_id', ]
         super(EntryCreateTask, self).__init__(addons=[ACTION],
                                               requires=requires)
         self.db = db
@@ -656,7 +679,8 @@ class VolumeCastTask(flow_utils.CinderTask):
     def __init__(self, scheduler_rpcapi, volume_rpcapi, db):
         requires = ['image_id', 'scheduler_hints', 'snapshot_id',
                     'source_volid', 'volume_id', 'volume_type',
-                    'volume_properties', 'source_replicaid']
+                    'volume_properties', 'source_replicaid',
+                    'consistencygroup_id']
         super(VolumeCastTask, self).__init__(addons=[ACTION],
                                              requires=requires)
         self.volume_rpcapi = volume_rpcapi
@@ -669,9 +693,14 @@ class VolumeCastTask(flow_utils.CinderTask):
         volume_id = request_spec['volume_id']
         snapshot_id = request_spec['snapshot_id']
         image_id = request_spec['image_id']
+        group_id = request_spec['consistencygroup_id']
         host = None
 
-        if snapshot_id and CONF.snapshot_same_host:
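+        # If the volume belongs to a consistency group, reuse the host the
+        # group was scheduled to so the new volume lands on the same backend.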
+        if group_id:
+            group = self.db.consistencygroup_get(context, group_id)
+            if group:
+                host = group.get('host', None)
+        elif snapshot_id and CONF.snapshot_same_host:
             # NOTE(Rongze Zhu): A simple solution for bug 1008866.
             #
             # If snapshot_id is set, make the call create volume directly to
@@ -715,7 +744,8 @@ class VolumeCastTask(flow_utils.CinderTask):
                 snapshot_id=snapshot_id,
                 image_id=image_id,
                 source_volid=source_volid,
-                source_replicaid=source_replicaid)
+                source_replicaid=source_replicaid,
+                consistencygroup_id=group_id)
 
     def execute(self, context, **kwargs):
         scheduler_hints = kwargs.pop('scheduler_hints', None)
index 8dc2f50fbd7a9603a63d0a992a405d06e392c2ce..70ffeb719c7d0a4d3a81acca3e067e92737d1ce3 100644 (file)
@@ -709,7 +709,8 @@ class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
 def get_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
              allow_reschedule, reschedule_context, request_spec,
              filter_properties, snapshot_id=None, image_id=None,
-             source_volid=None, source_replicaid=None):
+             source_volid=None, source_replicaid=None,
+             consistencygroup_id=None):
     """Constructs and returns the manager entrypoint flow.
 
     This flow will do the following:
@@ -740,6 +741,7 @@ def get_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
         'source_volid': source_volid,
         'volume_id': volume_id,
         'source_replicaid': source_replicaid,
+        'consistencygroup_id': consistencygroup_id,
     }
 
     volume_flow.add(ExtractVolumeRefTask(db, host))
index 1614b5d5c3af034d248c3bd440ff6bfecd3def27..efc2559b85c7ee5a300a6666479c494984b70531 100644 (file)
@@ -70,6 +70,7 @@ from eventlet.greenpool import GreenPool
 LOG = logging.getLogger(__name__)
 
 QUOTAS = quota.QUOTAS
+CGQUOTAS = quota.CGQUOTAS
 
 volume_manager_opts = [
     cfg.StrOpt('volume_driver',
@@ -152,7 +153,7 @@ def locked_snapshot_operation(f):
 class VolumeManager(manager.SchedulerDependentManager):
     """Manages attachable block storage devices."""
 
-    RPC_API_VERSION = '1.17'
+    RPC_API_VERSION = '1.18'
 
     target = messaging.Target(version=RPC_API_VERSION)
 
@@ -276,7 +277,7 @@ class VolumeManager(manager.SchedulerDependentManager):
     def create_volume(self, context, volume_id, request_spec=None,
                       filter_properties=None, allow_reschedule=True,
                       snapshot_id=None, image_id=None, source_volid=None,
-                      source_replicaid=None):
+                      source_replicaid=None, consistencygroup_id=None):
 
         """Creates the volume."""
         context_saved = context.deepcopy()
@@ -298,6 +299,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                 image_id=image_id,
                 source_volid=source_volid,
                 source_replicaid=source_replicaid,
+                consistencygroup_id=consistencygroup_id,
                 allow_reschedule=allow_reschedule,
                 reschedule_context=context_saved,
                 request_spec=request_spec,
@@ -1168,6 +1170,39 @@ class VolumeManager(manager.SchedulerDependentManager):
             context, snapshot, event_suffix,
             extra_usage_info=extra_usage_info, host=self.host)
 
+    def _notify_about_consistencygroup_usage(self,
+                                             context,
+                                             group,
+                                             event_suffix,
+                                             extra_usage_info=None):
+        volume_utils.notify_about_consistencygroup_usage(
+            context, group, event_suffix,
+            extra_usage_info=extra_usage_info, host=self.host)
+
+        volumes = self.db.volume_get_all_by_group(context, group['id'])
+        if volumes:
+            for volume in volumes:
+                volume_utils.notify_about_volume_usage(
+                    context, volume, event_suffix,
+                    extra_usage_info=extra_usage_info, host=self.host)
+
+    def _notify_about_cgsnapshot_usage(self,
+                                       context,
+                                       cgsnapshot,
+                                       event_suffix,
+                                       extra_usage_info=None):
+        volume_utils.notify_about_cgsnapshot_usage(
+            context, cgsnapshot, event_suffix,
+            extra_usage_info=extra_usage_info, host=self.host)
+
+        snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
+                                                            cgsnapshot['id'])
+        if snapshots:
+            for snapshot in snapshots:
+                volume_utils.notify_about_snapshot_usage(
+                    context, snapshot, event_suffix,
+                    extra_usage_info=extra_usage_info, host=self.host)
+
     def extend_volume(self, context, volume_id, new_size, reservations):
         try:
             # NOTE(flaper87): Verify the driver is enabled
@@ -1462,3 +1497,343 @@ class VolumeManager(manager.SchedulerDependentManager):
                 except Exception:
                     LOG.exception(_("Error checking replication status for "
                                     "volume %s") % vol['id'])
+
+    def create_consistencygroup(self, context, group_id):
+        """Creates the consistency group."""
+        context = context.elevated()
+        group_ref = self.db.consistencygroup_get(context, group_id)
+        group_ref['host'] = self.host
+
+        status = 'available'
+        model_update = False
+
+        self._notify_about_consistencygroup_usage(
+            context, group_ref, "create.start")
+
+        try:
+            utils.require_driver_initialized(self.driver)
+
+            LOG.info(_("Consistency group %s: creating"), group_ref['name'])
+            model_update = self.driver.create_consistencygroup(context,
+                                                               group_ref)
+
+            if model_update:
+                group_ref = self.db.consistencygroup_update(
+                    context, group_ref['id'], model_update)
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self.db.consistencygroup_update(
+                    context,
+                    group_ref['id'],
+                    {'status': 'error'})
+                LOG.error(_("Consistency group %s: create failed"),
+                          group_ref['name'])
+
+        now = timeutils.utcnow()
+        self.db.consistencygroup_update(context,
+                                        group_ref['id'],
+                                        {'status': status,
+                                         'created_at': now})
+        LOG.info(_("Consistency group %s: created successfully"),
+                 group_ref['name'])
+
+        self._notify_about_consistencygroup_usage(
+            context, group_ref, "create.end")
+
+        return group_ref['id']
+
+    def delete_consistencygroup(self, context, group_id):
+        """Deletes consistency group and the volumes in the group."""
+        context = context.elevated()
+        group_ref = self.db.consistencygroup_get(context, group_id)
+        project_id = group_ref['project_id']
+
+        LOG.info(_("Consistency group %s: deleting"), group_ref['id'])
+
+        volumes = self.db.volume_get_all_by_group(context, group_id)
+
+        for volume_ref in volumes:
+            if volume_ref['attach_status'] == "attached":
+                # Volume is still attached, need to detach first
+                raise exception.VolumeAttached(volume_id=volume_ref['id'])
+            if volume_ref['host'] != self.host:
+                raise exception.InvalidVolume(
+                    reason=_("Volume is not local to this node"))
+
+        self._notify_about_consistencygroup_usage(
+            context, group_ref, "delete.start")
+
+        try:
+            utils.require_driver_initialized(self.driver)
+
+            LOG.debug("Consistency group %(group_id)s: deleting",
+                      {'group_id': group_id})
+
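+            # The driver returns an optional model update for the group and
+            # the final status of each volume it removed.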
+            model_update, volumes = self.driver.delete_consistencygroup(
+                context, group_ref)
+
+            if volumes:
+                for volume in volumes:
+                    update = {'status': volume['status']}
+                    self.db.volume_update(context, volume['id'],
+                                          update)
+                    # If we failed to delete a volume, make sure the status
+                    # for the cg is set to error as well
+                    if (volume['status'] in ['error_deleting', 'error'] and
+                            model_update['status'] not in
+                            ['error_deleting', 'error']):
+                        model_update['status'] = volume['status']
+
+            if model_update:
+                if model_update['status'] in ['error_deleting', 'error']:
+                    msg = (_('Error occurred when deleting consistency group '
+                             '%s.') % group_ref['id'])
+                    LOG.error(msg)
+                    raise exception.VolumeDriverException(message=msg)
+                else:
+                    self.db.consistencygroup_update(context, group_ref['id'],
+                                                    model_update)
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self.db.consistencygroup_update(
+                    context,
+                    group_ref['id'],
+                    {'status': 'error_deleting'})
+
+        # Get reservations for group
+        try:
+            reserve_opts = {'consistencygroups': -1}
+            cgreservations = CGQUOTAS.reserve(context,
+                                              project_id=project_id,
+                                              **reserve_opts)
+        except Exception:
+            cgreservations = None
+            LOG.exception(_("Failed to update usages deleting "
+                            "consistency group."))
+
+        for volume_ref in volumes:
+            # Get reservations for volume
+            try:
+                volume_id = volume_ref['id']
+                reserve_opts = {'volumes': -1,
+                                'gigabytes': -volume_ref['size']}
+                QUOTAS.add_volume_type_opts(context,
+                                            reserve_opts,
+                                            volume_ref.get('volume_type_id'))
+                reservations = QUOTAS.reserve(context,
+                                              project_id=project_id,
+                                              **reserve_opts)
+            except Exception:
+                reservations = None
+                LOG.exception(_("Failed to update usages deleting volume."))
+
+            # Delete glance metadata if it exists
+            self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
+
+            self.db.volume_destroy(context, volume_id)
+
+            # Commit the reservations
+            if reservations:
+                QUOTAS.commit(context, reservations, project_id=project_id)
+
+            self.stats['allocated_capacity_gb'] -= volume_ref['size']
+
+        if cgreservations:
+            CGQUOTAS.commit(context, cgreservations,
+                            project_id=project_id)
+
+        self.db.consistencygroup_destroy(context, group_id)
+        LOG.info(_("Consistency group %s: deleted successfully."),
+                 group_id)
+        self._notify_about_consistencygroup_usage(
+            context, group_ref, "delete.end")
+        self.publish_service_capabilities(context)
+
+        return True
+
+    def create_cgsnapshot(self, context, group_id, cgsnapshot_id):
+        """Creates the cgsnapshot."""
+        caller_context = context
+        context = context.elevated()
+        cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
+        LOG.info(_("Cgsnapshot %s: creating."), cgsnapshot_ref['id'])
+
+        snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
+                                                            cgsnapshot_id)
+
+        self._notify_about_cgsnapshot_usage(
+            context, cgsnapshot_ref, "create.start")
+
+        try:
+            utils.require_driver_initialized(self.driver)
+
+            LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
+                      {'cgsnap_id': cgsnapshot_id})
+
+            # Pass context so that drivers that want to use it, can,
+            # but it is not a requirement for all drivers.
+            cgsnapshot_ref['context'] = caller_context
+            for snapshot in snapshots:
+                snapshot['context'] = caller_context
+
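+            # The driver returns an optional model update for the cgsnapshot
+            # and the resulting status of each member snapshot.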
+            model_update, snapshots = \
+                self.driver.create_cgsnapshot(context, cgsnapshot_ref)
+
+            if snapshots:
+                for snapshot in snapshots:
+                    # Update db if status is error
+                    if snapshot['status'] == 'error':
+                        update = {'status': snapshot['status']}
+                        self.db.snapshot_update(context, snapshot['id'],
+                                                update)
+                        # If status for one snapshot is error, make sure
+                        # the status for the cgsnapshot is also error
+                        if model_update['status'] != 'error':
+                            model_update['status'] = snapshot['status']
+
+            if model_update:
+                if model_update['status'] == 'error':
+                    msg = (_('Error occurred when creating cgsnapshot '
+                             '%s.') % cgsnapshot_ref['id'])
+                    LOG.error(msg)
+                    raise exception.VolumeDriverException(message=msg)
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self.db.cgsnapshot_update(context,
+                                          cgsnapshot_ref['id'],
+                                          {'status': 'error'})
+
+        for snapshot in snapshots:
+            volume_id = snapshot['volume_id']
+            snapshot_id = snapshot['id']
+            vol_ref = self.db.volume_get(context, volume_id)
+            if vol_ref.bootable:
+                try:
+                    self.db.volume_glance_metadata_copy_to_snapshot(
+                        context, snapshot['id'], volume_id)
+                except exception.CinderException as ex:
+                    LOG.error(_("Failed updating snapshot %(snapshot_id)s"
+                                " metadata using the provided volume"
+                                " %(volume_id)s metadata") %
+                              {'volume_id': volume_id,
+                               'snapshot_id': snapshot_id})
+                    self.db.snapshot_update(context,
+                                            snapshot['id'],
+                                            {'status': 'error'})
+                    raise exception.MetadataCopyFailure(reason=ex)
+
+            self.db.snapshot_update(context,
+                                    snapshot['id'], {'status': 'available',
+                                                     'progress': '100%'})
+
+        self.db.cgsnapshot_update(context,
+                                  cgsnapshot_ref['id'],
+                                  {'status': 'available'})
+
+        LOG.info(_("cgsnapshot %s: created successfully"),
+                 cgsnapshot_ref['id'])
+        self._notify_about_cgsnapshot_usage(
+            context, cgsnapshot_ref, "create.end")
+        return cgsnapshot_id
+
+    def delete_cgsnapshot(self, context, cgsnapshot_id):
+        """Deletes cgsnapshot."""
+        caller_context = context
+        context = context.elevated()
+        cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
+        project_id = cgsnapshot_ref['project_id']
+
+        LOG.info(_("cgsnapshot %s: deleting"), cgsnapshot_ref['id'])
+
+        snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
+                                                            cgsnapshot_id)
+
+        self._notify_about_cgsnapshot_usage(
+            context, cgsnapshot_ref, "delete.start")
+
+        try:
+            utils.require_driver_initialized(self.driver)
+
+            LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
+                      {'cgsnap_id': cgsnapshot_id})
+
+            # Pass context so that drivers that want to use it, can,
+            # but it is not a requirement for all drivers.
+            cgsnapshot_ref['context'] = caller_context
+            for snapshot in snapshots:
+                snapshot['context'] = caller_context
+
+            model_update, snapshots = \
+                self.driver.delete_cgsnapshot(context, cgsnapshot_ref)
+
+            if snapshots:
+                for snapshot in snapshots:
+                    update = {'status': snapshot['status']}
+                    self.db.snapshot_update(context, snapshot['id'],
+                                            update)
+                    if (snapshot['status'] in ['error_deleting', 'error'] and
+                            model_update['status'] not in
+                            ['error_deleting', 'error']):
+                        model_update['status'] = snapshot['status']
+
+            if model_update:
+                if model_update['status'] in ['error_deleting', 'error']:
+                    msg = (_('Error occurred when deleting cgsnapshot '
+                             '%s.') % cgsnapshot_ref['id'])
+                    LOG.error(msg)
+                    raise exception.VolumeDriverException(message=msg)
+                else:
+                    self.db.cgsnapshot_update(context, cgsnapshot_ref['id'],
+                                              model_update)
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self.db.cgsnapshot_update(context,
+                                          cgsnapshot_ref['id'],
+                                          {'status': 'error_deleting'})
+
+        for snapshot in snapshots:
+            # Get reservations
+            try:
+                if CONF.no_snapshot_gb_quota:
+                    reserve_opts = {'snapshots': -1}
+                else:
+                    reserve_opts = {
+                        'snapshots': -1,
+                        'gigabytes': -snapshot['volume_size'],
+                    }
+                volume_ref = self.db.volume_get(context, snapshot['volume_id'])
+                QUOTAS.add_volume_type_opts(context,
+                                            reserve_opts,
+                                            volume_ref.get('volume_type_id'))
+                reservations = QUOTAS.reserve(context,
+                                              project_id=project_id,
+                                              **reserve_opts)
+
+            except Exception:
+                reservations = None
+                LOG.exception(_("Failed to update usages deleting snapshot"))
+
+            self.db.volume_glance_metadata_delete_by_snapshot(context,
+                                                              snapshot['id'])
+            self.db.snapshot_destroy(context, snapshot['id'])
+
+            # Commit the reservations
+            if reservations:
+                QUOTAS.commit(context, reservations, project_id=project_id)
+
+        self.db.cgsnapshot_destroy(context, cgsnapshot_id)
+        LOG.info(_("cgsnapshot %s: deleted successfully"),
+                 cgsnapshot_ref['id'])
+        self._notify_about_cgsnapshot_usage(
+            context, cgsnapshot_ref, "delete.end")
+
+        return True
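For driver authors, the manager code above defines the contract it expects
from a backend: create_consistencygroup returns an optional model update dict
for the group, while delete_consistencygroup and create_cgsnapshot return a
(model_update, objects) tuple carrying the final status of each member volume
or snapshot. A minimal sketch of that contract, for a hypothetical driver with
no backend-side work to do (it assumes the self.db handle and the per-object
delete_volume/create_snapshot methods that Cinder volume drivers normally
have), might look like:

    def create_consistencygroup(self, context, group):
        # Nothing to do on the backend; the manager marks the group
        # 'available' on success.
        return {'status': 'available'}

    def delete_consistencygroup(self, context, group):
        volumes = self.db.volume_get_all_by_group(context, group['id'])
        for volume_ref in volumes:
            self.delete_volume(volume_ref)
            volume_ref['status'] = 'deleted'
        return {'status': 'deleted'}, volumes

    def create_cgsnapshot(self, context, cgsnapshot):
        snapshots = self.db.snapshot_get_all_for_cgsnapshot(
            context, cgsnapshot['id'])
        for snapshot in snapshots:
            self.create_snapshot(snapshot)
            snapshot['status'] = 'available'
        return {'status': 'available'}, snapshots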
index 63c10ed986a6f26511ddd6e60483c9946806a880..dd047e0d1def502be2bc737f9681e39438c4a771 100644 (file)
@@ -53,6 +53,9 @@ class VolumeAPI(object):
         1.16 - Removes create_export.
         1.17 - Add replica option to create_volume, promote_replica and
                sync_replica.
+        1.18 - Adds create_consistencygroup, delete_consistencygroup,
+               create_cgsnapshot, and delete_cgsnapshot. Also adds
+               the consistencygroup_id parameter in create_volume.
     '''
 
     BASE_RPC_API_VERSION = '1.0'
@@ -61,14 +64,37 @@ class VolumeAPI(object):
         super(VolumeAPI, self).__init__()
         target = messaging.Target(topic=CONF.volume_topic,
                                   version=self.BASE_RPC_API_VERSION)
-        self.client = rpc.get_client(target, '1.17')
+        self.client = rpc.get_client(target, '1.18')
+
+    def create_consistencygroup(self, ctxt, group, host):
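+        # A newly requested group has no host yet, so the host chosen by the
+        # scheduler is passed in explicitly; delete_consistencygroup and
+        # create_cgsnapshot use the host already stored on the group.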
+        cctxt = self.client.prepare(server=host, version='1.18')
+        cctxt.cast(ctxt, 'create_consistencygroup',
+                   group_id=group['id'])
+
+    def delete_consistencygroup(self, ctxt, group):
+        cctxt = self.client.prepare(server=group['host'], version='1.18')
+        cctxt.cast(ctxt, 'delete_consistencygroup',
+                   group_id=group['id'])
+
+    def create_cgsnapshot(self, ctxt, group, cgsnapshot):
+        cctxt = self.client.prepare(server=group['host'], version='1.18')
+        cctxt.cast(ctxt, 'create_cgsnapshot',
+                   group_id=group['id'],
+                   cgsnapshot_id=cgsnapshot['id'])
+
+    def delete_cgsnapshot(self, ctxt, cgsnapshot, host):
+        cctxt = self.client.prepare(server=host, version='1.18')
+        cctxt.cast(ctxt, 'delete_cgsnapshot',
+                   cgsnapshot_id=cgsnapshot['id'])
 
     def create_volume(self, ctxt, volume, host,
                       request_spec, filter_properties,
                       allow_reschedule=True,
                       snapshot_id=None, image_id=None,
                       source_replicaid=None,
-                      source_volid=None):
+                      source_volid=None,
+                      consistencygroup_id=None):
 
         cctxt = self.client.prepare(server=host, version='1.4')
         request_spec_p = jsonutils.to_primitive(request_spec)
@@ -80,7 +106,8 @@ class VolumeAPI(object):
                    snapshot_id=snapshot_id,
                    image_id=image_id,
                    source_replicaid=source_replicaid,
-                   source_volid=source_volid),
+                   source_volid=source_volid,
+                   consistencygroup_id=consistencygroup_id)
 
     def delete_volume(self, ctxt, volume, unmanage_only=False):
         cctxt = self.client.prepare(server=volume['host'], version='1.15')
index 37c17db2e8e5362bc200667c7f9c2a0966b28b42..94aa5dcb20430ffaf6385ebed18544d747ea316c 100644 (file)
@@ -147,6 +147,69 @@ def notify_about_replication_error(context, volume, suffix,
                                                 usage_info)
 
 
+def _usage_from_consistencygroup(context, group_ref, **kw):
+    usage_info = dict(tenant_id=group_ref['project_id'],
+                      user_id=group_ref['user_id'],
+                      availability_zone=group_ref['availability_zone'],
+                      consistencygroup_id=group_ref['id'],
+                      name=group_ref['name'],
+                      created_at=null_safe_str(group_ref['created_at']),
+                      status=group_ref['status'])
+
+    usage_info.update(kw)
+    return usage_info
+
+
+def notify_about_consistencygroup_usage(context, group, event_suffix,
+                                        extra_usage_info=None, host=None):
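+    # Events are published as 'consistencygroup.<event_suffix>', e.g.
+    # 'consistencygroup.create.start' and 'consistencygroup.delete.end'.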
+    if not host:
+        host = CONF.host
+
+    if not extra_usage_info:
+        extra_usage_info = {}
+
+    usage_info = _usage_from_consistencygroup(context,
+                                              group,
+                                              **extra_usage_info)
+
+    rpc.get_notifier("consistencygroup", host).info(
+        context,
+        'consistencygroup.%s' % event_suffix,
+        usage_info)
+
+
+def _usage_from_cgsnapshot(context, cgsnapshot_ref, **kw):
+    usage_info = dict(
+        tenant_id=cgsnapshot_ref['project_id'],
+        user_id=cgsnapshot_ref['user_id'],
+        cgsnapshot_id=cgsnapshot_ref['id'],
+        name=cgsnapshot_ref['name'],
+        consistencygroup_id=cgsnapshot_ref['consistencygroup_id'],
+        created_at=null_safe_str(cgsnapshot_ref['created_at']),
+        status=cgsnapshot_ref['status'])
+
+    usage_info.update(kw)
+    return usage_info
+
+
+def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix,
+                                  extra_usage_info=None, host=None):
+    if not host:
+        host = CONF.host
+
+    if not extra_usage_info:
+        extra_usage_info = {}
+
+    usage_info = _usage_from_cgsnapshot(context,
+                                        cgsnapshot,
+                                        **extra_usage_info)
+
+    rpc.get_notifier("cgsnapshot", host).info(
+        context,
+        'cgsnapshot.%s' % event_suffix,
+        usage_info)
+
+
 def setup_blkio_cgroup(srcpath, dstpath, bps_limit, execute=utils.execute):
     if not bps_limit:
         LOG.debug('Not using bps rate limiting on volume copy')
index 97d156bb8aec57eb8b40c9c76585b5be9855a277..0d76e3eb829adae89703d05eff0b8c9e3a981179 100644 (file)
 # value)
 #quota_snapshots=10
 
+# Number of consistencygroups allowed per project (integer
+# value)
+#quota_consistencygroups=10
+
 # Total amount of storage, in gigabytes, allowed for volumes
 # and snapshots per project (integer value)
 #quota_gigabytes=1000
 # (string value)
 #replication_api_class=cinder.replication.api.API
 
+# The full class name of the consistencygroup API class
+# (string value)
+#consistencygroup_api_class=cinder.consistencygroup.api.API
+
 
 #
 # Options defined in cinder.compute
index 3ec7fb48401c29128597a60850d5653a8d7ca880..3eecf4dcb665840b8a4c6eb8d5f839c9d7b5c326 100644 (file)
     "backup:backup-import": [["rule:admin_api"]],
     "backup:backup-export": [["rule:admin_api"]],
 
-    "snapshot_extension:snapshot_actions:update_snapshot_status": []
+    "snapshot_extension:snapshot_actions:update_snapshot_status": [],
+
+    "consistencygroup:create" : [["group:nobody"]],
+    "consistencygroup:delete": [["group:nobody"]],
+    "consistencygroup:get": [["group:nobody"]],
+    "consistencygroup:get_all": [["group:nobody"]],
+
+    "consistencygroup:create_cgsnapshot" : [],
+    "consistencygroup:delete_cgsnapshot": [],
+    "consistencygroup:get_cgsnapshot": [],
+    "consistencygroup:get_all_cgsnapshots": []
 }
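
Note: the rules above leave the cgsnapshot operations open but set the
consistency group create/delete/get/get_all rules to "group:nobody", which
effectively disables those APIs until a deployer opts in. To enable
consistency groups, an operator would typically relax these rules in the
deployment's own policy.json, for example (illustrative only, not part of
this patch):

    "consistencygroup:create": [],
    "consistencygroup:delete": [],
    "consistencygroup:get": [],
    "consistencygroup:get_all": [],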