--- /dev/null
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Module dedicated to functions/classes dealing with rate limiting requests.
+"""
+
+import collections
+import copy
+import httplib
+import math
+import re
+import time
+
+import webob.dec
+import webob.exc
+
+from cinder.api.openstack import wsgi
+from cinder.api.views import limits as limits_views
+from cinder.api import xmlutil
+from cinder.openstack.common import importutils
+from cinder.openstack.common import jsonutils
+from cinder import quota
+from cinder import wsgi as base_wsgi
+
+QUOTAS = quota.QUOTAS
+
+
+# Convenience constants for the limits dictionary passed to Limiter().
+PER_SECOND = 1
+PER_MINUTE = 60
+PER_HOUR = 60 * 60
+PER_DAY = 60 * 60 * 24
+
+
+limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
+
+
+class LimitsTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('limits', selector='limits')
+
+ rates = xmlutil.SubTemplateElement(root, 'rates')
+ rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate')
+ rate.set('uri', 'uri')
+ rate.set('regex', 'regex')
+ limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit')
+ limit.set('value', 'value')
+ limit.set('verb', 'verb')
+ limit.set('remaining', 'remaining')
+ limit.set('unit', 'unit')
+ limit.set('next-available', 'next-available')
+
+ absolute = xmlutil.SubTemplateElement(root, 'absolute',
+ selector='absolute')
+ limit = xmlutil.SubTemplateElement(absolute, 'limit',
+ selector=xmlutil.get_items)
+ limit.set('name', 0)
+ limit.set('value', 1)
+
+ return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap)
+
+
+class LimitsController(object):
+ """
+ Controller for accessing limits in the OpenStack API.
+ """
+
+ @wsgi.serializers(xml=LimitsTemplate)
+ def index(self, req):
+ """
+ Return all global and rate limit information.
+ """
+ context = req.environ['cinder.context']
+ quotas = QUOTAS.get_project_quotas(context, context.project_id,
+ usages=False)
+ abs_limits = dict((k, v['limit']) for k, v in quotas.items())
+ rate_limits = req.environ.get("cinder.limits", [])
+
+ builder = self._get_view_builder(req)
+ return builder.build(rate_limits, abs_limits)
+
+ def _get_view_builder(self, req):
+ return limits_views.ViewBuilder()
+
+
+def create_resource():
+ return wsgi.Resource(LimitsController())
+
+
+class Limit(object):
+ """
+ Stores information about a limit for HTTP requests.
+ """
+
+ UNITS = {
+ 1: "SECOND",
+ 60: "MINUTE",
+ 60 * 60: "HOUR",
+ 60 * 60 * 24: "DAY",
+ }
+
+ UNIT_MAP = dict([(v, k) for k, v in UNITS.items()])
+
+ def __init__(self, verb, uri, regex, value, unit):
+ """
+ Initialize a new `Limit`.
+
+ @param verb: HTTP verb (POST, PUT, etc.)
+ @param uri: Human-readable URI
+ @param regex: Regular expression format for this limit
+ @param value: Integer number of requests which can be made
+ @param unit: Unit of measure for the value parameter
+ """
+ self.verb = verb
+ self.uri = uri
+ self.regex = regex
+ self.value = int(value)
+ self.unit = unit
+ self.unit_string = self.display_unit().lower()
+ self.remaining = int(value)
+
+ if value <= 0:
+ raise ValueError("Limit value must be > 0")
+
+ self.last_request = None
+ self.next_request = None
+
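+        # Leaky-bucket state: 'capacity' is the bucket size in seconds
+        # (the length of the unit) and each request adds
+        # capacity / value, so at most 'value' requests fit into one
+        # unit of time before requests start being delayed.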
+ self.water_level = 0
+ self.capacity = self.unit
+ self.request_value = float(self.capacity) / float(self.value)
+ msg = _("Only %(value)s %(verb)s request(s) can be "
+ "made to %(uri)s every %(unit_string)s.")
+ self.error_message = msg % self.__dict__
+
+ def __call__(self, verb, url):
+ """
+ Represents a call to this limit from a relevant request.
+
+ @param verb: string http verb (POST, GET, etc.)
+ @param url: string URL
+ """
+ if self.verb != verb or not re.match(self.regex, url):
+ return
+
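+        # Leaky-bucket accounting: the bucket drains by one unit per
+        # second of elapsed time, each matching request pours in
+        # 'request_value' units, and a request that would overflow
+        # 'capacity' is delayed by the size of the overflow (in seconds).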
+ now = self._get_time()
+
+ if self.last_request is None:
+ self.last_request = now
+
+ leak_value = now - self.last_request
+
+ self.water_level -= leak_value
+ self.water_level = max(self.water_level, 0)
+ self.water_level += self.request_value
+
+ difference = self.water_level - self.capacity
+
+ self.last_request = now
+
+ if difference > 0:
+ self.water_level -= self.request_value
+ self.next_request = now + difference
+ return difference
+
+ cap = self.capacity
+ water = self.water_level
+ val = self.value
+
+ self.remaining = math.floor(((cap - water) / cap) * val)
+ self.next_request = now
+
+ def _get_time(self):
+ """Retrieve the current time. Broken out for testability."""
+ return time.time()
+
+ def display_unit(self):
+ """Display the string name of the unit."""
+ return self.UNITS.get(self.unit, "UNKNOWN")
+
+ def display(self):
+ """Return a useful representation of this class."""
+ return {
+ "verb": self.verb,
+ "URI": self.uri,
+ "regex": self.regex,
+ "value": self.value,
+ "remaining": int(self.remaining),
+ "unit": self.display_unit(),
+ "resetTime": int(self.next_request or self._get_time()),
+ }
+
+# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
+# a regular-expression to match, value and unit of measure (PER_DAY, etc.)
+
+DEFAULT_LIMITS = [
+ Limit("POST", "*", ".*", 10, PER_MINUTE),
+ Limit("POST", "*/servers", "^/servers", 50, PER_DAY),
+ Limit("PUT", "*", ".*", 10, PER_MINUTE),
+ Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE),
+ Limit("DELETE", "*", ".*", 100, PER_MINUTE),
+]
+
+
+class RateLimitingMiddleware(base_wsgi.Middleware):
+ """
+ Rate-limits requests passing through this middleware. All limit information
+ is stored in memory for this implementation.
+ """
+
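+    # Example (illustrative): wrap a WSGI application directly, e.g.
+    #   app = RateLimitingMiddleware(app, limits='(POST, *, .*, 5, MINUTE)')
+    # 'limiter' may name an alternate limiter class by its import path.
+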
+ def __init__(self, application, limits=None, limiter=None, **kwargs):
+ """
+ Initialize new `RateLimitingMiddleware`, which wraps the given WSGI
+ application and sets up the given limits.
+
+ @param application: WSGI application to wrap
+ @param limits: String describing limits
+ @param limiter: String identifying class for representing limits
+
+ Other parameters are passed to the constructor for the limiter.
+ """
+ base_wsgi.Middleware.__init__(self, application)
+
+ # Select the limiter class
+ if limiter is None:
+ limiter = Limiter
+ else:
+ limiter = importutils.import_class(limiter)
+
+ # Parse the limits, if any are provided
+ if limits is not None:
+ limits = limiter.parse_limits(limits)
+
+ self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)
+
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def __call__(self, req):
+ """
+ Represents a single call through this middleware. We should record the
+ request if we have a limit relevant to it. If no limit is relevant to
+ the request, ignore it.
+
+ If the request should be rate limited, return a fault telling the user
+ they are over the limit and need to retry later.
+ """
+ verb = req.method
+ url = req.url
+ context = req.environ.get("cinder.context")
+
+ if context:
+ username = context.user_id
+ else:
+ username = None
+
+ delay, error = self._limiter.check_for_delay(verb, url, username)
+
+ if delay:
+ msg = _("This request was rate-limited.")
+ retry = time.time() + delay
+ return wsgi.OverLimitFault(msg, error, retry)
+
+ req.environ["cinder.limits"] = self._limiter.get_limits(username)
+
+ return self.application
+
+
+class Limiter(object):
+ """
+ Rate-limit checking class which handles limits in memory.
+ """
+
+ def __init__(self, limits, **kwargs):
+ """
+ Initialize the new `Limiter`.
+
+ @param limits: List of `Limit` objects
+ """
+ self.limits = copy.deepcopy(limits)
+ self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))
+
+ # Pick up any per-user limit information
+ for key, value in kwargs.items():
+ if key.startswith('user:'):
+ username = key[5:]
+ self.levels[username] = self.parse_limits(value)
+
+ def get_limits(self, username=None):
+ """
+ Return the limits for a given user.
+ """
+ return [limit.display() for limit in self.levels[username]]
+
+ def check_for_delay(self, verb, url, username=None):
+ """
+        Check the given verb/url/username triplet for applicable limits.
+
+ @return: Tuple of delay (in seconds) and error message (or None, None)
+ """
+ delays = []
+
+ for limit in self.levels[username]:
+ delay = limit(verb, url)
+ if delay:
+ delays.append((delay, limit.error_message))
+
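+        # More than one limit may match the request; report the shortest
+        # delay along with its error message.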
+ if delays:
+ delays.sort()
+ return delays[0]
+
+ return None, None
+
+ # Note: This method gets called before the class is instantiated,
+ # so this must be either a static method or a class method. It is
+ # used to develop a list of limits to feed to the constructor. We
+ # put this in the class so that subclasses can override the
+ # default limit parsing.
+ @staticmethod
+ def parse_limits(limits):
+ """
+ Convert a string into a list of Limit instances. This
+ implementation expects a semicolon-separated sequence of
+ parenthesized groups, where each group contains a
+ comma-separated sequence consisting of HTTP method,
+ user-readable URI, a URI reg-exp, an integer number of
+ requests which can be made, and a unit of measure. Valid
+ values for the latter are "SECOND", "MINUTE", "HOUR", and
+ "DAY".
+
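+        For example (an illustrative value, not a shipped default),
+        "(POST, *, .*, 10, MINUTE);(GET, *, .*, 100, HOUR)" would be
+        parsed into two Limit instances.
+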
+ @return: List of Limit instances.
+ """
+
+ # Handle empty limit strings
+ limits = limits.strip()
+ if not limits:
+ return []
+
+ # Split up the limits by semicolon
+ result = []
+ for group in limits.split(';'):
+ group = group.strip()
+ if group[:1] != '(' or group[-1:] != ')':
+ raise ValueError("Limit rules must be surrounded by "
+ "parentheses")
+ group = group[1:-1]
+
+ # Extract the Limit arguments
+ args = [a.strip() for a in group.split(',')]
+ if len(args) != 5:
+ raise ValueError("Limit rules must contain the following "
+ "arguments: verb, uri, regex, value, unit")
+
+ # Pull out the arguments
+ verb, uri, regex, value, unit = args
+
+ # Upper-case the verb
+ verb = verb.upper()
+
+ # Convert value--raises ValueError if it's not integer
+ value = int(value)
+
+ # Convert unit
+ unit = unit.upper()
+ if unit not in Limit.UNIT_MAP:
+ raise ValueError("Invalid units specified")
+ unit = Limit.UNIT_MAP[unit]
+
+ # Build a limit
+ result.append(Limit(verb, uri, regex, value, unit))
+
+ return result
+
+
+class WsgiLimiter(object):
+ """
+ Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`.
+
+ To use, POST ``/<username>`` with JSON data such as::
+
+ {
+ "verb" : GET,
+ "path" : "/servers"
+ }
+
+ and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
+ header containing the number of seconds to wait before the action would
+ succeed.
+ """
+
+ def __init__(self, limits=None):
+ """
+ Initialize the new `WsgiLimiter`.
+
+ @param limits: List of `Limit` objects
+ """
+ self._limiter = Limiter(limits or DEFAULT_LIMITS)
+
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def __call__(self, request):
+ """
+ Handles a call to this application. Returns 204 if the request is
+ acceptable to the limiter, else a 403 is returned with a relevant
+ header indicating when the request *will* succeed.
+ """
+ if request.method != "POST":
+ raise webob.exc.HTTPMethodNotAllowed()
+
+ try:
+ info = dict(jsonutils.loads(request.body))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest()
+
+ username = request.path_info_pop()
+ verb = info.get("verb")
+ path = info.get("path")
+
+ delay, error = self._limiter.check_for_delay(verb, path, username)
+
+ if delay:
+ headers = {"X-Wait-Seconds": "%.2f" % delay}
+ return webob.exc.HTTPForbidden(headers=headers, explanation=error)
+ else:
+ return webob.exc.HTTPNoContent()
+
+
+class WsgiLimiterProxy(object):
+ """
+ Rate-limit requests based on answers from a remote source.
+ """
+
+ def __init__(self, limiter_address):
+ """
+ Initialize the new `WsgiLimiterProxy`.
+
+ @param limiter_address: IP/port combination of where to request limit
+ """
+ self.limiter_address = limiter_address
+
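+    # Mirrors Limiter.check_for_delay(), but asks the remote WsgiLimiter
+    # over HTTP: a 2xx answer means no delay; otherwise the delay comes
+    # back in the X-Wait-Seconds header and the body carries the message.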
+ def check_for_delay(self, verb, path, username=None):
+ body = jsonutils.dumps({"verb": verb, "path": path})
+ headers = {"Content-Type": "application/json"}
+
+ conn = httplib.HTTPConnection(self.limiter_address)
+
+ if username:
+ conn.request("POST", "/%s" % (username), body, headers)
+ else:
+ conn.request("POST", "/", body, headers)
+
+ resp = conn.getresponse()
+
+        if 200 <= resp.status < 300:
+ return None, None
+
+ return resp.getheader("X-Wait-Seconds"), resp.read() or None
+
+ # Note: This method gets called before the class is instantiated,
+ # so this must be either a static method or a class method. It is
+ # used to develop a list of limits to feed to the constructor.
+ # This implementation returns an empty list, since all limit
+ # decisions are made by a remote server.
+ @staticmethod
+ def parse_limits(limits):
+ """
+ Ignore a limits string--simply doesn't apply for the limit
+ proxy.
+
+ @return: Empty list.
+ """
+
+ return []
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+WSGI middleware for OpenStack Volume API.
+"""
+
+from cinder.api import extensions
+import cinder.api.openstack
+from cinder.api.v2 import limits
+from cinder.api.v2 import snapshots
+from cinder.api.v2 import types
+from cinder.api.v2 import volumes
+from cinder.api import versions
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class APIRouter(cinder.api.openstack.APIRouter):
+ """
+ Routes requests on the OpenStack API to the appropriate controller
+ and method.
+ """
+ ExtensionManager = extensions.ExtensionManager
+
+ def _setup_routes(self, mapper, ext_mgr):
+ self.resources['versions'] = versions.create_resource()
+ mapper.connect("versions", "/",
+ controller=self.resources['versions'],
+ action='show')
+
+ mapper.redirect("", "/")
+
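+        # mapper.resource() wires the standard REST routes for each
+        # resource below, e.g. for volumes: GET /volumes,
+        # GET /volumes/detail, POST /volumes, GET/PUT/DELETE
+        # /volumes/{id} and POST /volumes/{id}/action.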
+ self.resources['volumes'] = volumes.create_resource(ext_mgr)
+ mapper.resource("volume", "volumes",
+ controller=self.resources['volumes'],
+ collection={'detail': 'GET'},
+ member={'action': 'POST'})
+
+ self.resources['types'] = types.create_resource()
+ mapper.resource("type", "types",
+ controller=self.resources['types'])
+
+ self.resources['snapshots'] = snapshots.create_resource(ext_mgr)
+ mapper.resource("snapshot", "snapshots",
+ controller=self.resources['snapshots'],
+ collection={'detail': 'GET'},
+ member={'action': 'POST'})
+
+ self.resources['limits'] = limits.create_resource()
+ mapper.resource("limit", "limits",
+ controller=self.resources['limits'])
--- /dev/null
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The volumes snapshots api."""
+
+import webob
+from webob import exc
+
+from cinder.api import common
+from cinder.api.openstack import wsgi
+from cinder.api.v2 import volumes
+from cinder.api import xmlutil
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder import utils
+from cinder import volume
+
+
+LOG = logging.getLogger(__name__)
+
+
+FLAGS = flags.FLAGS
+
+
+def _translate_snapshot_detail_view(context, snapshot):
+ """Maps keys for snapshots details view."""
+
+ d = _translate_snapshot_summary_view(context, snapshot)
+
+ # NOTE(gagupta): No additional data / lookups at the moment
+ return d
+
+
+def _translate_snapshot_summary_view(context, snapshot):
+ """Maps keys for snapshots summary view."""
+ d = {}
+
+ d['id'] = snapshot['id']
+ d['created_at'] = snapshot['created_at']
+ d['display_name'] = snapshot['display_name']
+ d['display_description'] = snapshot['display_description']
+ d['volume_id'] = snapshot['volume_id']
+ d['status'] = snapshot['status']
+ d['size'] = snapshot['volume_size']
+
+ return d
+
+
+def make_snapshot(elem):
+ elem.set('id')
+ elem.set('status')
+ elem.set('size')
+ elem.set('created_at')
+ elem.set('display_name')
+ elem.set('display_description')
+ elem.set('volume_id')
+
+
+class SnapshotTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('snapshot', selector='snapshot')
+ make_snapshot(root)
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class SnapshotsTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('snapshots')
+ elem = xmlutil.SubTemplateElement(root, 'snapshot',
+ selector='snapshots')
+ make_snapshot(elem)
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class SnapshotsController(wsgi.Controller):
+ """The Volumes API controller for the OpenStack API."""
+
+ def __init__(self, ext_mgr=None):
+ self.volume_api = volume.API()
+ self.ext_mgr = ext_mgr
+ super(SnapshotsController, self).__init__()
+
+ @wsgi.serializers(xml=SnapshotTemplate)
+ def show(self, req, id):
+ """Return data about the given snapshot."""
+ context = req.environ['cinder.context']
+
+ try:
+ vol = self.volume_api.get_snapshot(context, id)
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+
+ return {'snapshot': _translate_snapshot_detail_view(context, vol)}
+
+ def delete(self, req, id):
+ """Delete a snapshot."""
+ context = req.environ['cinder.context']
+
+ LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
+
+ try:
+ snapshot = self.volume_api.get_snapshot(context, id)
+ self.volume_api.delete_snapshot(context, snapshot)
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+ return webob.Response(status_int=202)
+
+ @wsgi.serializers(xml=SnapshotsTemplate)
+ def index(self, req):
+ """Returns a summary list of snapshots."""
+ return self._items(req, entity_maker=_translate_snapshot_summary_view)
+
+ @wsgi.serializers(xml=SnapshotsTemplate)
+ def detail(self, req):
+ """Returns a detailed list of snapshots."""
+ return self._items(req, entity_maker=_translate_snapshot_detail_view)
+
+ def _items(self, req, entity_maker):
+ """Returns a list of snapshots, transformed through entity_maker."""
+ context = req.environ['cinder.context']
+
+ search_opts = {}
+ search_opts.update(req.GET)
+ allowed_search_options = ('status', 'volume_id', 'display_name')
+ volumes.remove_invalid_options(context, search_opts,
+ allowed_search_options)
+
+ snapshots = self.volume_api.get_all_snapshots(context,
+ search_opts=search_opts)
+ limited_list = common.limited(snapshots, req)
+ res = [entity_maker(context, snapshot) for snapshot in limited_list]
+ return {'snapshots': res}
+
+ @wsgi.serializers(xml=SnapshotTemplate)
+ def create(self, req, body):
+ """Creates a new snapshot."""
+ context = req.environ['cinder.context']
+
+ if not self.is_valid_body(body, 'snapshot'):
+ raise exc.HTTPUnprocessableEntity()
+
+ snapshot = body['snapshot']
+ volume_id = snapshot['volume_id']
+ volume = self.volume_api.get(context, volume_id)
+ force = snapshot.get('force', False)
+ msg = _("Create snapshot from volume %s")
+ LOG.audit(msg, volume_id, context=context)
+
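+        # 'force' (when true) asks the volume API to snapshot the volume
+        # even if it is currently in use; it must be a recognizable
+        # boolean string such as 'true' or 'false'.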
+ if not utils.is_valid_boolstr(force):
+ msg = _("Invalid value '%s' for force. ") % force
+ raise exception.InvalidParameterValue(err=msg)
+
+ if utils.bool_from_str(force):
+ new_snapshot = self.volume_api.create_snapshot_force(context,
+ volume,
+ snapshot.get('display_name'),
+ snapshot.get('display_description'))
+ else:
+ new_snapshot = self.volume_api.create_snapshot(context,
+ volume,
+ snapshot.get('display_name'),
+ snapshot.get('display_description'))
+
+ retval = _translate_snapshot_detail_view(context, new_snapshot)
+
+ return {'snapshot': retval}
+
+ @wsgi.serializers(xml=SnapshotTemplate)
+ def update(self, req, id, body):
+ """Update a snapshot."""
+ context = req.environ['cinder.context']
+
+ if not body:
+ raise exc.HTTPUnprocessableEntity()
+
+        if 'snapshot' not in body:
+ raise exc.HTTPUnprocessableEntity()
+
+ snapshot = body['snapshot']
+ update_dict = {}
+
+ valid_update_keys = (
+ 'display_name',
+ 'display_description',
+ )
+
+ for key in valid_update_keys:
+ if key in snapshot:
+ update_dict[key] = snapshot[key]
+
+ try:
+ snapshot = self.volume_api.get_snapshot(context, id)
+ self.volume_api.update_snapshot(context, snapshot, update_dict)
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+
+ snapshot.update(update_dict)
+
+ return {'snapshot': _translate_snapshot_detail_view(context, snapshot)}
+
+
+def create_resource(ext_mgr):
+ return wsgi.Resource(SnapshotsController(ext_mgr))
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+""" The volume type & volume types extra specs extension"""
+
+from webob import exc
+
+from cinder.api.openstack import wsgi
+from cinder.api.views import types as views_types
+from cinder.api import xmlutil
+from cinder import exception
+from cinder.volume import volume_types
+
+
+def make_voltype(elem):
+ elem.set('id')
+ elem.set('name')
+ extra_specs = xmlutil.make_flat_dict('extra_specs', selector='extra_specs')
+ elem.append(extra_specs)
+
+
+class VolumeTypeTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('volume_type', selector='volume_type')
+ make_voltype(root)
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class VolumeTypesTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('volume_types')
+ elem = xmlutil.SubTemplateElement(root, 'volume_type',
+ selector='volume_types')
+ make_voltype(elem)
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class VolumeTypesController(wsgi.Controller):
+ """ The volume types API controller for the OpenStack API """
+
+ _view_builder_class = views_types.ViewBuilder
+
+ @wsgi.serializers(xml=VolumeTypesTemplate)
+ def index(self, req):
+ """ Returns the list of volume types """
+ context = req.environ['cinder.context']
+ vol_types = volume_types.get_all_types(context).values()
+ return self._view_builder.index(req, vol_types)
+
+ @wsgi.serializers(xml=VolumeTypeTemplate)
+ def show(self, req, id):
+ """ Return a single volume type item """
+ context = req.environ['cinder.context']
+
+ try:
+ vol_type = volume_types.get_volume_type(context, id)
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+
+ # TODO(bcwaldon): remove str cast once we use uuids
+ vol_type['id'] = str(vol_type['id'])
+ return self._view_builder.show(req, vol_type)
+
+
+def create_resource():
+ return wsgi.Resource(VolumeTypesController())
--- /dev/null
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The volumes api."""
+
+import webob
+from webob import exc
+from xml.dom import minidom
+
+from cinder.api import common
+from cinder.api.openstack import wsgi
+from cinder.api import xmlutil
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import uuidutils
+from cinder import volume
+from cinder.volume import volume_types
+
+
+LOG = logging.getLogger(__name__)
+
+
+FLAGS = flags.FLAGS
+
+
+def _translate_attachment_detail_view(_context, vol):
+ """Maps keys for attachment details view."""
+
+ d = _translate_attachment_summary_view(_context, vol)
+
+ # No additional data / lookups at the moment
+
+ return d
+
+
+def _translate_attachment_summary_view(_context, vol):
+ """Maps keys for attachment summary view."""
+ d = {}
+
+ volume_id = vol['id']
+
+ # NOTE(justinsb): We use the volume id as the id of the attachment object
+ d['id'] = volume_id
+
+ d['volume_id'] = volume_id
+ d['server_id'] = vol['instance_uuid']
+ if vol.get('mountpoint'):
+ d['device'] = vol['mountpoint']
+
+ return d
+
+
+def _translate_volume_detail_view(context, vol, image_id=None):
+ """Maps keys for volumes details view."""
+
+ d = _translate_volume_summary_view(context, vol, image_id)
+
+ # No additional data / lookups at the moment
+
+ return d
+
+
+def _translate_volume_summary_view(context, vol, image_id=None):
+ """Maps keys for volumes summary view."""
+ d = {}
+
+ d['id'] = vol['id']
+ d['status'] = vol['status']
+ d['size'] = vol['size']
+ d['availability_zone'] = vol['availability_zone']
+ d['created_at'] = vol['created_at']
+
+ d['attachments'] = []
+ if vol['attach_status'] == 'attached':
+ attachment = _translate_attachment_detail_view(context, vol)
+ d['attachments'].append(attachment)
+
+ d['display_name'] = vol['display_name']
+ d['display_description'] = vol['display_description']
+
+ if vol['volume_type_id'] and vol.get('volume_type'):
+ d['volume_type'] = vol['volume_type']['name']
+ else:
+ # TODO(bcwaldon): remove str cast once we use uuids
+ d['volume_type'] = str(vol['volume_type_id'])
+
+ d['snapshot_id'] = vol['snapshot_id']
+
+ if image_id:
+ d['image_id'] = image_id
+
+ LOG.audit(_("vol=%s"), vol, context=context)
+
+ if vol.get('volume_metadata'):
+ metadata = vol.get('volume_metadata')
+ d['metadata'] = dict((item['key'], item['value']) for item in metadata)
+ # avoid circular ref when vol is a Volume instance
+ elif vol.get('metadata') and isinstance(vol.get('metadata'), dict):
+ d['metadata'] = vol['metadata']
+ else:
+ d['metadata'] = {}
+
+ return d
+
+
+def make_attachment(elem):
+ elem.set('id')
+ elem.set('server_id')
+ elem.set('volume_id')
+ elem.set('device')
+
+
+def make_volume(elem):
+ elem.set('id')
+ elem.set('status')
+ elem.set('size')
+ elem.set('availability_zone')
+ elem.set('created_at')
+ elem.set('display_name')
+ elem.set('display_description')
+ elem.set('volume_type')
+ elem.set('snapshot_id')
+
+ attachments = xmlutil.SubTemplateElement(elem, 'attachments')
+ attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
+ selector='attachments')
+ make_attachment(attachment)
+
+ # Attach metadata node
+ elem.append(common.MetadataTemplate())
+
+
+volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V2, 'atom': xmlutil.XMLNS_ATOM}
+
+
+class VolumeTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('volume', selector='volume')
+ make_volume(root)
+ return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
+
+
+class VolumesTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('volumes')
+ elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
+ make_volume(elem)
+ return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
+
+
+class CommonDeserializer(wsgi.MetadataXMLDeserializer):
+ """Common deserializer to handle xml-formatted volume requests.
+
+ Handles standard volume attributes as well as the optional metadata
+    attribute.
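+
+    An illustrative request body (the attribute and element names mirror
+    the fields handled by _extract_volume; the values are made up)::
+
+        <volume display_name="vol1" size="1">
+            <metadata>
+                <meta key="purpose">test</meta>
+            </metadata>
+        </volume>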
+ """
+
+ metadata_deserializer = common.MetadataXMLDeserializer()
+
+ def _extract_volume(self, node):
+ """Marshal the volume attribute of a parsed request."""
+ volume = {}
+ volume_node = self.find_first_child_named(node, 'volume')
+
+ attributes = ['display_name', 'display_description', 'size',
+ 'volume_type', 'availability_zone']
+ for attr in attributes:
+ if volume_node.getAttribute(attr):
+ volume[attr] = volume_node.getAttribute(attr)
+
+ metadata_node = self.find_first_child_named(volume_node, 'metadata')
+ if metadata_node is not None:
+ volume['metadata'] = self.extract_metadata(metadata_node)
+
+ return volume
+
+
+class CreateDeserializer(CommonDeserializer):
+ """Deserializer to handle xml-formatted create volume requests.
+
+ Handles standard volume attributes as well as the optional metadata
+ attribute
+ """
+
+ def default(self, string):
+ """Deserialize an xml-formatted volume create request."""
+ dom = minidom.parseString(string)
+ volume = self._extract_volume(dom)
+ return {'body': {'volume': volume}}
+
+
+class VolumeController(wsgi.Controller):
+ """The Volumes API controller for the OpenStack API."""
+
+ def __init__(self, ext_mgr):
+ self.volume_api = volume.API()
+ self.ext_mgr = ext_mgr
+ super(VolumeController, self).__init__()
+
+ @wsgi.serializers(xml=VolumeTemplate)
+ def show(self, req, id):
+ """Return data about the given volume."""
+ context = req.environ['cinder.context']
+
+ try:
+ vol = self.volume_api.get(context, id)
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+
+ return {'volume': _translate_volume_detail_view(context, vol)}
+
+ def delete(self, req, id):
+ """Delete a volume."""
+ context = req.environ['cinder.context']
+
+ LOG.audit(_("Delete volume with id: %s"), id, context=context)
+
+ try:
+ volume = self.volume_api.get(context, id)
+ self.volume_api.delete(context, volume)
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+ return webob.Response(status_int=202)
+
+ @wsgi.serializers(xml=VolumesTemplate)
+ def index(self, req):
+ """Returns a summary list of volumes."""
+ return self._items(req, entity_maker=_translate_volume_summary_view)
+
+ @wsgi.serializers(xml=VolumesTemplate)
+ def detail(self, req):
+ """Returns a detailed list of volumes."""
+ return self._items(req, entity_maker=_translate_volume_detail_view)
+
+ def _items(self, req, entity_maker):
+ """Returns a list of volumes, transformed through entity_maker."""
+
+ search_opts = {}
+ search_opts.update(req.GET)
+
+ context = req.environ['cinder.context']
+ remove_invalid_options(context,
+ search_opts, self._get_volume_search_options())
+
+ volumes = self.volume_api.get_all(context, search_opts=search_opts)
+ limited_list = common.limited(volumes, req)
+ res = [entity_maker(context, vol) for vol in limited_list]
+ return {'volumes': res}
+
+ def _image_uuid_from_href(self, image_href):
+ # If the image href was generated by nova api, strip image_href
+ # down to an id.
+ try:
+ image_uuid = image_href.split('/').pop()
+ except (TypeError, AttributeError):
+ msg = _("Invalid imageRef provided.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ if not uuidutils.is_uuid_like(image_uuid):
+ msg = _("Invalid imageRef provided.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ return image_uuid
+
+ @wsgi.serializers(xml=VolumeTemplate)
+ @wsgi.deserializers(xml=CreateDeserializer)
+ def create(self, req, body):
+ """Creates a new volume."""
+ if not self.is_valid_body(body, 'volume'):
+ raise exc.HTTPUnprocessableEntity()
+
+ context = req.environ['cinder.context']
+ volume = body['volume']
+
+ kwargs = {}
+
+ req_volume_type = volume.get('volume_type', None)
+ if req_volume_type:
+ try:
+ kwargs['volume_type'] = volume_types.get_volume_type_by_name(
+ context, req_volume_type)
+ except exception.VolumeTypeNotFound:
+ explanation = 'Volume type not found.'
+ raise exc.HTTPNotFound(explanation=explanation)
+
+ kwargs['metadata'] = volume.get('metadata', None)
+
+ snapshot_id = volume.get('snapshot_id')
+ if snapshot_id is not None:
+ kwargs['snapshot'] = self.volume_api.get_snapshot(context,
+ snapshot_id)
+ else:
+ kwargs['snapshot'] = None
+
+ size = volume.get('size', None)
+ if size is None and kwargs['snapshot'] is not None:
+ size = kwargs['snapshot']['volume_size']
+
+ LOG.audit(_("Create volume of %s GB"), size, context=context)
+
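+        # Creating a volume from an image is only honored when the
+        # os-image-create extension is loaded; a request may reference a
+        # snapshot or an image, but not both.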
+ image_href = None
+ image_uuid = None
+ if self.ext_mgr.is_loaded('os-image-create'):
+ image_href = volume.get('imageRef')
+ if snapshot_id and image_href:
+ msg = _("Snapshot and image cannot be specified together.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ if image_href:
+ image_uuid = self._image_uuid_from_href(image_href)
+ kwargs['image_id'] = image_uuid
+
+ kwargs['availability_zone'] = volume.get('availability_zone', None)
+
+ new_volume = self.volume_api.create(context,
+ size,
+ volume.get('display_name'),
+ volume.get('display_description'),
+ **kwargs)
+
+ # TODO(vish): Instance should be None at db layer instead of
+ # trying to lazy load, but for now we turn it into
+ # a dict to avoid an error.
+ retval = _translate_volume_detail_view(context,
+ dict(new_volume.iteritems()),
+ image_uuid)
+
+ return {'volume': retval}
+
+ def _get_volume_search_options(self):
+ """Return volume search options allowed by non-admin."""
+ return ('display_name', 'status')
+
+ @wsgi.serializers(xml=VolumeTemplate)
+ def update(self, req, id, body):
+ """Update a volume."""
+ context = req.environ['cinder.context']
+
+ if not body:
+ raise exc.HTTPUnprocessableEntity()
+
+        if 'volume' not in body:
+ raise exc.HTTPUnprocessableEntity()
+
+ volume = body['volume']
+ update_dict = {}
+
+ valid_update_keys = (
+ 'display_name',
+ 'display_description',
+ 'metadata',
+ )
+
+ for key in valid_update_keys:
+ if key in volume:
+ update_dict[key] = volume[key]
+
+ try:
+ volume = self.volume_api.get(context, id)
+ self.volume_api.update(context, volume, update_dict)
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+
+ volume.update(update_dict)
+
+ return {'volume': _translate_volume_detail_view(context, volume)}
+
+
+def create_resource(ext_mgr):
+ return wsgi.Resource(VolumeController(ext_mgr))
+
+
+def remove_invalid_options(context, search_options, allowed_search_options):
+ """Remove search options that are not valid for non-admin API/context."""
+ if context.is_admin:
+ # Allow all options
+ return
+ # Otherwise, strip out all unknown options
+ unknown_options = [opt for opt in search_options
+ if opt not in allowed_search_options]
+ bad_options = ", ".join(unknown_options)
+ log_msg = _("Removing options '%(bad_options)s' from query") % locals()
+ LOG.debug(log_msg)
+ for opt in unknown_options:
+ del search_options[opt]
VERSIONS = {
+ "v2.0": {
+ "id": "v2.0",
+ "status": "CURRENT",
+ "updated": "2012-11-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://jorgew.github.com/block-storage-api/"
+ "content/os-block-storage-1.0.pdf",
+ },
+ {
+ "rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+                # (anthony) FIXME
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.1/application.wadl",
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.volume+xml;version=1",
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.volume+json;version=1",
+ }
+ ],
+ },
"v1.0": {
"id": "v1.0",
"status": "CURRENT",
}
],
}
+
}
XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
XMLNS_VOLUME_V1 = 'http://docs.openstack.org/volume/api/v1'
+XMLNS_VOLUME_V2 = ('http://docs.openstack.org/api/openstack-volume/2.0/'
+ 'content')
def validate_schema(xml, schema_name):
cfg.StrOpt('volume_topic',
default='cinder-volume',
help='the topic volume nodes listen on'),
- cfg.BoolOpt('enable_v1_api', default=True,
- help=_("Deploy v1 of the Cinder API")),
- cfg.BoolOpt('enable_v2_api', default=True,
- help=_("Deploy v2 of the Cinder API")),
+    cfg.BoolOpt('enable_v1_api',
+                default=True,
+                help=_("Deploy v1 of the Cinder API.")),
+    cfg.BoolOpt('enable_v2_api',
+                default=True,
+                help=_("Deploy v2 of the Cinder API.")),
cfg.BoolOpt('api_rate_limit',
default=True,
help='whether to rate limit the api'),
--- /dev/null
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests dealing with HTTP rate-limiting.
+"""
+
+import httplib
+import StringIO
+
+from lxml import etree
+import webob
+from xml.dom import minidom
+
+from cinder.api.v2 import limits
+from cinder.api import views
+from cinder.api import xmlutil
+import cinder.context
+from cinder.openstack.common import jsonutils
+from cinder import test
+
+
+TEST_LIMITS = [
+ limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
+ limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
+ limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE),
+ limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE),
+ limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE),
+]
+NS = {
+ 'atom': 'http://www.w3.org/2005/Atom',
+ 'ns': 'http://docs.openstack.org/common/api/v1.0',
+}
+
+
+class BaseLimitTestSuite(test.TestCase):
+ """Base test suite which provides relevant stubs and time abstraction."""
+
+ def setUp(self):
+ super(BaseLimitTestSuite, self).setUp()
+ self.time = 0.0
+ self.stubs.Set(limits.Limit, "_get_time", self._get_time)
+ self.absolute_limits = {}
+
+ def stub_get_project_quotas(context, project_id, usages=True):
+ return dict((k, dict(limit=v))
+ for k, v in self.absolute_limits.items())
+
+ self.stubs.Set(cinder.quota.QUOTAS, "get_project_quotas",
+ stub_get_project_quotas)
+
+ def _get_time(self):
+ """Return the "time" according to this test suite."""
+ return self.time
+
+
+class LimitsControllerTest(BaseLimitTestSuite):
+ """
+ Tests for `limits.LimitsController` class.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ super(LimitsControllerTest, self).setUp()
+ self.controller = limits.create_resource()
+
+ def _get_index_request(self, accept_header="application/json"):
+ """Helper to set routing arguments."""
+ request = webob.Request.blank("/")
+ request.accept = accept_header
+ request.environ["wsgiorg.routing_args"] = (None, {
+ "action": "index",
+ "controller": "",
+ })
+ context = cinder.context.RequestContext('testuser', 'testproject')
+ request.environ["cinder.context"] = context
+ return request
+
+ def _populate_limits(self, request):
+ """Put limit info into a request."""
+ _limits = [
+ limits.Limit("GET", "*", ".*", 10, 60).display(),
+ limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
+ limits.Limit("GET", "changes-since*", "changes-since",
+ 5, 60).display(),
+ ]
+ request.environ["cinder.limits"] = _limits
+ return request
+
+ def test_empty_index_json(self):
+ """Test getting empty limit details in JSON."""
+ request = self._get_index_request()
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def test_index_json(self):
+ """Test getting limit details in JSON."""
+ request = self._get_index_request()
+ request = self._populate_limits(request)
+ self.absolute_limits = {
+ 'gigabytes': 512,
+ 'volumes': 5,
+ }
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [
+ {
+ "regex": ".*",
+ "uri": "*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "MINUTE",
+ "value": 10,
+ "remaining": 10,
+ },
+ {
+ "verb": "POST",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "HOUR",
+ "value": 5,
+ "remaining": 5,
+ },
+ ],
+ },
+ {
+ "regex": "changes-since",
+ "uri": "changes-since*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "MINUTE",
+ "value": 5,
+ "remaining": 5,
+ },
+ ],
+ },
+
+ ],
+ "absolute": {
+ "maxTotalVolumeGigabytes": 512,
+ "maxTotalVolumes": 5,
+ },
+ },
+ }
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def _populate_limits_diff_regex(self, request):
+ """Put limit info into a request."""
+ _limits = [
+ limits.Limit("GET", "*", ".*", 10, 60).display(),
+ limits.Limit("GET", "*", "*.*", 10, 60).display(),
+ ]
+ request.environ["cinder.limits"] = _limits
+ return request
+
+ def test_index_diff_regex(self):
+ """Test getting limit details in JSON."""
+ request = self._get_index_request()
+ request = self._populate_limits_diff_regex(request)
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [
+ {
+ "regex": ".*",
+ "uri": "*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "MINUTE",
+ "value": 10,
+ "remaining": 10,
+ },
+ ],
+ },
+ {
+ "regex": "*.*",
+ "uri": "*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "MINUTE",
+ "value": 10,
+ "remaining": 10,
+ },
+ ],
+ },
+
+ ],
+ "absolute": {},
+ },
+ }
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def _test_index_absolute_limits_json(self, expected):
+ request = self._get_index_request()
+ response = request.get_response(self.controller)
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body['limits']['absolute'])
+
+ def test_index_ignores_extra_absolute_limits_json(self):
+ self.absolute_limits = {'unknown_limit': 9001}
+ self._test_index_absolute_limits_json({})
+
+
+class TestLimiter(limits.Limiter):
+ pass
+
+
+class LimitMiddlewareTest(BaseLimitTestSuite):
+ """
+ Tests for the `limits.RateLimitingMiddleware` class.
+ """
+
+ @webob.dec.wsgify
+ def _empty_app(self, request):
+ """Do-nothing WSGI app."""
+ pass
+
+ def setUp(self):
+ """Prepare middleware for use through fake WSGI app."""
+ super(LimitMiddlewareTest, self).setUp()
+ _limits = '(GET, *, .*, 1, MINUTE)'
+ self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
+ "%s.TestLimiter" %
+ self.__class__.__module__)
+
+ def test_limit_class(self):
+ """Test that middleware selected correct limiter class."""
+ assert isinstance(self.app._limiter, TestLimiter)
+
+ def test_good_request(self):
+ """Test successful GET request through middleware."""
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(200, response.status_int)
+
+ def test_limited_request_json(self):
+ """Test a rate-limited (413) GET request through middleware."""
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(200, response.status_int)
+
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 413)
+
+ self.assertTrue('Retry-After' in response.headers)
+ retry_after = int(response.headers['Retry-After'])
+ self.assertAlmostEqual(retry_after, 60, 1)
+
+ body = jsonutils.loads(response.body)
+ expected = "Only 1 GET request(s) can be made to * every minute."
+ value = body["overLimitFault"]["details"].strip()
+ self.assertEqual(value, expected)
+
+ def test_limited_request_xml(self):
+ """Test a rate-limited (413) response as XML"""
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(200, response.status_int)
+
+ request = webob.Request.blank("/")
+ request.accept = "application/xml"
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 413)
+
+ root = minidom.parseString(response.body).childNodes[0]
+ expected = "Only 1 GET request(s) can be made to * every minute."
+
+ details = root.getElementsByTagName("details")
+ self.assertEqual(details.length, 1)
+
+ value = details.item(0).firstChild.data.strip()
+ self.assertEqual(value, expected)
+
+
+class LimitTest(BaseLimitTestSuite):
+ """
+ Tests for the `limits.Limit` class.
+ """
+
+ def test_GET_no_delay(self):
+ """Test a limit handles 1 GET per second."""
+ limit = limits.Limit("GET", "*", ".*", 1, 1)
+ delay = limit("GET", "/anything")
+ self.assertEqual(None, delay)
+ self.assertEqual(0, limit.next_request)
+ self.assertEqual(0, limit.last_request)
+
+ def test_GET_delay(self):
+ """Test two calls to 1 GET per second limit."""
+ limit = limits.Limit("GET", "*", ".*", 1, 1)
+ delay = limit("GET", "/anything")
+ self.assertEqual(None, delay)
+
+ delay = limit("GET", "/anything")
+ self.assertEqual(1, delay)
+ self.assertEqual(1, limit.next_request)
+ self.assertEqual(0, limit.last_request)
+
+ self.time += 4
+
+ delay = limit("GET", "/anything")
+ self.assertEqual(None, delay)
+ self.assertEqual(4, limit.next_request)
+ self.assertEqual(4, limit.last_request)
+
+
+class ParseLimitsTest(BaseLimitTestSuite):
+ """
+ Tests for the default limits parser in the in-memory
+ `limits.Limiter` class.
+ """
+
+ def test_invalid(self):
+ """Test that parse_limits() handles invalid input correctly."""
+ self.assertRaises(ValueError, limits.Limiter.parse_limits,
+ ';;;;;')
+
+ def test_bad_rule(self):
+ """Test that parse_limits() handles bad rules correctly."""
+ self.assertRaises(ValueError, limits.Limiter.parse_limits,
+ 'GET, *, .*, 20, minute')
+
+ def test_missing_arg(self):
+ """Test that parse_limits() handles missing args correctly."""
+ self.assertRaises(ValueError, limits.Limiter.parse_limits,
+ '(GET, *, .*, 20)')
+
+ def test_bad_value(self):
+ """Test that parse_limits() handles bad values correctly."""
+ self.assertRaises(ValueError, limits.Limiter.parse_limits,
+ '(GET, *, .*, foo, minute)')
+
+ def test_bad_unit(self):
+ """Test that parse_limits() handles bad units correctly."""
+ self.assertRaises(ValueError, limits.Limiter.parse_limits,
+ '(GET, *, .*, 20, lightyears)')
+
+ def test_multiple_rules(self):
+ """Test that parse_limits() handles multiple rules correctly."""
+ try:
+ l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
+ '(PUT, /foo*, /foo.*, 10, hour);'
+ '(POST, /bar*, /bar.*, 5, second);'
+ '(Say, /derp*, /derp.*, 1, day)')
+ except ValueError, e:
+ assert False, str(e)
+
+ # Make sure the number of returned limits are correct
+ self.assertEqual(len(l), 4)
+
+ # Check all the verbs...
+ expected = ['GET', 'PUT', 'POST', 'SAY']
+ self.assertEqual([t.verb for t in l], expected)
+
+ # ...the URIs...
+ expected = ['*', '/foo*', '/bar*', '/derp*']
+ self.assertEqual([t.uri for t in l], expected)
+
+ # ...the regexes...
+ expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
+ self.assertEqual([t.regex for t in l], expected)
+
+ # ...the values...
+ expected = [20, 10, 5, 1]
+ self.assertEqual([t.value for t in l], expected)
+
+ # ...and the units...
+ expected = [limits.PER_MINUTE, limits.PER_HOUR,
+ limits.PER_SECOND, limits.PER_DAY]
+ self.assertEqual([t.unit for t in l], expected)
+
+
+class LimiterTest(BaseLimitTestSuite):
+ """
+ Tests for the in-memory `limits.Limiter` class.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ super(LimiterTest, self).setUp()
+ userlimits = {'user:user3': ''}
+ self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)
+
+ def _check(self, num, verb, url, username=None):
+ """Check and yield results from checks."""
+ for x in xrange(num):
+ yield self.limiter.check_for_delay(verb, url, username)[0]
+
+ def _check_sum(self, num, verb, url, username=None):
+ """Check and sum results from checks."""
+ results = self._check(num, verb, url, username)
+ return sum(item for item in results if item)
+
+ def test_no_delay_GET(self):
+ """
+ Simple test to ensure no delay on a single call for a limit verb we
+ didn"t set.
+ """
+ delay = self.limiter.check_for_delay("GET", "/anything")
+ self.assertEqual(delay, (None, None))
+
+ def test_no_delay_PUT(self):
+ """
+ Simple test to ensure no delay on a single call for a known limit.
+ """
+ delay = self.limiter.check_for_delay("PUT", "/anything")
+ self.assertEqual(delay, (None, None))
+
+ def test_delay_PUT(self):
+ """
+ Ensure the 11th PUT will result in a delay of 6.0 seconds until
+        the next request will be granted.
+ """
+ expected = [None] * 10 + [6.0]
+ results = list(self._check(11, "PUT", "/anything"))
+
+ self.assertEqual(expected, results)
+
+ def test_delay_POST(self):
+ """
+        Ensure the 8th POST will result in a delay of 60.0 / 7.0 seconds
+        until the next request will be granted.
+ """
+ expected = [None] * 7
+ results = list(self._check(7, "POST", "/anything"))
+ self.assertEqual(expected, results)
+
+ expected = 60.0 / 7.0
+ results = self._check_sum(1, "POST", "/anything")
+        self.assertAlmostEqual(expected, results, 8)
+
+ def test_delay_GET(self):
+ """
+ Ensure the 11th GET will result in NO delay.
+ """
+ expected = [None] * 11
+ results = list(self._check(11, "GET", "/anything"))
+
+ self.assertEqual(expected, results)
+
+ def test_delay_PUT_volumes(self):
+ """
+ Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere is still
+ OK after 5 requests...but then after 11 total requests, PUT limiting
+ kicks in.
+ """
+ # First 6 requests on PUT /volumes
+ expected = [None] * 5 + [12.0]
+ results = list(self._check(6, "PUT", "/volumes"))
+ self.assertEqual(expected, results)
+
+ # Next 5 request on PUT /anything
+ expected = [None] * 4 + [6.0]
+ results = list(self._check(5, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ def test_delay_PUT_wait(self):
+ """
+ Ensure after hitting the limit and then waiting for the correct
+ amount of time, the limit will be lifted.
+ """
+ expected = [None] * 10 + [6.0]
+ results = list(self._check(11, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ # Advance time
+ self.time += 6.0
+
+ expected = [None, 6.0]
+ results = list(self._check(2, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ def test_multiple_delays(self):
+ """
+ Ensure multiple requests still get a delay.
+ """
+ expected = [None] * 10 + [6.0] * 10
+ results = list(self._check(20, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ self.time += 1.0
+
+ expected = [5.0] * 10
+ results = list(self._check(10, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ def test_user_limit(self):
+ """
+ Test user-specific limits.
+ """
+ self.assertEqual(self.limiter.levels['user3'], [])
+
+ def test_multiple_users(self):
+ """
+ Tests involving multiple users.
+ """
+ # User1
+ expected = [None] * 10 + [6.0] * 10
+ results = list(self._check(20, "PUT", "/anything", "user1"))
+ self.assertEqual(expected, results)
+
+ # User2
+ expected = [None] * 10 + [6.0] * 5
+ results = list(self._check(15, "PUT", "/anything", "user2"))
+ self.assertEqual(expected, results)
+
+ # User3
+ expected = [None] * 20
+ results = list(self._check(20, "PUT", "/anything", "user3"))
+ self.assertEqual(expected, results)
+
+ self.time += 1.0
+
+ # User1 again
+ expected = [5.0] * 10
+ results = list(self._check(10, "PUT", "/anything", "user1"))
+ self.assertEqual(expected, results)
+
+ self.time += 1.0
+
+        # User2 again
+ expected = [4.0] * 5
+ results = list(self._check(5, "PUT", "/anything", "user2"))
+ self.assertEqual(expected, results)
+
+
+class WsgiLimiterTest(BaseLimitTestSuite):
+ """
+ Tests for `limits.WsgiLimiter` class.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ super(WsgiLimiterTest, self).setUp()
+ self.app = limits.WsgiLimiter(TEST_LIMITS)
+
+ def _request_data(self, verb, path):
+ """Get data decribing a limit request verb/path."""
+ return jsonutils.dumps({"verb": verb, "path": path})
+
+ def _request(self, verb, url, username=None):
+ """Make sure that POSTing to the given url causes the given username
+ to perform the given action. Make the internal rate limiter return
+ delay and make sure that the WSGI app returns the correct response.
+ """
+ if username:
+ request = webob.Request.blank("/%s" % username)
+ else:
+ request = webob.Request.blank("/")
+
+ request.method = "POST"
+ request.body = self._request_data(verb, url)
+ response = request.get_response(self.app)
+
+ if "X-Wait-Seconds" in response.headers:
+ self.assertEqual(response.status_int, 403)
+ return response.headers["X-Wait-Seconds"]
+
+ self.assertEqual(response.status_int, 204)
+
+ def test_invalid_methods(self):
+ """Only POSTs should work."""
+ requests = []
+ for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
+ request = webob.Request.blank("/", method=method)
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 405)
+
+ def test_good_url(self):
+ delay = self._request("GET", "/something")
+ self.assertEqual(delay, None)
+
+ def test_escaping(self):
+ delay = self._request("GET", "/something/jump%20up")
+ self.assertEqual(delay, None)
+
+ def test_response_to_delays(self):
+ delay = self._request("GET", "/delayed")
+ self.assertEqual(delay, None)
+
+ delay = self._request("GET", "/delayed")
+ self.assertEqual(delay, '60.00')
+
+ def test_response_to_delays_usernames(self):
+ delay = self._request("GET", "/delayed", "user1")
+ self.assertEqual(delay, None)
+
+ delay = self._request("GET", "/delayed", "user2")
+ self.assertEqual(delay, None)
+
+ delay = self._request("GET", "/delayed", "user1")
+ self.assertEqual(delay, '60.00')
+
+ delay = self._request("GET", "/delayed", "user2")
+ self.assertEqual(delay, '60.00')
+
+
+class FakeHttplibSocket(object):
+ """
+ Fake `httplib.HTTPResponse` replacement.
+ """
+
+ def __init__(self, response_string):
+ """Initialize new `FakeHttplibSocket`."""
+ self._buffer = StringIO.StringIO(response_string)
+
+ def makefile(self, _mode, _other):
+ """Returns the socket's internal buffer."""
+ return self._buffer
+
+
+class FakeHttplibConnection(object):
+ """
+ Fake `httplib.HTTPConnection`.
+ """
+
+ def __init__(self, app, host):
+ """
+ Initialize `FakeHttplibConnection`.
+ """
+ self.app = app
+ self.host = host
+
+ def request(self, method, path, body="", headers=None):
+ """
+ Requests made via this connection actually get translated and routed
+ into our WSGI app, we then wait for the response and turn it back into
+ an `httplib.HTTPResponse`.
+ """
+ if not headers:
+ headers = {}
+
+ req = webob.Request.blank(path)
+ req.method = method
+ req.headers = headers
+ req.host = self.host
+ req.body = body
+
+ resp = str(req.get_response(self.app))
+ resp = "HTTP/1.0 %s" % resp
+ sock = FakeHttplibSocket(resp)
+ self.http_response = httplib.HTTPResponse(sock)
+ self.http_response.begin()
+
+ def getresponse(self):
+ """Return our generated response from the request."""
+ return self.http_response
+
+
+def wire_HTTPConnection_to_WSGI(host, app):
+ """Monkeypatches HTTPConnection so that if you try to connect to host, you
+ are instead routed straight to the given WSGI app.
+
+ After calling this method, when any code calls
+
+ httplib.HTTPConnection(host)
+
+ the connection object will be a fake. Its requests will be sent directly
+ to the given WSGI app rather than through a socket.
+
+ Code connecting to hosts other than host will not be affected.
+
+ This method may be called multiple times to map different hosts to
+ different apps.
+
+ This method returns the original HTTPConnection object, so that the caller
+ can restore the default HTTPConnection interface (for all hosts).
+ """
+ class HTTPConnectionDecorator(object):
+ """Wraps the real HTTPConnection class so that when you instantiate
+ the class you might instead get a fake instance."""
+
+ def __init__(self, wrapped):
+ self.wrapped = wrapped
+
+ def __call__(self, connection_host, *args, **kwargs):
+ if connection_host == host:
+ return FakeHttplibConnection(app, host)
+ else:
+ return self.wrapped(connection_host, *args, **kwargs)
+
+ oldHTTPConnection = httplib.HTTPConnection
+ httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection)
+ return oldHTTPConnection
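+
+# A minimal usage sketch (illustrative only; the host string and app name
+# below are hypothetical, mirroring how the proxy test wires things up):
+#
+#   old_conn = wire_HTTPConnection_to_WSGI("10.0.0.1:80", wsgi_app)
+#   conn = httplib.HTTPConnection("10.0.0.1:80")  # returns the fake connection
+#   conn.request("GET", "/")                      # served in-process by wsgi_app
+#   response = conn.getresponse()
+#   httplib.HTTPConnection = old_conn             # restore the real class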
+
+
+class WsgiLimiterProxyTest(BaseLimitTestSuite):
+ """
+ Tests for the `limits.WsgiLimiterProxy` class.
+ """
+
+ def setUp(self):
+ """
+ Wire the fake `httplib` connection up to a `WsgiLimiter` app so that
+ HTTP requests made through `httplib` are served in-process by WSGI.
+ """
+ super(WsgiLimiterProxyTest, self).setUp()
+ self.app = limits.WsgiLimiter(TEST_LIMITS)
+ self.oldHTTPConnection = (
+ wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app))
+ self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
+
+ def test_200(self):
+ """Successful request test."""
+ delay = self.proxy.check_for_delay("GET", "/anything")
+ self.assertEqual(delay, (None, None))
+
+ def test_403(self):
+ """Forbidden request test."""
+ delay = self.proxy.check_for_delay("GET", "/delayed")
+ self.assertEqual(delay, (None, None))
+
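+ # The first check consumed the single allowed GET per minute for
+ # /delayed, so the next one should report a delay and a 403 body.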
+ delay, error = self.proxy.check_for_delay("GET", "/delayed")
+ error = error.strip()
+
+ expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be "
+ "made to /delayed every minute.")
+
+ self.assertEqual((delay, error), expected)
+
+ def tearDown(self):
+ # Restore the original HTTPConnection class.
+ httplib.HTTPConnection = self.oldHTTPConnection
+ super(WsgiLimiterProxyTest, self).tearDown()
+
+
+class LimitsViewBuilderTest(test.TestCase):
+ def setUp(self):
+ super(LimitsViewBuilderTest, self).setUp()
+ self.view_builder = views.limits.ViewBuilder()
+ self.rate_limits = [{"URI": "*",
+ "regex": ".*",
+ "value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "resetTime": 1311272226},
+ {"URI": "*/volumes",
+ "regex": "^/volumes",
+ "value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "resetTime": 1311272226}]
+ self.absolute_limits = {"metadata_items": 1,
+ "injected_files": 5,
+ "injected_file_content_bytes": 5}
+
+ def test_build_limits(self):
+ expected_limits = {
+ "limits": {
+ "rate": [
+ {
+ "uri": "*",
+ "regex": ".*",
+ "limit": [
+ {
+ "value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "next-available": "2011-07-21T18:17:06Z"
+ }
+ ]
+ },
+ {
+ "uri": "*/volumes",
+ "regex": "^/volumes",
+ "limit": [
+ {
+ "value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "next-available": "2011-07-21T18:17:06Z"
+ }
+ ]
+ }
+ ],
+ "absolute": {
+ "maxServerMeta": 1,
+ "maxImageMeta": 1,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 5
+ }
+ }
+ }
+
+ output = self.view_builder.build(self.rate_limits,
+ self.absolute_limits)
+ self.assertDictMatch(output, expected_limits)
+
+ def test_build_limits_empty_limits(self):
+ expected_limits = {"limits": {"rate": [],
+ "absolute": {}}}
+
+ abs_limits = {}
+ rate_limits = []
+ output = self.view_builder.build(rate_limits, abs_limits)
+ self.assertDictMatch(output, expected_limits)
+
+
+class LimitsXMLSerializationTest(test.TestCase):
+ def test_xml_declaration(self):
+ serializer = limits.LimitsTemplate()
+
+ fixture = {"limits": {
+ "rate": [],
+ "absolute": {}}}
+
+ output = serializer.serialize(fixture)
+ has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+ self.assertTrue(has_dec)
+
+ def test_index(self):
+ serializer = limits.LimitsTemplate()
+ fixture = {
+ "limits": {
+ "rate": [{
+ "uri": "*",
+ "regex": ".*",
+ "limit": [{
+ "value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "next-available": "2011-12-15T22:42:45Z"}]},
+ {"uri": "*/servers",
+ "regex": "^/servers",
+ "limit": [{
+ "value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "next-available": "2011-12-15T22:42:45Z"}]}],
+ "absolute": {"maxServerMeta": 1,
+ "maxImageMeta": 1,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 10240}}}
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'limits')
+
+ # verify absolute limits
+ absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
+ self.assertEqual(len(absolutes), 4)
+ for limit in absolutes:
+ name = limit.get('name')
+ value = limit.get('value')
+ self.assertEqual(value, str(fixture['limits']['absolute'][name]))
+
+ # verify rate limits
+ rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
+ self.assertEqual(len(rates), 2)
+ for i, rate in enumerate(rates):
+ for key in ['uri', 'regex']:
+ self.assertEqual(rate.get(key),
+ str(fixture['limits']['rate'][i][key]))
+ rate_limits = rate.xpath('ns:limit', namespaces=NS)
+ self.assertEqual(len(rate_limits), 1)
+ for j, limit in enumerate(rate_limits):
+ for key in ['verb', 'value', 'remaining', 'unit',
+ 'next-available']:
+ self.assertEqual(limit.get(key),
+ str(fixture['limits']['rate'][i]['limit'][j][key]))
+
+ def test_index_no_limits(self):
+ serializer = limits.LimitsTemplate()
+
+ fixture = {"limits": {
+ "rate": [],
+ "absolute": {}}}
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'limits')
+
+ # verify absolute limits
+ absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
+ self.assertEqual(len(absolutes), 0)
+
+ # verify rate limits
+ rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
+ self.assertEqual(len(rates), 0)
--- /dev/null
+# Copyright 2011 Denali Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+import webob
+
+from cinder.api.v2 import snapshots
+from cinder import db
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder import test
+from cinder.tests.api.openstack import fakes
+from cinder import volume
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger(__name__)
+
+UUID = '00000000-0000-0000-0000-000000000001'
+INVALID_UUID = '00000000-0000-0000-0000-000000000002'
+
+
+def _get_default_snapshot_param():
+ return {
+ 'id': UUID,
+ 'volume_id': 12,
+ 'status': 'available',
+ 'volume_size': 100,
+ 'created_at': None,
+ 'display_name': 'Default name',
+ 'display_description': 'Default description',
+ }
+
+
+def stub_snapshot_create(self, context, volume_id, name, description):
+ snapshot = _get_default_snapshot_param()
+ snapshot['volume_id'] = volume_id
+ snapshot['display_name'] = name
+ snapshot['display_description'] = description
+ return snapshot
+
+
+def stub_snapshot_delete(self, context, snapshot):
+ if snapshot['id'] != UUID:
+ raise exception.NotFound
+
+
+def stub_snapshot_get(self, context, snapshot_id):
+ if snapshot_id != UUID:
+ raise exception.NotFound
+
+ param = _get_default_snapshot_param()
+ return param
+
+
+def stub_snapshot_get_all(self, context, search_opts=None):
+ param = _get_default_snapshot_param()
+ return [param]
+
+
+class SnapshotApiTest(test.TestCase):
+ def setUp(self):
+ super(SnapshotApiTest, self).setUp()
+ self.controller = snapshots.SnapshotsController()
+
+ self.stubs.Set(db, 'snapshot_get_all_by_project',
+ fakes.stub_snapshot_get_all_by_project)
+ self.stubs.Set(db, 'snapshot_get_all',
+ fakes.stub_snapshot_get_all)
+
+ def test_snapshot_create(self):
+ self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create)
+ self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
+ snapshot = {
+ "volume_id": '12',
+ "force": False,
+ "display_name": "Snapshot Test Name",
+ "display_description": "Snapshot Test Desc"
+ }
+ body = dict(snapshot=snapshot)
+ req = fakes.HTTPRequest.blank('/v2/snapshots')
+ resp_dict = self.controller.create(req, body)
+
+ self.assertTrue('snapshot' in resp_dict)
+ self.assertEqual(resp_dict['snapshot']['display_name'],
+ snapshot['display_name'])
+ self.assertEqual(resp_dict['snapshot']['display_description'],
+ snapshot['display_description'])
+
+ def test_snapshot_create_force(self):
+ self.stubs.Set(volume.api.API, "create_snapshot_force",
+ stub_snapshot_create)
+ self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
+ snapshot = {
+ "volume_id": '12',
+ "force": True,
+ "display_name": "Snapshot Test Name",
+ "display_description": "Snapshot Test Desc"
+ }
+ body = dict(snapshot=snapshot)
+ req = fakes.HTTPRequest.blank('/v2/snapshots')
+ resp_dict = self.controller.create(req, body)
+
+ self.assertTrue('snapshot' in resp_dict)
+ self.assertEqual(resp_dict['snapshot']['display_name'],
+ snapshot['display_name'])
+ self.assertEqual(resp_dict['snapshot']['display_description'],
+ snapshot['display_description'])
+
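+ # An unparseable "force" value should be rejected as an invalid parameter.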
+ snapshot = {
+ "volume_id": "12",
+ "force": "**&&^^%%$$##@@",
+ "display_name": "Snapshot Test Name",
+ "display_description": "Snapshot Test Desc"
+ }
+ body = dict(snapshot=snapshot)
+ req = fakes.HTTPRequest.blank('/v2/snapshots')
+ self.assertRaises(exception.InvalidParameterValue,
+ self.controller.create,
+ req,
+ body)
+
+ def test_snapshot_update(self):
+ self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
+ self.stubs.Set(volume.api.API, "update_snapshot",
+ fakes.stub_snapshot_update)
+ updates = {
+ "display_name": "Updated Test Name",
+ }
+ body = {"snapshot": updates}
+ req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
+ res_dict = self.controller.update(req, UUID, body)
+ expected = {
+ 'snapshot': {
+ 'id': UUID,
+ 'volume_id': 12,
+ 'status': 'available',
+ 'size': 100,
+ 'created_at': None,
+ 'display_name': 'Updated Test Name',
+ 'display_description': 'Default description',
+ }
+ }
+ self.assertEquals(expected, res_dict)
+
+ def test_snapshot_update_missing_body(self):
+ body = {}
+ req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.update, req, UUID, body)
+
+ def test_snapshot_update_invalid_body(self):
+ body = {'display_name': 'missing top level snapshot key'}
+ req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.update, req, UUID, body)
+
+ def test_snapshot_update_not_found(self):
+ self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
+ updates = {
+ "display_name": "Updated Test Name",
+ }
+ body = {"snapshot": updates}
+ req = fakes.HTTPRequest.blank('/v2/snapshots/not-the-uuid')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req,
+ 'not-the-uuid', body)
+
+ def test_snapshot_delete(self):
+ self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
+ self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete)
+
+ snapshot_id = UUID
+ req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id)
+ resp = self.controller.delete(req, snapshot_id)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_snapshot_delete_invalid_id(self):
+ self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete)
+ snapshot_id = INVALID_UUID
+ req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, snapshot_id)
+
+ def test_snapshot_show(self):
+ self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
+ req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
+ resp_dict = self.controller.show(req, UUID)
+
+ self.assertTrue('snapshot' in resp_dict)
+ self.assertEqual(resp_dict['snapshot']['id'], UUID)
+
+ def test_snapshot_show_invalid_id(self):
+ snapshot_id = INVALID_UUID
+ req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, req, snapshot_id)
+
+ def test_snapshot_detail(self):
+ self.stubs.Set(volume.api.API, "get_all_snapshots",
+ stub_snapshot_get_all)
+ req = fakes.HTTPRequest.blank('/v2/snapshots/detail')
+ resp_dict = self.controller.detail(req)
+
+ self.assertTrue('snapshots' in resp_dict)
+ resp_snapshots = resp_dict['snapshots']
+ self.assertEqual(len(resp_snapshots), 1)
+
+ resp_snapshot = resp_snapshots.pop()
+ self.assertEqual(resp_snapshot['id'], UUID)
+
+ def test_snapshot_list_by_status(self):
+ def stub_snapshot_get_all_by_project(context, project_id):
+ return [
+ fakes.stub_snapshot(1, display_name='backup1',
+ status='available'),
+ fakes.stub_snapshot(2, display_name='backup2',
+ status='available'),
+ fakes.stub_snapshot(3, display_name='backup3',
+ status='creating'),
+ ]
+ self.stubs.Set(db, 'snapshot_get_all_by_project',
+ stub_snapshot_get_all_by_project)
+
+ # no status filter
+ req = fakes.HTTPRequest.blank('/v2/snapshots')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['snapshots']), 3)
+ # single match
+ req = fakes.HTTPRequest.blank('/v2/snapshots?status=creating')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['snapshots']), 1)
+ self.assertEqual(resp['snapshots'][0]['status'], 'creating')
+ # multiple match
+ req = fakes.HTTPRequest.blank('/v2/snapshots?status=available')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['snapshots']), 2)
+ for snapshot in resp['snapshots']:
+ self.assertEquals(snapshot['status'], 'available')
+ # no match
+ req = fakes.HTTPRequest.blank('/v2/snapshots?status=error')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['snapshots']), 0)
+
+ def test_snapshot_list_by_volume(self):
+ def stub_snapshot_get_all_by_project(context, project_id):
+ return [
+ fakes.stub_snapshot(1, volume_id='vol1', status='creating'),
+ fakes.stub_snapshot(2, volume_id='vol1', status='available'),
+ fakes.stub_snapshot(3, volume_id='vol2', status='available'),
+ ]
+ self.stubs.Set(db, 'snapshot_get_all_by_project',
+ stub_snapshot_get_all_by_project)
+
+ # single match
+ req = fakes.HTTPRequest.blank('/v2/snapshots?volume_id=vol2')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['snapshots']), 1)
+ self.assertEqual(resp['snapshots'][0]['volume_id'], 'vol2')
+ # multiple match
+ req = fakes.HTTPRequest.blank('/v2/snapshots?volume_id=vol1')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['snapshots']), 2)
+ for snapshot in resp['snapshots']:
+ self.assertEqual(snapshot['volume_id'], 'vol1')
+ # multiple filters
+ req = fakes.HTTPRequest.blank('/v2/snapshots?volume_id=vol1'
+ '&status=available')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['snapshots']), 1)
+ self.assertEqual(resp['snapshots'][0]['volume_id'], 'vol1')
+ self.assertEqual(resp['snapshots'][0]['status'], 'available')
+
+ def test_snapshot_list_by_name(self):
+ def stub_snapshot_get_all_by_project(context, project_id):
+ return [
+ fakes.stub_snapshot(1, display_name='backup1'),
+ fakes.stub_snapshot(2, display_name='backup2'),
+ fakes.stub_snapshot(3, display_name='backup3'),
+ ]
+ self.stubs.Set(db, 'snapshot_get_all_by_project',
+ stub_snapshot_get_all_by_project)
+
+ # no display_name filter
+ req = fakes.HTTPRequest.blank('/v2/snapshots')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['snapshots']), 3)
+ # filter by one name
+ req = fakes.HTTPRequest.blank('/v2/snapshots?display_name=backup2')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['snapshots']), 1)
+ self.assertEquals(resp['snapshots'][0]['display_name'], 'backup2')
+ # filter no match
+ req = fakes.HTTPRequest.blank('/v2/snapshots?display_name=backup4')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['snapshots']), 0)
+
+ def test_admin_list_snapshots_limited_to_project(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/snapshots',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertTrue('snapshots' in res)
+ self.assertEqual(1, len(res['snapshots']))
+
+ def test_admin_list_snapshots_all_tenants(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1',
+ use_admin_context=True)
+ res = self.controller.index(req)
+ self.assertTrue('snapshots' in res)
+ self.assertEqual(3, len(res['snapshots']))
+
+ def test_all_tenants_non_admin_gets_all_tenants(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1')
+ res = self.controller.index(req)
+ self.assertTrue('snapshots' in res)
+ self.assertEqual(1, len(res['snapshots']))
+
+ def test_non_admin_get_by_project(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/snapshots')
+ res = self.controller.index(req)
+ self.assertTrue('snapshots' in res)
+ self.assertEqual(1, len(res['snapshots']))
+
+
+class SnapshotSerializerTest(test.TestCase):
+ def _verify_snapshot(self, snap, tree):
+ self.assertEqual(tree.tag, 'snapshot')
+
+ for attr in ('id', 'status', 'size', 'created_at',
+ 'display_name', 'display_description', 'volume_id'):
+ self.assertEqual(str(snap[attr]), tree.get(attr))
+
+ def test_snapshot_show_create_serializer(self):
+ serializer = snapshots.SnapshotTemplate()
+ raw_snapshot = dict(
+ id='snap_id',
+ status='snap_status',
+ size=1024,
+ created_at=datetime.datetime.now(),
+ display_name='snap_name',
+ display_description='snap_desc',
+ volume_id='vol_id',
+ )
+ text = serializer.serialize(dict(snapshot=raw_snapshot))
+
+ print text
+ tree = etree.fromstring(text)
+
+ self._verify_snapshot(raw_snapshot, tree)
+
+ def test_snapshot_index_detail_serializer(self):
+ serializer = snapshots.SnapshotsTemplate()
+ raw_snapshots = [
+ dict(
+ id='snap1_id',
+ status='snap1_status',
+ size=1024,
+ created_at=datetime.datetime.now(),
+ display_name='snap1_name',
+ display_description='snap1_desc',
+ volume_id='vol1_id',
+ ),
+ dict(
+ id='snap2_id',
+ status='snap2_status',
+ size=1024,
+ created_at=datetime.datetime.now(),
+ display_name='snap2_name',
+ display_description='snap2_desc',
+ volume_id='vol2_id',
+ )
+ ]
+ text = serializer.serialize(dict(snapshots=raw_snapshots))
+
+ print text
+ tree = etree.fromstring(text)
+
+ self.assertEqual('snapshots', tree.tag)
+ self.assertEqual(len(raw_snapshots), len(tree))
+ for idx, child in enumerate(tree):
+ self._verify_snapshot(raw_snapshots[idx], child)
+
+
+class SnapshotsUnprocessableEntityTestCase(test.TestCase):
+
+ """
+ Tests for the places where we return 422 Unprocessable Entity.
+ """
+
+ def setUp(self):
+ super(SnapshotsUnprocessableEntityTestCase, self).setUp()
+ self.controller = snapshots.SnapshotsController()
+
+ def _unprocessable_snapshot_create(self, body):
+ req = fakes.HTTPRequest.blank('/v2/fake/snapshots')
+ req.method = 'POST'
+
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.create, req, body)
+
+ def test_create_no_body(self):
+ self._unprocessable_snapshot_create(body=None)
+
+ def test_create_missing_snapshot(self):
+ body = {'foo': {'a': 'b'}}
+ self._unprocessable_snapshot_create(body=body)
+
+ def test_create_malformed_entity(self):
+ body = {'snapshot': 'string'}
+ self._unprocessable_snapshot_create(body=body)
--- /dev/null
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from cinder.api.v2 import types
+from cinder.api.views import types as views_types
+from cinder import exception
+from cinder.openstack.common import timeutils
+from cinder import test
+from cinder.tests.api.openstack import fakes
+from cinder.volume import volume_types
+
+
+def stub_volume_type(id):
+ specs = {
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"
+ }
+ return dict(
+ id=id,
+ name='vol_type_%s' % str(id),
+ extra_specs=specs,
+ )
+
+
+def return_volume_types_get_all_types(context):
+ return dict(
+ vol_type_1=stub_volume_type(1),
+ vol_type_2=stub_volume_type(2),
+ vol_type_3=stub_volume_type(3)
+ )
+
+
+def return_empty_volume_types_get_all_types(context):
+ return {}
+
+
+def return_volume_types_get_volume_type(context, id):
+ if id == "777":
+ raise exception.VolumeTypeNotFound(volume_type_id=id)
+ return stub_volume_type(int(id))
+
+
+def return_volume_types_get_by_name(context, name):
+ if name == "777":
+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+ return stub_volume_type(int(name.split("_")[2]))
+
+
+class VolumeTypesApiTest(test.TestCase):
+ def setUp(self):
+ super(VolumeTypesApiTest, self).setUp()
+ self.controller = types.VolumeTypesController()
+
+ def test_volume_types_index(self):
+ self.stubs.Set(volume_types, 'get_all_types',
+ return_volume_types_get_all_types)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/types')
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(3, len(res_dict['volume_types']))
+
+ expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
+ actual_names = map(lambda e: e['name'], res_dict['volume_types'])
+ self.assertEqual(set(actual_names), set(expected_names))
+ for entry in res_dict['volume_types']:
+ self.assertEqual('value1', entry['extra_specs']['key1'])
+
+ def test_volume_types_index_no_data(self):
+ self.stubs.Set(volume_types, 'get_all_types',
+ return_empty_volume_types_get_all_types)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/types')
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(0, len(res_dict['volume_types']))
+
+ def test_volume_types_show(self):
+ self.stubs.Set(volume_types, 'get_volume_type',
+ return_volume_types_get_volume_type)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/types/1')
+ res_dict = self.controller.show(req, 1)
+
+ self.assertEqual(1, len(res_dict))
+ self.assertEqual('1', res_dict['volume_type']['id'])
+ self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
+
+ def test_volume_types_show_not_found(self):
+ self.stubs.Set(volume_types, 'get_volume_type',
+ return_volume_types_get_volume_type)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/types/777')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, '777')
+
+ def test_view_builder_show(self):
+ view_builder = views_types.ViewBuilder()
+
+ now = timeutils.isotime()
+ raw_volume_type = dict(
+ name='new_type',
+ deleted=False,
+ created_at=now,
+ updated_at=now,
+ extra_specs={},
+ deleted_at=None,
+ id=42,
+ )
+
+ request = fakes.HTTPRequest.blank("/v2")
+ output = view_builder.show(request, raw_volume_type)
+
+ self.assertTrue('volume_type' in output)
+ expected_volume_type = dict(
+ name='new_type',
+ extra_specs={},
+ id=42,
+ )
+ self.assertDictMatch(output['volume_type'], expected_volume_type)
+
+ def test_view_builder_list(self):
+ view_builder = views_types.ViewBuilder()
+
+ now = timeutils.isotime()
+ raw_volume_types = []
+ for i in range(0, 10):
+ raw_volume_types.append(
+ dict(
+ name='new_type',
+ deleted=False,
+ created_at=now,
+ updated_at=now,
+ extra_specs={},
+ deleted_at=None,
+ id=42 + i
+ )
+ )
+
+ request = fakes.HTTPRequest.blank("/v2")
+ output = view_builder.index(request, raw_volume_types)
+
+ self.assertTrue('volume_types' in output)
+ for i in range(0, 10):
+ expected_volume_type = dict(
+ name='new_type',
+ extra_specs={},
+ id=42 + i
+ )
+ self.assertDictMatch(output['volume_types'][i],
+ expected_volume_type)
+
+
+class VolumeTypesSerializerTest(test.TestCase):
+ def _verify_volume_type(self, vtype, tree):
+ self.assertEqual('volume_type', tree.tag)
+ self.assertEqual(vtype['name'], tree.get('name'))
+ self.assertEqual(str(vtype['id']), tree.get('id'))
+ self.assertEqual(1, len(tree))
+ extra_specs = tree[0]
+ self.assertEqual('extra_specs', extra_specs.tag)
+ seen = set(vtype['extra_specs'].keys())
+ for child in extra_specs:
+ self.assertTrue(child.tag in seen)
+ self.assertEqual(vtype['extra_specs'][child.tag], child.text)
+ seen.remove(child.tag)
+ self.assertEqual(len(seen), 0)
+
+ def test_index_serializer(self):
+ serializer = types.VolumeTypesTemplate()
+
+ # Just getting some input data
+ vtypes = return_volume_types_get_all_types(None)
+ text = serializer.serialize({'volume_types': vtypes.values()})
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('volume_types', tree.tag)
+ self.assertEqual(len(vtypes), len(tree))
+ for child in tree:
+ name = child.get('name')
+ self.assertTrue(name in vtypes)
+ self._verify_volume_type(vtypes[name], child)
+
+ def test_voltype_serializer(self):
+ serializer = types.VolumeTypeTemplate()
+
+ vtype = stub_volume_type(1)
+ text = serializer.serialize(dict(volume_type=vtype))
+
+ tree = etree.fromstring(text)
+
+ self._verify_volume_type(vtype, tree)
--- /dev/null
+# Copyright 2013 Josh Durgin
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+import webob
+
+from cinder.api import extensions
+from cinder.api.v2 import volumes
+from cinder import context
+from cinder import db
+from cinder import exception
+from cinder import flags
+from cinder import test
+from cinder.tests.api.openstack import fakes
+from cinder.tests.image import fake as fake_image
+from cinder.volume import api as volume_api
+
+
+FLAGS = flags.FLAGS
+NS = '{http://docs.openstack.org/api/openstack-volume/2.0/content}'
+
+TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001'
+
+
+def stub_snapshot_get(self, context, snapshot_id):
+ if snapshot_id != TEST_SNAPSHOT_UUID:
+ raise exception.NotFound
+
+ return {
+ 'id': snapshot_id,
+ 'volume_id': 12,
+ 'status': 'available',
+ 'volume_size': 100,
+ 'created_at': None,
+ 'display_name': 'Default name',
+ 'display_description': 'Default description',
+ }
+
+
+class VolumeApiTest(test.TestCase):
+ def setUp(self):
+ super(VolumeApiTest, self).setUp()
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ fake_image.stub_out_image_service(self.stubs)
+ self.controller = volumes.VolumeController(self.ext_mgr)
+
+ self.stubs.Set(db, 'volume_get_all', fakes.stub_volume_get_all)
+ self.stubs.Set(db, 'volume_get_all_by_project',
+ fakes.stub_volume_get_all_by_project)
+ self.stubs.Set(volume_api.API, 'get', fakes.stub_volume_get)
+ self.stubs.Set(volume_api.API, 'delete', fakes.stub_volume_delete)
+
+ def test_volume_create(self):
+ self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
+
+ vol = {
+ "size": 100,
+ "display_name": "Volume Test Name",
+ "display_description": "Volume Test Desc",
+ "availability_zone": "zone1:host1"
+ }
+ body = {"volume": vol}
+ req = fakes.HTTPRequest.blank('/v2/volumes')
+ res_dict = self.controller.create(req, body)
+ expected = {
+ 'volume': {
+ 'status': 'fakestatus',
+ 'display_description': 'Volume Test Desc',
+ 'availability_zone': 'zone1:host1',
+ 'display_name': 'Volume Test Name',
+ 'attachments': [
+ {
+ 'device': '/',
+ 'server_id': 'fakeuuid',
+ 'id': '1',
+ 'volume_id': '1'
+ }
+ ],
+ 'volume_type': 'vol_type_name',
+ 'snapshot_id': None,
+ 'metadata': {},
+ 'id': '1',
+ 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'size': 100
+ }
+ }
+ self.assertEqual(res_dict, expected)
+
+ def test_volume_create_with_type(self):
+ vol_type = FLAGS.default_volume_type
+ db.volume_type_create(context.get_admin_context(),
+ dict(name=vol_type, extra_specs={}))
+
+ db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
+ vol_type)
+
+ vol = {
+ "size": 100,
+ "display_name": "Volume Test Name",
+ "display_description": "Volume Test Desc",
+ "availability_zone": "zone1:host1",
+ "volume_type": db_vol_type['name'],
+ }
+ body = {"volume": vol}
+ req = fakes.HTTPRequest.blank('/v2/volumes')
+ res_dict = self.controller.create(req, body)
+ self.assertEquals(res_dict['volume']['volume_type'],
+ db_vol_type['name'])
+
+ def test_volume_creation_fails_with_bad_size(self):
+ vol = {"size": '',
+ "display_name": "Volume Test Name",
+ "display_description": "Volume Test Desc",
+ "availability_zone": "zone1:host1"}
+ body = {"volume": vol}
+ req = fakes.HTTPRequest.blank('/v2/volumes')
+ self.assertRaises(exception.InvalidInput,
+ self.controller.create,
+ req,
+ body)
+
+ def test_volume_create_with_image_id(self):
+ self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
+ self.ext_mgr.extensions = {'os-image-create': 'fake'}
+ vol = {"size": '1',
+ "display_name": "Volume Test Name",
+ "display_description": "Volume Test Desc",
+ "availability_zone": "nova",
+ "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'}
+ expected = {
+ 'volume': {
+ 'status': 'fakestatus',
+ 'display_description': 'Volume Test Desc',
+ 'availability_zone': 'nova',
+ 'display_name': 'Volume Test Name',
+ 'attachments': [
+ {
+ 'device': '/',
+ 'server_id': 'fakeuuid',
+ 'id': '1',
+ 'volume_id': '1'
+ }
+ ],
+ 'volume_type': 'vol_type_name',
+ 'image_id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
+ 'snapshot_id': None,
+ 'metadata': {},
+ 'id': '1',
+ 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'size': '1'}
+ }
+ body = {"volume": vol}
+ req = fakes.HTTPRequest.blank('/v2/volumes')
+ res_dict = self.controller.create(req, body)
+ self.assertEqual(res_dict, expected)
+
+ def test_volume_create_with_image_id_and_snapshot_id(self):
+ self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
+ self.stubs.Set(volume_api.API, "get_snapshot", stub_snapshot_get)
+ self.ext_mgr.extensions = {'os-image-create': 'fake'}
+ vol = {
+ "size": '1',
+ "display_name": "Volume Test Name",
+ "display_description": "Volume Test Desc",
+ "availability_zone": "cinder",
+ "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
+ "snapshot_id": TEST_SNAPSHOT_UUID
+ }
+ body = {"volume": vol}
+ req = fakes.HTTPRequest.blank('/v2/volumes')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ req,
+ body)
+
+ def test_volume_create_with_image_id_is_integer(self):
+ self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
+ self.ext_mgr.extensions = {'os-image-create': 'fake'}
+ vol = {
+ "size": '1',
+ "display_name": "Volume Test Name",
+ "display_description": "Volume Test Desc",
+ "availability_zone": "cinder",
+ "imageRef": 1234,
+ }
+ body = {"volume": vol}
+ req = fakes.HTTPRequest.blank('/v2/volumes')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ req,
+ body)
+
+ def test_volume_create_with_image_id_not_uuid_format(self):
+ self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
+ self.ext_mgr.extensions = {'os-image-create': 'fake'}
+ vol = {
+ "size": '1',
+ "display_name": "Volume Test Name",
+ "display_description": "Volume Test Desc",
+ "availability_zone": "cinder",
+ "imageRef": '12345'
+ }
+ body = {"volume": vol}
+ req = fakes.HTTPRequest.blank('/v2/volumes')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ req,
+ body)
+
+ def test_volume_update(self):
+ self.stubs.Set(volume_api.API, "update", fakes.stub_volume_update)
+ updates = {
+ "display_name": "Updated Test Name",
+ }
+ body = {"volume": updates}
+ req = fakes.HTTPRequest.blank('/v2/volumes/1')
+ res_dict = self.controller.update(req, '1', body)
+ expected = {
+ 'volume': {
+ 'status': 'fakestatus',
+ 'display_description': 'displaydesc',
+ 'availability_zone': 'fakeaz',
+ 'display_name': 'Updated Test Name',
+ 'attachments': [
+ {
+ 'id': '1',
+ 'volume_id': '1',
+ 'server_id': 'fakeuuid',
+ 'device': '/',
+ }
+ ],
+ 'volume_type': 'vol_type_name',
+ 'snapshot_id': None,
+ 'metadata': {},
+ 'id': '1',
+ 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'size': 1,
+ }
+ }
+ self.assertEquals(res_dict, expected)
+
+ def test_volume_update_metadata(self):
+ self.stubs.Set(volume_api.API, "update", fakes.stub_volume_update)
+ updates = {
+ "metadata": {"qos_max_iops": 2000}
+ }
+ body = {"volume": updates}
+ req = fakes.HTTPRequest.blank('/v2/volumes/1')
+ res_dict = self.controller.update(req, '1', body)
+ expected = {'volume': {
+ 'status': 'fakestatus',
+ 'display_description': 'displaydesc',
+ 'availability_zone': 'fakeaz',
+ 'display_name': 'displayname',
+ 'attachments': [{
+ 'id': '1',
+ 'volume_id': '1',
+ 'server_id': 'fakeuuid',
+ 'device': '/',
+ }],
+ 'volume_type': 'vol_type_name',
+ 'snapshot_id': None,
+ 'metadata': {"qos_max_iops": 2000},
+ 'id': '1',
+ 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'size': 1,
+ }}
+ self.assertEquals(res_dict, expected)
+
+ def test_update_empty_body(self):
+ body = {}
+ req = fakes.HTTPRequest.blank('/v2/volumes/1')
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.update,
+ req, '1', body)
+
+ def test_update_invalid_body(self):
+ body = {
+ 'display_name': 'missing top level volume key'
+ }
+ req = fakes.HTTPRequest.blank('/v2/volumes/1')
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.update,
+ req, '1', body)
+
+ def test_update_not_found(self):
+ self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound)
+ updates = {
+ "display_name": "Updated Test Name",
+ }
+ body = {"volume": updates}
+ req = fakes.HTTPRequest.blank('/v2/volumes/1')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req, '1', body)
+
+ def test_volume_list(self):
+ self.stubs.Set(volume_api.API, 'get_all',
+ fakes.stub_volume_get_all_by_project)
+
+ req = fakes.HTTPRequest.blank('/v2/volumes')
+ res_dict = self.controller.index(req)
+ expected = {
+ 'volumes': [
+ {
+ 'status': 'fakestatus',
+ 'display_description': 'displaydesc',
+ 'availability_zone': 'fakeaz',
+ 'display_name': 'displayname',
+ 'attachments': [
+ {
+ 'device': '/',
+ 'server_id': 'fakeuuid',
+ 'id': '1',
+ 'volume_id': '1'
+ }
+ ],
+ 'volume_type': 'vol_type_name',
+ 'snapshot_id': None,
+ 'metadata': {},
+ 'id': '1',
+ 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'size': 1
+ }
+ ]
+ }
+ self.assertEqual(res_dict, expected)
+
+ def test_volume_list_detail(self):
+ self.stubs.Set(volume_api.API, 'get_all',
+ fakes.stub_volume_get_all_by_project)
+ req = fakes.HTTPRequest.blank('/v2/volumes/detail')
+ res_dict = self.controller.detail(req)
+ expected = {
+ 'volumes': [
+ {
+ 'status': 'fakestatus',
+ 'display_description': 'displaydesc',
+ 'availability_zone': 'fakeaz',
+ 'display_name': 'displayname',
+ 'attachments': [
+ {
+ 'device': '/',
+ 'server_id': 'fakeuuid',
+ 'id': '1',
+ 'volume_id': '1'
+ }
+ ],
+ 'volume_type': 'vol_type_name',
+ 'snapshot_id': None,
+ 'metadata': {},
+ 'id': '1',
+ 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'size': 1
+ }
+ ]
+ }
+ self.assertEqual(res_dict, expected)
+
+ def test_volume_list_by_name(self):
+ def stub_volume_get_all_by_project(context, project_id):
+ return [
+ fakes.stub_volume(1, display_name='vol1'),
+ fakes.stub_volume(2, display_name='vol2'),
+ fakes.stub_volume(3, display_name='vol3'),
+ ]
+ self.stubs.Set(db, 'volume_get_all_by_project',
+ stub_volume_get_all_by_project)
+
+ # no display_name filter
+ req = fakes.HTTPRequest.blank('/v2/volumes')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['volumes']), 3)
+ # filter on display_name
+ req = fakes.HTTPRequest.blank('/v2/volumes?display_name=vol2')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['volumes']), 1)
+ self.assertEqual(resp['volumes'][0]['display_name'], 'vol2')
+ # filter no match
+ req = fakes.HTTPRequest.blank('/v2/volumes?display_name=vol4')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['volumes']), 0)
+
+ def test_volume_list_by_status(self):
+ def stub_volume_get_all_by_project(context, project_id):
+ return [
+ fakes.stub_volume(1, display_name='vol1', status='available'),
+ fakes.stub_volume(2, display_name='vol2', status='available'),
+ fakes.stub_volume(3, display_name='vol3', status='in-use'),
+ ]
+ self.stubs.Set(db, 'volume_get_all_by_project',
+ stub_volume_get_all_by_project)
+ # no status filter
+ req = fakes.HTTPRequest.blank('/v2/volumes')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['volumes']), 3)
+ # single match
+ req = fakes.HTTPRequest.blank('/v2/volumes?status=in-use')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['volumes']), 1)
+ self.assertEqual(resp['volumes'][0]['status'], 'in-use')
+ # multiple match
+ req = fakes.HTTPRequest.blank('/v2/volumes?status=available')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['volumes']), 2)
+ for volume in resp['volumes']:
+ self.assertEqual(volume['status'], 'available')
+ # multiple filters
+ req = fakes.HTTPRequest.blank('/v2/volumes?status=available&'
+ 'display_name=vol1')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['volumes']), 1)
+ self.assertEqual(resp['volumes'][0]['display_name'], 'vol1')
+ self.assertEqual(resp['volumes'][0]['status'], 'available')
+ # no match
+ req = fakes.HTTPRequest.blank('/v2/volumes?status=in-use&'
+ 'display_name=vol1')
+ resp = self.controller.index(req)
+ self.assertEqual(len(resp['volumes']), 0)
+
+ def test_volume_show(self):
+ req = fakes.HTTPRequest.blank('/v2/volumes/1')
+ res_dict = self.controller.show(req, '1')
+ expected = {
+ 'volume': {
+ 'status': 'fakestatus',
+ 'display_description': 'displaydesc',
+ 'availability_zone': 'fakeaz',
+ 'display_name': 'displayname',
+ 'attachments': [
+ {
+ 'device': '/',
+ 'server_id': 'fakeuuid',
+ 'id': '1',
+ 'volume_id': '1'
+ }
+ ],
+ 'volume_type': 'vol_type_name',
+ 'snapshot_id': None,
+ 'metadata': {},
+ 'id': '1',
+ 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'size': 1
+ }
+ }
+ self.assertEqual(res_dict, expected)
+
+ def test_volume_show_no_attachments(self):
+ def stub_volume_get(self, context, volume_id):
+ return fakes.stub_volume(volume_id, attach_status='detached')
+
+ self.stubs.Set(volume_api.API, 'get', stub_volume_get)
+
+ req = fakes.HTTPRequest.blank('/v2/volumes/1')
+ res_dict = self.controller.show(req, '1')
+ expected = {
+ 'volume': {
+ 'status': 'fakestatus',
+ 'display_description': 'displaydesc',
+ 'availability_zone': 'fakeaz',
+ 'display_name': 'displayname',
+ 'attachments': [],
+ 'volume_type': 'vol_type_name',
+ 'snapshot_id': None,
+ 'metadata': {},
+ 'id': '1',
+ 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'size': 1
+ }
+ }
+ self.assertEqual(res_dict, expected)
+
+ def test_volume_show_no_volume(self):
+ self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound)
+
+ req = fakes.HTTPRequest.blank('/v2/volumes/1')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, 1)
+
+ def test_volume_delete(self):
+ req = fakes.HTTPRequest.blank('/v2/volumes/1')
+ resp = self.controller.delete(req, 1)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_volume_delete_no_volume(self):
+ self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound)
+
+ req = fakes.HTTPRequest.blank('/v2/volumes/1')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, 1)
+
+ def test_admin_list_volumes_limited_to_project(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/volumes',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertTrue('volumes' in res)
+ self.assertEqual(1, len(res['volumes']))
+
+ def test_admin_list_volumes_all_tenants(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/volumes?all_tenants=1',
+ use_admin_context=True)
+ res = self.controller.index(req)
+ self.assertTrue('volumes' in res)
+ self.assertEqual(3, len(res['volumes']))
+
+ def test_all_tenants_non_admin_gets_all_tenants(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/volumes?all_tenants=1')
+ res = self.controller.index(req)
+ self.assertTrue('volumes' in res)
+ self.assertEqual(1, len(res['volumes']))
+
+ def test_non_admin_get_by_project(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/volumes')
+ res = self.controller.index(req)
+ self.assertTrue('volumes' in res)
+ self.assertEqual(1, len(res['volumes']))
+
+
+class VolumeSerializerTest(test.TestCase):
+ def _verify_volume_attachment(self, attach, tree):
+ for attr in ('id', 'volume_id', 'server_id', 'device'):
+ self.assertEqual(str(attach[attr]), tree.get(attr))
+
+ def _verify_volume(self, vol, tree):
+ self.assertEqual(tree.tag, NS + 'volume')
+
+ for attr in ('id', 'status', 'size', 'availability_zone', 'created_at',
+ 'display_name', 'display_description', 'volume_type',
+ 'snapshot_id'):
+ self.assertEqual(str(vol[attr]), tree.get(attr))
+
+ for child in tree:
+ print child.tag
+ self.assertTrue(child.tag in (NS + 'attachments', NS + 'metadata'))
+ # Child elements inherit the default namespace, so compare against the
+ # namespace-qualified tag names or these branches never run.
+ if child.tag == NS + 'attachments':
+ self.assertEqual(1, len(child))
+ self.assertEqual(NS + 'attachment', child[0].tag)
+ self._verify_volume_attachment(vol['attachments'][0], child[0])
+ elif child.tag == NS + 'metadata':
+ not_seen = set(vol['metadata'].keys())
+ for gr_child in child:
+ self.assertTrue(gr_child.get("key") in not_seen)
+ self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
+ gr_child.text)
+ not_seen.remove(gr_child.get('key'))
+ self.assertEqual(0, len(not_seen))
+
+ def test_volume_show_create_serializer(self):
+ serializer = volumes.VolumeTemplate()
+ raw_volume = dict(
+ id='vol_id',
+ status='vol_status',
+ size=1024,
+ availability_zone='vol_availability',
+ created_at=datetime.datetime.now(),
+ attachments=[
+ dict(
+ id='vol_id',
+ volume_id='vol_id',
+ server_id='instance_uuid',
+ device='/foo'
+ )
+ ],
+ display_name='vol_name',
+ display_description='vol_desc',
+ volume_type='vol_type',
+ snapshot_id='snap_id',
+ metadata=dict(
+ foo='bar',
+ baz='quux',
+ ),
+ )
+ text = serializer.serialize(dict(volume=raw_volume))
+
+ print text
+ tree = etree.fromstring(text)
+
+ self._verify_volume(raw_volume, tree)
+
+ def test_volume_index_detail_serializer(self):
+ serializer = volumes.VolumesTemplate()
+ raw_volumes = [
+ dict(
+ id='vol1_id',
+ status='vol1_status',
+ size=1024,
+ availability_zone='vol1_availability',
+ created_at=datetime.datetime.now(),
+ attachments=[
+ dict(
+ id='vol1_id',
+ volume_id='vol1_id',
+ server_id='instance_uuid',
+ device='/foo1'
+ )
+ ],
+ display_name='vol1_name',
+ display_description='vol1_desc',
+ volume_type='vol1_type',
+ snapshot_id='snap1_id',
+ metadata=dict(
+ foo='vol1_foo',
+ bar='vol1_bar',
+ ),
+ ),
+ dict(
+ id='vol2_id',
+ status='vol2_status',
+ size=1024,
+ availability_zone='vol2_availability',
+ created_at=datetime.datetime.now(),
+ attachments=[
+ dict(
+ id='vol2_id',
+ volume_id='vol2_id',
+ server_id='instance_uuid',
+ device='/foo2')],
+ display_name='vol2_name',
+ display_description='vol2_desc',
+ volume_type='vol2_type',
+ snapshot_id='snap2_id',
+ metadata=dict(
+ foo='vol2_foo',
+ bar='vol2_bar',
+ ),
+ )
+ ]
+ text = serializer.serialize(dict(volumes=raw_volumes))
+
+ print text
+ tree = etree.fromstring(text)
+
+ self.assertEqual(NS + 'volumes', tree.tag)
+ self.assertEqual(len(raw_volumes), len(tree))
+ for idx, child in enumerate(tree):
+ self._verify_volume(raw_volumes[idx], child)
+
+
+class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
+ self.deserializer = volumes.CreateDeserializer()
+
+ def test_minimal_volume(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
+ size="1"></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "size": "1",
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_display_name(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
+ size="1"
+ display_name="Volume-xml"></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "size": "1",
+ "display_name": "Volume-xml",
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_display_description(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
+ size="1"
+ display_name="Volume-xml"
+ display_description="description"></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "size": "1",
+ "display_name": "Volume-xml",
+ "display_description": "description",
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_volume_type(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
+ size="1"
+ display_name="Volume-xml"
+ display_description="description"
+ volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "size": "1",
+ "display_name": "Volume-xml",
+ "display_description": "description",
+ "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_availability_zone(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
+ size="1"
+ display_name="Volume-xml"
+ display_description="description"
+ volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
+ availability_zone="us-east1"></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "size": "1",
+ "display_name": "Volume-xml",
+ "display_description": "description",
+ "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
+ "availability_zone": "us-east1",
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_metadata(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
+ display_name="Volume-xml"
+ size="1">
+ <metadata><meta key="Type">work</meta></metadata></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "display_name": "Volume-xml",
+ "size": "1",
+ "metadata": {
+ "Type": "work",
+ },
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_full_volume(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
+ size="1"
+ display_name="Volume-xml"
+ display_description="description"
+ volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
+ availability_zone="us-east1">
+ <metadata><meta key="Type">work</meta></metadata></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "size": "1",
+ "display_name": "Volume-xml",
+ "display_description": "description",
+ "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
+ "availability_zone": "us-east1",
+ "metadata": {
+ "Type": "work",
+ },
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+
+class VolumesUnprocessableEntityTestCase(test.TestCase):
+
+ """
+ Tests for the places where we return 422 Unprocessable Entity.
+ """
+
+ def setUp(self):
+ super(VolumesUnprocessableEntityTestCase, self).setUp()
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = volumes.VolumeController(self.ext_mgr)
+
+ def _unprocessable_volume_create(self, body):
+ req = fakes.HTTPRequest.blank('/v2/fake/volumes')
+ req.method = 'POST'
+
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.create, req, body)
+
+ def test_create_no_body(self):
+ self._unprocessable_volume_create(body=None)
+
+ def test_create_missing_volume(self):
+ body = {'foo': {'a': 'b'}}
+ self._unprocessable_volume_create(body=body)
+
+ def test_create_malformed_entity(self):
+ body = {'volume': 'string'}
+ self._unprocessable_volume_create(body=body)
use = call:cinder.api:root_app_factory
/: apiversions
/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
keystone = faultwrap sizelimit authtoken keystonecontext apiv1
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = faultwrap sizelimit noauth apiv2
+keystone = faultwrap sizelimit authtoken keystonecontext apiv2
+keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv2
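+# Which of the named pipelines above actually runs is chosen by
+# pipeline_factory, presumably keyed off the deployment's auth strategy
+# (e.g. noauth vs. keystone), mirroring the existing v1 composite.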
+
[filter:faultwrap]
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
[app:apiv1]
paste.app_factory = cinder.api.v1.router:APIRouter.factory
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
[pipeline:apiversions]
pipeline = faultwrap osvolumeversionapp