try:
backup = self.backup_api.get(context, backup_id=id)
+ req.cache_db_backup(backup)
except exception.BackupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
backups = self.backup_api.get_all(context, search_opts=filters)
limited_list = common.limited(backups, req)
+ req.cache_db_backups(limited_list)
if is_detail:
backups = self._view_builder.detail_list(req, limited_list)
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder.openstack.common import log as logging
-from cinder import volume
LOG = logging.getLogger(__name__)
class ExtendedSnapshotAttributesController(wsgi.Controller):
- def __init__(self, *args, **kwargs):
- super(ExtendedSnapshotAttributesController, self).__init__(*args,
- **kwargs)
- self.volume_api = volume.API()
-
- def _get_snapshots(self, context):
- snapshots = self.volume_api.get_all_snapshots(context)
- rval = dict((snapshot['id'], snapshot) for snapshot in snapshots)
- return rval
-
def _extend_snapshot(self, req, resp_snap):
- db_snap = req.cached_resource_by_id(resp_snap['id'])
+ db_snap = req.get_db_snapshot(resp_snap['id'])
for attr in ['project_id', 'progress']:
key = "%s:%s" % (Extended_snapshot_attributes.alias, attr)
resp_snap[key] = db_snap[attr]
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder.openstack.common import log as logging
-from cinder import volume
LOG = logging.getLogger(__name__)
class VolumeHostAttributeController(wsgi.Controller):
- def __init__(self, *args, **kwargs):
- super(VolumeHostAttributeController, self).__init__(*args, **kwargs)
- self.volume_api = volume.API()
-
def _add_volume_host_attribute(self, context, req, resp_volume):
- db_volume = req.cached_resource_by_id(resp_volume['id'])
+ db_volume = req.get_db_volume(resp_volume['id'])
key = "%s:host" % Volume_host_attribute.alias
resp_volume[key] = db_volume['host']
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
-from cinder import volume
authorize = extensions.soft_extension_authorizer('volume',
class VolumeMigStatusAttributeController(wsgi.Controller):
- def __init__(self, *args, **kwargs):
- super(VolumeMigStatusAttributeController, self).__init__(*args,
- **kwargs)
- self.volume_api = volume.API()
-
def _add_volume_mig_status_attribute(self, req, context, resp_volume):
- db_volume = req.cached_resource_by_id(resp_volume['id'])
+ db_volume = req.get_db_volume(resp_volume['id'])
key = "%s:migstat" % Volume_mig_status_attribute.alias
resp_volume[key] = db_volume['migration_status']
key = "%s:name_id" % Volume_mig_status_attribute.alias
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
-from cinder import volume
authorize = extensions.soft_extension_authorizer('volume',
class VolumeTenantAttributeController(wsgi.Controller):
- def __init__(self, *args, **kwargs):
- super(VolumeTenantAttributeController, self).__init__(*args, **kwargs)
- self.volume_api = volume.API()
-
def _add_volume_tenant_attribute(self, context, req, resp_volume):
- db_volume = req.cached_resource_by_id(resp_volume['id'])
+ db_volume = req.get_db_volume(resp_volume['id'])
key = "%s:tenant_id" % Volume_tenant_attribute.alias
resp_volume[key] = db_volume['project_id']
return None
return resources.get(resource_id)
+ def cache_db_items(self, key, items, item_key='id'):
+ """Allow API methods to store objects from a DB query to be
+ used by API extensions within the same API request.
+
+ An instance of this class only lives for the lifetime of a
+ single API request, so there's no need to implement full
+ cache management.
+ """
+ self.cache_resource(items, item_key, key)
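+ # Example: a controller caches query results with
+ # req.cache_db_items('volumes', volumes), and an extension handling the
+ # same request later reads one back with req.get_db_item('volumes', id)
+ # instead of hitting the database again.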
+
+ def get_db_items(self, key):
+ """Allow an API extension to get previously stored objects within
+ the same API request.
+
+ Note that the object data will be slightly stale.
+ """
+ return self.cached_resource(key)
+
+ def get_db_item(self, key, item_key):
+ """Allow an API extension to get a previously stored object
+ within the same API request.
+
+ Note that the object data will be slightly stale.
+ """
+ return self.get_db_items(key).get(item_key)
+
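+ # The typed helpers below are thin wrappers around cache_db_items and
+ # get_db_item(s) with a fixed key per resource type ('volumes',
+ # 'volume_types', 'snapshots', 'backups'), so callers do not repeat the
+ # key strings.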
+ def cache_db_volumes(self, volumes):
+ # NOTE(mgagne) Cache it twice for backward compatibility reasons
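+ # (caching under the request path as well keeps any remaining
+ # cache_resource()/cached_resource_by_id() callers working, since those
+ # default to keying the cache by the request path)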
+ self.cache_db_items('volumes', volumes, 'id')
+ self.cache_db_items(self.path, volumes, 'id')
+
+ def cache_db_volume(self, volume):
+ # NOTE(mgagne) Cache it twice for backward compatibility reasons
+ self.cache_db_items('volumes', [volume], 'id')
+ self.cache_db_items(self.path, [volume], 'id')
+
+ def get_db_volumes(self):
+ return (self.get_db_items('volumes') or
+ self.get_db_items(self.path))
+
+ def get_db_volume(self, volume_id):
+ return (self.get_db_item('volumes', volume_id) or
+ self.get_db_item(self.path, volume_id))
+
+ def cache_db_volume_types(self, volume_types):
+ self.cache_db_items('volume_types', volume_types, 'id')
+
+ def cache_db_volume_type(self, volume_type):
+ self.cache_db_items('volume_types', [volume_type], 'id')
+
+ def get_db_volume_types(self):
+ return self.get_db_items('volume_types')
+
+ def get_db_volume_type(self, volume_type_id):
+ return self.get_db_item('volume_types', volume_type_id)
+
+ def cache_db_snapshots(self, snapshots):
+ self.cache_db_items('snapshots', snapshots, 'id')
+
+ def cache_db_snapshot(self, snapshot):
+ self.cache_db_items('snapshots', [snapshot], 'id')
+
+ def get_db_snapshots(self):
+ return self.get_db_items('snapshots')
+
+ def get_db_snapshot(self, snapshot_id):
+ return self.get_db_item('snapshots', snapshot_id)
+
+ def cache_db_backups(self, backups):
+ self.cache_db_items('backups', backups, 'id')
+
+ def cache_db_backup(self, backup):
+ self.cache_db_items('backups', [backup], 'id')
+
+ def get_db_backups(self):
+ return self.get_db_items('backups')
+
+ def get_db_backup(self, backup_id):
+ return self.get_db_item('backups', backup_id)
+
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'cinder.best_content_type' not in self.environ:
try:
snapshot = self.volume_api.get_snapshot(context, id)
- req.cache_resource(snapshot)
+ req.cache_db_snapshot(snapshot)
except exception.NotFound:
raise exc.HTTPNotFound()
snapshots = self.volume_api.get_all_snapshots(context,
search_opts=search_opts)
limited_list = common.limited(snapshots, req)
- req.cache_resource(limited_list)
+ req.cache_db_snapshots(limited_list)
res = [entity_maker(context, snapshot) for snapshot in limited_list]
return {'snapshots': res}
snapshot.get('display_name'),
snapshot.get('display_description'),
**kwargs)
+ req.cache_db_snapshot(new_snapshot)
retval = _translate_snapshot_detail_view(context, new_snapshot)
raise exc.HTTPNotFound()
snapshot.update(update_dict)
+ req.cache_db_snapshot(snapshot)
return {'snapshot': _translate_snapshot_detail_view(context, snapshot)}
try:
vol = self.volume_api.get(context, id, viewable_admin_meta=True)
- req.cache_resource(vol)
+ req.cache_db_volume(vol)
except exception.NotFound:
raise exc.HTTPNotFound()
utils.add_visible_admin_metadata(volume)
limited_list = common.limited(volumes, req)
- req.cache_resource(limited_list)
+ req.cache_db_volumes(limited_list)
+
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
try:
snapshot = self.volume_api.get_snapshot(context, id)
- req.cache_resource(snapshot)
+ req.cache_db_snapshot(snapshot)
except exception.NotFound:
msg = _("Snapshot could not be found")
raise exc.HTTPNotFound(explanation=msg)
snapshots = self.volume_api.get_all_snapshots(context,
search_opts=search_opts)
limited_list = common.limited(snapshots, req)
- req.cache_resource(limited_list)
+ req.cache_db_snapshots(limited_list)
res = [entity_maker(context, snapshot) for snapshot in limited_list]
return {'snapshots': res}
snapshot.get('display_name'),
snapshot.get('description'),
**kwargs)
+ req.cache_db_snapshot(new_snapshot)
retval = _translate_snapshot_detail_view(context, new_snapshot)
raise exc.HTTPNotFound(explanation=msg)
snapshot.update(update_dict)
+ req.cache_db_snapshot(snapshot)
return {'snapshot': _translate_snapshot_detail_view(context, snapshot)}
try:
vol = self.volume_api.get(context, id, viewable_admin_meta=True)
- req.cache_resource(vol)
+ req.cache_db_volume(vol)
except exception.NotFound:
msg = _("Volume could not be found")
raise exc.HTTPNotFound(explanation=msg)
utils.add_visible_admin_metadata(volume)
limited_list = common.limited(volumes, req)
+ req.cache_db_volumes(limited_list)
if is_detail:
volumes = self._view_builder.detail_list(req, limited_list)
else:
volumes = self._view_builder.summary_list(req, limited_list)
- req.cache_resource(limited_list)
return volumes
def _image_uuid_from_href(self, image_href):
self.assertEqual(snapshot.get('%sprogress' % self.prefix), progress)
def test_show(self):
- url = '/v2/fake/snapshots/%s' % UUID2
+ url = '/v2/fake/snapshots/%s' % UUID1
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
request.cached_resource_by_id('o-0',
name='other-resource'))
+ def test_cache_and_retrieve_volumes(self):
+ self._test_cache_and_retrieve_resources('volume')
+
+ def test_cache_and_retrieve_volume_types(self):
+ self._test_cache_and_retrieve_resources('volume_type')
+
+ def test_cache_and_retrieve_snapshots(self):
+ self._test_cache_and_retrieve_resources('snapshot')
+
+ def test_cache_and_retrieve_backups(self):
+ self._test_cache_and_retrieve_resources('backup')
+
+ def _test_cache_and_retrieve_resources(self, resource_name):
+ """Generic helper for cache tests."""
+ cache_all_func = 'cache_db_%ss' % resource_name
+ cache_one_func = 'cache_db_%s' % resource_name
+ get_db_all_func = 'get_db_%ss' % resource_name
+ get_db_one_func = 'get_db_%s' % resource_name
+
+ r = wsgi.Request.blank('/foo')
+ resources = []
+ for x in xrange(3):
+ resources.append({'id': 'id%s' % x})
+
+ # Cache the first two items with the bulk helper
+ getattr(r, cache_all_func)(resources[:2])
+ # Cache the third item with the single-item helper
+ getattr(r, cache_one_func)(resources[2])
+
+ self.assertEqual(getattr(r, get_db_one_func)('id0'), resources[0])
+ self.assertEqual(getattr(r, get_db_one_func)('id1'), resources[1])
+ self.assertEqual(getattr(r, get_db_one_func)('id2'), resources[2])
+ self.assertIsNone(getattr(r, get_db_one_func)('id3'))
+ self.assertEqual(getattr(r, get_db_all_func)(), {
+ 'id0': resources[0],
+ 'id1': resources[1],
+ 'id2': resources[2]})
+
class ActionDispatcherTest(test.TestCase):
def test_dispatch(self):