from cinder import db
from cinder import exception
from cinder.i18n import _
+from cinder import objects
from cinder import rpc
from cinder import volume
collection = 'snapshots'
def _update(self, *args, **kwargs):
    """Update a snapshot's fields through the Snapshot versioned object.

    Expects positional ``(context, snapshot_id, fields)``: loads the
    object, applies the field changes and persists them via ``save()``.
    """
    # Strict unpacking fails loudly on a wrong arity instead of silently
    # ignoring surplus positional arguments as args[0]/args[1]/args[2]
    # indexing would.
    context, snapshot_id, fields = args
    # NOTE(review): **kwargs is accepted but unused — confirm no caller
    # passes keyword arguments here.
    snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
    snapshot.update(fields)
    snapshot.save()
def _get(self, *args, **kwargs):
return self.volume_api.get_snapshot(*args, **kwargs)
from cinder import db
from cinder import exception
from cinder.i18n import _, _LI
+from cinder import objects
from cinder import utils
from cinder.volume import api as volume_api
project_ids = list(set(project_ids))
for project_id in project_ids:
(count, sum) = db.volume_data_get_for_project(context, project_id)
- (snap_count, snap_sum) = db.snapshot_data_get_for_project(
- context,
- project_id)
+ (snap_count, snap_sum) = (
+ objects.Snapshot.snapshot_data_get_for_project(context,
+ project_id))
resources.append(
{'resource':
{'host': host,
from cinder.api import extensions
from cinder.api.openstack import wsgi
-from cinder import db
from cinder.i18n import _, _LI
+from cinder import objects
LOG = logging.getLogger(__name__)
status_map = {'creating': ['creating', 'available', 'error'],
'deleting': ['deleting', 'error_deleting']}
- current_snapshot = db.snapshot_get(context, id)
+ current_snapshot = objects.Snapshot.get_by_id(context, id)
- if current_snapshot['status'] not in status_map:
+ if current_snapshot.status not in status_map:
msg = _("Snapshot status %(cur)s not allowed for "
"update_snapshot_status") % {
- 'cur': current_snapshot['status']}
+ 'cur': current_snapshot.status}
raise webob.exc.HTTPBadRequest(explanation=msg)
- if status not in status_map[current_snapshot['status']]:
+ if status not in status_map[current_snapshot.status]:
msg = _("Provided snapshot status %(provided)s not allowed for "
"snapshot with status %(current)s.") % \
{'provided': status,
- 'current': current_snapshot['status']}
+ 'current': current_snapshot.status}
raise webob.exc.HTTPBadRequest(explanation=msg)
update_dict = {'id': id,
LOG.info(_LI("Updating snapshot %(id)s with info %(dict)s"),
{'id': id, 'dict': update_dict})
- db.snapshot_update(context, id, update_dict)
+ current_snapshot.update(update_dict)
+ current_snapshot.save()
return webob.Response(status_int=202)
from cinder import context
from cinder import db
from cinder.i18n import _, _LE
+from cinder import objects
from cinder import rpc
from cinder import utils
from cinder import version
volumes = db.volume_get_active_by_window(admin_context,
begin,
end)
# Keep len(volumes) as a lazy %-style argument to LOG.debug; the replaced
# form "LOG.debug(...), len(volumes)" built a throwaway tuple and left the
# "%d" placeholder with no argument (a logging formatting error).
LOG.debug("Found %d volumes", len(volumes))
for volume_ref in volumes:
try:
LOG.debug("Send exists notification for <volume_id: "
LOG.exception(_LE("Delete volume notification failed: %s"),
exc_msg, resource=volume_ref)
# Fetch snapshots through the versioned-object list API instead of raw db.
snapshots = objects.SnapshotList.get_active_by_window(admin_context,
                                                      begin, end)
# len(snapshots) must be passed as a lazy %-style argument; appending
# ", len(snapshots)" after the call closes would leave "%d" unfilled.
LOG.debug("Found %d snapshots", len(snapshots))
for snapshot_ref in snapshots:
try:
LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
from cinder.db import base
from cinder import exception
from cinder.i18n import _, _LE, _LW
+from cinder import objects
import cinder.policy
from cinder import quota
from cinder.scheduler import rpcapi as scheduler_rpcapi
def _create_cg_from_cgsnapshot(self, context, group, cgsnapshot):
try:
- snapshots = self.db.snapshot_get_all_for_cgsnapshot(
+ snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot['id'])
if not snapshots:
LOG.error(msg)
raise exception.InvalidConsistencyGroup(reason=msg)
- snapshots = self.db.snapshot_get_all_for_volume(context,
- volume['id'])
+ snapshots = objects.SnapshotList.get_all_for_volume(context,
+ volume['id'])
if snapshots:
msg = _("Volume in consistency group still has "
"dependent snapshots.")
if not md_was_changed:
self.obj_reset_changes(['metadata'])
@base.remotable_classmethod
def snapshot_data_get_for_project(cls, context, project_id,
                                  volume_type_id=None):
    """Return (count, sum) of snapshot usage for a project.

    Thin pass-through to the db API; volume_type_id optionally narrows
    the query to one volume type.
    """
    usage = db.snapshot_data_get_for_project(context, project_id,
                                             volume_type_id)
    return usage
@base.CinderObjectRegistry.register
class SnapshotList(base.ObjectListBase, base.CinderObject):
snapshots,
expected_attrs=['metadata'])
@base.remotable_classmethod
def get_by_host(cls, context, host, filters=None):
    """Return a SnapshotList of the snapshots on a given host.

    filters, when given, is forwarded to the db layer to narrow the
    result set. Each row is wrapped in a Snapshot object with its
    metadata preloaded.
    """
    db_snapshots = db.snapshot_get_by_host(context, host, filters)
    return base.obj_make_list(context, cls(context), objects.Snapshot,
                              db_snapshots, expected_attrs=['metadata'])
@base.remotable_classmethod
def get_all_by_project(cls, context, project_id):
snapshots = db.snapshot_get_all_by_project(context, project_id)
snapshots = db.snapshot_get_all_for_volume(context, volume_id)
return base.obj_make_list(context, cls(context), objects.Snapshot,
snapshots, expected_attrs=['metadata'])
+
@base.remotable_classmethod
def get_active_by_window(cls, context, begin, end):
    """Return snapshots active within the given time window.

    Window semantics (inclusive/exclusive bounds) are those of
    db.snapshot_get_active_by_window. Rows come back wrapped as
    Snapshot objects with metadata preloaded.
    """
    db_snapshots = db.snapshot_get_active_by_window(context, begin, end)
    return base.obj_make_list(context, cls(context), objects.Snapshot,
                              db_snapshots, expected_attrs=['metadata'])
+
@base.remotable_classmethod
def get_all_for_cgsnapshot(cls, context, cgsnapshot_id):
    """Return all snapshots belonging to one cgsnapshot.

    Rows from the db layer are wrapped as Snapshot objects with their
    metadata preloaded.
    """
    db_snapshots = db.snapshot_get_all_for_cgsnapshot(context,
                                                      cgsnapshot_id)
    return base.obj_make_list(context, cls(context), objects.Snapshot,
                              db_snapshots, expected_attrs=['metadata'])
from cinder import context
from cinder import db
from cinder import exception
+from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
def test_snapshot_reset_status(self):
    """Resetting an error_deleting snapshot to error returns 202."""
    ctx = context.RequestContext('admin', 'fake', True)
    volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                    'provider_location': '', 'size': 1,
                                    'availability_zone': 'test',
                                    'attach_status': 'detached'})
    kwargs = {
        'volume_id': volume['id'],
        'cgsnapshot_id': None,
        'user_id': ctx.user_id,
        'project_id': ctx.project_id,
        'status': 'error_deleting',
        'progress': '0%',
        'volume_size': volume['size'],
        'metadata': {}
    }
    snapshot = objects.Snapshot(context=ctx, **kwargs)
    snapshot.create()
    resp = self._issue_snapshot_reset(ctx,
                                      snapshot,
                                      {'status': 'error'})
    self.assertEqual(resp.status_int, 202)
    # Use attribute access on the versioned object rather than
    # dict-style indexing (snapshot['id']), consistent with the
    # .status assertion below.
    snapshot = objects.Snapshot.get_by_id(ctx, snapshot.id)
    self.assertEqual(snapshot.status, 'error')
def test_invalid_status_for_snapshot(self):
ctx = context.RequestContext('admin', 'fake', True)
# License for the specific language governing permissions and limitations
# under the License.
+import mock
from oslo_serialization import jsonutils
import webob
def setUp(self):
super(SnapshotActionsTest, self).setUp()
- def test_update_snapshot_status(self):
+ @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
+ def test_update_snapshot_status(self, metadata_get):
self.stubs.Set(db, 'snapshot_get', stub_snapshot_get)
self.stubs.Set(db, 'snapshot_update', stub_snapshot_update)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
- def test_update_snapshot_status_invalid_status(self):
+ @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
+ def test_update_snapshot_status_invalid_status(self, metadata_get):
self.stubs.Set(db, 'snapshot_get', stub_snapshot_get)
body = {'os-update_snapshot_status': {'status': 'in-use'}}
req = webob.Request.blank('/v2/fake/snapshots/1/action')
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
+from cinder.tests.unit import fake_snapshot
# This list of fake volumes is used by our tests. Each is configured in a
inspect the contents.
"""
if volume_id == snapshot_vol_id:
- return ['fake_snapshot']
+ db_snapshot = {'volume_id': volume_id}
+ snapshot = fake_snapshot.fake_db_snapshot(**db_snapshot)
+ return [snapshot]
return []
res = self._get_resp(attached_vol_id)
self.assertEqual(res.status_int, 400, res)
@mock.patch('cinder.db.snapshot_metadata_get', return_value={})
def test_unmanage_volume_with_snapshots(self, metadata_get):
    """Return 400 if the volume exists but has snapshots."""
    res = self._get_resp(snapshot_vol_id)
    self.assertEqual(res.status_int, 400, res)
volume_get_by_id.assert_called_once_with(self.context,
snapshot.volume_id)
@mock.patch('cinder.db.snapshot_data_get_for_project')
def test_snapshot_data_get_for_project(self, snapshot_data_get):
    """The object classmethod delegates straight to the db API."""
    snapshot = snapshot_obj.Snapshot._from_db_object(
        self.context, snapshot_obj.Snapshot(), fake_snapshot)
    volume_type_id = mock.sentinel.volume_type_id
    snapshot.snapshot_data_get_for_project(self.context, self.project_id,
                                           volume_type_id)
    snapshot_data_get.assert_called_once_with(self.context,
                                              self.project_id,
                                              volume_type_id)
class TestSnapshotList(test_objects._LocalTest):
@mock.patch('cinder.db.snapshot_metadata_get', return_value={})
self.assertEqual(1, len(snapshots))
TestSnapshot._compare(self, fake_snapshot, snapshots[0])
@mock.patch('cinder.db.snapshot_metadata_get', return_value={})
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.db.snapshot_get_by_host',
            return_value=[fake_snapshot])
def test_get_by_host(self, get_by_host, volume_get_by_id,
                     snapshot_metadata_get):
    """get_by_host wraps the db rows for a host into Snapshot objects."""
    volume_get_by_id.return_value = fake_volume.fake_volume_obj(
        self.context)

    snapshots = snapshot_obj.SnapshotList.get_by_host(
        self.context, 'fake-host')
    self.assertEqual(1, len(snapshots))
    TestSnapshot._compare(self, fake_snapshot, snapshots[0])
@mock.patch('cinder.db.snapshot_metadata_get', return_value={})
@mock.patch('cinder.objects.volume.Volume.get_by_id')
@mock.patch('cinder.db.snapshot_get_all_by_project',
self.context, fake_volume_obj.id)
self.assertEqual(1, len(snapshots))
TestSnapshot._compare(self, fake_snapshot, snapshots[0])
+
@mock.patch('cinder.db.snapshot_metadata_get', return_value={})
@mock.patch('cinder.objects.volume.Volume.get_by_id')
@mock.patch('cinder.db.snapshot_get_active_by_window',
            return_value=[fake_snapshot])
def test_get_active_by_window(self, get_active_by_window,
                              volume_get_by_id, snapshot_metadata_get):
    """get_active_by_window wraps the db rows into Snapshot objects."""
    volume_get_by_id.return_value = fake_volume.fake_volume_obj(
        self.context)

    snapshots = snapshot_obj.SnapshotList.get_active_by_window(
        self.context, mock.sentinel.begin, mock.sentinel.end)
    self.assertEqual(1, len(snapshots))
    TestSnapshot._compare(self, fake_snapshot, snapshots[0])
+
@mock.patch('cinder.db.snapshot_metadata_get', return_value={})
@mock.patch('cinder.objects.volume.Volume.get_by_id')
@mock.patch('cinder.db.snapshot_get_all_for_cgsnapshot',
            return_value=[fake_snapshot])
def test_get_all_for_cgsnapshot(self, get_all_for_cgsnapshot,
                                volume_get_by_id, snapshot_metadata_get):
    """get_all_for_cgsnapshot wraps the db rows into Snapshot objects."""
    volume_get_by_id.return_value = fake_volume.fake_volume_obj(
        self.context)

    snapshots = snapshot_obj.SnapshotList.get_all_for_cgsnapshot(
        self.context, mock.sentinel.cgsnapshot_id)
    self.assertEqual(1, len(snapshots))
    TestSnapshot._compare(self, fake_snapshot, snapshots[0])
extra_usage_info=local_extra_info_delete)
@mock.patch('cinder.volume.utils.notify_about_snapshot_usage')
- @mock.patch('cinder.db.snapshot_get_active_by_window')
+ @mock.patch('cinder.objects.snapshot.SnapshotList.get_active_by_window')
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.db.volume_get_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
extra_usage_info=local_extra_info_delete)
@mock.patch('cinder.volume.utils.notify_about_snapshot_usage')
- @mock.patch('cinder.db.snapshot_get_active_by_window')
+ @mock.patch('cinder.objects.snapshot.SnapshotList.get_active_by_window')
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.db.volume_get_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
'consistency group.'), volume['id'])
raise exception.InvalidVolume(reason=msg)
- snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
+ snapshots = objects.SnapshotList.get_all_for_volume(context,
+ volume_id)
if len(snapshots):
LOG.info(_LI('Unable to delete volume: %s, '
'volume currently has snapshots.'), volume['id'])
try:
for options in options_list:
- snapshot = self.db.snapshot_create(context, options)
+ snapshot = objects.Snapshot(context=context, **options)
+ snapshot.create()
snapshot_list.append(snapshot)
QUOTAS.commit(context, reservations)
with excutils.save_and_reraise_exception():
try:
for snap in snapshot_list:
    # Destroy the loop variable, not the outer-scope 'snapshot':
    # calling snapshot.destroy() here would repeatedly destroy only the
    # last-created snapshot and leak all the others on rollback.
    snap.destroy()
finally:
QUOTAS.rollback(context, reservations)
raise exception.InvalidVolume(reason=msg)
# We only handle volumes without snapshots for now
- snaps = self.db.snapshot_get_all_for_volume(context, volume['id'])
+ snaps = objects.SnapshotList.get_all_for_volume(context, volume['id'])
if snaps:
msg = _("Volume %s must not have snapshots.") % volume['id']
LOG.error(msg)
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LW
+from cinder import objects
from cinder import policy
from cinder import quota
from cinder import utils
# If snapshot_id is set, make the call create volume directly to
# the volume host where the snapshot resides instead of passing it
# through the scheduler. So snapshot can be copy to new volume.
- snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+ snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
source_volume_ref = self.db.volume_get(context,
- snapshot_ref['volume_id'])
+ snapshot.volume_id)
host = source_volume_ref['host']
elif source_volid:
source_volume_ref = self.db.volume_get(context, source_volid)
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import glance
from cinder import manager
+from cinder import objects
from cinder.openstack.common import periodic_task
from cinder import quota
from cinder import utils
{'status': 'error'})
else:
pass
-
- snapshots = self.db.snapshot_get_by_host(ctxt,
- self.host,
- {'status': 'creating'})
+ snapshots = objects.SnapshotList.get_by_host(
+ ctxt, self.host, {'status': 'creating'})
for snapshot in snapshots:
LOG.warning(_LW("Detected snapshot stuck in creating "
"status, setting to ERROR."), resource=snapshot)
- self.db.snapshot_update(ctxt,
- snapshot['id'],
- {'status': 'error'})
+ snapshot.status = 'error'
+ snapshot.save()
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
- snapshots = self.db.snapshot_get_all_for_cgsnapshot(
+ snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot['id'])
if snapshots:
for snapshot in snapshots:
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
- snaps = self.db.snapshot_get_all_for_volume(context,
- volume_ref['id'])
+ snaps = objects.SnapshotList.get_all_for_volume(context,
+ volume_ref['id'])
if snaps:
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
'id': group_ref['id']})
raise
if cgsnapshot:
- snapshots = self.db.snapshot_get_all_for_cgsnapshot(
+ snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot_id)
for snap in snapshots:
- if (snap['status'] not in
+ if (snap.status not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
def _update_volume_from_src(self, context, vol, update, group_id=None):
try:
- snapshot_ref = self.db.snapshot_get(context,
- vol['snapshot_id'])
+ snapshot = objects.Snapshot.get_by_id(context, vol['snapshot_id'])
orig_vref = self.db.volume_get(context,
- snapshot_ref['volume_id'])
+ snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
- {'volume_id': snapshot_ref['volume_id']})
+ {'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot_ref['id'])
- snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
- cgsnapshot_id)
+ snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
+ context, cgsnapshot_id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "create.start")
# but it is not a requirement for all drivers.
cgsnapshot_ref['context'] = caller_context
for snapshot in snapshots:
- snapshot['context'] = caller_context
+ snapshot.context = caller_context
model_update, snapshots = \
self.driver.create_cgsnapshot(context, cgsnapshot_ref)
# Update db if status is error
if snapshot['status'] == 'error':
update = {'status': snapshot['status']}
+
+ # TODO(thangp): Switch over to use snapshot.update()
+ # after cgsnapshot has been switched over to objects
self.db.snapshot_update(context, snapshot['id'],
update)
# If status for one snapshot is error, make sure
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
- context, snapshot['id'], volume_id)
+ context, snapshot_id, volume_id)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
+
+ # TODO(thangp): Switch over to use snapshot.update()
+ # after cgsnapshot has been switched over to objects
self.db.snapshot_update(context,
- snapshot['id'],
+ snapshot_id,
{'status': 'error'})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot_ref['id'])
- snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
- cgsnapshot_id)
+ snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
+ context, cgsnapshot_id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "delete.start")
if snapshots:
for snapshot in snapshots:
update = {'status': snapshot['status']}
+
+ # TODO(thangp): Switch over to use snapshot.update()
+ # after cgsnapshot has been switched over to objects
self.db.snapshot_update(context, snapshot['id'],
update)
if snapshot['status'] in ['error_deleting', 'error'] and \
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
+
+ # TODO(thangp): Switch over to use snapshot.destroy()
+ # after cgsnapshot has been switched over to objects
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations