review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Complete switch to snapshot objects
author Thang Pham <thang.g.pham@gmail.com>
Thu, 12 Mar 2015 17:18:05 +0000 (13:18 -0400)
committer rick.chen <rick.chen@prophetstor.com>
Fri, 5 Jun 2015 03:59:39 +0000 (11:59 +0800)
The following patch switches the remainder of cinder internals
to use snapshot objects instead of direct db calls.  Note that
db calls within cinder drivers were not switched over.  That
work is left to the driver maintainers themselves, so that
they can properly test the changes on their hardware.
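
(An illustrative sketch, not part of the patch: the conversion
pattern this change applies throughout, mirroring the
admin_actions.py hunk below; it assumes an existing request
context and snapshot id.)

    from cinder import objects

    # Before: direct db call
    #     db.snapshot_update(context, snapshot_id, {'status': 'error'})
    # After: load the versioned Snapshot object, apply the fields,
    # and persist the change.
    snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
    snapshot.update({'status': 'error'})
    snapshot.save()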

Also, note that there are three occurrences of
db.snapshot_update and one of db.snapshot_destroy left in
cinder/volume/manager.py.  This is intentional because
driver.create_cgsnapshot and driver.delete_cgsnapshot return
a list of snapshot dicts.  Each driver needs to switch its
occurrences of db.snapshot_get_all_for_cgsnapshot() over to
SnapshotList.get_all_for_cgsnapshot() in create_cgsnapshot
and delete_cgsnapshot.  Once each driver has done so, a
follow-up patch can remove db.snapshot_update and
db.snapshot_destroy from cinder/volume/manager.py.  Bugs are
filed to track these fixes -
https://bugs.launchpad.net/cinder/+bugs?field.tag=
cgsnapshot-objects.
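
(A hedged sketch of the driver-side switch described above;
ExampleDriver and its method body are hypothetical placeholders,
not an actual cinder driver.)

    from cinder import objects

    class ExampleDriver(object):
        def delete_cgsnapshot(self, context, cgsnapshot):
            # Old pattern queried the db layer directly:
            #     self.db.snapshot_get_all_for_cgsnapshot(
            #         context, cgsnapshot['id'])
            snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
                context, cgsnapshot['id'])
            for snapshot in snapshots:
                # driver-specific cleanup would go here
                snapshot.status = 'deleted'
            # the manager still persists these statuses via
            # db.snapshot_update/db.snapshot_destroy for now
            return {'status': 'deleted'}, snapshots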

Change-Id: I64004ac404f67eecee51361dc8edd3f149e1b987
Partial-Implements: blueprint cinder-objects

14 files changed:
cinder/api/contrib/admin_actions.py
cinder/api/contrib/hosts.py
cinder/api/contrib/snapshot_actions.py
cinder/cmd/volume_usage_audit.py
cinder/consistencygroup/api.py
cinder/objects/snapshot.py
cinder/tests/unit/api/contrib/test_admin_actions.py
cinder/tests/unit/api/contrib/test_snapshot_actions.py
cinder/tests/unit/api/contrib/test_volume_unmanage.py
cinder/tests/unit/objects/test_snapshot.py
cinder/tests/unit/test_cmd.py
cinder/volume/api.py
cinder/volume/flows/api/create_volume.py
cinder/volume/manager.py

diff --git a/cinder/api/contrib/admin_actions.py b/cinder/api/contrib/admin_actions.py
index 5c51646d7f63fbf4514f4d5b5ee1b2236c87c802..18bbabd461b7b0a1b623c4a96f597025d8783f08 100644 (file)
@@ -24,6 +24,7 @@ from cinder import backup
 from cinder import db
 from cinder import exception
 from cinder.i18n import _
+from cinder import objects
 from cinder import rpc
 from cinder import volume
 
@@ -265,7 +266,12 @@ class SnapshotAdminController(AdminController):
     collection = 'snapshots'
 
     def _update(self, *args, **kwargs):
-        db.snapshot_update(*args, **kwargs)
+        context = args[0]
+        snapshot_id = args[1]
+        fields = args[2]
+        snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
+        snapshot.update(fields)
+        snapshot.save()
 
     def _get(self, *args, **kwargs):
         return self.volume_api.get_snapshot(*args, **kwargs)
diff --git a/cinder/api/contrib/hosts.py b/cinder/api/contrib/hosts.py
index d26868905f75d5c713730b312f8d317ba9394678..ed529e2fd8e2e50fc55784bf4cdbd05e8428d7da 100644 (file)
@@ -28,6 +28,7 @@ from cinder.api import xmlutil
 from cinder import db
 from cinder import exception
 from cinder.i18n import _, _LI
+from cinder import objects
 from cinder import utils
 from cinder.volume import api as volume_api
 
@@ -232,9 +233,9 @@ class HostController(wsgi.Controller):
         project_ids = list(set(project_ids))
         for project_id in project_ids:
             (count, sum) = db.volume_data_get_for_project(context, project_id)
-            (snap_count, snap_sum) = db.snapshot_data_get_for_project(
-                context,
-                project_id)
+            (snap_count, snap_sum) = (
+                objects.Snapshot.snapshot_data_get_for_project(context,
+                                                               project_id))
             resources.append(
                 {'resource':
                     {'host': host,
diff --git a/cinder/api/contrib/snapshot_actions.py b/cinder/api/contrib/snapshot_actions.py
index 4ce889f3f29af582cc40b798521c6bc4b5133210..6319c16e27f55127581b158f63e5c7b05f9cbae6 100644 (file)
@@ -17,8 +17,8 @@ import webob
 
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
-from cinder import db
 from cinder.i18n import _, _LI
+from cinder import objects
 
 LOG = logging.getLogger(__name__)
 
@@ -56,19 +56,19 @@ class SnapshotActionsController(wsgi.Controller):
         status_map = {'creating': ['creating', 'available', 'error'],
                       'deleting': ['deleting', 'error_deleting']}
 
-        current_snapshot = db.snapshot_get(context, id)
+        current_snapshot = objects.Snapshot.get_by_id(context, id)
 
-        if current_snapshot['status'] not in status_map:
+        if current_snapshot.status not in status_map:
             msg = _("Snapshot status %(cur)s not allowed for "
                     "update_snapshot_status") % {
-                        'cur': current_snapshot['status']}
+                        'cur': current_snapshot.status}
             raise webob.exc.HTTPBadRequest(explanation=msg)
 
-        if status not in status_map[current_snapshot['status']]:
+        if status not in status_map[current_snapshot.status]:
             msg = _("Provided snapshot status %(provided)s not allowed for "
                     "snapshot with status %(current)s.") % \
                 {'provided': status,
-                 'current': current_snapshot['status']}
+                 'current': current_snapshot.status}
             raise webob.exc.HTTPBadRequest(explanation=msg)
 
         update_dict = {'id': id,
@@ -90,7 +90,8 @@ class SnapshotActionsController(wsgi.Controller):
         LOG.info(_LI("Updating snapshot %(id)s with info %(dict)s"),
                  {'id': id, 'dict': update_dict})
 
-        db.snapshot_update(context, id, update_dict)
+        current_snapshot.update(update_dict)
+        current_snapshot.save()
         return webob.Response(status_int=202)
 
 
diff --git a/cinder/cmd/volume_usage_audit.py b/cinder/cmd/volume_usage_audit.py
index 7caebfd3e0a6decd44ee17b0b70b33b9a182ea74..38d1fc7d30182f621cda44941e0fb60436017397 100644 (file)
@@ -48,6 +48,7 @@ i18n.enable_lazy()
 from cinder import context
 from cinder import db
 from cinder.i18n import _, _LE
+from cinder import objects
 from cinder import rpc
 from cinder import utils
 from cinder import version
@@ -106,7 +107,7 @@ def main():
     volumes = db.volume_get_active_by_window(admin_context,
                                              begin,
                                              end)
     LOG.debug("Found %d volumes", len(volumes))
     for volume_ref in volumes:
         try:
             LOG.debug("Send exists notification for <volume_id: "
@@ -175,10 +176,9 @@ def main():
                 LOG.exception(_LE("Delete volume notification failed: %s"),
                               exc_msg, resource=volume_ref)
 
-    snapshots = db.snapshot_get_active_by_window(admin_context,
-                                                 begin,
-                                                 end)
+    snapshots = objects.SnapshotList.get_active_by_window(admin_context,
+                                                          begin, end)
     LOG.debug("Found %d snapshots", len(snapshots))
     for snapshot_ref in snapshots:
         try:
             LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
diff --git a/cinder/consistencygroup/api.py b/cinder/consistencygroup/api.py
index 6cba8000cadbee94cd4e32cda01ebde04d4bce23..72ebf00222a0177ec87710cae3fd7e4a512e3d6c 100644 (file)
@@ -28,6 +28,7 @@ from oslo_utils import timeutils
 from cinder.db import base
 from cinder import exception
 from cinder.i18n import _, _LE, _LW
+from cinder import objects
 import cinder.policy
 from cinder import quota
 from cinder.scheduler import rpcapi as scheduler_rpcapi
@@ -207,7 +208,7 @@ class API(base.Base):
 
     def _create_cg_from_cgsnapshot(self, context, group, cgsnapshot):
         try:
-            snapshots = self.db.snapshot_get_all_for_cgsnapshot(
+            snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
                 context, cgsnapshot['id'])
 
             if not snapshots:
@@ -380,8 +381,8 @@ class API(base.Base):
                 LOG.error(msg)
                 raise exception.InvalidConsistencyGroup(reason=msg)
 
-            snapshots = self.db.snapshot_get_all_for_volume(context,
-                                                            volume['id'])
+            snapshots = objects.SnapshotList.get_all_for_volume(context,
+                                                                volume['id'])
             if snapshots:
                 msg = _("Volume in consistency group still has "
                         "dependent snapshots.")
diff --git a/cinder/objects/snapshot.py b/cinder/objects/snapshot.py
index d0ce81f61031037b9570da0806b73abdb4fe02ff..d266a756a7dedebbb41d1f39e861c3683ccbe03e 100644 (file)
@@ -192,6 +192,12 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
         if not md_was_changed:
             self.obj_reset_changes(['metadata'])
 
+    @base.remotable_classmethod
+    def snapshot_data_get_for_project(cls, context, project_id,
+                                      volume_type_id=None):
+        return db.snapshot_data_get_for_project(context, project_id,
+                                                volume_type_id)
+
 
 @base.CinderObjectRegistry.register
 class SnapshotList(base.ObjectListBase, base.CinderObject):
@@ -211,6 +217,12 @@ class SnapshotList(base.ObjectListBase, base.CinderObject):
                                   snapshots,
                                   expected_attrs=['metadata'])
 
+    @base.remotable_classmethod
+    def get_by_host(cls, context, host, filters=None):
+        snapshots = db.snapshot_get_by_host(context, host, filters)
+        return base.obj_make_list(context, cls(context), objects.Snapshot,
+                                  snapshots, expected_attrs=['metadata'])
+
     @base.remotable_classmethod
     def get_all_by_project(cls, context, project_id):
         snapshots = db.snapshot_get_all_by_project(context, project_id)
@@ -222,3 +234,15 @@ class SnapshotList(base.ObjectListBase, base.CinderObject):
         snapshots = db.snapshot_get_all_for_volume(context, volume_id)
         return base.obj_make_list(context, cls(context), objects.Snapshot,
                                   snapshots, expected_attrs=['metadata'])
+
+    @base.remotable_classmethod
+    def get_active_by_window(cls, context, begin, end):
+        snapshots = db.snapshot_get_active_by_window(context, begin, end)
+        return base.obj_make_list(context, cls(context), objects.Snapshot,
+                                  snapshots, expected_attrs=['metadata'])
+
+    @base.remotable_classmethod
+    def get_all_for_cgsnapshot(cls, context, cgsnapshot_id):
+        snapshots = db.snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id)
+        return base.obj_make_list(context, cls(context), objects.Snapshot,
+                                  snapshots, expected_attrs=['metadata'])
diff --git a/cinder/tests/unit/api/contrib/test_admin_actions.py b/cinder/tests/unit/api/contrib/test_admin_actions.py
index 71aece524838d826644fd3c75b50e53b0615359c..0d6034c15284c324c652613c311afd44779bf058 100644 (file)
@@ -28,6 +28,7 @@ from cinder.brick.local_dev import lvm as brick_lvm
 from cinder import context
 from cinder import db
 from cinder import exception
+from cinder import objects
 from cinder import test
 from cinder.tests.unit.api import fakes
 from cinder.tests.unit.api.v2 import stubs
@@ -313,17 +314,29 @@ class AdminActionsTest(test.TestCase):
     def test_snapshot_reset_status(self):
         ctx = context.RequestContext('admin', 'fake', True)
         volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
-                                        'provider_location': '', 'size': 1})
-        snapshot = db.snapshot_create(ctx, {'status': 'error_deleting',
-                                            'volume_id': volume['id']})
+                                        'provider_location': '', 'size': 1,
+                                        'availability_zone': 'test',
+                                        'attach_status': 'detached'})
+        kwargs = {
+            'volume_id': volume['id'],
+            'cgsnapshot_id': None,
+            'user_id': ctx.user_id,
+            'project_id': ctx.project_id,
+            'status': 'error_deleting',
+            'progress': '0%',
+            'volume_size': volume['size'],
+            'metadata': {}
+        }
+        snapshot = objects.Snapshot(context=ctx, **kwargs)
+        snapshot.create()
 
         resp = self._issue_snapshot_reset(ctx,
                                           snapshot,
                                           {'status': 'error'})
 
         self.assertEqual(resp.status_int, 202)
-        snapshot = db.snapshot_get(ctx, snapshot['id'])
-        self.assertEqual(snapshot['status'], 'error')
+        snapshot = objects.Snapshot.get_by_id(ctx, snapshot['id'])
+        self.assertEqual(snapshot.status, 'error')
 
     def test_invalid_status_for_snapshot(self):
         ctx = context.RequestContext('admin', 'fake', True)
diff --git a/cinder/tests/unit/api/contrib/test_snapshot_actions.py b/cinder/tests/unit/api/contrib/test_snapshot_actions.py
index be2592fe5a75bb78c46a22b944353a260ec3ea85..6f49fb6152039140145fd882f1b70bc31503dee7 100644 (file)
@@ -12,6 +12,7 @@
 #   License for the specific language governing permissions and limitations
 #   under the License.
 
+import mock
 from oslo_serialization import jsonutils
 import webob
 
@@ -26,7 +27,8 @@ class SnapshotActionsTest(test.TestCase):
     def setUp(self):
         super(SnapshotActionsTest, self).setUp()
 
-    def test_update_snapshot_status(self):
+    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
+    def test_update_snapshot_status(self, metadata_get):
         self.stubs.Set(db, 'snapshot_get', stub_snapshot_get)
         self.stubs.Set(db, 'snapshot_update', stub_snapshot_update)
 
@@ -39,7 +41,8 @@ class SnapshotActionsTest(test.TestCase):
         res = req.get_response(fakes.wsgi_app())
         self.assertEqual(res.status_int, 202)
 
-    def test_update_snapshot_status_invalid_status(self):
+    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
+    def test_update_snapshot_status_invalid_status(self, metadata_get):
         self.stubs.Set(db, 'snapshot_get', stub_snapshot_get)
         body = {'os-update_snapshot_status': {'status': 'in-use'}}
         req = webob.Request.blank('/v2/fake/snapshots/1/action')
diff --git a/cinder/tests/unit/api/contrib/test_volume_unmanage.py b/cinder/tests/unit/api/contrib/test_volume_unmanage.py
index fcbc856a4d1521383069ff4f754beb36d9987f72..a7104ee80596febb4db33bd538ab99c5238282c8 100644 (file)
@@ -20,6 +20,7 @@ from cinder import context
 from cinder import exception
 from cinder import test
 from cinder.tests.unit.api import fakes
+from cinder.tests.unit import fake_snapshot
 
 
 # This list of fake volumes is used by our tests.  Each is configured in a
@@ -90,7 +91,9 @@ def db_snapshot_get_all_for_volume(context, volume_id):
     inspect the contents.
     """
     if volume_id == snapshot_vol_id:
-        return ['fake_snapshot']
+        db_snapshot = {'volume_id': volume_id}
+        snapshot = fake_snapshot.fake_db_snapshot(**db_snapshot)
+        return [snapshot]
     return []
 
 
@@ -155,7 +158,8 @@ class VolumeUnmanageTest(test.TestCase):
         res = self._get_resp(attached_vol_id)
         self.assertEqual(res.status_int, 400, res)
 
-    def test_unmanage_volume_with_snapshots(self):
+    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
+    def test_unmanage_volume_with_snapshots(self, metadata_get):
         """Return 400 if the volume exists but has snapshots."""
         res = self._get_resp(snapshot_vol_id)
         self.assertEqual(res.status_int, 400, res)
diff --git a/cinder/tests/unit/objects/test_snapshot.py b/cinder/tests/unit/objects/test_snapshot.py
index e1a76e7f015f7bd50da54ac15ea62406dc8bf394..899da03ab70d501ed9f27adff7504c86937fd576 100644 (file)
@@ -125,6 +125,18 @@ class TestSnapshot(test_objects._LocalTest):
         volume_get_by_id.assert_called_once_with(self.context,
                                                  snapshot.volume_id)
 
+    @mock.patch('cinder.db.snapshot_data_get_for_project')
+    def test_snapshot_data_get_for_project(self, snapshot_data_get):
+        snapshot = snapshot_obj.Snapshot._from_db_object(
+            self.context, snapshot_obj.Snapshot(), fake_snapshot)
+        volume_type_id = mock.sentinel.volume_type_id
+        snapshot.snapshot_data_get_for_project(self.context,
+                                               self.project_id,
+                                               volume_type_id)
+        snapshot_data_get.assert_called_once_with(self.context,
+                                                  self.project_id,
+                                                  volume_type_id)
+
 
 class TestSnapshotList(test_objects._LocalTest):
     @mock.patch('cinder.db.snapshot_metadata_get', return_value={})
@@ -139,6 +151,20 @@ class TestSnapshotList(test_objects._LocalTest):
         self.assertEqual(1, len(snapshots))
         TestSnapshot._compare(self, fake_snapshot, snapshots[0])
 
+    @mock.patch('cinder.db.snapshot_metadata_get', return_value={})
+    @mock.patch('cinder.objects.Volume.get_by_id')
+    @mock.patch('cinder.db.snapshot_get_by_host',
+                return_value=[fake_snapshot])
+    def test_get_by_host(self, get_by_host, volume_get_by_id,
+                         snapshot_metadata_get):
+        fake_volume_obj = fake_volume.fake_volume_obj(self.context)
+        volume_get_by_id.return_value = fake_volume_obj
+
+        snapshots = snapshot_obj.SnapshotList.get_by_host(
+            self.context, 'fake-host')
+        self.assertEqual(1, len(snapshots))
+        TestSnapshot._compare(self, fake_snapshot, snapshots[0])
+
     @mock.patch('cinder.db.snapshot_metadata_get', return_value={})
     @mock.patch('cinder.objects.volume.Volume.get_by_id')
     @mock.patch('cinder.db.snapshot_get_all_by_project',
@@ -166,3 +192,31 @@ class TestSnapshotList(test_objects._LocalTest):
             self.context, fake_volume_obj.id)
         self.assertEqual(1, len(snapshots))
         TestSnapshot._compare(self, fake_snapshot, snapshots[0])
+
+    @mock.patch('cinder.db.snapshot_metadata_get', return_value={})
+    @mock.patch('cinder.objects.volume.Volume.get_by_id')
+    @mock.patch('cinder.db.snapshot_get_active_by_window',
+                return_value=[fake_snapshot])
+    def test_get_active_by_window(self, get_active_by_window,
+                                  volume_get_by_id, snapshot_metadata_get):
+        fake_volume_obj = fake_volume.fake_volume_obj(self.context)
+        volume_get_by_id.return_value = fake_volume_obj
+
+        snapshots = snapshot_obj.SnapshotList.get_active_by_window(
+            self.context, mock.sentinel.begin, mock.sentinel.end)
+        self.assertEqual(1, len(snapshots))
+        TestSnapshot._compare(self, fake_snapshot, snapshots[0])
+
+    @mock.patch('cinder.db.snapshot_metadata_get', return_value={})
+    @mock.patch('cinder.objects.volume.Volume.get_by_id')
+    @mock.patch('cinder.db.snapshot_get_all_for_cgsnapshot',
+                return_value=[fake_snapshot])
+    def test_get_all_for_cgsnapshot(self, get_all_for_cgsnapshot,
+                                    volume_get_by_id, snapshot_metadata_get):
+        fake_volume_obj = fake_volume.fake_volume_obj(self.context)
+        volume_get_by_id.return_value = fake_volume_obj
+
+        snapshots = snapshot_obj.SnapshotList.get_all_for_cgsnapshot(
+            self.context, mock.sentinel.cgsnapshot_id)
+        self.assertEqual(1, len(snapshots))
+        TestSnapshot._compare(self, fake_snapshot, snapshots[0])
diff --git a/cinder/tests/unit/test_cmd.py b/cinder/tests/unit/test_cmd.py
index 7a6e31282ebe3ae6997e2edeae69dcbe697d6979..feea1b32281b2be36224a9461742b487f9e01257 100644 (file)
@@ -1293,7 +1293,7 @@ class TestCinderVolumeUsageAuditCmd(test.TestCase):
             extra_usage_info=local_extra_info_delete)
 
     @mock.patch('cinder.volume.utils.notify_about_snapshot_usage')
-    @mock.patch('cinder.db.snapshot_get_active_by_window')
+    @mock.patch('cinder.objects.snapshot.SnapshotList.get_active_by_window')
     @mock.patch('cinder.volume.utils.notify_about_volume_usage')
     @mock.patch('cinder.db.volume_get_active_by_window')
     @mock.patch('cinder.utils.last_completed_audit_period')
@@ -1366,7 +1366,7 @@ class TestCinderVolumeUsageAuditCmd(test.TestCase):
             extra_usage_info=local_extra_info_delete)
 
     @mock.patch('cinder.volume.utils.notify_about_snapshot_usage')
-    @mock.patch('cinder.db.snapshot_get_active_by_window')
+    @mock.patch('cinder.objects.snapshot.SnapshotList.get_active_by_window')
     @mock.patch('cinder.volume.utils.notify_about_volume_usage')
     @mock.patch('cinder.db.volume_get_active_by_window')
     @mock.patch('cinder.utils.last_completed_audit_period')
diff --git a/cinder/volume/api.py b/cinder/volume/api.py
index c4b470c244d7789b283b6e1da6c09b73811755b6..f0c17367648a824c8e3bb08d407bcc78ff490a26 100644 (file)
@@ -371,7 +371,8 @@ class API(base.Base):
                          'consistency group.'), volume['id'])
             raise exception.InvalidVolume(reason=msg)
 
-        snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
+        snapshots = objects.SnapshotList.get_all_for_volume(context,
+                                                            volume_id)
         if len(snapshots):
             LOG.info(_LI('Unable to delete volume: %s, '
                          'volume currently has snapshots.'), volume['id'])
@@ -781,7 +782,8 @@ class API(base.Base):
 
         try:
             for options in options_list:
-                snapshot = self.db.snapshot_create(context, options)
+                snapshot = objects.Snapshot(context=context, **options)
+                snapshot.create()
                 snapshot_list.append(snapshot)
 
             QUOTAS.commit(context, reservations)
@@ -789,7 +791,7 @@ class API(base.Base):
             with excutils.save_and_reraise_exception():
                 try:
                     for snap in snapshot_list:
-                        self.db.snapshot_destroy(context, snap['id'])
+                        snap.destroy()
                 finally:
                     QUOTAS.rollback(context, reservations)
 
@@ -1240,7 +1242,7 @@ class API(base.Base):
             raise exception.InvalidVolume(reason=msg)
 
         # We only handle volumes without snapshots for now
-        snaps = self.db.snapshot_get_all_for_volume(context, volume['id'])
+        snaps = objects.SnapshotList.get_all_for_volume(context, volume['id'])
         if snaps:
             msg = _("Volume %s must not have snapshots.") % volume['id']
             LOG.error(msg)
diff --git a/cinder/volume/flows/api/create_volume.py b/cinder/volume/flows/api/create_volume.py
index 79852531104f7d251e96cff56a5f7e1ead0f1dea..3be0251b34bd49b6ae3cffd9f27eef8849119b02 100644 (file)
@@ -22,6 +22,7 @@ from taskflow.types import failure as ft
 from cinder import exception
 from cinder import flow_utils
 from cinder.i18n import _, _LE, _LW
+from cinder import objects
 from cinder import policy
 from cinder import quota
 from cinder import utils
@@ -722,9 +723,9 @@ class VolumeCastTask(flow_utils.CinderTask):
             # If snapshot_id is set, make the call create volume directly to
             # the volume host where the snapshot resides instead of passing it
             # through the scheduler. So snapshot can be copy to new volume.
-            snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
             source_volume_ref = self.db.volume_get(context,
-                                                   snapshot_ref['volume_id'])
+                                                   snapshot.volume_id)
             host = source_volume_ref['host']
         elif source_volid:
             source_volume_ref = self.db.volume_get(context, source_volid)
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 8f563a82dc127d51b2d2afc23681bf1bc6faaf40..7c201824add2044fa61d45fb022be54e100c1fc7 100644 (file)
@@ -58,6 +58,7 @@ from cinder import flow_utils
 from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import glance
 from cinder import manager
+from cinder import objects
 from cinder.openstack.common import periodic_task
 from cinder import quota
 from cinder import utils
@@ -344,16 +345,13 @@ class VolumeManager(manager.SchedulerDependentManager):
                                           {'status': 'error'})
                 else:
                     pass
-
-            snapshots = self.db.snapshot_get_by_host(ctxt,
-                                                     self.host,
-                                                     {'status': 'creating'})
+            snapshots = objects.SnapshotList.get_by_host(
+                ctxt, self.host, {'status': 'creating'})
             for snapshot in snapshots:
                 LOG.warning(_LW("Detected snapshot stuck in creating "
                             "status, setting to ERROR."), resource=snapshot)
-                self.db.snapshot_update(ctxt,
-                                        snapshot['id'],
-                                        {'status': 'error'})
+                snapshot.status = 'error'
+                snapshot.save()
         except Exception:
             LOG.exception(_LE("Error during re-export on driver init."),
                           resource=volume)
@@ -1542,7 +1540,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             extra_usage_info=extra_usage_info, host=self.host)
 
         if not snapshots:
-            snapshots = self.db.snapshot_get_all_for_cgsnapshot(
+            snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
                 context, cgsnapshot['id'])
         if snapshots:
             for snapshot in snapshots:
@@ -1707,8 +1705,8 @@ class VolumeManager(manager.SchedulerDependentManager):
                 msg = _("Retype requires migration but is not allowed.")
                 raise exception.VolumeMigrationFailed(reason=msg)
 
-            snaps = self.db.snapshot_get_all_for_volume(context,
-                                                        volume_ref['id'])
+            snaps = objects.SnapshotList.get_all_for_volume(context,
+                                                            volume_ref['id'])
             if snaps:
                 _retype_error(context, volume_id, old_reservations,
                               new_reservations, status_update)
@@ -1944,10 +1942,10 @@ class VolumeManager(manager.SchedulerDependentManager):
                                         'id': group_ref['id']})
                     raise
                 if cgsnapshot:
-                    snapshots = self.db.snapshot_get_all_for_cgsnapshot(
+                    snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
                         context, cgsnapshot_id)
                     for snap in snapshots:
-                        if (snap['status'] not in
+                        if (snap.status not in
                                 VALID_CREATE_CG_SRC_SNAP_STATUS):
                             msg = (_("Cannot create consistency group "
                                      "%(group)s because snapshot %(snap)s is "
@@ -2043,10 +2041,9 @@ class VolumeManager(manager.SchedulerDependentManager):
 
     def _update_volume_from_src(self, context, vol, update, group_id=None):
         try:
-            snapshot_ref = self.db.snapshot_get(context,
-                                                vol['snapshot_id'])
+            snapshot = objects.Snapshot.get_by_id(context, vol['snapshot_id'])
             orig_vref = self.db.volume_get(context,
-                                           snapshot_ref['volume_id'])
+                                           snapshot.volume_id)
             if orig_vref.bootable:
                 update['bootable'] = True
                 self.db.volume_glance_metadata_copy_to_volume(
@@ -2063,7 +2060,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         except exception.VolumeNotFound:
             LOG.error(_LE("The source volume %(volume_id)s "
                           "cannot be found."),
-                      {'volume_id': snapshot_ref['volume_id']})
+                      {'volume_id': snapshot.volume_id})
             self.db.volume_update(context, vol['id'],
                                   {'status': 'error'})
             if group_id:
@@ -2367,8 +2364,8 @@ class VolumeManager(manager.SchedulerDependentManager):
         cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
         LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot_ref['id'])
 
-        snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
-                                                            cgsnapshot_id)
+        snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
+            context, cgsnapshot_id)
 
         self._notify_about_cgsnapshot_usage(
             context, cgsnapshot_ref, "create.start")
@@ -2383,7 +2380,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             # but it is not a requirement for all drivers.
             cgsnapshot_ref['context'] = caller_context
             for snapshot in snapshots:
-                snapshot['context'] = caller_context
+                snapshot.context = caller_context
 
             model_update, snapshots = \
                 self.driver.create_cgsnapshot(context, cgsnapshot_ref)
@@ -2393,6 +2390,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                     # Update db if status is error
                     if snapshot['status'] == 'error':
                         update = {'status': snapshot['status']}
+
+                        # TODO(thangp): Switch over to use snapshot.update()
+                        # after cgsnapshot has been switched over to objects
                         self.db.snapshot_update(context, snapshot['id'],
                                                 update)
                         # If status for one snapshot is error, make sure
@@ -2420,15 +2420,18 @@ class VolumeManager(manager.SchedulerDependentManager):
             if vol_ref.bootable:
                 try:
                     self.db.volume_glance_metadata_copy_to_snapshot(
-                        context, snapshot['id'], volume_id)
+                        context, snapshot_id, volume_id)
                 except exception.CinderException as ex:
                     LOG.error(_LE("Failed updating %(snapshot_id)s"
                                   " metadata using the provided volumes"
                                   " %(volume_id)s metadata"),
                               {'volume_id': volume_id,
                                'snapshot_id': snapshot_id})
+
+                    # TODO(thangp): Switch over to use snapshot.update()
+                    # after cgsnapshot has been switched over to objects
                     self.db.snapshot_update(context,
-                                            snapshot['id'],
+                                            snapshot_id,
                                             {'status': 'error'})
                     raise exception.MetadataCopyFailure(
                         reason=six.text_type(ex))
@@ -2456,8 +2459,8 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot_ref['id'])
 
-        snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
-                                                            cgsnapshot_id)
+        snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
+            context, cgsnapshot_id)
 
         self._notify_about_cgsnapshot_usage(
             context, cgsnapshot_ref, "delete.start")
@@ -2480,6 +2483,9 @@ class VolumeManager(manager.SchedulerDependentManager):
             if snapshots:
                 for snapshot in snapshots:
                     update = {'status': snapshot['status']}
+
+                    # TODO(thangp): Switch over to use snapshot.update()
+                    # after cgsnapshot has been switched over to objects
                     self.db.snapshot_update(context, snapshot['id'],
                                             update)
                     if snapshot['status'] in ['error_deleting', 'error'] and \
@@ -2527,6 +2533,9 @@ class VolumeManager(manager.SchedulerDependentManager):
 
             self.db.volume_glance_metadata_delete_by_snapshot(context,
                                                               snapshot['id'])
+
+            # TODO(thangp): Switch over to use snapshot.destroy()
+            # after cgsnapshot has been switched over to objects
             self.db.snapshot_destroy(context, snapshot['id'])
 
             # Commit the reservations