]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Added needed info to accept_transfer
authorJohn Griffith <john.griffith@solidfire.com>
Fri, 16 Aug 2013 23:12:11 +0000 (17:12 -0600)
committerJohn Griffith <john.griffith@solidfire.com>
Tue, 20 Aug 2013 15:02:33 +0000 (09:02 -0600)
Drivers that implement accept_transfer will need
things like the new user_id and project_id.  We were
not including these in the original add; for drivers
that are tenant aware, this information is
necessary.

Also the get_volume call in the transfer was using
the new user context, so the volume would never be
found.  We fix this here by providing an elevated
context to the get_volume call when accepting the
transfer.

Change-Id: I7b60c19950f85c4309a97bb842ff238bcf8e746a
Fixes: bug 1213275
cinder/tests/test_volume_rpcapi.py
cinder/transfer/api.py
cinder/volume/api.py
cinder/volume/driver.py
cinder/volume/manager.py
cinder/volume/rpcapi.py

index 79916b97ef47281b8f591cf3dd0818250b0a3c1a..67c40c4d43d1b292433798763ebe48274347310c 100644 (file)
@@ -193,7 +193,11 @@ class VolumeRpcAPITestCase(test.TestCase):
         self._test_volume_api('accept_transfer',
                               rpc_method='cast',
                               volume=self.fake_volume,
-                              version='1.5')
+                              new_user='e5565fd0-06c8-11e3-'
+                                       '8ffd-0800200c9b77',
+                              new_project='e4465fd0-06c8-11e3'
+                                          '-8ffd-0800200c9a66',
+                              version='1.9')
 
     def test_extend_volume(self):
         self._test_volume_api('extend_volume',
index b89aa5990ee5bb5ff8fc5368f89e211258d7eeba..c52f3faa51e12f8af2ce7842645a9a1481b1ab69 100644 (file)
@@ -184,7 +184,10 @@ class API(base.Base):
         try:
             # Transfer ownership of the volume now, must use an elevated
             # context.
-            self.volume_api.accept_transfer(context, vol_ref)
+            self.volume_api.accept_transfer(context,
+                                            vol_ref,
+                                            context.user_id,
+                                            context.project_id)
             self.db.transfer_accept(context.elevated(),
                                     transfer_id,
                                     context.user_id,
index 16a81457741de7c4c318b884ed9afd0ebf8e9760..9ef85270ec572fd2c567cc8f493e42b0e20b1fb8 100644 (file)
@@ -430,9 +430,11 @@ class API(base.Base):
                                                        force)
 
     @wrap_check_policy
-    def accept_transfer(self, context, volume):
+    def accept_transfer(self, context, volume, new_user, new_project):
         return self.volume_rpcapi.accept_transfer(context,
-                                                  volume)
+                                                  volume,
+                                                  new_user,
+                                                  new_project)
 
     def _create_snapshot(self, context,
                          volume, name, description,
index 0a0cd5f36f5d0ac294c9c9e3b373b5bff139634d..9953db4c9396e1f37ea90ef0ff0c2543abd70484 100644 (file)
@@ -613,7 +613,7 @@ class ISCSIDriver(VolumeDriver):
         data['QoS_support'] = False
         self._stats = data
 
-    def accept_transfer(self, volume):
+    def accept_transfer(self, volume, new_user, new_project):
         pass
 
 
index 1784837cf85978203c3816fc7d54685ffa4bd195..f0bba49eddbe7b229943d689544ae4f99e347714 100644 (file)
@@ -114,7 +114,7 @@ MAPPING = {
 class VolumeManager(manager.SchedulerDependentManager):
     """Manages attachable block storage devices."""
 
-    RPC_API_VERSION = '1.8'
+    RPC_API_VERSION = '1.9'
 
     def __init__(self, volume_driver=None, service_name=None,
                  *args, **kwargs):
@@ -527,9 +527,11 @@ class VolumeManager(manager.SchedulerDependentManager):
         volume_ref = self.db.volume_get(context, volume_id)
         self.driver.terminate_connection(volume_ref, connector, force=force)
 
-    def accept_transfer(self, context, volume_id):
-        volume_ref = self.db.volume_get(context, volume_id)
-        self.driver.accept_transfer(volume_ref)
+    def accept_transfer(self, context, volume_id, new_user, new_project):
+        # NOTE(jdg): need elevated context as we haven't "given" the vol
+        # yet
+        volume_ref = self.db.volume_get(context.elevated(), volume_id)
+        self.driver.accept_transfer(volume_ref, new_user, new_project)
 
     def _migrate_volume_generic(self, ctxt, volume, host):
         rpcapi = volume_rpcapi.VolumeAPI()
index b428f79340b4ddc11d7df5e16148c21a31ab6724..9bd067d04c6a27c9de4073376522f45879474f86 100644 (file)
@@ -43,6 +43,7 @@ class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
         1.7 - Adds host_name parameter to attach_volume()
               to allow attaching to host rather than instance.
         1.8 - Add migrate_volume, rename_volume.
+        1.9 - Add new_user and new_project to accept_transfer.
     '''
 
     BASE_RPC_API_VERSION = '1.0'
@@ -137,12 +138,14 @@ class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
         self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'),
                          version='1.2')
 
-    def accept_transfer(self, ctxt, volume):
+    def accept_transfer(self, ctxt, volume, new_user, new_project):
         self.cast(ctxt,
                   self.make_msg('accept_transfer',
-                                volume_id=volume['id']),
+                                volume_id=volume['id'],
+                                new_user=new_user,
+                                new_project=new_project),
                   topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
-                  version='1.5')
+                  version='1.9')
 
     def extend_volume(self, ctxt, volume, new_size):
         self.cast(ctxt,