review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Remove unused arguments from c-vol's create_volume
author    Michal Dulko <michal.dulko@intel.com>
          Thu, 9 Jul 2015 07:05:10 +0000 (09:05 +0200)
committer Michal Dulko <michal.dulko@intel.com>
          Fri, 24 Jul 2015 09:17:32 +0000 (11:17 +0200)
Arguments snapshot_id, image_id, source_volid, source_replicaid,
consistencygroup_id and cgsnapshot_id of the volume.rpcapi.create_volume
RPC method are duplicated: each of them is either present in the
request_spec dictionary passed as one of the arguments, or is saved in
the database and retrieved by the flow processing the request.

To simplify the flow, this commit removes these duplicated parameters
and adapts the rest of the code to use the occurrences that are already
there.

Related-Blueprint: taskflow-refactoring
Change-Id: I4dbb57968358e8930b923275c2dfd0e44d7b40d8
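
In caller terms, the ids that used to be separate keyword arguments now travel
only inside request_spec. A minimal before/after sketch of an RPC call site
(names such as ctxt, volume, host, request_spec and filter_properties are
assumed to be in scope; this is an illustration, not code from the change):

    # Before: the ids were duplicated as explicit kwargs.
    volume_rpcapi.create_volume(ctxt, volume, host,
                                request_spec, filter_properties,
                                allow_reschedule=True,
                                snapshot_id=snapshot_id,
                                image_id=image_id)

    # After: the same information is carried in request_spec (or is
    # looked up from the volume's database row by the manager flow).
    request_spec['snapshot_id'] = snapshot_id
    request_spec['image_id'] = image_id
    volume_rpcapi.create_volume(ctxt, volume, host,
                                request_spec, filter_properties,
                                allow_reschedule=True)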

cinder/scheduler/filter_scheduler.py
cinder/tests/unit/test_rbd.py
cinder/tests/unit/test_volume.py
cinder/tests/unit/test_volume_rpcapi.py
cinder/volume/flows/api/create_volume.py
cinder/volume/flows/manager/create_volume.py
cinder/volume/manager.py
cinder/volume/rpcapi.py

index 7ff5c39826e4eee859228f5cdeee0602726c0af5..df2bceef8a0fb37a7c973aacba12094b5d33b4b7 100644 (file)
@@ -90,8 +90,6 @@ class FilterScheduler(driver.Scheduler):
 
         host = weighed_host.obj.host
         volume_id = request_spec['volume_id']
-        snapshot_id = request_spec['snapshot_id']
-        image_id = request_spec['image_id']
 
         updated_volume = driver.volume_update_db(context, volume_id, host)
         self._post_select_populate_filter_properties(filter_properties,
@@ -102,9 +100,7 @@ class FilterScheduler(driver.Scheduler):
 
         self.volume_rpcapi.create_volume(context, updated_volume, host,
                                          request_spec, filter_properties,
-                                         allow_reschedule=True,
-                                         snapshot_id=snapshot_id,
-                                         image_id=image_id)
+                                         allow_reschedule=True)
 
     def host_passes_filters(self, context, host, request_spec,
                             filter_properties):
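
With this hunk the scheduler no longer unpacks snapshot_id and image_id; it
reads only volume_id and forwards the rest of request_spec untouched. A
hypothetical request_spec (fake ids, illustrative keys only) that satisfies
the new path:

    request_spec = {
        'volume_id': '11111111-2222-3333-4444-555555555555',
        'snapshot_id': None,
        'image_id': '66666666-7777-8888-9999-aaaaaaaaaaaa',
        'source_volid': None,
        'volume_properties': {'size': 1},
    }
    # The scheduler uses only request_spec['volume_id']; the volume
    # manager later pulls the remaining ids out of the same dictionary.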
index a3205ed4a5d985147755450fda1c5e1113f495c8..aca40fe5ad8a1135602137682f3a854a2df03eb0 100644 (file)
@@ -1083,13 +1083,13 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
             if not clone_error:
                 self.volume.create_volume(self.context,
                                           volume_id,
-                                          image_id=image_id)
+                                          request_spec={'image_id': image_id})
             else:
                 self.assertRaises(exception.CinderException,
                                   self.volume.create_volume,
                                   self.context,
                                   volume_id,
-                                  image_id=image_id)
+                                  request_spec={'image_id': image_id})
 
             volume = db.volume_get(self.context, volume_id)
             self.assertEqual(volume['status'], expected_status)
index 18369e836fe1368d5f5c9350243c26845c02b792..2fde332ff084090cc2b97459dab392a137137ea9 100644 (file)
@@ -1028,7 +1028,9 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertTrue(self.volume.delete_volume(self.context, volume_id))
         self.assertTrue(mock_get_volume.called)
 
-    def test_create_volume_from_snapshot(self):
+    @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
+                'create_volume_from_snapshot')
+    def test_create_volume_from_snapshot(self, mock_create_from_snap):
         """Test volume can be created from a snapshot."""
         volume_src = tests_utils.create_volume(self.context,
                                                **self.volume_params)
@@ -1041,7 +1043,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         volume_dst = tests_utils.create_volume(self.context,
                                                snapshot_id=snapshot_id,
                                                **self.volume_params)
-        self.volume.create_volume(self.context, volume_dst['id'], snapshot_id)
+        self.volume.create_volume(self.context, volume_dst['id'])
         self.assertEqual(volume_dst['id'],
                          db.volume_get(
                              context.get_admin_context(),
@@ -1355,7 +1357,7 @@ class VolumeTestCase(BaseVolumeTestCase):
 
         # locked
         self.volume.create_volume(self.context, volume_id=dst_vol_id,
-                                  snapshot_id=snap_id)
+                                  request_spec={'snapshot_id': snap_id})
         self.assertEqual(2, len(self.called))
         self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id)
         self.assertEqual(snap_id,
@@ -1417,7 +1419,7 @@ class VolumeTestCase(BaseVolumeTestCase):
 
         # locked
         self.volume.create_volume(self.context, volume_id=dst_vol_id,
-                                  source_volid=src_vol_id)
+                                  request_spec={'source_volid': src_vol_id})
         self.assertEqual(2, len(self.called))
         self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id)
         self.assertEqual(src_vol_id,
@@ -1463,7 +1465,8 @@ class VolumeTestCase(BaseVolumeTestCase):
             # we expect this to block and then fail
             t = eventlet.spawn(self.volume.create_volume,
                                self.context,
-                               volume_id=dst_vol_id, source_volid=src_vol_id)
+                               volume_id=dst_vol_id,
+                               request_spec={'source_volid': src_vol_id})
             gthreads.append(t)
 
             return orig_elevated(*args, **kwargs)
@@ -1515,10 +1518,10 @@ class VolumeTestCase(BaseVolumeTestCase):
 
         # create volume from source volume
         dst_vol = tests_utils.create_volume(self.context,
+                                            source_volid=src_vol_id,
                                             **self.volume_params)
         self.volume.create_volume(self.context,
-                                  dst_vol['id'],
-                                  source_volid=src_vol_id)
+                                  dst_vol['id'])
 
         self.assertRaises(exception.GlanceMetadataNotFound,
                           db.volume_glance_metadata_copy_from_volume_to_volume,
@@ -1550,8 +1553,7 @@ class VolumeTestCase(BaseVolumeTestCase):
                                             **self.volume_params)
         self._raise_metadata_copy_failure(
             'volume_glance_metadata_copy_from_volume_to_volume',
-            dst_vol['id'],
-            source_volid=src_vol_id)
+            dst_vol['id'])
 
         # cleanup resource
         db.volume_destroy(self.context, src_vol_id)
@@ -1578,11 +1580,11 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertEqual('available', snapshot_ref)
 
         dst_vol = tests_utils.create_volume(self.context,
+                                            snapshot_id=snapshot_id,
                                             **self.volume_params)
         self._raise_metadata_copy_failure(
             'volume_glance_metadata_copy_to_volume',
-            dst_vol['id'],
-            snapshot_id=snapshot_id)
+            dst_vol['id'])
 
         # cleanup resource
         db.snapshot_destroy(self.context, snapshot_id)
@@ -1609,8 +1611,7 @@ class VolumeTestCase(BaseVolumeTestCase):
                                             **self.volume_params)
         self._raise_metadata_copy_failure(
             'volume_glance_metadata_copy_from_volume_to_volume',
-            dst_vol['id'],
-            source_volid=src_vol_id)
+            dst_vol['id'])
 
         # cleanup resource
         db.volume_destroy(self.context, src_vol_id)
@@ -1640,10 +1641,10 @@ class VolumeTestCase(BaseVolumeTestCase):
 
         # create volume from snapshot
         dst_vol = tests_utils.create_volume(self.context,
+                                            snapshot_id=snapshot_id,
                                             **self.volume_params)
         self.volume.create_volume(self.context,
-                                  dst_vol['id'],
-                                  snapshot_id=snapshot_id)
+                                  dst_vol['id'])
 
         self.assertRaises(exception.GlanceMetadataNotFound,
                           db.volume_glance_metadata_copy_to_volume,
@@ -1673,10 +1674,9 @@ class VolumeTestCase(BaseVolumeTestCase):
         volume = db.volume_get(self.context, volume_src['id'])
         volume_dst = tests_utils.create_volume(
             self.context,
-            source_replicaid=volume['id'],
             **self.volume_params)
         self.volume.create_volume(self.context, volume_dst['id'],
-                                  source_replicaid=volume['id'])
+                                  {'source_replicaid': volume['id']})
 
         self.assertRaises(exception.GlanceMetadataNotFound,
                           db.volume_glance_metadata_copy_from_volume_to_volume,
@@ -1708,6 +1708,7 @@ class VolumeTestCase(BaseVolumeTestCase):
 
         # create vol from snapshot...
         dst_vol = tests_utils.create_volume(self.context,
+                                            snapshot_id=snap_id,
                                             source_volid=src_vol_id,
                                             **self.volume_params)
         dst_vol_id = dst_vol['id']
@@ -1722,7 +1723,8 @@ class VolumeTestCase(BaseVolumeTestCase):
 
             # We expect this to block and then fail
             t = eventlet.spawn(self.volume.create_volume, self.context,
-                               volume_id=dst_vol_id, snapshot_id=snap_id)
+                               volume_id=dst_vol_id,
+                               request_spec={'snapshot_id': snap_id})
             gthreads.append(t)
 
             return orig_elevated(*args, **kwargs)
@@ -3020,7 +3022,8 @@ class VolumeTestCase(BaseVolumeTestCase):
         test_volume = tests_utils.create_volume(
             self.context,
             **self.volume_params)
-        self.volume.create_volume(self.context, test_volume['id'])
+        self.volume.create_volume(self.context, test_volume['id'],
+                                  request_spec={})
         test_volume['status'] = 'available'
         volume_api = cinder.volume.api.API()
         self.assertRaises(exception.QuotaError,
@@ -3414,11 +3417,13 @@ class VolumeTestCase(BaseVolumeTestCase):
                                               **self.volume_params)['id']
         # creating volume testdata
         try:
-            request_spec = {'volume_properties': self.volume_params}
+            request_spec = {
+                'volume_properties': self.volume_params,
+                'image_id': image_id,
+            }
             self.volume.create_volume(self.context,
                                       volume_id,
-                                      request_spec,
-                                      image_id=image_id)
+                                      request_spec)
         finally:
             # cleanup
             os.unlink(dst_path)
@@ -3470,9 +3475,8 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertRaises(exception.ImageNotFound,
                           self.volume.create_volume,
                           self.context,
-                          volume_id, None, None, None,
-                          None,
-                          self.FAKE_UUID)
+                          volume_id,
+                          {'image_id': self.FAKE_UUID})
         volume = db.volume_get(self.context, volume_id)
         self.assertEqual("error", volume['status'])
         self.assertFalse(volume['bootable'])
@@ -3851,10 +3855,9 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.volume.create_volume(self.context, volume_src['id'])
         volume_dst = tests_utils.create_volume(
             self.context,
-            source_replicaid=volume_src['id'],
             **self.volume_params)
         self.volume.create_volume(self.context, volume_dst['id'],
-                                  source_replicaid=volume_src['id'])
+                                  {'source_replicaid': volume_src['id']})
         self.assertEqual('available',
                          db.volume_get(context.get_admin_context(),
                                        volume_dst['id']).status)
@@ -3875,8 +3878,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         volume_dst = tests_utils.create_volume(self.context,
                                                source_volid=volume_src['id'],
                                                **self.volume_params)
-        self.volume.create_volume(self.context, volume_dst['id'],
-                                  source_volid=volume_src['id'])
+        self.volume.create_volume(self.context, volume_dst['id'])
         self.assertEqual('available',
                          db.volume_get(context.get_admin_context(),
                                        volume_dst['id']).status)
@@ -3925,8 +3927,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         volume_dst = tests_utils.create_volume(self.context,
                                                source_volid=volume_src['id'],
                                                **self.volume_params)
-        self.volume.create_volume(self.context, volume_dst['id'],
-                                  source_volid=volume_src['id'])
+        self.volume.create_volume(self.context, volume_dst['id'])
         self.assertEqual('available',
                          db.volume_get(context.get_admin_context(),
                                        volume_dst['id']).status)
@@ -3958,8 +3959,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertRaises(exception.CinderException,
                           self.volume.create_volume,
                           self.context,
-                          volume_dst['id'], None, None, None, None, None,
-                          volume_src['id'])
+                          volume_dst['id'])
         self.assertEqual('creating', volume_src['status'])
         self.volume.delete_volume(self.context, volume_dst['id'])
         self.volume.delete_volume(self.context, volume_src['id'])
@@ -4794,10 +4794,13 @@ class VolumeTestCase(BaseVolumeTestCase):
     @mock.patch.object(driver.VolumeDriver,
                        "create_consistencygroup_from_src",
                        return_value=(None, None))
+    @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
+                'create_volume_from_snapshot')
     def test_create_consistencygroup_from_src(self, mock_create_from_src,
                                               mock_delete_cgsnap,
                                               mock_create_cgsnap,
-                                              mock_delete_cg, mock_create_cg):
+                                              mock_delete_cg, mock_create_cg,
+                                              mock_create_volume):
         """Test consistencygroup can be created and deleted."""
         group = tests_utils.create_consistencygroup(
             self.context,
index 6cf41376ec3a2a97832ecb83f1f4e9e55bd43a20..8d3d3fc9396632642573d4833150dda8fc08c00d 100644 (file)
@@ -154,13 +154,7 @@ class VolumeRpcAPITestCase(test.TestCase):
                               request_spec='fake_request_spec',
                               filter_properties='fake_properties',
                               allow_reschedule=True,
-                              snapshot_id='fake_snapshot_id',
-                              image_id='fake_image_id',
-                              source_volid='fake_src_id',
-                              source_replicaid='fake_replica_id',
-                              consistencygroup_id='fake_cg_id',
-                              cgsnapshot_id=None,
-                              version='1.4')
+                              version='1.24')
 
     def test_create_volume_serialization(self):
         request_spec = {"metadata": self.fake_volume_metadata}
@@ -171,13 +165,7 @@ class VolumeRpcAPITestCase(test.TestCase):
                               request_spec=request_spec,
                               filter_properties='fake_properties',
                               allow_reschedule=True,
-                              snapshot_id='fake_snapshot_id',
-                              image_id='fake_image_id',
-                              source_volid='fake_src_id',
-                              source_replicaid='fake_replica_id',
-                              consistencygroup_id='fake_cg_id',
-                              cgsnapshot_id=None,
-                              version='1.4')
+                              version='1.24')
 
     def test_delete_volume(self):
         self._test_volume_api('delete_volume',
index ac6bc9b19289e9a303a11c02af0c8fac82463774..5f1a4adfcbb5b22d6f107ba73e1194d430a90a39 100644 (file)
@@ -715,12 +715,7 @@ class VolumeCastTask(flow_utils.CinderTask):
                     volume_ref['host'],
                     request_spec,
                     filter_properties,
-                    allow_reschedule=False,
-                    snapshot_id=snapshot_id,
-                    image_id=image_id,
-                    source_volid=source_volid,
-                    source_replicaid=source_replicaid,
-                    consistencygroup_id=cgroup_id)
+                    allow_reschedule=False)
 
     def execute(self, context, **kwargs):
         scheduler_hints = kwargs.pop('scheduler_hints', None)
index 8e58e0e347fa9c44cc3383c6088b8e3020798412..0f775a639ca94d179d6632fa2db20fa15d2b1172 100644 (file)
@@ -56,8 +56,8 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
 
     def __init__(self, reschedule_context, db, scheduler_rpcapi,
                  do_reschedule):
-        requires = ['filter_properties', 'image_id', 'request_spec',
-                    'snapshot_id', 'volume_id', 'context']
+        requires = ['filter_properties', 'request_spec', 'volume_id',
+                    'context']
         super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
                                                       requires=requires)
         self.do_reschedule = do_reschedule
@@ -89,7 +89,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
         pass
 
     def _reschedule(self, context, cause, request_spec, filter_properties,
-                    snapshot_id, image_id, volume_id, **kwargs):
+                    volume_id):
         """Actions that happen during the rescheduling attempt occur here."""
 
         create_volume = self.scheduler_rpcapi.create_volume
@@ -114,7 +114,6 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
             retry_info['exc'] = traceback.format_exception(*cause.exc_info)
 
         return create_volume(context, CONF.volume_topic, volume_id,
-                             snapshot_id=snapshot_id, image_id=image_id,
                              request_spec=request_spec,
                              filter_properties=filter_properties)
 
@@ -228,13 +227,12 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask):
     default_provides = 'volume_spec'
 
     def __init__(self, db):
-        requires = ['image_id', 'snapshot_id', 'source_volid',
-                    'source_replicaid']
+        requires = ['volume_ref', 'request_spec']
         super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION],
                                                     requires=requires)
         self.db = db
 
-    def execute(self, context, volume_ref, **kwargs):
+    def execute(self, context, volume_ref, request_spec):
         get_remote_image_service = glance.get_remote_image_service
 
         volume_name = volume_ref['name']
@@ -254,18 +252,18 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask):
             'volume_size': volume_size,
         }
 
-        if kwargs.get('snapshot_id'):
+        if volume_ref.get('snapshot_id'):
             # We are making a snapshot based volume instead of a raw volume.
             specs.update({
                 'type': 'snap',
-                'snapshot_id': kwargs['snapshot_id'],
+                'snapshot_id': volume_ref['snapshot_id'],
             })
-        elif kwargs.get('source_volid'):
+        elif volume_ref.get('source_volid'):
             # We are making a source based volume instead of a raw volume.
             #
             # NOTE(harlowja): This will likely fail if the source volume
             # disappeared by the time this call occurred.
-            source_volid = kwargs['source_volid']
+            source_volid = volume_ref.get('source_volid')
             source_volume_ref = self.db.volume_get(context, source_volid)
             specs.update({
                 'source_volid': source_volid,
@@ -275,21 +273,21 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask):
                 'source_volstatus': source_volume_ref['status'],
                 'type': 'source_vol',
             })
-        elif kwargs.get('source_replicaid'):
+        elif request_spec.get('source_replicaid'):
             # We are making a clone based on the replica.
             #
             # NOTE(harlowja): This will likely fail if the replica
             # disappeared by the time this call occurred.
-            source_volid = kwargs['source_replicaid']
+            source_volid = request_spec['source_replicaid']
             source_volume_ref = self.db.volume_get(context, source_volid)
             specs.update({
                 'source_replicaid': source_volid,
                 'source_replicastatus': source_volume_ref['status'],
                 'type': 'source_replica',
             })
-        elif kwargs.get('image_id'):
+        elif request_spec.get('image_id'):
             # We are making an image based volume instead of a raw volume.
-            image_href = kwargs['image_id']
+            image_href = request_spec['image_id']
             image_service, image_id = get_remote_image_service(context,
                                                                image_href)
             specs.update({
@@ -729,9 +727,7 @@ class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
 
 def get_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
              allow_reschedule, reschedule_context, request_spec,
-             filter_properties, snapshot_id=None, image_id=None,
-             source_volid=None, source_replicaid=None,
-             consistencygroup_id=None, cgsnapshot_id=None):
+             filter_properties):
     """Constructs and returns the manager entrypoint flow.
 
     This flow will do the following:
@@ -756,14 +752,8 @@ def get_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
     create_what = {
         'context': context,
         'filter_properties': filter_properties,
-        'image_id': image_id,
         'request_spec': request_spec,
-        'snapshot_id': snapshot_id,
-        'source_volid': source_volid,
         'volume_id': volume_id,
-        'source_replicaid': source_replicaid,
-        'consistencygroup_id': consistencygroup_id,
-        'cgsnapshot_id': cgsnapshot_id,
     }
 
     volume_flow.add(ExtractVolumeRefTask(db, host, set_error=False))
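
Taken together, ExtractVolumeSpecTask now resolves the volume source from
volume_ref (snapshot_id, source_volid) and from request_spec
(source_replicaid, image_id) instead of separate task inputs. A condensed,
self-contained sketch of that decision order (the helper name is
hypothetical, DB and image-service lookups are omitted, and the 'image'/'raw'
labels follow the unchanged parts of the task not shown in the hunk):

    def classify_volume_source(volume_ref, request_spec):
        # snapshot_id and source_volid are read from the volume DB row...
        if volume_ref.get('snapshot_id'):
            return 'snap'
        if volume_ref.get('source_volid'):
            return 'source_vol'
        # ...while source_replicaid and image_id still arrive in request_spec.
        if request_spec.get('source_replicaid'):
            return 'source_replica'
        if request_spec.get('image_id'):
            return 'image'
        return 'raw'

    assert classify_volume_source({'snapshot_id': 'fake-snap'}, {}) == 'snap'
    assert classify_volume_source({}, {'image_id': 'fake-image'}) == 'image'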
index 3ac4aa59f03c8661306cee85b14a2df3f0024e58..b9c6e21b3dc33b5207e20e759d57c59fcafe0093 100644 (file)
@@ -188,7 +188,7 @@ def locked_snapshot_operation(f):
 class VolumeManager(manager.SchedulerDependentManager):
     """Manages attachable block storage devices."""
 
-    RPC_API_VERSION = '1.23'
+    RPC_API_VERSION = '1.24'
 
     target = messaging.Target(version=RPC_API_VERSION)
 
@@ -406,16 +406,16 @@ class VolumeManager(manager.SchedulerDependentManager):
         return self.driver.initialized
 
     def create_volume(self, context, volume_id, request_spec=None,
-                      filter_properties=None, allow_reschedule=True,
-                      snapshot_id=None, image_id=None, source_volid=None,
-                      source_replicaid=None, consistencygroup_id=None,
-                      cgsnapshot_id=None):
+                      filter_properties=None, allow_reschedule=True):
 
         """Creates the volume."""
         context_elevated = context.elevated()
         if filter_properties is None:
             filter_properties = {}
 
+        if request_spec is None:
+            request_spec = {}
+
         try:
             # NOTE(flaper87): Driver initialization is
             # verified by the task itself.
@@ -429,18 +429,16 @@ class VolumeManager(manager.SchedulerDependentManager):
                 allow_reschedule,
                 context,
                 request_spec,
-                filter_properties,
-                snapshot_id=snapshot_id,
-                image_id=image_id,
-                source_volid=source_volid,
-                source_replicaid=source_replicaid,
-                consistencygroup_id=consistencygroup_id,
-                cgsnapshot_id=cgsnapshot_id)
+                filter_properties)
         except Exception:
             msg = _("Create manager volume flow failed.")
             LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
             raise exception.CinderException(msg)
 
+        snapshot_id = request_spec.get('snapshot_id')
+        source_volid = request_spec.get('source_volid')
+        source_replicaid = request_spec.get('source_replicaid')
+
         if snapshot_id is not None:
             # Make sure the snapshot is not deleted until we are done with it.
             locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
index a8212ecead71fd538cac1f04d024e6562b610288..04812f5681d29de09c6a323ef25db913f6996b77 100644 (file)
@@ -63,7 +63,11 @@ class VolumeAPI(object):
                and delete_snapshot()
         1.21 - Adds update_consistencygroup.
         1.22 - Adds create_consistencygroup_from_src.
-        1.23 - Adds attachment_id to detach_volume
+        1.23 - Adds attachment_id to detach_volume.
+        1.24 - Removed duplicated parameters: snapshot_id, image_id,
+               source_volid, source_replicaid, consistencygroup_id and
+               cgsnapshot_id from create_volume. All of them are already
+               passed either in request_spec or available in the DB.
     '''
 
     BASE_RPC_API_VERSION = '1.0'
@@ -73,7 +77,7 @@ class VolumeAPI(object):
         target = messaging.Target(topic=CONF.volume_topic,
                                   version=self.BASE_RPC_API_VERSION)
         serializer = objects_base.CinderObjectSerializer()
-        self.client = rpc.get_client(target, '1.23', serializer=serializer)
+        self.client = rpc.get_client(target, '1.24', serializer=serializer)
 
     def create_consistencygroup(self, ctxt, group, host):
         new_host = utils.extract_host(host)
@@ -118,29 +122,16 @@ class VolumeAPI(object):
         cctxt.cast(ctxt, 'delete_cgsnapshot',
                    cgsnapshot_id=cgsnapshot['id'])
 
-    def create_volume(self, ctxt, volume, host,
-                      request_spec, filter_properties,
-                      allow_reschedule=True,
-                      snapshot_id=None, image_id=None,
-                      source_replicaid=None,
-                      source_volid=None,
-                      consistencygroup_id=None,
-                      cgsnapshot_id=None):
-
+    def create_volume(self, ctxt, volume, host, request_spec,
+                      filter_properties, allow_reschedule=True):
         new_host = utils.extract_host(host)
-        cctxt = self.client.prepare(server=new_host, version='1.4')
+        cctxt = self.client.prepare(server=new_host, version='1.24')
         request_spec_p = jsonutils.to_primitive(request_spec)
         cctxt.cast(ctxt, 'create_volume',
                    volume_id=volume['id'],
                    request_spec=request_spec_p,
                    filter_properties=filter_properties,
-                   allow_reschedule=allow_reschedule,
-                   snapshot_id=snapshot_id,
-                   image_id=image_id,
-                   source_replicaid=source_replicaid,
-                   source_volid=source_volid,
-                   consistencygroup_id=consistencygroup_id,
-                   cgsnapshot_id=cgsnapshot_id)
+                   allow_reschedule=allow_reschedule)
 
     def delete_volume(self, ctxt, volume, unmanage_only=False):
         new_host = utils.extract_host(volume['host'])
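
For completeness, a resulting client-side call would look roughly like the
sketch below (ctxt, volume and image_id are assumed to be in scope; the
dictionary keys are the ones the manager-side flow now consumes):

    from cinder.volume import rpcapi as volume_rpcapi

    api = volume_rpcapi.VolumeAPI()
    api.create_volume(ctxt, volume, volume['host'],
                      request_spec={'volume_id': volume['id'],
                                    'image_id': image_id},
                      filter_properties={},
                      allow_reschedule=True)
    # The cast is pinned to RPC version '1.24'; ids that used to be
    # explicit kwargs must now be present in request_spec or resolvable
    # from the volume's database row.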