review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Send 'create volume from snapshot' to the proper host
author     ZhuRongze <zrzhit@gmail.com>
Fri, 13 Jul 2012 12:07:13 +0000 (12:07 +0000)
committer  ZhuRongze <zrzhit@gmail.com>
Fri, 3 Aug 2012 09:23:34 +0000 (09:23 +0000)
A simple solution for bug 1008866. When creating a volume from a snapshot
in a multi-cluster deployment, the volume API checks whether snapshot_id is
set. If snapshot_id is set, the create_volume call is cast directly to the
volume host where the snapshot resides instead of being passed through the
scheduler, so the snapshot can be copied to the new volume.

Change-Id: Ie9c1a77f62abc40e294b1d0c604cf885652728da
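
For illustration only (not part of the commit), a minimal Python sketch of
where the create_volume cast ends up after this change. It assumes that
rpc.queue_get_for() of this era builds per-host topics as "<topic>.<host>"
and that the default topic names are cinder-volume and cinder-scheduler; the
host name node-1 is hypothetical.

    # Sketch of the routing decision introduced by _cast_create_volume().
    def target_topic(snapshot_id, snapshot_host, snapshot_same_host=True):
        """Return the topic the create_volume cast would be sent to."""
        if snapshot_id and snapshot_same_host:
            # Direct cast to the volume service on the snapshot's host,
            # assuming queue_get_for() yields "<topic>.<host>".
            return 'cinder-volume.%s' % snapshot_host
        # Otherwise fall back to the scheduler, as before this change.
        return 'cinder-scheduler'

    print(target_topic('snap-1', 'node-1'))          # cinder-volume.node-1
    print(target_topic(None, None))                  # cinder-scheduler
    print(target_topic('snap-1', 'node-1', False))   # cinder-scheduler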

cinder/volume/api.py
etc/cinder/cinder.conf.sample

index 614b39d816cec053fc3766722b5122420e296991..9784d8719da4a00d077376ceab8465ff32b08dfa 100644 (file)
@@ -26,6 +26,7 @@ from eventlet import greenthread
 
 from cinder import exception
 from cinder import flags
+from cinder.openstack.common import cfg
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import rpc
 import cinder.policy
@@ -34,7 +35,12 @@ from cinder import quota
 from cinder import utils
 from cinder.db import base
 
+volume_host_opt = cfg.BoolOpt('snapshot_same_host',
+        default=True,
+        help='Create volume from snapshot at the host where snapshot resides')
+
 FLAGS = flags.FLAGS
+FLAGS.register_opt(volume_host_opt)
 flags.DECLARE('storage_availability_zone', 'cinder.volume.manager')
 
 LOG = logging.getLogger(__name__)
@@ -113,14 +119,36 @@ class API(base.Base):
             }
 
         volume = self.db.volume_create(context, options)
-        rpc.cast(context,
-                 FLAGS.scheduler_topic,
-                 {"method": "create_volume",
-                  "args": {"topic": FLAGS.volume_topic,
-                           "volume_id": volume['id'],
-                           "snapshot_id": snapshot_id}})
+        self._cast_create_volume(context, volume['id'], snapshot_id)
         return volume
 
+    def _cast_create_volume(self, context, volume_id, snapshot_id):
+
+        # NOTE(Rongze Zhu): A simple solution for bug 1008866. If
+        # snapshot_id is set, cast the create_volume call directly to
+        # the volume host where the snapshot resides instead of passing
+        # it through the scheduler, so the snapshot can be copied.
+
+        if snapshot_id and FLAGS.snapshot_same_host:
+            snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+            src_volume_ref = self.db.volume_get(context,
+                                                snapshot_ref['volume_id'])
+            topic = rpc.queue_get_for(context,
+                                      FLAGS.volume_topic,
+                                      src_volume_ref['host'])
+            rpc.cast(context,
+                     topic,
+                     {"method": "create_volume",
+                      "args": {"volume_id": volume_id,
+                               "snapshot_id": snapshot_id}})
+        else:
+            rpc.cast(context,
+                     FLAGS.scheduler_topic,
+                     {"method": "create_volume",
+                      "args": {"topic": FLAGS.volume_topic,
+                               "volume_id": volume_id,
+                               "snapshot_id": snapshot_id}})
+
     # TODO(yamahata): eliminate dumb polling
     def wait_creation(self, context, volume):
         volume_id = volume['id']
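
For context, a rough usage sketch (not from the commit) of how this code path
is reached through the internal volume API. The API.create() signature shown
here is recalled from the Folsom-era code and may differ slightly; ctxt and
snapshot_id are placeholders.

    # Illustrative only: creating a volume from an existing snapshot through
    # the internal volume API.  With snapshot_same_host=True (the default),
    # _cast_create_volume() above sends the cast straight to the volume host
    # that owns the snapshot's source volume instead of to the scheduler.
    from cinder.volume import api as volume_api

    api = volume_api.API()
    snap = api.get_snapshot(ctxt, snapshot_id)   # ctxt, snapshot_id: placeholders
    vol = api.create(ctxt,
                     size=snap['volume_size'],
                     name='vol-from-snap',
                     description='',
                     snapshot=snap)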
index ee97e65039dfeecdd7c1ab0fc9b3f34cd0234c95..3491e9375cbeca551faefd9162a9d76c6f3cbebd 100644 (file)
 ###### (IntOpt) maximum number of volume gigabytes to allow per host
 # max_gigabytes=10000
 
+######## defined in cinder.volume.api ########
+
+# snapshot_same_host=true
+#### (BoolOpt) Create volume from snapshot at the host where snapshot resides.
+
 ######### defined in cinder.volume.driver #########
 
 ###### (StrOpt) iscsi target user-land tool to use
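
Operators who want to keep the previous behaviour, where every volume
creation is placed by the scheduler, can uncomment the new option and turn it
off; a hypothetical cinder.conf override:

    # hypothetical override in cinder.conf: always place new volumes via the
    # scheduler, even when the volume is created from a snapshot
    snapshot_same_host=false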