]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
VolMgr: reschedule only when filter_properties has retry
authorZhiteng Huang <zhithuang@ebaysf.com>
Fri, 17 Apr 2015 16:34:59 +0000 (00:34 +0800)
committerZhiteng Huang <zhithuang@ebaysf.com>
Mon, 20 Apr 2015 04:54:34 +0000 (12:54 +0800)
In the task flow for volume manager, create_volume tasks, volume gets
rescheduled even when scheduler doesn't indicate so.  The problem is
the flow should not only check 'allow_reschedule' and 'request_specs',
but also (more importantly) filter_properties['retry'], which is
populated by scheduler if scheduler_max_attempts is set to > 1.  This
check was there before taskflow was introduced, but somehow the
migration missed the check for filter_properties['retry'].

This change adds back the check, so scheduler_max_attempts won't be
treated as if it were set to infinite.

Change-Id: Ia873617b22a2d86662ea8ea2fec5aae7f54a2058
Closes-bug: #1445561
(cherry picked from commit 217f4dfb13714f583deeaeded66d36fb558e4b02)

cinder/tests/test_volume.py
cinder/volume/flows/manager/create_volume.py

index 671bce85f8df8666c148ebec05df6eb09d7ae118..6e234d2a8607f7469251760d8d6d4ec2e06ba50b 100644 (file)
@@ -410,10 +410,12 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertRaises(exception.DriverNotInitialized,
                           self.volume.create_volume,
                           self.context, volume_id,
-                          {'volume_properties': self.volume_params})
-        # NOTE(dulek): Volume should be rescheduled as we passed request_spec,
-        # assert that it wasn't counted in allocated_capacity tracking.
-        self.assertEqual(self.volume.stats['pools'], {})
+                          {'volume_properties': self.volume_params},
+                          {'retry': {'num_attempts': 1, 'host': []}})
+        # NOTE(dulek): Volume should be rescheduled as we passed request_spec
+        # and filter_properties, assert that it wasn't counted in
+        # allocated_capacity tracking.
+        self.assertEqual({}, self.volume.stats['pools'])
 
         db.volume_destroy(context.get_admin_context(), volume_id)
 
@@ -431,10 +433,12 @@ class VolumeTestCase(BaseVolumeTestCase):
             self.assertRaises(processutils.ProcessExecutionError,
                               self.volume.create_volume,
                               self.context, volume_id,
-                              {'volume_properties': params})
-        # NOTE(dulek): Volume should be rescheduled as we passed request_spec,
-        # assert that it wasn't counted in allocated_capacity tracking.
-        self.assertEqual(self.volume.stats['pools'], {})
+                              {'volume_properties': params},
+                              {'retry': {'num_attempts': 1, 'host': []}})
+        # NOTE(dulek): Volume should be rescheduled as we passed request_spec
+        # and filter_properties, assert that it wasn't counted in
+        # allocated_capacity tracking.
+        self.assertEqual({}, self.volume.stats['pools'])
 
         db.volume_destroy(context.get_admin_context(), volume_id)
 
index fa0fd5a10004fdba84e85cc2861290ab0c2c01ab..1d9f898bdf28f34206bfaa3f96d6038e1363d4e1 100644 (file)
@@ -757,10 +757,14 @@ def get_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
 
     volume_flow.add(ExtractVolumeRefTask(db, host))
 
-    if allow_reschedule and request_spec:
+    retry = filter_properties.get('retry', None)
+    if allow_reschedule and request_spec and retry:
         volume_flow.add(OnFailureRescheduleTask(reschedule_context,
                                                 db, scheduler_rpcapi))
 
+    LOG.debug("Volume reschedule parameters: %(allow)s "
+              "retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry})
+
     volume_flow.add(ExtractVolumeSpecTask(db),
                     NotifyVolumeActionTask(db, "create.start"),
                     CreateVolumeFromSpecTask(db, driver),