From: Zhiteng Huang
Date: Fri, 17 Apr 2015 16:34:59 +0000 (+0800)
Subject: VolMgr: reschedule only when filter_properties has retry
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=217f4dfb13714f583deeaeded66d36fb558e4b02;p=openstack-build%2Fcinder-build.git

VolMgr: reschedule only when filter_properties has retry

In the volume manager's create_volume task flow, volumes get rescheduled
even when the scheduler doesn't indicate they should be. The flow should
check not only 'allow_reschedule' and 'request_spec' but also (more
importantly) filter_properties['retry'], which the scheduler populates
only when scheduler_max_attempts is set to a value greater than 1.
This check existed before taskflow was introduced, but the migration
somehow dropped the check on filter_properties['retry'].

This change adds the check back, so scheduler_max_attempts is no longer
effectively treated as infinite.

Change-Id: Ia873617b22a2d86662ea8ea2fec5aae7f54a2058
Closes-bug: #1445561
---

diff --git a/cinder/tests/test_volume.py b/cinder/tests/test_volume.py
index f14f8990b..eb9ca327d 100644
--- a/cinder/tests/test_volume.py
+++ b/cinder/tests/test_volume.py
@@ -410,9 +410,11 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertRaises(exception.DriverNotInitialized,
                           self.volume.create_volume,
                           self.context, volume_id,
-                          {'volume_properties': self.volume_params})
-        # NOTE(dulek): Volume should be rescheduled as we passed request_spec,
-        # assert that it wasn't counted in allocated_capacity tracking.
+                          {'volume_properties': self.volume_params},
+                          {'retry': {'num_attempts': 1, 'host': []}})
+        # NOTE(dulek): Volume should be rescheduled as we passed request_spec
+        # and filter_properties, assert that it wasn't counted in
+        # allocated_capacity tracking.
         self.assertEqual({}, self.volume.stats['pools'])
 
         db.volume_destroy(context.get_admin_context(), volume_id)
@@ -431,9 +433,11 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertRaises(processutils.ProcessExecutionError,
                           self.volume.create_volume,
                           self.context, volume_id,
-                          {'volume_properties': params})
-        # NOTE(dulek): Volume should be rescheduled as we passed request_spec,
-        # assert that it wasn't counted in allocated_capacity tracking.
+                          {'volume_properties': params},
+                          {'retry': {'num_attempts': 1, 'host': []}})
+        # NOTE(dulek): Volume should be rescheduled as we passed request_spec
+        # and filter_properties, assert that it wasn't counted in
+        # allocated_capacity tracking.
         self.assertEqual({}, self.volume.stats['pools'])
 
         db.volume_destroy(context.get_admin_context(), volume_id)
diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py
index fa0fd5a10..1d9f898bd 100644
--- a/cinder/volume/flows/manager/create_volume.py
+++ b/cinder/volume/flows/manager/create_volume.py
@@ -757,10 +757,14 @@ def get_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
 
     volume_flow.add(ExtractVolumeRefTask(db, host))
 
-    if allow_reschedule and request_spec:
+    retry = filter_properties.get('retry', None)
+    if allow_reschedule and request_spec and retry:
         volume_flow.add(OnFailureRescheduleTask(reschedule_context,
                                                 db, scheduler_rpcapi))
 
+    LOG.debug("Volume reschedule parameters: %(allow)s "
+              "retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry})
+
     volume_flow.add(ExtractVolumeSpecTask(db),
                     NotifyVolumeActionTask(db, "create.start"),
                     CreateVolumeFromSpecTask(db, driver),
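
For illustration only (not part of the patch): a minimal sketch of the reschedule
guard described above. The helper name should_add_reschedule_task and the sample
inputs are hypothetical; only the check itself and the
{'retry': {'num_attempts': 1, 'host': []}} shape follow the patch.

# Illustrative sketch of the guard added in get_flow(); not patch code.
# The scheduler populates filter_properties['retry'] only when
# scheduler_max_attempts > 1, so its absence means "do not reschedule".

def should_add_reschedule_task(allow_reschedule, request_spec,
                               filter_properties):
    # Hypothetical helper mirroring the patched condition: the
    # OnFailureRescheduleTask is added to the flow only when all three
    # inputs indicate rescheduling is both allowed and possible.
    retry = filter_properties.get('retry', None)
    return bool(allow_reschedule and request_spec and retry)


# Retries enabled: the scheduler recorded its retry bookkeeping.
print(should_add_reschedule_task(
    True, {'volume_properties': {}},
    {'retry': {'num_attempts': 1, 'host': []}}))   # True

# Retries disabled (scheduler_max_attempts = 1): no 'retry' key, so the
# reschedule-on-failure task would not be wired into the flow.
print(should_add_reschedule_task(
    True, {'volume_properties': {}}, {}))          # False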