review.fuel-infra Code Review - openstack-build/cinder-build.git / commitdiff
Create structure of flows' packages
author     Anastasia Karpinska <akarpinska@griddynamics.com>
           Wed, 18 Dec 2013 11:34:23 +0000 (13:34 +0200)
committer  Anastasia Karpinska <akarpinska@griddynamics.com>
           Fri, 24 Jan 2014 10:44:56 +0000 (12:44 +0200)
1. The create_volume flows were moved out of __init__.py into separate files.
   Going forward, each flow should be implemented in its own file.

2. Added the flows.common module for tasks that can be shared by different flows.

3. The proposed file structure for flows is cleaner and makes it easier to find
   and reuse common code (see the layout sketch below).

4. The _exception_to_unicode function and its tests were removed because they
   were never used.

Partially implements: blueprint create-volume-flow

Change-Id: I63473f549f0c501fe0f373830bc1080239d01892
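
For orientation, the flows layout implied by the file list below is roughly as
follows (annotations are editorial, not part of the commit):

    cinder/
        flow_utils.py                # moved from volume/flows/base.py
        scheduler/
            flows/
                __init__.py
                create_volume.py     # scheduler entrypoint flow
        volume/
            flows/
                api/
                    create_volume.py # api entrypoint flow
                common.py            # helpers shared between flows
                manager/
                    __init__.py
                    create_volume.py # manager entrypoint flow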

12 files changed:
cinder/flow_utils.py [moved from cinder/volume/flows/base.py with 95% similarity]
cinder/scheduler/flows/__init__.py [new file with mode: 0644]
cinder/scheduler/flows/create_volume.py [new file with mode: 0644]
cinder/scheduler/manager.py
cinder/tests/test_create_volume_flow.py
cinder/tests/test_rbd.py
cinder/volume/api.py
cinder/volume/flows/api/create_volume.py
cinder/volume/flows/common.py [new file with mode: 0644]
cinder/volume/flows/manager/__init__.py [new file with mode: 0644]
cinder/volume/flows/manager/create_volume.py [new file with mode: 0644]
cinder/volume/manager.py

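A note for readers: cinder/volume/flows/common.py is listed above as a new file,
but its body is not included in this view. Judging from the helpers deleted from
the api flow later in this diff and from the new common.* call sites
(common.make_pretty_name, common.restore_source_status, common.error_out_volume),
it presumably carries code along these lines. This is a reconstructed sketch,
not the committed file; cinder would also wrap the log messages in _() for i18n:

    from cinder import exception
    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    # Keep only a slice of a failure reason; the full message could be large.
    REASON_LENGTH = 128


    def make_pretty_name(method):
        """Makes a pretty name for a function/method."""
        meth_pieces = [method.__name__]
        # If it is an instance method, attempt to tack on the class name.
        if hasattr(method, 'im_self') and method.im_self is not None:
            try:
                meth_pieces.insert(0, method.im_self.__class__.__name__)
            except AttributeError:
                pass
        return ".".join(meth_pieces)


    def restore_source_status(context, db, volume_spec):
        # Only reset the source volume status if the volume being created
        # was itself cloned from a source volume.
        if not volume_spec or volume_spec.get('type') != 'source_vol':
            return
        source_volid = volume_spec['source_volid']
        source_status = volume_spec['source_volstatus']
        try:
            db.volume_update(context, source_volid, {'status': source_status})
        except exception.CinderException:
            # A non-critical failure: log it, but don't raise further.
            LOG.exception("Failed setting source volume %s back to its "
                          "initial status", source_volid)


    def error_out_volume(context, db, volume_id, reason=None):
        # Mark the volume as errored, saving at most REASON_LENGTH characters
        # of the failure reason.
        if reason is not None:
            reason = str(reason)[:REASON_LENGTH]
        try:
            db.volume_update(context, volume_id, {'status': 'error'})
        except exception.CinderException:
            # Don't let this cause further exceptions.
            LOG.exception("Failed updating volume %s to error status",
                          volume_id)
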
diff --git a/cinder/volume/flows/base.py b/cinder/flow_utils.py
similarity index 95%
rename from cinder/volume/flows/base.py
rename to cinder/flow_utils.py
index fa6aa80b97c15ed227c9100153ac67487e168bf4..05a89c32e1071edddf4ac150433c205e19c11dd2 100644
@@ -1,5 +1,3 @@
-#    Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
-#
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    a copy of the License at
diff --git a/cinder/scheduler/flows/__init__.py b/cinder/scheduler/flows/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cinder/scheduler/flows/create_volume.py b/cinder/scheduler/flows/create_volume.py
new file mode 100644
index 0000000..805622a
--- /dev/null
@@ -0,0 +1,161 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import taskflow.engines
+from taskflow.patterns import linear_flow
+from taskflow import task
+
+from cinder import exception
+from cinder import flow_utils
+from cinder.openstack.common import excutils
+from cinder.openstack.common import log as logging
+from cinder.openstack.common.notifier import api as notifier
+from cinder import utils
+from cinder.volume.flows import common
+
+LOG = logging.getLogger(__name__)
+
+ACTION = 'volume:create'
+
+
+class ExtractSchedulerSpecTask(flow_utils.CinderTask):
+    """Extracts a spec object from a partial and/or incomplete request spec.
+
+    Reversion strategy: N/A
+    """
+
+    default_provides = set(['request_spec'])
+
+    def __init__(self, db, **kwargs):
+        super(ExtractSchedulerSpecTask, self).__init__(addons=[ACTION],
+                                                       **kwargs)
+        self.db = db
+
+    def _populate_request_spec(self, context, volume_id, snapshot_id,
+                               image_id):
+        # Create the full request spec using the volume_id.
+        #
+        # NOTE(harlowja): this will fetch the volume from the database; if
+        # the volume has been deleted before we got here then this should fail.
+        #
+        # In the future we might want to have a lock on the volume_id so that
+        # the volume cannot be deleted while it's still being created.
+        if not volume_id:
+            msg = _("No volume_id provided to populate a request_spec from")
+            raise exception.InvalidInput(reason=msg)
+        volume_ref = self.db.volume_get(context, volume_id)
+        volume_type_id = volume_ref.get('volume_type_id')
+        vol_type = self.db.volume_type_get(context, volume_type_id)
+        return {
+            'volume_id': volume_id,
+            'snapshot_id': snapshot_id,
+            'image_id': image_id,
+            'volume_properties': {
+                'size': utils.as_int(volume_ref.get('size'), quiet=False),
+                'availability_zone': volume_ref.get('availability_zone'),
+                'volume_type_id': volume_type_id,
+            },
+            'volume_type': list(dict(vol_type).iteritems()),
+        }
+
+    def execute(self, context, request_spec, volume_id, snapshot_id,
+                image_id):
+        # For RPC version < 1.2 backward compatibility
+        if request_spec is None:
+            request_spec = self._populate_request_spec(context, volume_id,
+                                                       snapshot_id, image_id)
+        return {
+            'request_spec': request_spec,
+        }
+
+
+def get_flow(context, db, driver, request_spec=None,
+             filter_properties=None,
+             volume_id=None, snapshot_id=None, image_id=None):
+
+    """Constructs and returns the scheduler entrypoint flow.
+
+    This flow will do the following:
+
+    1. Injects keys & values for dependent tasks.
+    2. Extracts a scheduler specification from the provided inputs.
+    3. Attaches 2 tasks activated only on *failure* (one to update the db
+       status and one to notify on the MQ of the failure that occurred).
+    4. Uses the provided driver to then select and continue processing of
+       the volume request.
+    """
+    create_what = {
+        'context': context,
+        'raw_request_spec': request_spec,
+        'filter_properties': filter_properties,
+        'volume_id': volume_id,
+        'snapshot_id': snapshot_id,
+        'image_id': image_id,
+    }
+
+    flow_name = ACTION.replace(":", "_") + "_scheduler"
+    scheduler_flow = linear_flow.Flow(flow_name)
+
+    # This will extract and clean the spec from the starting values.
+    scheduler_flow.add(ExtractSchedulerSpecTask(
+        db,
+        rebind={'request_spec': 'raw_request_spec'}))
+
+    def schedule_create_volume(context, request_spec, filter_properties):
+
+        def _log_failure(cause):
+            LOG.error(_("Failed to schedule_create_volume: %(cause)s") %
+                      {'cause': cause})
+
+        def _notify_failure(cause):
+            """When scheduling fails send out a event that it failed."""
+            topic = "scheduler.create_volume"
+            payload = {
+                'request_spec': request_spec,
+                'volume_properties': request_spec.get('volume_properties', {}),
+                'volume_id': volume_id,
+                'state': 'error',
+                'method': 'create_volume',
+                'reason': cause,
+            }
+            try:
+                publisher_id = notifier.publisher_id("scheduler")
+                notifier.notify(context, publisher_id, topic, notifier.ERROR,
+                                payload)
+            except exception.CinderException:
+                LOG.exception(_("Failed notifying on %(topic)s "
+                                "payload %(payload)s") % {'topic': topic,
+                                                          'payload': payload})
+
+        try:
+            driver.schedule_create_volume(context, request_spec,
+                                          filter_properties)
+        except exception.NoValidHost as e:
+            # No valid host was found: notify on the scheduler queue, log
+            # that this happened, set the volume to error status, and
+            # *do not* reraise the error (there is no point in doing so).
+            _notify_failure(e)
+            _log_failure(e)
+            common.error_out_volume(context, db, volume_id, reason=e)
+        except Exception as e:
+            # Some other error happened: notify on the scheduler queue, log
+            # that this happened, set the volume to error status, and
+            # *do* reraise the error.
+            with excutils.save_and_reraise_exception():
+                _notify_failure(e)
+                _log_failure(e)
+                common.error_out_volume(context, db, volume_id, reason=e)
+
+    scheduler_flow.add(task.FunctorTask(schedule_create_volume))
+
+    # Now load (but do not run) the flow using the provided initial data.
+    return taskflow.engines.load(scheduler_flow, store=create_what)
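
Reviewer note: taskflow.engines.load() above returns an engine that has been
loaded but not yet run; the caller drives it. A minimal, hypothetical usage
sketch follows (context, db, driver, and volume_id stand for a request context,
the database API, the scheduler driver, and a volume UUID; none of this code is
part of the commit):

    from cinder.scheduler.flows import create_volume

    flow_engine = create_volume.get_flow(context, db, driver,
                                         # None exercises the RPC < 1.2
                                         # _populate_request_spec() path.
                                         request_spec=None,
                                         filter_properties={},
                                         volume_id=volume_id)
    # run() executes ExtractSchedulerSpecTask first (rebound to consume
    # 'raw_request_spec'), then the schedule_create_volume functor task.
    flow_engine.run()
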
diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py
index d6aa77e48ea64e50ede05874e454bc3c5a98e901..b7a25cc63d5f539218de02535d1b2df6c6205837 100644
@@ -30,7 +30,7 @@ from cinder.openstack.common import importutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common.notifier import api as notifier
 from cinder import quota
-from cinder.volume.flows.api import create_volume
+from cinder.scheduler.flows import create_volume
 from cinder.volume import rpcapi as volume_rpcapi
 
 
@@ -87,13 +87,13 @@ class SchedulerManager(manager.Manager):
                       filter_properties=None):
 
         try:
-            flow_engine = create_volume.get_scheduler_flow(context,
-                                                           db, self.driver,
-                                                           request_spec,
-                                                           filter_properties,
-                                                           volume_id,
-                                                           snapshot_id,
-                                                           image_id)
+            flow_engine = create_volume.get_flow(context,
+                                                 db, self.driver,
+                                                 request_spec,
+                                                 filter_properties,
+                                                 volume_id,
+                                                 snapshot_id,
+                                                 image_id)
         except Exception:
             LOG.exception(_("Failed to create scheduler manager volume flow"))
             raise exception.CinderException(
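
Reviewer note: the hunk above ends inside the error path; right after it
(outside the shown context) the manager presumably runs the freshly loaded
engine, roughly:

    flow_engine.run()
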
diff --git a/cinder/tests/test_create_volume_flow.py b/cinder/tests/test_create_volume_flow.py
index 3b5dc0215a9c65a6c3e9b60b540a10045a031383..13a3b8691b324664f988295d8b2a71992b99f5f0 100644
@@ -78,27 +78,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
         self.counter = float(0)
         self.stubs.Set(time, 'time', self.time_inc)
 
-    def test_exception_to_unicode(self):
-        class FakeException(Exception):
-            def __str__(self):
-                raise UnicodeError()
-
-        exc = Exception('error message')
-        ret = create_volume._exception_to_unicode(exc)
-        self.assertEqual(unicode, type(ret))
-        self.assertEqual(ret, 'error message')
-
-        exc = Exception('\xa5 error message')
-        ret = create_volume._exception_to_unicode(exc)
-        self.assertEqual(unicode, type(ret))
-        self.assertEqual(ret, ' error message')
-
-        unicodeExc = FakeException('\xa5 error message')
-        ret = create_volume._exception_to_unicode(unicodeExc)
-        self.assertEqual(unicode, type(ret))
-        self.assertEqual(ret, _("Caught '%(exception)s' exception.") %
-                         {'exception': 'FakeException'})
-
     def test_cast_create_volume(self):
 
         props = {}
@@ -107,9 +86,10 @@ class CreateVolumeFlowTestCase(test.TestCase):
                 'snapshot_id': None,
                 'image_id': None}
 
-        task = create_volume.VolumeCastTask(fake_scheduler_rpc_api(spec, self),
-                                            fake_volume_api(spec, self),
-                                            fake_db())
+        task = create_volume.VolumeCastTask(
+            fake_scheduler_rpc_api(spec, self),
+            fake_volume_api(spec, self),
+            fake_db())
 
         task._cast_create_volume(self.ctxt, spec, props)
 
@@ -118,9 +98,10 @@ class CreateVolumeFlowTestCase(test.TestCase):
                 'snapshot_id': 3,
                 'image_id': 4}
 
-        task = create_volume.VolumeCastTask(fake_scheduler_rpc_api(spec, self),
-                                            fake_volume_api(spec, self),
-                                            fake_db())
+        task = create_volume.VolumeCastTask(
+            fake_scheduler_rpc_api(spec, self),
+            fake_volume_api(spec, self),
+            fake_db())
 
         task._cast_create_volume(self.ctxt, spec, props)
 
diff --git a/cinder/tests/test_rbd.py b/cinder/tests/test_rbd.py
index 2edf62c1cd77bba90bb7ac7a3a06bb83fcaf3312..b70ffbfbb46146271e03ed28ca6fa5a917119fae 100644
@@ -34,7 +34,7 @@ from cinder.tests.test_volume import DriverTestCase
 from cinder import units
 from cinder.volume import configuration as conf
 import cinder.volume.drivers.rbd as driver
-from cinder.volume.flows.api import create_volume
+from cinder.volume.flows.manager import create_volume
 
 
 LOG = logging.getLogger(__name__)
diff --git a/cinder/volume/api.py b/cinder/volume/api.py
index 5991d571bfbd49f1f6da60a047ca6053b8796ccf..cc4dc55101474b809e539ff6be249a2074cb6504 100644
@@ -161,12 +161,12 @@ class API(base.Base):
         }
 
         try:
-            flow_engine = create_volume.get_api_flow(self.scheduler_rpcapi,
-                                                     self.volume_rpcapi,
-                                                     self.db,
-                                                     self.image_service,
-                                                     check_volume_az_zone,
-                                                     create_what)
+            flow_engine = create_volume.get_flow(self.scheduler_rpcapi,
+                                                 self.volume_rpcapi,
+                                                 self.db,
+                                                 self.image_service,
+                                                 check_volume_az_zone,
+                                                 create_what)
         except Exception:
             LOG.exception(_("Failed to create api volume flow"))
             raise exception.CinderException(
diff --git a/cinder/volume/flows/api/create_volume.py b/cinder/volume/flows/api/create_volume.py
index 35aad8daecb9e5a2de258f565442285d574c8403..4aad49e341a289beaa58a9ae44c33d1c35689494 100644
@@ -1,11 +1,3 @@
-# -*- coding: utf-8 -*-
-
-#    Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
-#    Copyright (c) 2013 OpenStack Foundation
-#    Copyright 2010 United States Government as represented by the
-#    Administrator of the National Aeronautics and Space Administration.
-#    All Rights Reserved.
-#
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    a copy of the License at
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import traceback
 
 from oslo.config import cfg
 import taskflow.engines
 from taskflow.patterns import linear_flow
-from taskflow import task
 from taskflow.utils import misc
 
 from cinder import exception
-from cinder.image import glance
-from cinder.openstack.common import excutils
+from cinder import flow_utils
 from cinder.openstack.common import log as logging
-from cinder.openstack.common.notifier import api as notifier
-from cinder.openstack.common import processutils
-from cinder.openstack.common import strutils
 from cinder.openstack.common import timeutils
 from cinder import policy
 from cinder import quota
 from cinder import units
 from cinder import utils
-from cinder.volume.flows import base
-from cinder.volume import utils as volume_utils
+from cinder.volume.flows import common
 from cinder.volume import volume_types
 
 LOG = logging.getLogger(__name__)
@@ -55,100 +40,8 @@ QUOTAS = quota.QUOTAS
 SNAPSHOT_PROCEED_STATUS = ('available',)
 SRC_VOL_PROCEED_STATUS = ('available', 'in-use',)
 
-# When a volume errors out we have the ability to save a piece of the exception
-# that caused said failure, but we don't want to save the whole message since
-# that could be very large, just save up to this number of characters.
-REASON_LENGTH = 128
-
-# These attributes we will attempt to save for the volume if they exist
-# in the source image metadata.
-IMAGE_ATTRIBUTES = (
-    'checksum',
-    'container_format',
-    'disk_format',
-    'min_disk',
-    'min_ram',
-    'size',
-)
-
-
-def _make_pretty_name(method):
-    """Makes a pretty name for a function/method."""
-    meth_pieces = [method.__name__]
-    # If its an instance method attempt to tack on the class name
-    if hasattr(method, 'im_self') and method.im_self is not None:
-        try:
-            meth_pieces.insert(0, method.im_self.__class__.__name__)
-        except AttributeError:
-            pass
-    return ".".join(meth_pieces)
-
-
-def _restore_source_status(context, db, volume_spec):
-    # NOTE(harlowja): Only if the type of the volume that was being created is
-    # the source volume type should we try to reset the source volume status
-    # back to its original value.
-    if not volume_spec or volume_spec.get('type') != 'source_vol':
-        return
-    source_volid = volume_spec['source_volid']
-    source_status = volume_spec['source_volstatus']
-    try:
-        LOG.debug(_('Restoring source %(source_volid)s status to %(status)s') %
-                  {'status': source_status, 'source_volid': source_volid})
-        db.volume_update(context, source_volid, {'status': source_status})
-    except exception.CinderException:
-        # NOTE(harlowja): Don't let this cause further exceptions since this is
-        # a non-critical failure.
-        LOG.exception(_("Failed setting source volume %(source_volid)s back to"
-                        " its initial %(source_status)s status") %
-                      {'source_status': source_status,
-                       'source_volid': source_volid})
-
-
-def _error_out_volume(context, db, volume_id, reason=None):
-
-    def _clean_reason(reason):
-        if reason is None:
-            return '???'
-        reason = str(reason)
-        if len(reason) <= REASON_LENGTH:
-            return reason
-        else:
-            return reason[0:REASON_LENGTH] + '...'
-
-    update = {
-        'status': 'error',
-    }
-    reason = _clean_reason(reason)
-    # TODO(harlowja): re-enable when we can support this in the database.
-    # if reason:
-    #     status['details'] = reason
-    try:
-        LOG.debug(_('Updating volume: %(volume_id)s with %(update)s'
-                    ' due to: %(reason)s') % {'volume_id': volume_id,
-                                              'reason': reason,
-                                              'update': update})
-        db.volume_update(context, volume_id, update)
-    except exception.CinderException:
-        # Don't let this cause further exceptions.
-        LOG.exception(_("Failed updating volume %(volume_id)s with"
-                        " %(update)s") % {'volume_id': volume_id,
-                                          'update': update})
-
-
-def _exception_to_unicode(exc):
-    try:
-        return unicode(exc)
-    except UnicodeError:
-        try:
-            return strutils.safe_decode(str(exc), errors='ignore')
-        except UnicodeError:
-            msg = (_("Caught '%(exception)s' exception.") %
-                   {"exception": exc.__class__.__name__})
-            return strutils.safe_decode(msg, errors='ignore')
-
 
-class ExtractVolumeRequestTask(base.CinderTask):
+class ExtractVolumeRequestTask(flow_utils.CinderTask):
     """Processes an api request values into a validated set of values.
 
     This tasks responsibility is to take in a set of inputs that will form
@@ -269,7 +162,7 @@ class ExtractVolumeRequestTask(base.CinderTask):
         size = utils.as_int(size)
         LOG.debug("Validating volume %(size)s using %(functors)s" %
                   {'size': size,
-                   'functors': ", ".join([_make_pretty_name(func)
+                   'functors': ", ".join([common.make_pretty_name(func)
                                           for func in validator_functors])})
         for func in validator_functors:
             func(size)
@@ -492,7 +385,7 @@ class ExtractVolumeRequestTask(base.CinderTask):
         }
 
 
-class EntryCreateTask(base.CinderTask):
+class EntryCreateTask(flow_utils.CinderTask):
     """Creates an entry for the given volume creation in the database.
 
     Reversion strategy: remove the volume_id created from the database.
@@ -570,7 +463,7 @@ class EntryCreateTask(base.CinderTask):
             LOG.exception(_("Failed destroying volume entry %s"), vol_id)
 
 
-class QuotaReserveTask(base.CinderTask):
+class QuotaReserveTask(flow_utils.CinderTask):
     """Reserves a single volume with the given size & the given volume type.
 
     Reversion strategy: rollback the quota reservation.
@@ -654,7 +547,7 @@ class QuotaReserveTask(base.CinderTask):
                             " %s reservations"), reservations)
 
 
-class QuotaCommitTask(base.CinderTask):
+class QuotaCommitTask(flow_utils.CinderTask):
     """Commits the reservation.
 
     Reversion strategy: N/A (the rollback will be handled by the task that did
@@ -698,7 +591,7 @@ class QuotaCommitTask(base.CinderTask):
                           volume['id'])
 
 
-class VolumeCastTask(base.CinderTask):
+class VolumeCastTask(flow_utils.CinderTask):
     """Performs a volume create cast to the scheduler or to the volume manager.
 
     This which will signal a transition of the api workflow to another child
@@ -780,8 +673,8 @@ class VolumeCastTask(base.CinderTask):
 
         # Restore the source volume status and set the volume to error status.
         volume_id = kwargs['volume_id']
-        _restore_source_status(context, self.db, kwargs)
-        _error_out_volume(context, self.db, volume_id)
+        common.restore_source_status(context, self.db, kwargs)
+        common.error_out_volume(context, self.db, volume_id)
         LOG.error(_("Volume %s: create failed"), volume_id)
         exc_info = False
         if all(flow_failures[-1].exc_info):
@@ -789,735 +682,10 @@ class VolumeCastTask(base.CinderTask):
         LOG.error(_('Unexpected build error:'), exc_info=exc_info)
 
 
-class OnFailureChangeStatusTask(base.CinderTask):
-    """Helper task that sets a volume id to status error.
-
-    Reversion strategy: On failure of any flow that includes this task the
-    volume id that is associated with this task will be have its status set
-    to error. If a volume specification is provided and the type of that spec
-    is a source volume said source volume will have its status status updated
-    as well.
-    """
-
-    def __init__(self, db):
-        super(OnFailureChangeStatusTask, self).__init__(addons=[ACTION])
-        self.db = db
-
-    def execute(self, context, volume_id, volume_spec):
-        # Save these items since we only use them if a reversion is triggered.
-        return {
-            'volume_id': volume_id,
-            'volume_spec': volume_spec,
-        }
-
-    def revert(self, context, result, flow_failures, **kwargs):
-        if isinstance(result, misc.Failure):
-            return
-        volume_spec = result.get('volume_spec')
-
-        # Restore the source volume status and set the volume to error status.
-        volume_id = result['volume_id']
-        _restore_source_status(context, self.db, volume_spec)
-        _error_out_volume(context, self.db, volume_id)
-        LOG.error(_("Volume %s: create failed"), volume_id)
-
-
-class OnFailureRescheduleTask(base.CinderTask):
-    """Triggers a rescheduling request to be sent when reverting occurs.
-
-    Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets
-    sent to the scheduler rpc api to allow for an attempt X of Y for scheduling
-    this volume elsewhere.
-    """
-
-    def __init__(self, reschedule_context, db, scheduler_rpcapi):
-        requires = ['filter_properties', 'image_id', 'request_spec',
-                    'snapshot_id', 'volume_id', 'context']
-        super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
-                                                      requires=requires)
-        self.scheduler_rpcapi = scheduler_rpcapi
-        self.db = db
-        self.reschedule_context = reschedule_context
-        # These exception types will trigger the volume to be set into error
-        # status rather than being rescheduled.
-        self.no_reschedule_types = [
-            # The volume has already finished being created when the exports
-            # occur, rescheduling would be bad if it happened due to exports
-            # not succeeding.
-            exception.ExportFailure,
-            # Image copying happens after volume creation so rescheduling due
-            # to copy failure will mean the same volume will be created at
-            # another place when it still exists locally.
-            exception.ImageCopyFailure,
-            # Metadata updates happen after the volume has been created so if
-            # they fail, rescheduling will likely attempt to create the volume
-            # on another machine when it still exists locally.
-            exception.MetadataCopyFailure,
-            exception.MetadataCreateFailure,
-            exception.MetadataUpdateFailure,
-            # The volume/snapshot has been removed from the database, that
-            # can not be fixed by rescheduling.
-            exception.VolumeNotFound,
-            exception.SnapshotNotFound,
-            exception.VolumeTypeNotFound,
-            exception.ImageUnacceptable,
-        ]
-
-    def execute(self, **kwargs):
-        pass
-
-    def _reschedule(self, context, cause, request_spec, filter_properties,
-                    snapshot_id, image_id, volume_id, **kwargs):
-        """Actions that happen during the rescheduling attempt occur here."""
-
-        create_volume = self.scheduler_rpcapi.create_volume
-        if not filter_properties:
-            filter_properties = {}
-        if 'retry' not in filter_properties:
-            filter_properties['retry'] = {}
-
-        retry_info = filter_properties['retry']
-        num_attempts = retry_info.get('num_attempts', 0)
-        request_spec['volume_id'] = volume_id
-
-        LOG.debug(_("Volume %(volume_id)s: re-scheduling %(method)s "
-                    "attempt %(num)d due to %(reason)s") %
-                  {'volume_id': volume_id,
-                   'method': _make_pretty_name(create_volume),
-                   'num': num_attempts,
-                   'reason': cause.exception_str})
-
-        if all(cause.exc_info):
-            # Stringify to avoid circular ref problem in json serialization
-            retry_info['exc'] = traceback.format_exception(*cause.exc_info)
-
-        return create_volume(context, CONF.volume_topic, volume_id,
-                             snapshot_id=snapshot_id, image_id=image_id,
-                             request_spec=request_spec,
-                             filter_properties=filter_properties)
-
-    def _post_reschedule(self, context, volume_id):
-        """Actions that happen after the rescheduling attempt occur here."""
-
-        LOG.debug(_("Volume %s: re-scheduled"), volume_id)
-
-    def _pre_reschedule(self, context, volume_id):
-        """Actions that happen before the rescheduling attempt occur here."""
-
-        try:
-            # Reset the volume state.
-            #
-            # NOTE(harlowja): this is awkward to be done here, shouldn't
-            # this happen at the scheduler itself and not before it gets
-            # sent to the scheduler? (since what happens if it never gets
-            # there??). It's almost like we need a status of 'on-the-way-to
-            # scheduler' in the future.
-            update = {
-                'status': 'creating',
-                'scheduled_at': timeutils.utcnow(),
-            }
-            LOG.debug(_("Updating volume %(volume_id)s with %(update)s") %
-                      {'update': update, 'volume_id': volume_id})
-            self.db.volume_update(context, volume_id, update)
-        except exception.CinderException:
-            # Don't let resetting the status cause the rescheduling to fail.
-            LOG.exception(_("Volume %s: resetting 'creating' status failed"),
-                          volume_id)
-
-    def revert(self, context, result, flow_failures, **kwargs):
-        # Check if we have a cause which can tell us not to reschedule.
-        for failure in flow_failures.values():
-            if failure.check(*self.no_reschedule_types):
-                return
-
-        volume_id = kwargs['volume_id']
-        # Use a different context when rescheduling.
-        if self.reschedule_context:
-            context = self.reschedule_context
-            try:
-                cause = list(flow_failures.values())[0]
-                self._pre_reschedule(context, volume_id)
-                self._reschedule(context, cause, **kwargs)
-                self._post_reschedule(context, volume_id)
-            except exception.CinderException:
-                LOG.exception(_("Volume %s: rescheduling failed"), volume_id)
-
-
-class ExtractSchedulerSpecTask(base.CinderTask):
-    """Extracts a spec object from a partial and/or incomplete request spec.
-
-    Reversion strategy: N/A
-    """
-
-    default_provides = set(['request_spec'])
-
-    def __init__(self, db, **kwargs):
-        super(ExtractSchedulerSpecTask, self).__init__(addons=[ACTION],
-                                                       **kwargs)
-        self.db = db
-
-    def _populate_request_spec(self, context, volume_id, snapshot_id,
-                               image_id):
-        # Create the full request spec using the volume_id.
-        #
-        # NOTE(harlowja): this will fetch the volume from the database, if
-        # the volume has been deleted before we got here then this should fail.
-        #
-        # In the future we might want to have a lock on the volume_id so that
-        # the volume can not be deleted while its still being created?
-        if not volume_id:
-            msg = _("No volume_id provided to populate a request_spec from")
-            raise exception.InvalidInput(reason=msg)
-        volume_ref = self.db.volume_get(context, volume_id)
-        volume_type_id = volume_ref.get('volume_type_id')
-        vol_type = self.db.volume_type_get(context, volume_type_id)
-        return {
-            'volume_id': volume_id,
-            'snapshot_id': snapshot_id,
-            'image_id': image_id,
-            'volume_properties': {
-                'size': utils.as_int(volume_ref.get('size'), quiet=False),
-                'availability_zone': volume_ref.get('availability_zone'),
-                'volume_type_id': volume_type_id,
-            },
-            'volume_type': list(dict(vol_type).iteritems()),
-        }
-
-    def execute(self, context, request_spec, volume_id, snapshot_id,
-                image_id):
-        # For RPC version < 1.2 backward compatibility
-        if request_spec is None:
-            request_spec = self._populate_request_spec(context, volume_id,
-                                                       snapshot_id, image_id)
-        return {
-            'request_spec': request_spec,
-        }
-
-
-class ExtractVolumeRefTask(base.CinderTask):
-    """Extracts volume reference for given volume id."""
-
-    default_provides = 'volume_ref'
-
-    def __init__(self, db):
-        super(ExtractVolumeRefTask, self).__init__(addons=[ACTION])
-        self.db = db
-
-    def execute(self, context, volume_id):
-        # NOTE(harlowja): this will fetch the volume from the database, if
-        # the volume has been deleted before we got here then this should fail.
-        #
-        # In the future we might want to have a lock on the volume_id so that
-        # the volume can not be deleted while its still being created?
-        volume_ref = self.db.volume_get(context, volume_id)
-
-        return volume_ref
-
-    def revert(self, context, volume_id, result, **kwargs):
-        if isinstance(result, misc.Failure):
-            return
-
-        _error_out_volume(context, self.db, volume_id)
-        LOG.error(_("Volume %s: create failed"), volume_id)
-
-
-class ExtractVolumeSpecTask(base.CinderTask):
-    """Extracts a spec of a volume to be created into a common structure.
-
-    This task extracts and organizes the input requirements into a common
-    and easier to analyze structure for later tasks to use. It will also
-    attach the underlying database volume reference which can be used by
-    other tasks to reference for further details about the volume to be.
-
-    Reversion strategy: N/A
-    """
-
-    default_provides = 'volume_spec'
-
-    def __init__(self, db):
-        requires = ['image_id', 'snapshot_id', 'source_volid']
-        super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION],
-                                                    requires=requires)
-        self.db = db
-
-    def execute(self, context, volume_ref, **kwargs):
-        get_remote_image_service = glance.get_remote_image_service
-
-        volume_name = volume_ref['name']
-        volume_size = utils.as_int(volume_ref['size'], quiet=False)
-
-        # Create a dictionary that will represent the volume to be so that
-        # later tasks can easily switch between the different types and create
-        # the volume according to the volume types specifications (which are
-        # represented in this dictionary).
-        specs = {
-            'status': volume_ref['status'],
-            'type': 'raw',  # This will have the type of the volume to be
-                            # created, which should be one of [raw, snap,
-                            # source_vol, image]
-            'volume_id': volume_ref['id'],
-            'volume_name': volume_name,
-            'volume_size': volume_size,
-        }
-
-        if kwargs.get('snapshot_id'):
-            # We are making a snapshot based volume instead of a raw volume.
-            specs.update({
-                'type': 'snap',
-                'snapshot_id': kwargs['snapshot_id'],
-            })
-        elif kwargs.get('source_volid'):
-            # We are making a source based volume instead of a raw volume.
-            #
-            # NOTE(harlowja): This will likely fail if the source volume
-            # disappeared by the time this call occurred.
-            source_volid = kwargs['source_volid']
-            source_volume_ref = self.db.volume_get(context, source_volid)
-            specs.update({
-                'source_volid': source_volid,
-                # This is captured incase we have to revert and we want to set
-                # back the source volume status to its original status. This
-                # may or may not be sketchy to do??
-                'source_volstatus': source_volume_ref['status'],
-                'type': 'source_vol',
-            })
-        elif kwargs.get('image_id'):
-            # We are making a image based volume instead of a raw volume.
-            image_href = kwargs['image_id']
-            image_service, image_id = get_remote_image_service(context,
-                                                               image_href)
-            specs.update({
-                'type': 'image',
-                'image_id': image_id,
-                'image_location': image_service.get_location(context,
-                                                             image_id),
-                'image_meta': image_service.show(context, image_id),
-                # Instead of refetching the image service later just save it.
-                #
-                # NOTE(harlowja): if we have to later recover this tasks output
-                # on another 'node' that this object won't be able to be
-                # serialized, so we will have to recreate this object on
-                # demand in the future.
-                'image_service': image_service,
-            })
-
-        return specs
-
-    def revert(self, context, result, **kwargs):
-        if isinstance(result, misc.Failure):
-            return
-        volume_spec = result.get('volume_spec')
-        # Restore the source volume status and set the volume to error status.
-        _restore_source_status(context, self.db, volume_spec)
-
-
-class NotifyVolumeActionTask(base.CinderTask):
-    """Performs a notification about the given volume when called.
-
-    Reversion strategy: N/A
-    """
-
-    def __init__(self, db, host, event_suffix):
-        super(NotifyVolumeActionTask, self).__init__(addons=[ACTION,
-                                                             event_suffix])
-        self.db = db
-        self.event_suffix = event_suffix
-        self.host = host
-
-    def execute(self, context, volume_ref):
-        volume_id = volume_ref['id']
-        try:
-            volume_utils.notify_about_volume_usage(context, volume_ref,
-                                                   self.event_suffix,
-                                                   host=self.host)
-        except exception.CinderException:
-            # If notification sending of volume database entry reading fails
-            # then we shouldn't error out the whole workflow since this is
-            # not always information that must be sent for volumes to operate
-            LOG.exception(_("Failed notifying about the volume"
-                            " action %(event)s for volume %(volume_id)s") %
-                          {'event': self.event_suffix,
-                           'volume_id': volume_id})
-
-
-class CreateVolumeFromSpecTask(base.CinderTask):
-    """Creates a volume from a provided specification.
-
-    Reversion strategy: N/A
-    """
-
-    default_provides = 'volume'
-
-    def __init__(self, db, host, driver):
-        super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
-        self.db = db
-        self.driver = driver
-        # This maps the different volume specification types into the methods
-        # that can create said volume type (aka this is a jump table).
-        self._create_func_mapping = {
-            'raw': self._create_raw_volume,
-            'snap': self._create_from_snapshot,
-            'source_vol': self._create_from_source_volume,
-            'image': self._create_from_image,
-        }
-        self.host = host
-
-    def _handle_bootable_volume_glance_meta(self, context, volume_id,
-                                            **kwargs):
-        """Enable bootable flag and properly handle glance metadata.
-
-        Caller should provide one and only one of snapshot_id,source_volid
-        and image_id. If an image_id specified, a image_meta should also be
-        provided, otherwise will be treated as an empty dictionary.
-        """
-
-        log_template = _("Copying metadata from %(src_type)s %(src_id)s to "
-                         "%(vol_id)s")
-        exception_template = _("Failed updating volume %(vol_id)s metadata"
-                               " using the provided %(src_type)s"
-                               " %(src_id)s metadata")
-        src_type = None
-        src_id = None
-        self._enable_bootable_flag(context, volume_id)
-        try:
-            if kwargs.get('snapshot_id'):
-                src_type = 'snapshot'
-                src_id = kwargs['snapshot_id']
-                snapshot_id = src_id
-                LOG.debug(log_template % {'src_type': src_type,
-                                          'src_id': src_id,
-                                          'vol_id': volume_id})
-                self.db.volume_glance_metadata_copy_to_volume(
-                    context, volume_id, snapshot_id)
-            elif kwargs.get('source_volid'):
-                src_type = 'source volume'
-                src_id = kwargs['source_volid']
-                source_volid = src_id
-                LOG.debug(log_template % {'src_type': src_type,
-                                          'src_id': src_id,
-                                          'vol_id': volume_id})
-                self.db.volume_glance_metadata_copy_from_volume_to_volume(
-                    context,
-                    source_volid,
-                    volume_id)
-            elif kwargs.get('image_id'):
-                src_type = 'image'
-                src_id = kwargs['image_id']
-                image_id = src_id
-                image_meta = kwargs.get('image_meta', {})
-                LOG.debug(log_template % {'src_type': src_type,
-                                          'src_id': src_id,
-                                          'vol_id': volume_id})
-                self._capture_volume_image_metadata(context, volume_id,
-                                                    image_id, image_meta)
-        except exception.CinderException as ex:
-            LOG.exception(exception_template % {'src_type': src_type,
-                                                'src_id': src_id,
-                                                'vol_id': volume_id})
-            raise exception.MetadataCopyFailure(reason=ex)
-
-    def _create_from_snapshot(self, context, volume_ref, snapshot_id,
-                              **kwargs):
-        volume_id = volume_ref['id']
-        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
-        model_update = self.driver.create_volume_from_snapshot(volume_ref,
-                                                               snapshot_ref)
-        # NOTE(harlowja): Subtasks would be useful here since after this
-        # point the volume has already been created and further failures
-        # will not destroy the volume (although they could in the future).
-        make_bootable = False
-        try:
-            originating_vref = self.db.volume_get(context,
-                                                  snapshot_ref['volume_id'])
-            make_bootable = originating_vref.bootable
-        except exception.CinderException as ex:
-            LOG.exception(_("Failed fetching snapshot %(snapshot_id)s bootable"
-                            " flag using the provided glance snapshot "
-                            "%(snapshot_ref_id)s volume reference") %
-                          {'snapshot_id': snapshot_id,
-                           'snapshot_ref_id': snapshot_ref['volume_id']})
-            raise exception.MetadataUpdateFailure(reason=ex)
-        if make_bootable:
-            self._handle_bootable_volume_glance_meta(context, volume_id,
-                                                     snapshot_id=snapshot_id)
-        return model_update
-
-    def _enable_bootable_flag(self, context, volume_id):
-        try:
-            LOG.debug(_('Marking volume %s as bootable'), volume_id)
-            self.db.volume_update(context, volume_id, {'bootable': True})
-        except exception.CinderException as ex:
-            LOG.exception(_("Failed updating volume %(volume_id)s bootable"
-                            " flag to true") % {'volume_id': volume_id})
-            raise exception.MetadataUpdateFailure(reason=ex)
-
-    def _create_from_source_volume(self, context, volume_ref,
-                                   source_volid, **kwargs):
-        # NOTE(harlowja): if the source volume has disappeared this will be our
-        # detection of that since this database call should fail.
-        #
-        # NOTE(harlowja): likely this is not the best place for this to happen
-        # and we should have proper locks on the source volume while actions
-        # that use the source volume are underway.
-        srcvol_ref = self.db.volume_get(context, source_volid)
-        model_update = self.driver.create_cloned_volume(volume_ref, srcvol_ref)
-        # NOTE(harlowja): Subtasks would be useful here since after this
-        # point the volume has already been created and further failures
-        # will not destroy the volume (although they could in the future).
-        if srcvol_ref.bootable:
-            self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
-                                                     source_volid=source_volid)
-        return model_update
-
-    def _copy_image_to_volume(self, context, volume_ref,
-                              image_id, image_location, image_service):
-        """Downloads Glance image to the specified volume."""
-        copy_image_to_volume = self.driver.copy_image_to_volume
-        volume_id = volume_ref['id']
-        LOG.debug(_("Attempting download of %(image_id)s (%(image_location)s)"
-                    " to volume %(volume_id)s") %
-                  {'image_id': image_id, 'volume_id': volume_id,
-                   'image_location': image_location})
-        try:
-            copy_image_to_volume(context, volume_ref, image_service, image_id)
-        except processutils.ProcessExecutionError as ex:
-            LOG.error(_("Failed to copy image %(image_id)s to volume: "
-                        "%(volume_id)s, error: %(error)s") %
-                      {'volume_id': volume_id,
-                       'error': ex.stderr, 'image_id': image_id})
-            raise exception.ImageCopyFailure(reason=ex.stderr)
-        except exception.ImageUnacceptable as ex:
-            LOG.error(_("Failed to copy image to volume: %(volume_id)s, "
-                        "error: %(error)s") % {'volume_id': volume_id,
-                                               'error': ex})
-            raise exception.ImageUnacceptable(ex)
-        except Exception as ex:
-            LOG.error(_("Failed to copy image %(image_id)s to "
-                        "volume: %(volume_id)s, error: %(error)s") %
-                      {'volume_id': volume_id, 'error': ex,
-                       'image_id': image_id})
-            if not isinstance(ex, exception.ImageCopyFailure):
-                raise exception.ImageCopyFailure(reason=ex)
-            else:
-                raise
-
-        LOG.debug(_("Downloaded image %(image_id)s (%(image_location)s)"
-                    " to volume %(volume_id)s successfully") %
-                  {'image_id': image_id, 'volume_id': volume_id,
-                   'image_location': image_location})
-
-    def _capture_volume_image_metadata(self, context, volume_id,
-                                       image_id, image_meta):
-
-        # Save some base attributes into the volume metadata
-        base_metadata = {
-            'image_id': image_id,
-        }
-        name = image_meta.get('name', None)
-        if name:
-            base_metadata['image_name'] = name
-
-        # Save some more attributes into the volume metadata from the image
-        # metadata
-        for key in IMAGE_ATTRIBUTES:
-            if key not in image_meta:
-                continue
-            value = image_meta.get(key, None)
-            if value is not None:
-                base_metadata[key] = value
-
-        # Save all the image metadata properties into the volume metadata
-        property_metadata = {}
-        image_properties = image_meta.get('properties', {})
-        for (key, value) in image_properties.items():
-            if value is not None:
-                property_metadata[key] = value
-
-        # NOTE(harlowja): The best way for this to happen would be in bulk,
-        # but that doesn't seem to exist (yet), so we go through one by one
-        # which means we can have partial create/update failure.
-        volume_metadata = dict(property_metadata)
-        volume_metadata.update(base_metadata)
-        LOG.debug(_("Creating volume glance metadata for volume %(volume_id)s"
-                    " backed by image %(image_id)s with: %(vol_metadata)s") %
-                  {'volume_id': volume_id, 'image_id': image_id,
-                   'vol_metadata': volume_metadata})
-        for (key, value) in volume_metadata.items():
-            try:
-                self.db.volume_glance_metadata_create(context, volume_id,
-                                                      key, value)
-            except exception.GlanceMetadataExists:
-                pass
-
-    def _create_from_image(self, context, volume_ref,
-                           image_location, image_id, image_meta,
-                           image_service, **kwargs):
-        LOG.debug(_("Cloning %(volume_id)s from image %(image_id)s "
-                    " at location %(image_location)s") %
-                  {'volume_id': volume_ref['id'],
-                   'image_location': image_location, 'image_id': image_id})
-        # Create the volume from an image.
-        #
-        # NOTE (singn): two params need to be returned
-        # dict containing provider_location for cloned volume
-        # and clone status.
-        model_update, cloned = self.driver.clone_image(
-            volume_ref, image_location, image_id, image_meta)
-        if not cloned:
-            # TODO(harlowja): what needs to be rolled back in the clone if this
-            # volume create fails?? Likely this should be a subflow or broken
-            # out task in the future. That will bring up the question of how
-            # do we make said subflow/task which is only triggered in the
-            # clone image 'path' resumable and revertable in the correct
-            # manner.
-            #
-            # Create the volume and then download the image onto the volume.
-            model_update = self.driver.create_volume(volume_ref)
-            updates = dict(model_update or dict(), status='downloading')
-            try:
-                volume_ref = self.db.volume_update(context,
-                                                   volume_ref['id'], updates)
-            except exception.CinderException:
-                LOG.exception(_("Failed updating volume %(volume_id)s with "
-                                "%(updates)s") %
-                              {'volume_id': volume_ref['id'],
-                               'updates': updates})
-            self._copy_image_to_volume(context, volume_ref,
-                                       image_id, image_location, image_service)
-
-        self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
-                                                 image_id=image_id,
-                                                 image_meta=image_meta)
-        return model_update
-
-    def _create_raw_volume(self, context, volume_ref, **kwargs):
-        return self.driver.create_volume(volume_ref)
-
-    def execute(self, context, volume_ref, volume_spec):
-        volume_spec = dict(volume_spec)
-        volume_id = volume_spec.pop('volume_id', None)
-
-        # we can't do anything if the driver didn't init
-        if not self.driver.initialized:
-            driver_name = self.driver.__class__.__name__
-            LOG.error(_("Unable to create volume. "
-                        "Volume driver %s not initialized") % driver_name)
-
-            # NOTE(flaper87): Set the error status before
-            # raising any exception.
-            self.db.volume_update(context, volume_id, dict(status='error'))
-            raise exception.DriverNotInitialized()
-
-        create_type = volume_spec.pop('type', None)
-        create_functor = self._create_func_mapping.get(create_type)
-        if not create_functor:
-            raise exception.VolumeTypeNotFound(volume_type_id=create_type)
-
-        if not volume_id:
-            volume_id = volume_ref['id']
-        LOG.info(_("Volume %(volume_id)s: being created using %(functor)s "
-                   "with specification: %(volume_spec)s") %
-                 {'volume_spec': volume_spec, 'volume_id': volume_id,
-                  'functor': _make_pretty_name(create_functor)})
-
-        # NOTE(vish): so we don't have to get volume from db again before
-        # passing it to the driver.
-        volume_ref['host'] = self.host
-
-        # Call the given functor to make the volume.
-        model_update = create_functor(context, volume_ref=volume_ref,
-                                      **volume_spec)
-
-        # Persist any model information provided on creation.
-        try:
-            if model_update:
-                volume_ref = self.db.volume_update(context, volume_ref['id'],
-                                                   model_update)
-        except exception.CinderException as ex:
-            # If somehow the update failed we want to ensure that the
-            # failure is logged (but not try rescheduling since the volume at
-            # this point has been created).
-            if model_update:
-                LOG.exception(_("Failed updating model of volume %(volume_id)s"
-                                " with creation provided model %(model)s") %
-                              {'volume_id': volume_id, 'model': model_update})
-                raise exception.ExportFailure(reason=ex)
-
-        # Persist any driver exported model information.
-        model_update = None
-        try:
-            LOG.debug(_("Volume %s: creating export"), volume_ref['id'])
-            model_update = self.driver.create_export(context, volume_ref)
-            if model_update:
-                self.db.volume_update(context, volume_ref['id'], model_update)
-        except exception.CinderException as ex:
-            # If somehow the read *or* create export failed we want to ensure
-            # that the failure is logged (but not try rescheduling since
-            # the volume at this point has been created).
-            #
-            # NOTE(harlowja): Notice that since the model_update is initially
-            # empty, the only way it will still be empty is if there is no
-            # model_update (which we don't care about) or there was an
-            # model_update and updating failed.
-            if model_update:
-                LOG.exception(_("Failed updating model of volume %(volume_id)s"
-                              " with driver provided model %(model)s") %
-                              {'volume_id': volume_id, 'model': model_update})
-                raise exception.ExportFailure(reason=ex)
-
-        return volume_ref
-
-
-class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
-    """On successful volume creation this will perform final volume actions.
-
-    When a volume is created successfully it is expected that MQ notifications
-    and database updates will occur to 'signal' to others that the volume is
-    now ready for usage. This task does those notifications and updates in a
-    reliable manner (not re-raising exceptions if said actions can not be
-    triggered).
-
-    Reversion strategy: N/A
-    """
-
-    def __init__(self, db, host, event_suffix):
-        super(CreateVolumeOnFinishTask, self).__init__(db, host, event_suffix)
-        self.status_translation = {
-            'migration_target_creating': 'migration_target',
-        }
-
-    def execute(self, context, volume, volume_spec):
-        volume_id = volume['id']
-        new_status = self.status_translation.get(volume_spec.get('status'),
-                                                 'available')
-        update = {
-            'status': new_status,
-            'launched_at': timeutils.utcnow(),
-        }
-        try:
-            # TODO(harlowja): is it acceptable to only log if this fails??
-            # or are there other side-effects that this will cause if the
-            # status isn't updated correctly (aka it will likely be stuck in
-            # 'building' if this fails)??
-            volume_ref = self.db.volume_update(context, volume_id, update)
-            # Now use the parent to notify.
-            super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
-        except exception.CinderException:
-            LOG.exception(_("Failed updating volume %(volume_id)s with "
-                            "%(update)s") % {'volume_id': volume_id,
-                                             'update': update})
-        # Even if the update fails, the volume is ready.
-        msg = _("Volume %(volume_name)s (%(volume_id)s): created successfully")
-        LOG.info(msg % {
-            'volume_name': volume_spec['volume_name'],
-            'volume_id': volume_id,
-        })
-
-
-def get_api_flow(scheduler_rpcapi, volume_rpcapi, db,
-                 image_service,
-                 az_check_functor,
-                 create_what):
+def get_flow(scheduler_rpcapi, volume_rpcapi, db,
+             image_service,
+             az_check_functor,
+             create_what):
     """Constructs and returns the api entrypoint flow.
 
     This flow will do the following:
@@ -1549,136 +717,3 @@ def get_api_flow(scheduler_rpcapi, volume_rpcapi, db,
 
     # Now load (but do not run) the flow using the provided initial data.
     return taskflow.engines.load(api_flow, store=create_what)
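For reviewers, a minimal sketch of the calling convention for the renamed entrypoint; every argument below is an illustrative placeholder for the real objects wired up in cinder/volume/api.py:

    # Hypothetical usage sketch (placeholder objects, not cinder code).
    flow_engine = get_flow(scheduler_rpcapi, volume_rpcapi, db,
                           image_service,
                           az_check_functor,
                           create_what)
    # The engine comes back loaded but not started; the caller runs it.
    flow_engine.run()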
-
-
-def get_scheduler_flow(context, db, driver, request_spec=None,
-                       filter_properties=None,
-                       volume_id=None, snapshot_id=None, image_id=None):
-
-    """Constructs and returns the scheduler entrypoint flow.
-
-    This flow will do the following:
-
-    1. Inject keys & values for dependent tasks.
-    2. Extracts a scheduler specification from the provided inputs.
-    3. Attaches 2 activated only on *failure* tasks (one to update the db
-       status and one to notify on the MQ of the failure that occurred).
-    4. Uses provided driver to to then select and continue processing of
-       volume request.
-    """
-    create_what = {
-        'context': context,
-        'raw_request_spec': request_spec,
-        'filter_properties': filter_properties,
-        'volume_id': volume_id,
-        'snapshot_id': snapshot_id,
-        'image_id': image_id,
-    }
-
-    flow_name = ACTION.replace(":", "_") + "_scheduler"
-    scheduler_flow = linear_flow.Flow(flow_name)
-
-    # This will extract and clean the spec from the starting values.
-    scheduler_flow.add(ExtractSchedulerSpecTask(
-        db,
-        rebind={'request_spec': 'raw_request_spec'}))
-
-    def schedule_create_volume(context, request_spec, filter_properties):
-
-        def _log_failure(cause):
-            LOG.error(_("Failed to schedule_create_volume: %(cause)s") %
-                      {'cause': cause})
-
-        def _notify_failure(cause):
-            """When scheduling fails send out a event that it failed."""
-            topic = "scheduler.create_volume"
-            payload = {
-                'request_spec': request_spec,
-                'volume_properties': request_spec.get('volume_properties', {}),
-                'volume_id': volume_id,
-                'state': 'error',
-                'method': 'create_volume',
-                'reason': cause,
-            }
-            try:
-                publisher_id = notifier.publisher_id("scheduler")
-                notifier.notify(context, publisher_id, topic, notifier.ERROR,
-                                payload)
-            except exception.CinderException:
-                LOG.exception(_("Failed notifying on %(topic)s "
-                                "payload %(payload)s") % {'topic': topic,
-                                                          'payload': payload})
-
-        try:
-            driver.schedule_create_volume(context, request_spec,
-                                          filter_properties)
-        except exception.NoValidHost as e:
-            # Not host found happened, notify on the scheduler queue and log
-            # that this happened and set the volume to errored out and
-            # *do not* reraise the error (since whats the point).
-            _notify_failure(e)
-            _log_failure(e)
-            _error_out_volume(context, db, volume_id, reason=e)
-        except Exception as e:
-            # Some other error happened, notify on the scheduler queue and log
-            # that this happened and set the volume to errored out and
-            # *do* reraise the error.
-            with excutils.save_and_reraise_exception():
-                _notify_failure(e)
-                _log_failure(e)
-                _error_out_volume(context, db, volume_id, reason=e)
-
-    scheduler_flow.add(task.FunctorTask(schedule_create_volume))
-
-    # Now load (but do not run) the flow using the provided initial data.
-    return taskflow.engines.load(scheduler_flow, store=create_what)
-
-
-def get_manager_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
-                     allow_reschedule, reschedule_context, request_spec,
-                     filter_properties, snapshot_id=None, image_id=None,
-                     source_volid=None):
-    """Constructs and returns the manager entrypoint flow.
-
-    This flow will do the following:
-
-    1. Determines if rescheduling is enabled (ahead of time).
-    2. Inject keys & values for dependent tasks.
-    3. Selects 1 of 2 activated only on *failure* tasks (one to update the db
-       status & notify or one to update the db status & notify & *reschedule*).
-    4. Extracts a volume specification from the provided inputs.
-    5. Notifies that the volume has start to be created.
-    6. Creates a volume from the extracted volume specification.
-    7. Attaches a on-success *only* task that notifies that the volume creation
-       has ended and performs further database status updates.
-    """
-
-    flow_name = ACTION.replace(":", "_") + "_manager"
-    volume_flow = linear_flow.Flow(flow_name)
-
-    # This injects the initial starting flow values into the workflow so that
-    # the dependency order of the tasks provides/requires can be correctly
-    # determined.
-    create_what = {
-        'context': context,
-        'filter_properties': filter_properties,
-        'image_id': image_id,
-        'request_spec': request_spec,
-        'snapshot_id': snapshot_id,
-        'source_volid': source_volid,
-        'volume_id': volume_id,
-    }
-
-    volume_flow.add(ExtractVolumeRefTask(db))
-
-    if allow_reschedule and request_spec:
-        volume_flow.add(OnFailureRescheduleTask(reschedule_context,
-                                                db, scheduler_rpcapi))
-
-    volume_flow.add(ExtractVolumeSpecTask(db),
-                    NotifyVolumeActionTask(db, host, "create.start"),
-                    CreateVolumeFromSpecTask(db, host, driver),
-                    CreateVolumeOnFinishTask(db, host, "create.end"))
-
-    # Now load (but do not run) the flow using the provided initial data.
-    return taskflow.engines.load(volume_flow, store=create_what)
diff --git a/cinder/volume/flows/common.py b/cinder/volume/flows/common.py
new file mode 100644 (file)
index 0000000..f5e34d4
--- /dev/null
@@ -0,0 +1,92 @@
+#    Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
+#    Copyright (c) 2013 OpenStack Foundation
+#    Copyright 2010 United States Government as represented by the
+#    Administrator of the National Aeronautics and Space Administration.
+#    All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+# When a volume errors out we have the ability to save a piece of the exception
+# that caused said failure. We don't want to save the whole message, since that
+# could be very large; just save up to this number of characters.
+REASON_LENGTH = 128
+
+
+def make_pretty_name(method):
+    """Makes a pretty name for a function/method."""
+    meth_pieces = [method.__name__]
+    # If it's an instance method, attempt to tack on the class name.
+    if hasattr(method, 'im_self') and method.im_self is not None:
+        try:
+            meth_pieces.insert(0, method.im_self.__class__.__name__)
+        except AttributeError:
+            pass
+    return ".".join(meth_pieces)
+
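A quick, self-contained illustration of what this helper produces; the sample class is invented for the example:

    # Illustrative only: im_self is the Python 2 bound-method attribute that
    # make_pretty_name() inspects.
    class Worker(object):
        def run(self):
            pass

    print(make_pretty_name(Worker().run))      # -> Worker.run
    print(make_pretty_name(make_pretty_name))  # -> make_pretty_name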
+
+def restore_source_status(context, db, volume_spec):
+    # NOTE(harlowja): Only reset the source volume status back to its
+    # original value when the volume being created is of the source
+    # volume type ('source_vol').
+    if not volume_spec or volume_spec.get('type') != 'source_vol':
+        return
+    source_volid = volume_spec['source_volid']
+    source_status = volume_spec['source_volstatus']
+    try:
+        LOG.debug(_('Restoring source %(source_volid)s status to %(status)s') %
+                  {'status': source_status, 'source_volid': source_volid})
+        db.volume_update(context, source_volid, {'status': source_status})
+    except exception.CinderException:
+        # NOTE(harlowja): Don't let this cause further exceptions since this is
+        # a non-critical failure.
+        LOG.exception(_("Failed setting source volume %(source_volid)s back to"
+                        " its initial %(source_status)s status") %
+                      {'source_status': source_status,
+                       'source_volid': source_volid})
+
+
+def error_out_volume(context, db, volume_id, reason=None):
+
+    def _clean_reason(reason):
+        if reason is None:
+            return '???'
+        reason = str(reason)
+        if len(reason) <= REASON_LENGTH:
+            return reason
+        else:
+            return reason[0:REASON_LENGTH] + '...'
+
+    update = {
+        'status': 'error',
+    }
+    reason = _clean_reason(reason)
+    # TODO(harlowja): re-enable when we can support this in the database.
+    # if reason:
+    #     status['details'] = reason
+    try:
+        LOG.debug(_('Updating volume: %(volume_id)s with %(update)s'
+                    ' due to: %(reason)s') % {'volume_id': volume_id,
+                                              'reason': reason,
+                                              'update': update})
+        db.volume_update(context, volume_id, update)
+    except exception.CinderException:
+        # Don't let this cause further exceptions.
+        LOG.exception(_("Failed updating volume %(volume_id)s with"
+                        " %(update)s") % {'volume_id': volume_id,
+                                          'update': update})
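Taken together, a revert path would use these helpers roughly as follows; ctx, db and the ids are placeholders in this sketch:

    # Hypothetical revert sequence (ctx/db stand in for the real request
    # context and database API).
    volume_spec = {'type': 'source_vol',
                   'source_volid': 'src-vol-uuid',
                   'source_volstatus': 'available'}
    # Put the clone source back to its pre-clone status...
    restore_source_status(ctx, db, volume_spec)
    # ...then error out the new volume; an oversized reason is clipped to
    # REASON_LENGTH characters plus '...'.
    error_out_volume(ctx, db, 'new-vol-uuid',
                     reason='driver failure: ' + 'x' * 500)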
diff --git a/cinder/volume/flows/manager/__init__.py b/cinder/volume/flows/manager/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py
new file mode 100644 (file)
index 0000000..de3cd33
--- /dev/null
@@ -0,0 +1,785 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import traceback
+
+from oslo.config import cfg
+import taskflow.engines
+from taskflow.patterns import linear_flow
+from taskflow.utils import misc
+
+from cinder import exception
+from cinder import flow_utils
+from cinder.image import glance
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import processutils
+from cinder.openstack.common import timeutils
+from cinder import utils
+from cinder.volume.flows import common
+from cinder.volume import utils as volume_utils
+
+LOG = logging.getLogger(__name__)
+
+ACTION = 'volume:create'
+CONF = cfg.CONF
+
+# These attributes we will attempt to save for the volume if they exist
+# in the source image metadata.
+IMAGE_ATTRIBUTES = (
+    'checksum',
+    'container_format',
+    'disk_format',
+    'min_disk',
+    'min_ram',
+    'size',
+)
+
+
+class OnFailureRescheduleTask(flow_utils.CinderTask):
+    """Triggers a rescheduling request to be sent when reverting occurs.
+
+    Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets
+    sent to the scheduler rpc api to allow for an attempt X of Y for scheduling
+    this volume elsewhere.
+    """
+
+    def __init__(self, reschedule_context, db, scheduler_rpcapi):
+        requires = ['filter_properties', 'image_id', 'request_spec',
+                    'snapshot_id', 'volume_id', 'context']
+        super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
+                                                      requires=requires)
+        self.scheduler_rpcapi = scheduler_rpcapi
+        self.db = db
+        self.reschedule_context = reschedule_context
+        # These exception types will trigger the volume to be set into error
+        # status rather than being rescheduled.
+        self.no_reschedule_types = [
+            # The volume has already finished being created when the exports
+            # occur; rescheduling would be bad if it happened due to exports
+            # not succeeding.
+            exception.ExportFailure,
+            # Image copying happens after volume creation so rescheduling due
+            # to copy failure will mean the same volume will be created at
+            # another place when it still exists locally.
+            exception.ImageCopyFailure,
+            # Metadata updates happen after the volume has been created so if
+            # they fail, rescheduling will likely attempt to create the volume
+            # on another machine when it still exists locally.
+            exception.MetadataCopyFailure,
+            exception.MetadataCreateFailure,
+            exception.MetadataUpdateFailure,
+            # The volume/snapshot has been removed from the database; that
+            # can not be fixed by rescheduling.
+            exception.VolumeNotFound,
+            exception.SnapshotNotFound,
+            exception.VolumeTypeNotFound,
+            exception.ImageUnacceptable,
+        ]
+
+    def execute(self, **kwargs):
+        pass
+
+    def _reschedule(self, context, cause, request_spec, filter_properties,
+                    snapshot_id, image_id, volume_id, **kwargs):
+        """Actions that happen during the rescheduling attempt occur here."""
+
+        create_volume = self.scheduler_rpcapi.create_volume
+        if not filter_properties:
+            filter_properties = {}
+        if 'retry' not in filter_properties:
+            filter_properties['retry'] = {}
+
+        retry_info = filter_properties['retry']
+        num_attempts = retry_info.get('num_attempts', 0)
+        request_spec['volume_id'] = volume_id
+
+        LOG.debug(_("Volume %(volume_id)s: re-scheduling %(method)s "
+                    "attempt %(num)d due to %(reason)s") %
+                  {'volume_id': volume_id,
+                   'method': common.make_pretty_name(create_volume),
+                   'num': num_attempts,
+                   'reason': cause.exception_str})
+
+        if all(cause.exc_info):
+            # Stringify to avoid circular ref problem in json serialization
+            retry_info['exc'] = traceback.format_exception(*cause.exc_info)
+
+        return create_volume(context, CONF.volume_topic, volume_id,
+                             snapshot_id=snapshot_id, image_id=image_id,
+                             request_spec=request_spec,
+                             filter_properties=filter_properties)
+
+    def _post_reschedule(self, context, volume_id):
+        """Actions that happen after the rescheduling attempt occur here."""
+
+        LOG.debug(_("Volume %s: re-scheduled"), volume_id)
+
+    def _pre_reschedule(self, context, volume_id):
+        """Actions that happen before the rescheduling attempt occur here."""
+
+        try:
+            # Reset the volume state.
+            #
+            # NOTE(harlowja): this is awkward to be done here, shouldn't
+            # this happen at the scheduler itself and not before it gets
+            # sent to the scheduler? (since what happens if it never gets
+            # there??). It's almost like we need a status of 'on-the-way-to
+            # scheduler' in the future.
+            update = {
+                'status': 'creating',
+                'scheduled_at': timeutils.utcnow(),
+            }
+            LOG.debug(_("Updating volume %(volume_id)s with %(update)s.") %
+                      {'update': update, 'volume_id': volume_id})
+            self.db.volume_update(context, volume_id, update)
+        except exception.CinderException:
+            # Don't let resetting the status cause the rescheduling to fail.
+            LOG.exception(_("Volume %s: resetting 'creating' status failed."),
+                          volume_id)
+
+    def revert(self, context, result, flow_failures, **kwargs):
+        # Check if we have a cause which can tell us not to reschedule.
+        for failure in flow_failures.values():
+            if failure.check(*self.no_reschedule_types):
+                return
+
+        volume_id = kwargs['volume_id']
+        # Use a different context when rescheduling.
+        if self.reschedule_context:
+            context = self.reschedule_context
+        try:
+            cause = list(flow_failures.values())[0]
+            self._pre_reschedule(context, volume_id)
+            self._reschedule(context, cause, **kwargs)
+            self._post_reschedule(context, volume_id)
+        except exception.CinderException:
+            LOG.exception(_("Volume %s: rescheduling failed"), volume_id)
+
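The early return in revert() leans on taskflow's Failure.check(); a small sketch of the behavior assumed here (0.1-era taskflow API; exception and misc are already imported at the top of this file, and the message is a placeholder):

    # Capture a failure the way the flow engine would.
    try:
        raise exception.ImageCopyFailure(reason='download blew up')
    except Exception:
        failure = misc.Failure()  # snapshots sys.exc_info()

    # ImageCopyFailure is listed in no_reschedule_types, so check() matches
    # and revert() bails out, leaving the volume errored rather than
    # rescheduled onto another backend.
    assert failure.check(exception.ImageCopyFailure) is not None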
+
+class ExtractSchedulerSpecTask(flow_utils.CinderTask):
+    """Extracts a spec object from a partial and/or incomplete request spec.
+
+    Reversion strategy: N/A
+    """
+
+    default_provides = set(['request_spec'])
+
+    def __init__(self, db, **kwargs):
+        super(ExtractSchedulerSpecTask, self).__init__(addons=[ACTION],
+                                                       **kwargs)
+        self.db = db
+
+    def _populate_request_spec(self, context, volume_id, snapshot_id,
+                               image_id):
+        # Create the full request spec using the volume_id.
+        #
+        # NOTE(harlowja): this will fetch the volume from the database; if
+        # the volume has been deleted before we got here then this should fail.
+        #
+        # In the future we might want to have a lock on the volume_id so that
+        # the volume can not be deleted while it's still being created?
+        if not volume_id:
+            msg = _("No volume_id provided to populate a request_spec from")
+            raise exception.InvalidInput(reason=msg)
+        volume_ref = self.db.volume_get(context, volume_id)
+        volume_type_id = volume_ref.get('volume_type_id')
+        vol_type = self.db.volume_type_get(context, volume_type_id)
+        return {
+            'volume_id': volume_id,
+            'snapshot_id': snapshot_id,
+            'image_id': image_id,
+            'volume_properties': {
+                'size': utils.as_int(volume_ref.get('size'), quiet=False),
+                'availability_zone': volume_ref.get('availability_zone'),
+                'volume_type_id': volume_type_id,
+            },
+            'volume_type': list(dict(vol_type).iteritems()),
+        }
+
+    def execute(self, context, request_spec, volume_id, snapshot_id,
+                image_id):
+        # For RPC version < 1.2 backward compatibility
+        if request_spec is None:
+            request_spec = self._populate_request_spec(context, volume_id,
+                                                       snapshot_id, image_id)
+        return {
+            'request_spec': request_spec,
+        }
+
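The request_spec fallback keeps pre-1.2 RPC callers working; the two accepted call shapes look roughly like this (ctx and the ids are placeholders):

    task = ExtractSchedulerSpecTask(db)
    # RPC >= 1.2: the caller already built a request_spec; pass it through.
    task.execute(ctx, request_spec={'volume_id': 'vol-uuid'},
                 volume_id='vol-uuid', snapshot_id=None, image_id=None)
    # RPC < 1.2: request_spec is None, so one is rebuilt from the volume row
    # (this raises InvalidInput if no volume_id was sent either).
    task.execute(ctx, request_spec=None, volume_id='vol-uuid',
                 snapshot_id=None, image_id=None)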
+
+class ExtractVolumeRefTask(flow_utils.CinderTask):
+    """Extracts volume reference for given volume id."""
+
+    default_provides = 'volume_ref'
+
+    def __init__(self, db):
+        super(ExtractVolumeRefTask, self).__init__(addons=[ACTION])
+        self.db = db
+
+    def execute(self, context, volume_id):
+        # NOTE(harlowja): this will fetch the volume from the database; if
+        # the volume has been deleted before we got here then this should fail.
+        #
+        # In the future we might want to have a lock on the volume_id so that
+        # the volume can not be deleted while it's still being created?
+        volume_ref = self.db.volume_get(context, volume_id)
+
+        return volume_ref
+
+    def revert(self, context, volume_id, result, **kwargs):
+        if isinstance(result, misc.Failure):
+            return
+
+        common.error_out_volume(context, self.db, volume_id)
+        LOG.error(_("Volume %s: create failed"), volume_id)
+
+
+class ExtractVolumeSpecTask(flow_utils.CinderTask):
+    """Extracts a spec of a volume to be created into a common structure.
+
+    This task extracts and organizes the input requirements into a common
+    and easier to analyze structure for later tasks to use. It will also
+    attach the underlying database volume reference, which other tasks can
+    use to look up further details about the volume to be.
+
+    Reversion strategy: N/A
+    """
+
+    default_provides = 'volume_spec'
+
+    def __init__(self, db):
+        requires = ['image_id', 'snapshot_id', 'source_volid']
+        super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION],
+                                                    requires=requires)
+        self.db = db
+
+    def execute(self, context, volume_ref, **kwargs):
+        get_remote_image_service = glance.get_remote_image_service
+
+        volume_name = volume_ref['name']
+        volume_size = utils.as_int(volume_ref['size'], quiet=False)
+
+        # Create a dictionary that will represent the volume to be so that
+        # later tasks can easily switch between the different types and create
+        # the volume according to each type's specification (which is
+        # represented in this dictionary).
+        specs = {
+            'status': volume_ref['status'],
+            'type': 'raw',  # This will have the type of the volume to be
+                            # created, which should be one of [raw, snap,
+                            # source_vol, image]
+            'volume_id': volume_ref['id'],
+            'volume_name': volume_name,
+            'volume_size': volume_size,
+        }
+
+        if kwargs.get('snapshot_id'):
+            # We are making a snapshot-based volume instead of a raw volume.
+            specs.update({
+                'type': 'snap',
+                'snapshot_id': kwargs['snapshot_id'],
+            })
+        elif kwargs.get('source_volid'):
+            # We are making a source-based volume instead of a raw volume.
+            #
+            # NOTE(harlowja): This will likely fail if the source volume
+            # disappeared by the time this call occurred.
+            source_volid = kwargs['source_volid']
+            source_volume_ref = self.db.volume_get(context, source_volid)
+            specs.update({
+                'source_volid': source_volid,
+                # This is captured in case we have to revert and want to set
+                # the source volume status back to its original value. This
+                # may or may not be sketchy to do??
+                'source_volstatus': source_volume_ref['status'],
+                'type': 'source_vol',
+            })
+        elif kwargs.get('image_id'):
+            # We are making an image-based volume instead of a raw volume.
+            image_href = kwargs['image_id']
+            image_service, image_id = get_remote_image_service(context,
+                                                               image_href)
+            specs.update({
+                'type': 'image',
+                'image_id': image_id,
+                'image_location': image_service.get_location(context,
+                                                             image_id),
+                'image_meta': image_service.show(context, image_id),
+                # Instead of refetching the image service later, just save it.
+                #
+                # NOTE(harlowja): if we ever have to recover this task's output
+                # on another 'node', this object won't be serializable, so we
+                # will have to recreate this object on demand in the future.
+                'image_service': image_service,
+            })
+
+        return specs
+
+    def revert(self, context, result, **kwargs):
+        if isinstance(result, misc.Failure):
+            return
+        volume_spec = result.get('volume_spec')
+        # Restore the source volume status and set the volume to error status.
+        common.restore_source_status(context, self.db, volume_spec)
+
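For reference, the spec shapes this task can emit; the values are illustrative placeholders, and the 'type' key is what the manager-side create task later dispatches on:

    raw_spec = {'status': 'creating', 'type': 'raw',
                'volume_id': 'vol-uuid', 'volume_name': 'volume-vol-uuid',
                'volume_size': 1}
    snap_spec = dict(raw_spec, type='snap', snapshot_id='snap-uuid')
    clone_spec = dict(raw_spec, type='source_vol',
                      source_volid='src-vol-uuid',
                      source_volstatus='available')
    # The 'image' variant additionally carries image_id, image_location,
    # image_meta and the (non-serializable) image_service handle.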
+
+class NotifyVolumeActionTask(flow_utils.CinderTask):
+    """Performs a notification about the given volume when called.
+
+    Reversion strategy: N/A
+    """
+
+    def __init__(self, db, host, event_suffix):
+        super(NotifyVolumeActionTask, self).__init__(addons=[ACTION,
+                                                             event_suffix])
+        self.db = db
+        self.event_suffix = event_suffix
+        self.host = host
+
+    def execute(self, context, volume_ref):
+        volume_id = volume_ref['id']
+        try:
+            volume_utils.notify_about_volume_usage(context, volume_ref,
+                                                   self.event_suffix,
+                                                   host=self.host)
+        except exception.CinderException:
+            # If sending the notification or reading the volume database entry
+            # fails, we shouldn't error out the whole workflow, since this is
+            # not always information that must be sent for volumes to operate.
+            LOG.exception(_("Failed notifying about the volume"
+                            " action %(event)s for volume %(volume_id)s") %
+                          {'event': self.event_suffix,
+                           'volume_id': volume_id})
+
+
+class CreateVolumeFromSpecTask(flow_utils.CinderTask):
+    """Creates a volume from a provided specification.
+
+    Reversion strategy: N/A
+    """
+
+    default_provides = 'volume'
+
+    def __init__(self, db, host, driver):
+        super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
+        self.db = db
+        self.driver = driver
+        # This maps the different volume specification types into the methods
+        # that can create said volume type (aka this is a jump table).
+        self._create_func_mapping = {
+            'raw': self._create_raw_volume,
+            'snap': self._create_from_snapshot,
+            'source_vol': self._create_from_source_volume,
+            'image': self._create_from_image,
+        }
+        self.host = host
+
+    def _handle_bootable_volume_glance_meta(self, context, volume_id,
+                                            **kwargs):
+        """Enable bootable flag and properly handle glance metadata.
+
+        Caller should provide one and only one of snapshot_id, source_volid
+        and image_id. If an image_id is specified, an image_meta should also
+        be provided; otherwise it will be treated as an empty dictionary.
+        """
+
+        log_template = _("Copying metadata from %(src_type)s %(src_id)s to "
+                         "%(vol_id)s.")
+        exception_template = _("Failed updating volume %(vol_id)s metadata"
+                               " using the provided %(src_type)s"
+                               " %(src_id)s metadata")
+        src_type = None
+        src_id = None
+        self._enable_bootable_flag(context, volume_id)
+        try:
+            if kwargs.get('snapshot_id'):
+                src_type = 'snapshot'
+                src_id = kwargs['snapshot_id']
+                snapshot_id = src_id
+                LOG.debug(log_template % {'src_type': src_type,
+                                          'src_id': src_id,
+                                          'vol_id': volume_id})
+                self.db.volume_glance_metadata_copy_to_volume(
+                    context, volume_id, snapshot_id)
+            elif kwargs.get('source_volid'):
+                src_type = 'source volume'
+                src_id = kwargs['source_volid']
+                source_volid = src_id
+                LOG.debug(log_template % {'src_type': src_type,
+                                          'src_id': src_id,
+                                          'vol_id': volume_id})
+                self.db.volume_glance_metadata_copy_from_volume_to_volume(
+                    context,
+                    source_volid,
+                    volume_id)
+            elif kwargs.get('image_id'):
+                src_type = 'image'
+                src_id = kwargs['image_id']
+                image_id = src_id
+                image_meta = kwargs.get('image_meta', {})
+                LOG.debug(log_template % {'src_type': src_type,
+                                          'src_id': src_id,
+                                          'vol_id': volume_id})
+                self._capture_volume_image_metadata(context, volume_id,
+                                                    image_id, image_meta)
+        except exception.CinderException as ex:
+            LOG.exception(exception_template % {'src_type': src_type,
+                                                'src_id': src_id,
+                                                'vol_id': volume_id})
+            raise exception.MetadataCopyFailure(reason=ex)
+
+    def _create_from_snapshot(self, context, volume_ref, snapshot_id,
+                              **kwargs):
+        volume_id = volume_ref['id']
+        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+        model_update = self.driver.create_volume_from_snapshot(volume_ref,
+                                                               snapshot_ref)
+        # NOTE(harlowja): Subtasks would be useful here since after this
+        # point the volume has already been created and further failures
+        # will not destroy the volume (although they could in the future).
+        make_bootable = False
+        try:
+            originating_vref = self.db.volume_get(context,
+                                                  snapshot_ref['volume_id'])
+            make_bootable = originating_vref.bootable
+        except exception.CinderException as ex:
+            LOG.exception(_("Failed fetching snapshot %(snapshot_id)s bootable"
+                            " flag using the provided glance snapshot "
+                            "%(snapshot_ref_id)s volume reference") %
+                          {'snapshot_id': snapshot_id,
+                           'snapshot_ref_id': snapshot_ref['volume_id']})
+            raise exception.MetadataUpdateFailure(reason=ex)
+        if make_bootable:
+            self._handle_bootable_volume_glance_meta(context, volume_id,
+                                                     snapshot_id=snapshot_id)
+        return model_update
+
+    def _enable_bootable_flag(self, context, volume_id):
+        try:
+            LOG.debug(_('Marking volume %s as bootable.'), volume_id)
+            self.db.volume_update(context, volume_id, {'bootable': True})
+        except exception.CinderException as ex:
+            LOG.exception(_("Failed updating volume %(volume_id)s bootable"
+                            " flag to true") % {'volume_id': volume_id})
+            raise exception.MetadataUpdateFailure(reason=ex)
+
+    def _create_from_source_volume(self, context, volume_ref,
+                                   source_volid, **kwargs):
+        # NOTE(harlowja): if the source volume has disappeared this will be our
+        # detection of that since this database call should fail.
+        #
+        # NOTE(harlowja): likely this is not the best place for this to happen
+        # and we should have proper locks on the source volume while actions
+        # that use the source volume are underway.
+        srcvol_ref = self.db.volume_get(context, source_volid)
+        model_update = self.driver.create_cloned_volume(volume_ref, srcvol_ref)
+        # NOTE(harlowja): Subtasks would be useful here since after this
+        # point the volume has already been created and further failures
+        # will not destroy the volume (although they could in the future).
+        if srcvol_ref.bootable:
+            self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
+                                                     source_volid=source_volid)
+        return model_update
+
+    def _copy_image_to_volume(self, context, volume_ref,
+                              image_id, image_location, image_service):
+        """Downloads Glance image to the specified volume."""
+        copy_image_to_volume = self.driver.copy_image_to_volume
+        volume_id = volume_ref['id']
+        LOG.debug(_("Attempting download of %(image_id)s (%(image_location)s)"
+                    " to volume %(volume_id)s.") %
+                  {'image_id': image_id, 'volume_id': volume_id,
+                   'image_location': image_location})
+        try:
+            copy_image_to_volume(context, volume_ref, image_service, image_id)
+        except processutils.ProcessExecutionError as ex:
+            LOG.error(_("Failed to copy image %(image_id)s to volume: "
+                        "%(volume_id)s, error: %(error)s") %
+                      {'volume_id': volume_id,
+                       'error': ex.stderr, 'image_id': image_id})
+            raise exception.ImageCopyFailure(reason=ex.stderr)
+        except exception.ImageUnacceptable as ex:
+            LOG.error(_("Failed to copy image to volume: %(volume_id)s, "
+                        "error: %(error)s") % {'volume_id': volume_id,
+                                               'error': ex})
+            raise exception.ImageUnacceptable(ex)
+        except Exception as ex:
+            LOG.error(_("Failed to copy image %(image_id)s to "
+                        "volume: %(volume_id)s, error: %(error)s") %
+                      {'volume_id': volume_id, 'error': ex,
+                       'image_id': image_id})
+            if not isinstance(ex, exception.ImageCopyFailure):
+                raise exception.ImageCopyFailure(reason=ex)
+            else:
+                raise
+
+        LOG.debug(_("Downloaded image %(image_id)s (%(image_location)s)"
+                    " to volume %(volume_id)s successfully.") %
+                  {'image_id': image_id, 'volume_id': volume_id,
+                   'image_location': image_location})
+
+    def _capture_volume_image_metadata(self, context, volume_id,
+                                       image_id, image_meta):
+
+        # Save some base attributes into the volume metadata
+        base_metadata = {
+            'image_id': image_id,
+        }
+        name = image_meta.get('name', None)
+        if name:
+            base_metadata['image_name'] = name
+
+        # Save some more attributes into the volume metadata from the image
+        # metadata
+        for key in IMAGE_ATTRIBUTES:
+            if key not in image_meta:
+                continue
+            value = image_meta.get(key, None)
+            if value is not None:
+                base_metadata[key] = value
+
+        # Save all the image metadata properties into the volume metadata
+        property_metadata = {}
+        image_properties = image_meta.get('properties', {})
+        for (key, value) in image_properties.items():
+            if value is not None:
+                property_metadata[key] = value
+
+        # NOTE(harlowja): The best way for this to happen would be in bulk,
+        # but that doesn't seem to exist (yet), so we go through one by one
+        # which means we can have partial create/update failure.
+        volume_metadata = dict(property_metadata)
+        volume_metadata.update(base_metadata)
+        LOG.debug(_("Creating volume glance metadata for volume %(volume_id)s"
+                    " backed by image %(image_id)s with: %(vol_metadata)s.") %
+                  {'volume_id': volume_id, 'image_id': image_id,
+                   'vol_metadata': volume_metadata})
+        for (key, value) in volume_metadata.items():
+            try:
+                self.db.volume_glance_metadata_create(context, volume_id,
+                                                      key, value)
+            except exception.GlanceMetadataExists:
+                pass
+
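A worked example of the merge performed above, using a hypothetical Glance image:

    # Hypothetical image metadata as returned by the image service.
    image_meta = {'name': 'fedora-20', 'checksum': 'abc123',
                  'disk_format': 'qcow2', 'min_disk': 0,
                  'properties': {'os_distro': 'fedora'}}
    base = {'image_id': 'image-uuid', 'image_name': image_meta['name']}
    base.update((k, image_meta[k]) for k in IMAGE_ATTRIBUTES
                if image_meta.get(k) is not None)
    props = dict((k, v) for k, v in image_meta['properties'].items()
                 if v is not None)
    # Properties first, base attributes win on collision; each resulting key
    # becomes one volume_glance_metadata_create() row.
    volume_metadata = dict(props, **base)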
+    def _create_from_image(self, context, volume_ref,
+                           image_location, image_id, image_meta,
+                           image_service, **kwargs):
+        LOG.debug(_("Cloning %(volume_id)s from image %(image_id)s "
+                    " at location %(image_location)s.") %
+                  {'volume_id': volume_ref['id'],
+                   'image_location': image_location, 'image_id': image_id})
+        # Create the volume from an image.
+        #
+        # NOTE(singn): two params need to be returned: a dict containing
+        # provider_location for the cloned volume, and the clone status.
+        model_update, cloned = self.driver.clone_image(
+            volume_ref, image_location, image_id, image_meta)
+        if not cloned:
+            # TODO(harlowja): what needs to be rolled back in the clone if this
+            # volume create fails?? Likely this should be a subflow or broken
+            # out task in the future. That will bring up the question of how
+            # do we make said subflow/task which is only triggered in the
+            # clone image 'path' resumable and revertable in the correct
+            # manner.
+            #
+            # Create the volume and then download the image onto the volume.
+            model_update = self.driver.create_volume(volume_ref)
+            updates = dict(model_update or dict(), status='downloading')
+            try:
+                volume_ref = self.db.volume_update(context,
+                                                   volume_ref['id'], updates)
+            except exception.CinderException:
+                LOG.exception(_("Failed updating volume %(volume_id)s with "
+                                "%(updates)s") %
+                              {'volume_id': volume_ref['id'],
+                               'updates': updates})
+            self._copy_image_to_volume(context, volume_ref,
+                                       image_id, image_location, image_service)
+
+        self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
+                                                 image_id=image_id,
+                                                 image_meta=image_meta)
+        return model_update
+
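The note above assumes a two-value contract from driver.clone_image(); a hypothetical driver stub makes it explicit:

    # Hypothetical stub: clone_image() returns a (model_update, cloned) pair,
    # where 'cloned' reports whether the driver satisfied the request
    # directly and model_update may be None.
    class NoCloneDriver(object):
        def clone_image(self, volume_ref, image_location, image_id,
                        image_meta):
            # Cannot clone straight from the image store, so the flow falls
            # back to create_volume() plus copy_image_to_volume().
            return None, False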
+    def _create_raw_volume(self, context, volume_ref, **kwargs):
+        return self.driver.create_volume(volume_ref)
+
+    def execute(self, context, volume_ref, volume_spec):
+        volume_spec = dict(volume_spec)
+        volume_id = volume_spec.pop('volume_id', None)
+
+        # We can't do anything if the driver didn't initialize.
+        if not self.driver.initialized:
+            driver_name = self.driver.__class__.__name__
+            LOG.error(_("Unable to create volume. "
+                        "Volume driver %s not initialized") % driver_name)
+            # NOTE(flaper87): Set the error status before
+            # raising any exception.
+            self.db.volume_update(context, volume_id, dict(status='error'))
+            raise exception.DriverNotInitialized()
+
+        create_type = volume_spec.pop('type', None)
+        create_functor = self._create_func_mapping.get(create_type)
+        if not create_functor:
+            raise exception.VolumeTypeNotFound(volume_type_id=create_type)
+
+        if not volume_id:
+            volume_id = volume_ref['id']
+        LOG.info(_("Volume %(volume_id)s: being created using %(functor)s "
+                   "with specification: %(volume_spec)s") %
+                 {'volume_spec': volume_spec, 'volume_id': volume_id,
+                  'functor': common.make_pretty_name(create_functor)})
+
+        # NOTE(vish): so we don't have to get volume from db again before
+        # passing it to the driver.
+        volume_ref['host'] = self.host
+
+        # Call the given functor to make the volume.
+        model_update = create_functor(context, volume_ref=volume_ref,
+                                      **volume_spec)
+
+        # Persist any model information provided on creation.
+        try:
+            if model_update:
+                volume_ref = self.db.volume_update(context, volume_ref['id'],
+                                                   model_update)
+        except exception.CinderException as ex:
+            # If somehow the update failed we want to ensure that the
+            # failure is logged (but not try rescheduling since the volume at
+            # this point has been created).
+            if model_update:
+                LOG.exception(_("Failed updating model of volume %(volume_id)s"
+                                " with creation provided model %(model)s") %
+                              {'volume_id': volume_id, 'model': model_update})
+                raise exception.ExportFailure(reason=ex)
+
+        # Persist any driver exported model information.
+        model_update = None
+        try:
+            LOG.debug(_("Volume %s: creating export"), volume_ref['id'])
+            model_update = self.driver.create_export(context, volume_ref)
+            if model_update:
+                self.db.volume_update(context, volume_ref['id'], model_update)
+        except exception.CinderException as ex:
+            # If somehow the read *or* create export failed we want to ensure
+            # that the failure is logged (but not try rescheduling since
+            # the volume at this point has been created).
+            #
+            # NOTE(harlowja): Notice that since the model_update is initially
+            # empty, the only way it will still be empty is if there is no
+            # model_update (which we don't care about) or there was a
+            # model_update and updating failed.
+            if model_update:
+                LOG.exception(_("Failed updating model of volume %(volume_id)s"
+                              " with driver provided model %(model)s") %
+                              {'volume_id': volume_id, 'model': model_update})
+                raise exception.ExportFailure(reason=ex)
+
+        return volume_ref
+
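The dispatch in execute() is a plain jump table; a stripped-down, self-contained sketch of the pattern (toy functions, not cinder code):

    def create_raw(**kwargs):
        return 'raw volume'

    def create_from_snapshot(snapshot_id, **kwargs):
        return 'volume from %s' % snapshot_id

    mapping = {'raw': create_raw, 'snap': create_from_snapshot}

    spec = {'type': 'snap', 'snapshot_id': 'snap-uuid'}
    functor = mapping.get(spec.pop('type'))
    if not functor:
        raise ValueError('unknown create type')
    # Remaining spec keys become keyword arguments, mirroring execute().
    print(functor(**spec))  # -> volume from snap-uuid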
+
+class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
+    """On successful volume creation this will perform final volume actions.
+
+    When a volume is created successfully it is expected that MQ notifications
+    and database updates will occur to 'signal' to others that the volume is
+    now ready for usage. This task does those notifications and updates in a
+    reliable manner (not re-raising exceptions if said actions can not be
+    triggered).
+
+    Reversion strategy: N/A
+    """
+
+    def __init__(self, db, host, event_suffix):
+        super(CreateVolumeOnFinishTask, self).__init__(db, host, event_suffix)
+        self.status_translation = {
+            'migration_target_creating': 'migration_target',
+        }
+
+    def execute(self, context, volume, volume_spec):
+        volume_id = volume['id']
+        new_status = self.status_translation.get(volume_spec.get('status'),
+                                                 'available')
+        update = {
+            'status': new_status,
+            'launched_at': timeutils.utcnow(),
+        }
+        try:
+            # TODO(harlowja): is it acceptable to only log if this fails??
+            # or are there other side-effects that this will cause if the
+            # status isn't updated correctly (aka it will likely be stuck in
+            # 'building' if this fails)??
+            volume_ref = self.db.volume_update(context, volume_id, update)
+            # Now use the parent to notify.
+            super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
+        except exception.CinderException:
+            LOG.exception(_("Failed updating volume %(volume_id)s with "
+                            "%(update)s") % {'volume_id': volume_id,
+                                             'update': update})
+        # Even if the update fails, the volume is ready.
+        msg = _("Volume %(volume_name)s (%(volume_id)s): created successfully")
+        LOG.info(msg % {
+            'volume_name': volume_spec['volume_name'],
+            'volume_id': volume_id,
+        })
+
+
+def get_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
+             allow_reschedule, reschedule_context, request_spec,
+             filter_properties, snapshot_id=None, image_id=None,
+             source_volid=None):
+    """Constructs and returns the manager entrypoint flow.
+
+    This flow will do the following:
+
+    1. Determines if rescheduling is enabled (ahead of time).
+    2. Injects keys & values for dependent tasks.
+    3. Selects one of two tasks that activate only on *failure* (one to update
+       the db status & notify, or one to update the db status & notify &
+       *reschedule*).
+    4. Extracts a volume specification from the provided inputs.
+    5. Notifies that the volume has started to be created.
+    6. Creates a volume from the extracted volume specification.
+    7. Attaches an on-success *only* task that notifies that the volume
+       creation has ended and performs further database status updates.
+    """
+
+    flow_name = ACTION.replace(":", "_") + "_manager"
+    volume_flow = linear_flow.Flow(flow_name)
+
+    # This injects the initial starting flow values into the workflow so that
+    # the dependency order of the tasks provides/requires can be correctly
+    # determined.
+    create_what = {
+        'context': context,
+        'filter_properties': filter_properties,
+        'image_id': image_id,
+        'request_spec': request_spec,
+        'snapshot_id': snapshot_id,
+        'source_volid': source_volid,
+        'volume_id': volume_id,
+    }
+
+    volume_flow.add(ExtractVolumeRefTask(db))
+
+    if allow_reschedule and request_spec:
+        volume_flow.add(OnFailureRescheduleTask(reschedule_context,
+                                                db, scheduler_rpcapi))
+
+    volume_flow.add(ExtractVolumeSpecTask(db),
+                    NotifyVolumeActionTask(db, host, "create.start"),
+                    CreateVolumeFromSpecTask(db, host, driver),
+                    CreateVolumeOnFinishTask(db, host, "create.end"))
+
+    # Now load (but do not run) the flow using the provided initial data.
+    return taskflow.engines.load(volume_flow, store=create_what)
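And the manager-side calling convention, matching the VolumeManager hunk below; every argument here is a placeholder supplied for illustration:

    # Hypothetical usage sketch (placeholder objects, not cinder code).
    flow_engine = get_flow(context, db, driver, scheduler_rpcapi,
                           host='myhost@lvm', volume_id='vol-uuid',
                           allow_reschedule=True,
                           reschedule_context=context,
                           request_spec=request_spec,
                           filter_properties={})
    # As with the api flow, the engine is returned loaded but not started.
    flow_engine.run()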
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 41476043e3a123b7cee47f49217f5c7b15a78040..ad8d4a6fb9d6837893637d900c0b42a049baf93e 100644 (file)
@@ -54,7 +54,7 @@ from cinder.openstack.common import uuidutils
 from cinder import quota
 from cinder import utils
 from cinder.volume.configuration import Configuration
-from cinder.volume.flows.api import create_volume
+from cinder.volume.flows.manager import create_volume
 from cinder.volume import rpcapi as volume_rpcapi
 from cinder.volume import utils as volume_utils
 from cinder.volume import volume_types
@@ -300,7 +300,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         try:
             # NOTE(flaper87): Driver initialization is
             # verified by the task itself.
-            flow_engine = create_volume.get_manager_flow(
+            flow_engine = create_volume.get_flow(
                 context,
                 self.db,
                 self.driver,