default_provides = 'volume_ref'
- def __init__(self, db):
+ def __init__(self, db, host):
super(ExtractVolumeRefTask, self).__init__(addons=[ACTION])
self.db = db
+ self.host = host
def execute(self, context, volume_id):
# NOTE(harlowja): this will fetch the volume from the database; if the
# volume has been deleted before we get here then this should fail. In the
# future we might want a lock on the volume_id so that the volume can not
# be deleted while it's still being created?
volume_ref = self.db.volume_get(context, volume_id)
+ # NOTE(vish): so we don't have to get volume from db again before
+ # passing it to the driver.
+ volume_ref['host'] = self.host
+
return volume_ref
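# Because default_provides is 'volume_ref', taskflow saves this return value
# under that name and passes it to any later task in the flow whose execute()
# declares a 'volume_ref' parameter. A rough sketch of a consumer (class and
# call names here are illustrative only, not part of this change):
#
#     class SomeLaterTask(flow_utils.CinderTask):
#         def execute(self, context, volume_ref):
#             do_something(volume_ref['host'])  # host was stamped above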
def revert(self, context, volume_id, result, **kwargs):
Reversion strategy: N/A
"""
- def __init__(self, db, host, event_suffix):
+ def __init__(self, db, event_suffix):
super(NotifyVolumeActionTask, self).__init__(addons=[ACTION,
event_suffix])
self.db = db
self.event_suffix = event_suffix
- self.host = host
def execute(self, context, volume_ref):
volume_id = volume_ref['id']
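# The 'host' key is expected to have been stamped onto volume_ref by
# ExtractVolumeRefTask earlier in the flow, so this task no longer needs
# its own copy of the host.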
try:
volume_utils.notify_about_volume_usage(context, volume_ref,
self.event_suffix,
- host=self.host)
+ host=volume_ref['host'])
except exception.CinderException:
# If notification sending or volume database entry reading fails
# then we shouldn't error out the whole workflow since this is
default_provides = 'volume'
- def __init__(self, db, host, driver):
+ def __init__(self, db, driver):
super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
self.db = db
self.driver = driver
'source_vol': self._create_from_source_volume,
'image': self._create_from_image,
}
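# Keyed by the creation type carried in the volume_spec ('source_vol',
# 'image', ...); execute() looks up the matching functor and calls it to
# actually build the volume.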
- self.host = host
def _handle_bootable_volume_glance_meta(self, context, volume_id,
**kwargs):
{'volume_spec': volume_spec, 'volume_id': volume_id,
'functor': common.make_pretty_name(create_functor)})
- # NOTE(vish): so we don't have to get volume from db again before
- # passing it to the driver.
- volume_ref['host'] = self.host
-
# Call the given functor to make the volume.
model_update = create_functor(context, volume_ref=volume_ref,
**volume_spec)
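# The functor returns an optional model_update dict describing fields the
# driver wants persisted on the volume (for example a provider location);
# a None result simply means there is nothing extra to save.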
Reversion strategy: N/A
"""
- def __init__(self, db, host, event_suffix):
- super(CreateVolumeOnFinishTask, self).__init__(db, host, event_suffix)
+ def __init__(self, db, event_suffix):
+ super(CreateVolumeOnFinishTask, self).__init__(db, event_suffix)
self.status_translation = {
'migration_target_creating': 'migration_target',
}
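# Translates special in-progress statuses into the status to report once
# creation finishes; statuses not listed here fall through to the normal
# completed state.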
'volume_id': volume_id,
}
- volume_flow.add(ExtractVolumeRefTask(db))
+ volume_flow.add(ExtractVolumeRefTask(db, host))
if allow_reschedule and request_spec:
volume_flow.add(OnFailureRescheduleTask(reschedule_context,
db, scheduler_rpcapi))
volume_flow.add(ExtractVolumeSpecTask(db),
- NotifyVolumeActionTask(db, host, "create.start"),
- CreateVolumeFromSpecTask(db, host, driver),
- CreateVolumeOnFinishTask(db, host, "create.end"))
+ NotifyVolumeActionTask(db, "create.start"),
+ CreateVolumeFromSpecTask(db, driver),
+ CreateVolumeOnFinishTask(db, "create.end"))
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(volume_flow, store=create_what)
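# A minimal sketch (assumed, not part of this change) of how the volume
# manager would consume this factory; the name 'get_flow' and the exact
# keyword arguments are assumptions based on the parameters used above.
def _example_create_volume(context, db, driver, scheduler_rpcapi, host,
                           volume_id, request_spec):
    # 'get_flow' is the assumed name of the factory whose tail is shown above.
    flow_engine = get_flow(context, db, driver, scheduler_rpcapi, host,
                           volume_id, allow_reschedule=True,
                           reschedule_context=None,
                           request_spec=request_spec,
                           filter_properties={})
    # taskflow.engines.load() only builds the engine; run() is what actually
    # executes the tasks added to the flow.
    flow_engine.run()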