usage = ('''Usage:
%s resource-list-details stack_name [logical_resource_id]
%s resource-list-details physical_resource_id [logical_resource_id]''' %
- (scriptname, scriptname))
+ (scriptname, scriptname))
try:
name_or_pid = arguments.pop(0)
if options.debug:
logging.basicConfig(format='%(levelname)s:%(message)s',
- level=logging.DEBUG)
+ level=logging.DEBUG)
logging.debug("Debug level logging enabled")
elif options.verbose:
logging.basicConfig(format='%(levelname)s:%(message)s',
- level=logging.INFO)
+ level=logging.INFO)
else:
logging.basicConfig(format='%(levelname)s:%(message)s',
- level=logging.WARNING)
+ level=logging.WARNING)
return (options, command, args)
def lookup_command(parser, command_name):
base_commands = {'help': print_help}
- stack_commands = {
- 'create': stack_create,
- 'update': stack_update,
- 'delete': stack_delete,
- 'list': stack_list,
- 'events_list': stack_events_list, # DEPRECATED
- 'event-list': stack_events_list,
- 'resource': stack_resource_show,
- 'resource-list': stack_resources_list,
- 'resource-list-details': stack_resources_list_details,
- 'validate': template_validate,
- 'gettemplate': get_template,
- 'estimate-template-cost': estimate_template_cost,
- 'describe': stack_describe}
+ stack_commands = {'create': stack_create,
+ 'update': stack_update,
+ 'delete': stack_delete,
+ 'list': stack_list,
+ 'events_list': stack_events_list, # DEPRECATED
+ 'event-list': stack_events_list,
+ 'resource': stack_resource_show,
+ 'resource-list': stack_resources_list,
+ 'resource-list-details': stack_resources_list_details,
+ 'validate': template_validate,
+ 'gettemplate': get_template,
+ 'estimate-template-cost': estimate_template_cost,
+ 'describe': stack_describe}
commands = {}
for command_set in (base_commands, stack_commands):
from heat.engine import service as engine
db_api.configure()
- srv = engine.EngineService(cfg.CONF.host,
- 'engine')
+ srv = engine.EngineService(cfg.CONF.host, 'engine')
launcher = service.launch(srv)
launcher.wait()
'verb': req.method,
'path': req.path,
'params': auth_params,
- }}
+ }}
creds_json = None
try:
creds_json = json.dumps(creds)
else:
message = self.explanation
return {'ErrorResponse': {'Error': {'Type': self.err_type,
- 'Code': self.title, 'Message': message}}}
+ 'Code': self.title, 'Message': message}}}
# Common Error Subclasses:
"""
plist = extract_param_list(params, prefix)
kvs = [(p[keyname], p[valuename]) for p in plist
- if keyname in p and valuename in p]
+ if keyname in p and valuename in p]
return dict(kvs)
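For reference, a minimal self-contained sketch of the flattening performed above. `_extract_param_list` below is a simplified stand-in for the real `extract_param_list` helper (an assumption about its behaviour), and the sample values are invented:

```python
def _extract_param_list(params, prefix):
    # Group 'Prefix.member.N.Field' query args into one dict per index N.
    members = {}
    for key, value in params.items():
        parts = key.split('.')
        if len(parts) == 4 and parts[0] == prefix and parts[1] == 'member':
            members.setdefault(int(parts[2]), {})[parts[3]] = value
    return [members[i] for i in sorted(members)]

params = {'Parameters.member.1.ParameterKey': 'KeyName',
          'Parameters.member.1.ParameterValue': 'heat_key'}
plist = _extract_param_list(params, 'Parameters')
# The comprehension above then keeps only complete key/value pairs:
pairs = dict((p['ParameterKey'], p['ParameterValue']) for p in plist
             if 'ParameterKey' in p and 'ParameterValue' in p)
print(pairs)  # {'KeyName': 'heat_key'}
```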
for action in self._actions:
mapper.connect("/", controller=stacks_resource, action=action,
- conditions=conditions(action))
+ conditions=conditions(action))
mapper.connect("/", controller=stacks_resource, action="index")
Parameters.member.1.ParameterValue
"""
return api_utils.extract_param_pairs(params,
- prefix='Parameters',
- keyname='ParameterKey',
- valuename='ParameterValue')
+ prefix='Parameters',
+ keyname='ParameterKey',
+ valuename='ParameterValue')
def _get_identity(self, con, stack_name):
"""
return exception.map_remote_error(ex)
res = {'StackSummaries': [format_stack_summary(s)
- for s in stack_list['stacks']]}
+ for s in stack_list['stacks']]}
return api_utils.format_response('ListStacks', res)
}
def replacecolon(d):
- return dict(map(lambda (k, v):
- (k.replace(':', '.'), v), d.items()))
+ return dict(map(lambda (k, v): (k.replace(':', '.'), v),
+ d.items()))
def transform(attrs):
"""
# Reformat Parameters dict-of-dict into AWS API format
# This is a list-of-dict with nasty "ParameterKey" : key
# "ParameterValue" : value format.
- result['Parameters'] = [{'ParameterKey':k,
- 'ParameterValue':v}
- for (k, v) in result['Parameters'].items()]
+ result['Parameters'] = [{'ParameterKey': k,
+ 'ParameterValue': v}
+ for (k, v) in result['Parameters'].items()]
return self._id_format(result)
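As a concrete illustration of the reshaping above (sample values assumed): the engine returns Parameters as a plain dict, while the AWS response format expects a list of ParameterKey/ParameterValue entries.

```python
engine_params = {'KeyName': 'heat_key', 'InstanceType': 'm1.small'}
aws_params = [{'ParameterKey': k, 'ParameterValue': v}
              for (k, v) in engine_params.items()]
# e.g. [{'ParameterKey': 'KeyName', 'ParameterValue': 'heat_key'},
#       {'ParameterKey': 'InstanceType', 'ParameterValue': 'm1.small'}]
# (dict iteration order may vary)
```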
return None
CREATE_OR_UPDATE_ACTION = (
- CREATE_STACK, UPDATE_STACK
- ) = (
- "CreateStack", "UpdateStack")
+ CREATE_STACK, UPDATE_STACK,
+ ) = (
+ "CreateStack", "UpdateStack",
+ )
def create(self, req):
return self.create_or_update(req, self.CREATE_STACK)
Get the estimated monthly cost of a template
"""
return api_utils.format_response('EstimateTemplateCost',
- {'Url': 'http://en.wikipedia.org/wiki/Gratis'})
+                                         {'Url':
+                                          'http://en.wikipedia.org/wiki/Gratis'})
def validate_template(self, req):
"""
}
result = api_utils.reformat_dict_keys(keymap, e)
- result['ResourceProperties'] = json.dumps(
- result['ResourceProperties'])
+        result['ResourceProperties'] = json.dumps(
+            result['ResourceProperties'])
return self._id_format(result)
result = [format_stack_event(e) for e in events]
return api_utils.format_response('DescribeStackEvents',
- {'StackEvents': result})
+ {'StackEvents': result})
def describe_stack_resource(self, req):
"""
try:
identity = self._get_identity(con, req.params['StackName'])
- resource_details = self.engine_rpcapi.describe_stack_resource(con,
- stack_identity=identity,
- resource_name=req.params.get('LogicalResourceId'))
+ resource_details = self.engine_rpcapi.describe_stack_resource(
+ con,
+ stack_identity=identity,
+ resource_name=req.params.get('LogicalResourceId'))
except rpc_common.RemoteError as ex:
return exception.map_remote_error(ex)
result = format_resource_detail(resource_details)
return api_utils.format_response('DescribeStackResource',
- {'StackResourceDetail': result})
+ {'StackResourceDetail': result})
def describe_stack_resources(self, req):
"""
try:
identity = self._get_identity(con, stack_name)
- resources = self.engine_rpcapi.describe_stack_resources(con,
+ resources = self.engine_rpcapi.describe_stack_resources(
+ con,
stack_identity=identity,
physical_resource_id=physical_resource_id,
logical_resource_id=req.params.get('LogicalResourceId'))
result = [format_stack_resource(r) for r in resources]
return api_utils.format_response('DescribeStackResources',
- {'StackResources': result})
+ {'StackResources': result})
def list_stack_resources(self, req):
"""
try:
identity = self._get_identity(con, req.params['StackName'])
- resources = self.engine_rpcapi.list_stack_resources(con,
- stack_identity=identity)
+ resources = self.engine_rpcapi.list_stack_resources(
+ con,
+ stack_identity=identity)
except rpc_common.RemoteError as ex:
return exception.map_remote_error(ex)
summaries = [format_resource_summary(r) for r in resources]
return api_utils.format_response('ListStackResources',
- {'StackResourceSummaries': summaries})
+ {'StackResourceSummaries': summaries})
def create_resource(options):
con = req.context
identity = identifier.ResourceIdentifier.from_arn(arn)
try:
- md = self.engine.metadata_update(con,
- stack_id=dict(identity.stack()),
- resource_name=identity.resource_name,
- metadata=body)
+ md = self.engine.metadata_update(
+ con,
+ stack_id=dict(identity.stack()),
+ resource_name=identity.resource_name,
+ metadata=body)
except rpc_common.RemoteError as ex:
return exception.map_remote_error(ex)
for action in self._actions:
mapper.connect("/", controller=controller_resource, action=action,
- conditions=conditions(action))
+ conditions=conditions(action))
mapper.connect("/", controller=controller_resource, action="index")
Reformat engine output into the AWS "MetricAlarm" format
"""
keymap = {
- engine_api.WATCH_ACTIONS_ENABLED: 'ActionsEnabled',
- engine_api.WATCH_ALARM_ACTIONS: 'AlarmActions',
- engine_api.WATCH_TOPIC: 'AlarmArn',
- engine_api.WATCH_UPDATED_TIME:
+ engine_api.WATCH_ACTIONS_ENABLED: 'ActionsEnabled',
+ engine_api.WATCH_ALARM_ACTIONS: 'AlarmActions',
+ engine_api.WATCH_TOPIC: 'AlarmArn',
+ engine_api.WATCH_UPDATED_TIME:
'AlarmConfigurationUpdatedTimestamp',
- engine_api.WATCH_DESCRIPTION: 'AlarmDescription',
- engine_api.WATCH_NAME: 'AlarmName',
- engine_api.WATCH_COMPARISON: 'ComparisonOperator',
- engine_api.WATCH_DIMENSIONS: 'Dimensions',
- engine_api.WATCH_PERIODS: 'EvaluationPeriods',
- engine_api.WATCH_INSUFFICIENT_ACTIONS: 'InsufficientDataActions',
- engine_api.WATCH_METRIC_NAME: 'MetricName',
- engine_api.WATCH_NAMESPACE: 'Namespace',
- engine_api.WATCH_OK_ACTIONS: 'OKActions',
- engine_api.WATCH_PERIOD: 'Period',
- engine_api.WATCH_STATE_REASON: 'StateReason',
- engine_api.WATCH_STATE_REASON_DATA: 'StateReasonData',
- engine_api.WATCH_STATE_UPDATED_TIME: 'StateUpdatedTimestamp',
- engine_api.WATCH_STATE_VALUE: 'StateValue',
- engine_api.WATCH_STATISTIC: 'Statistic',
- engine_api.WATCH_THRESHOLD: 'Threshold',
- engine_api.WATCH_UNIT: 'Unit'}
+ engine_api.WATCH_DESCRIPTION: 'AlarmDescription',
+ engine_api.WATCH_NAME: 'AlarmName',
+ engine_api.WATCH_COMPARISON: 'ComparisonOperator',
+ engine_api.WATCH_DIMENSIONS: 'Dimensions',
+ engine_api.WATCH_PERIODS: 'EvaluationPeriods',
+ engine_api.WATCH_INSUFFICIENT_ACTIONS:
+ 'InsufficientDataActions',
+ engine_api.WATCH_METRIC_NAME: 'MetricName',
+ engine_api.WATCH_NAMESPACE: 'Namespace',
+ engine_api.WATCH_OK_ACTIONS: 'OKActions',
+ engine_api.WATCH_PERIOD: 'Period',
+ engine_api.WATCH_STATE_REASON: 'StateReason',
+ engine_api.WATCH_STATE_REASON_DATA: 'StateReasonData',
+ engine_api.WATCH_STATE_UPDATED_TIME: 'StateUpdatedTimestamp',
+ engine_api.WATCH_STATE_VALUE: 'StateValue',
+ engine_api.WATCH_STATISTIC: 'Statistic',
+ engine_api.WATCH_THRESHOLD: 'Threshold',
+ engine_api.WATCH_UNIT: 'Unit'}
# AWS doesn't return StackId in the main MetricAlarm
# structure, so we add StackId as a dimension to all responses
a[engine_api.WATCH_DIMENSIONS].append({'StackId':
- a[engine_api.WATCH_STACK_ID]})
+ a[engine_api.WATCH_STACK_ID]
+ })
# Reformat dimensions list into AWS API format
a[engine_api.WATCH_DIMENSIONS] = self._reformat_dimensions(
- a[engine_api.WATCH_DIMENSIONS])
+ a[engine_api.WATCH_DIMENSIONS])
return api_utils.reformat_dict_keys(keymap, a)
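To make the StackId handling above concrete, a hedged sketch with invented values, assuming `_reformat_dimensions` emits AWS-style Name/Value pairs:

```python
# Engine-side dimensions, with the stack ARN appended as an extra
# dimension so callers can still correlate alarms with stacks.
dimensions = [{'InstanceId': 'i-0001'}]
dimensions.append({'StackId': 'arn:openstack:heat::t1:stacks/wp/1'})

# Reshape into the AWS {'Name': ..., 'Value': ...} dimension format.
aws_dims = [{'Name': name, 'Value': value}
            for d in dimensions for name, value in d.items()]
# -> [{'Name': 'InstanceId', 'Value': 'i-0001'},
#     {'Name': 'StackId', 'Value': 'arn:openstack:heat::t1:stacks/wp/1'}]
```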
return exception.map_remote_error(ex)
res = {'MetricAlarms': [format_metric_alarm(a)
- for a in watch_list]}
+ for a in watch_list]}
result = api_utils.format_response("DescribeAlarms", res)
return result
dimensions = []
for p in metric_data:
dimension = api_utils.extract_param_pairs(p,
- prefix='Dimensions',
- keyname='Name',
- valuename='Value')
+ prefix='Dimensions',
+ keyname='Name',
+ valuename='Value')
if 'AlarmName' in dimension:
watch_name = dimension['AlarmName']
else:
"""
# Map from AWS state names to those used in the engine
state_map = {'OK': engine_api.WATCH_STATE_OK,
- 'ALARM': engine_api.WATCH_STATE_ALARM,
- 'INSUFFICIENT_DATA': engine_api.WATCH_STATE_NODATA}
+ 'ALARM': engine_api.WATCH_STATE_ALARM,
+ 'INSUFFICIENT_DATA': engine_api.WATCH_STATE_NODATA}
con = req.context
parms = dict(req.params)
# API controller
msg = _("Processing request: %(method)s %(path)s Accept: "
"%(accept)s") % ({'method': req.method,
- 'path': req.path, 'accept': req.accept})
+ 'path': req.path, 'accept': req.accept})
logger.debug(msg)
# If the request is for /versions, just return the versions container
match = self._match_version_string(req.path_info_peek(), req)
if match:
if (req.environ['api.major_version'] == 1 and
- req.environ['api.minor_version'] == 0):
+ req.environ['api.minor_version'] == 0):
logger.debug(_("Matched versioned URI. Version: %d.%d"),
req.environ['api.major_version'],
req.environ['api.minor_version'])
match = self._match_version_string(accept_version, req)
if match:
if (req.environ['api.major_version'] == 1 and
- req.environ['api.minor_version'] == 0):
+ req.environ['api.minor_version'] == 0):
logger.debug(_("Matched versioned media type. "
"Version: %d.%d"),
req.environ['api.major_version'],
def create_stack(self, **kwargs):
if 'TemplateUrl' in kwargs:
- return super(BotoClient, self).create_stack(kwargs['StackName'],
- template_url=kwargs['TemplateUrl'],
- parameters=kwargs['Parameters'])
+ return super(BotoClient, self).create_stack(
+ kwargs['StackName'],
+ template_url=kwargs['TemplateUrl'],
+ parameters=kwargs['Parameters'])
elif 'TemplateBody' in kwargs:
- return super(BotoClient, self).create_stack(kwargs['StackName'],
- template_body=kwargs['TemplateBody'],
- parameters=kwargs['Parameters'])
+ return super(BotoClient, self).create_stack(
+ kwargs['StackName'],
+ template_body=kwargs['TemplateBody'],
+ parameters=kwargs['Parameters'])
else:
logger.error("Must specify TemplateUrl or TemplateBody!")
def update_stack(self, **kwargs):
if 'TemplateUrl' in kwargs:
- return super(BotoClient, self).update_stack(kwargs['StackName'],
- template_url=kwargs['TemplateUrl'],
- parameters=kwargs['Parameters'])
+ return super(BotoClient, self).update_stack(
+ kwargs['StackName'],
+ template_url=kwargs['TemplateUrl'],
+ parameters=kwargs['Parameters'])
elif 'TemplateBody' in kwargs:
- return super(BotoClient, self).update_stack(kwargs['StackName'],
- template_body=kwargs['TemplateBody'],
- parameters=kwargs['Parameters'])
+ return super(BotoClient, self).update_stack(
+ kwargs['StackName'],
+ template_body=kwargs['TemplateBody'],
+ parameters=kwargs['Parameters'])
else:
logger.error("Must specify TemplateUrl or TemplateBody!")
def list_stack_events(self, **kwargs):
return super(BotoClient, self).describe_stack_events(
- kwargs['StackName'])
+ kwargs['StackName'])
def describe_stack_resource(self, **kwargs):
return super(BotoClient, self).describe_stack_resource(
- kwargs['StackName'], kwargs['LogicalResourceId'])
+ kwargs['StackName'], kwargs['LogicalResourceId'])
def describe_stack_resources(self, **kwargs):
# Check if this is a StackName, if not assume it's a physical res ID
stack_names = [s.stack_name for s in list_stacks]
if kwargs['NameOrPid'] in stack_names:
logger.debug("Looking up resources for StackName:%s" %
- kwargs['NameOrPid'])
+ kwargs['NameOrPid'])
return super(BotoClient, self).describe_stack_resources(
- stack_name_or_id=kwargs['NameOrPid'],
- logical_resource_id=kwargs['LogicalResourceId'])
+ stack_name_or_id=kwargs['NameOrPid'],
+ logical_resource_id=kwargs['LogicalResourceId'])
else:
logger.debug("Looking up resources for PhysicalResourceId:%s" %
- kwargs['NameOrPid'])
+ kwargs['NameOrPid'])
return super(BotoClient, self).describe_stack_resources(
- stack_name_or_id=None,
- logical_resource_id=kwargs['LogicalResourceId'],
- physical_resource_id=kwargs['NameOrPid'])
+ stack_name_or_id=None,
+ logical_resource_id=kwargs['LogicalResourceId'],
+ physical_resource_id=kwargs['NameOrPid'])
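The two branches above amount to a small dispatch: `NameOrPid` is passed as a stack name when it matches a known stack, and as a physical resource id otherwise. A standalone sketch of that decision (function and argument names are hypothetical):

```python
def resolve_lookup(name_or_pid, known_stack_names):
    # Decide which boto keyword the ambiguous argument should feed.
    if name_or_pid in known_stack_names:
        return {'stack_name_or_id': name_or_pid,
                'physical_resource_id': None}
    return {'stack_name_or_id': None,
            'physical_resource_id': name_or_pid}

print(resolve_lookup('wordpress', ['wordpress']))
# {'stack_name_or_id': 'wordpress', 'physical_resource_id': None}
```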
def list_stack_resources(self, **kwargs):
return super(BotoClient, self).list_stack_resources(
- kwargs['StackName'])
+ kwargs['StackName'])
def validate_template(self, **kwargs):
if 'TemplateUrl' in kwargs:
return super(BotoClient, self).validate_template(
- template_url=kwargs['TemplateUrl'])
+ template_url=kwargs['TemplateUrl'])
elif 'TemplateBody' in kwargs:
return super(BotoClient, self).validate_template(
- template_body=kwargs['TemplateBody'])
+ template_body=kwargs['TemplateBody'])
else:
logger.error("Must specify TemplateUrl or TemplateBody!")
def estimate_template_cost(self, **kwargs):
if 'TemplateUrl' in kwargs:
return super(BotoClient, self).estimate_template_cost(
- kwargs['StackName'],
- template_url=kwargs['TemplateUrl'],
- parameters=kwargs['Parameters'])
+ kwargs['StackName'],
+ template_url=kwargs['TemplateUrl'],
+ parameters=kwargs['Parameters'])
elif 'TemplateBody' in kwargs:
return super(BotoClient, self).estimate_template_cost(
- kwargs['StackName'],
- template_body=kwargs['TemplateBody'],
- parameters=kwargs['Parameters'])
+ kwargs['StackName'],
+ template_body=kwargs['TemplateBody'],
+ parameters=kwargs['Parameters'])
else:
logger.error("Must specify TemplateUrl or TemplateBody!")
ret.append("ResourceProperties : %s" % event.resource_properties)
ret.append("ResourceStatus : %s" % event.resource_status)
ret.append("ResourceStatusReason : %s" %
- event.resource_status_reason)
+ event.resource_status_reason)
ret.append("ResourceType : %s" % event.resource_type)
ret.append("StackId : %s" % event.stack_id)
ret.append("StackName : %s" % event.stack_name)
ret.append("PhysicalResourceId : %s" % res.physical_resource_id)
ret.append("ResourceStatus : %s" % res.resource_status)
ret.append("ResourceStatusReason : %s" %
- res.resource_status_reason)
+ res.resource_status_reason)
ret.append("ResourceType : %s" % res.resource_type)
ret.append("StackId : %s" % res.stack_id)
ret.append("StackName : %s" % res.stack_name)
ret = []
for res in resources:
ret.append("LastUpdatedTimestamp : %s" %
- res.last_updated_timestamp)
+ res.last_updated_timestamp)
ret.append("LogicalResourceId : %s" % res.logical_resource_id)
ret.append("PhysicalResourceId : %s" % res.physical_resource_id)
ret.append("ResourceStatus : %s" % res.resource_status)
ret.append("ResourceStatusReason : %s" %
- res.resource_status_reason)
+ res.resource_status_reason)
ret.append("ResourceType : %s" % res.resource_type)
ret.append("--")
return '\n'.join(ret)
For now, we format the dict response as a workaround
'''
resource_detail = res['DescribeStackResourceResponse'][
- 'DescribeStackResourceResult']['StackResourceDetail']
+ 'DescribeStackResourceResult']['StackResourceDetail']
ret = []
for key in resource_detail:
ret.append("%s : %s" % (key, resource_detail[key]))
# Also note is_secure is defaulted to False as HTTPS connections
# don't seem to work atm, FIXME
cloudformation = BotoClient(aws_access_key_id=aws_access_key,
- aws_secret_access_key=aws_secret_key, is_secure=False,
- port=port, path="/v1")
+ aws_secret_access_key=aws_secret_key,
+ is_secure=False,
+ port=port,
+ path="/v1")
if cloudformation:
logger.debug("Got CF connection object OK")
else:
# TODO : These should probably go in the CW API and be imported
DEFAULT_NAMESPACE = "heat/unknown"
METRIC_UNITS = ("Seconds", "Microseconds", "Milliseconds", "Bytes",
- "Kilobytes", "Megabytes", "Gigabytes", "Terabytes",
- "Bits", "Kilobits", "Megabits", "Gigabits", "Terabits",
- "Percent", "Count", "Bytes/Second", "Kilobytes/Second",
- "Megabytes/Second", "Gigabytes/Second", "Terabytes/Second",
- "Bits/Second", "Kilobits/Second", "Megabits/Second",
- "Gigabits/Second", "Terabits/Second", "Count/Second", None)
+ "Kilobytes", "Megabytes", "Gigabytes", "Terabytes",
+ "Bits", "Kilobits", "Megabits", "Gigabits", "Terabits",
+ "Percent", "Count", "Bytes/Second", "Kilobytes/Second",
+ "Megabytes/Second", "Gigabytes/Second", "Terabytes/Second",
+ "Bits/Second", "Kilobits/Second", "Megabits/Second",
+ "Gigabits/Second", "Terabits/Second", "Count/Second", None)
METRIC_COMPARISONS = (">=", ">", "<", "<=")
ALARM_STATES = ("OK", "ALARM", "INSUFFICIENT_DATA")
METRIC_STATISTICS = ("Average", "Sum", "SampleCount", "Maximum", "Minimum")
except KeyError:
name = None
return super(BotoCWClient, self).describe_alarms(
- alarm_names=[name])
+ alarm_names=[name])
def list_metrics(self, **kwargs):
# list_metrics returns non-null index in next_token if there
token = None
while True:
results.append(super(BotoCWClient, self).list_metrics(
- next_token=token,
- dimensions=None,
- metric_name=name,
- namespace=None))
+ next_token=token,
+ dimensions=None,
+ metric_name=name,
+ namespace=None))
if not token:
break
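The loop above is the usual continuation-token paging pattern: re-issue the call with the last `next_token` until the service stops returning one. A generic self-contained sketch, where `fetch_page` is a hypothetical stand-in for the boto `list_metrics` call:

```python
def list_all(fetch_page):
    # Collect every page from a next_token-paginated API.
    results, token = [], None
    while True:
        page = fetch_page(next_token=token)  # hypothetical callable
        results.extend(page['items'])
        token = page.get('next_token')
        if not token:  # no token means this was the last page
            break
    return results
```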
metric_value = kwargs['MetricValue']
metric_namespace = kwargs['Namespace']
except KeyError:
- logger.error("Must pass MetricName, MetricUnit, " +\
- "Namespace, MetricValue!")
+ logger.error("Must pass MetricName, MetricUnit, " +
+ "Namespace, MetricValue!")
return
try:
return
return super(BotoCWClient, self).put_metric_data(
- namespace=metric_namespace,
- name=metric_name,
- value=metric_value,
- timestamp=None, # This means use "now" in the engine
- unit=metric_unit,
- dimensions=metric_dims,
- statistics=None)
+ namespace=metric_namespace,
+ name=metric_name,
+ value=metric_value,
+ timestamp=None, # This means use "now" in the engine
+ unit=metric_unit,
+ dimensions=metric_dims,
+ statistics=None)
def set_alarm_state(self, **kwargs):
return super(BotoCWClient, self).set_alarm_state(
- alarm_name=kwargs['AlarmName'],
- state_reason=kwargs['StateReason'],
- state_value=kwargs['StateValue'],
- state_reason_data=kwargs['StateReasonData'])
+ alarm_name=kwargs['AlarmName'],
+ state_reason=kwargs['StateReason'],
+ state_value=kwargs['StateValue'],
+ state_reason_data=kwargs['StateReasonData'])
def format_metric_alarm(self, alarms):
'''
ret.append("AlarmActions : %s" % s.alarm_actions)
ret.append("AlarmArn : %s" % s.alarm_arn)
ret.append("AlarmConfigurationUpdatedTimestamp : %s" %
- s.last_updated)
+ s.last_updated)
ret.append("ComparisonOperator : %s" % s.comparison)
ret.append("Dimensions : %s" % s.dimensions)
ret.append("EvaluationPeriods : %s" % s.evaluation_periods)
ret.append("InsufficientDataActions : %s" %
- s.insufficient_data_actions)
+ s.insufficient_data_actions)
ret.append("MetricName : %s" % s.metric)
ret.append("Namespace : %s" % s.namespace)
ret.append("OKActions : %s" % s.ok_actions)
ret.append("Period : %s" % s.period)
ret.append("StateReason : %s" % s.state_reason)
ret.append("StateUpdatedTimestamp : %s" %
- s.last_updated)
+ s.last_updated)
ret.append("StateValue : %s" % s.state_value)
ret.append("Statistic : %s" % s.statistic)
ret.append("Threshold : %s" % s.threshold)
# Also note is_secure is defaulted to False as HTTPS connections
# don't seem to work atm, FIXME
cloudwatch = BotoCWClient(aws_access_key_id=aws_access_key,
- aws_secret_access_key=aws_secret_key, is_secure=False,
- port=port, path="/v1")
+ aws_secret_access_key=aws_secret_key,
+ is_secure=False,
+ port=port,
+ path="/v1")
if cloudwatch:
logger.debug("Got CW connection object OK")
else:
'SignatureVersion', 'Timestamp', 'AWSAccessKeyId',
'Signature', 'TimeoutInMinutes',
'LogicalResourceId', 'PhysicalResourceId', 'NextToken',
-)
+ )
class V1Client(base_client.BaseClient):
'LogicalResourceId': kwargs['LogicalResourceId']}
try:
result = self.stack_request("DescribeStackResources", "GET",
- **parameters)
+ **parameters)
except Exception:
logger.debug("Failed to lookup resource details with key %s:%s"
% (lookup_key, lookup_value))
tenant=tenant,
auth_url=auth_url,
strategy=force_strategy or auth_strategy,
- region=region,
- )
+ region=region)
if creds['strategy'] == 'keystone' and not creds['auth_url']:
msg = ("--auth_url option or OS_AUTH_URL environment variable "
raise exception.ClientConfigurationError(msg)
use_ssl = (creds['auth_url'] is not None and
- creds['auth_url'].find('https') != -1)
+ creds['auth_url'].find('https') != -1)
client = HeatClient
return client(host=host,
- port=port,
- use_ssl=use_ssl,
- auth_tok=auth_token,
- creds=creds,
- insecure=insecure,
- service_type='cloudformation')
+ port=port,
+ use_ssl=use_ssl,
+ auth_tok=auth_token,
+ creds=creds,
+ insecure=insecure,
+ service_type='cloudformation')
return SUCCESS if ret is None else ret
except exception.NotAuthorized:
LOG.error("Not authorized to make this request. Check " +
- "your credentials (OS_USERNAME, OS_PASSWORD, " +
- "OS_TENANT_NAME, OS_AUTH_URL and OS_AUTH_STRATEGY).")
+ "your credentials (OS_USERNAME, OS_PASSWORD, " +
+ "OS_TENANT_NAME, OS_AUTH_URL and OS_AUTH_STRATEGY).")
return FAILURE
except exception.ClientConfigurationError:
raise
except exception.KeystoneError, e:
LOG.error("Keystone did not finish the authentication and "
- "returned the following message:\n\n%s"
- % e.message)
+ "returned the following message:\n\n%s" % e.message)
return FAILURE
except Exception, e:
options = arguments[0]
region_matches = lambda e: region is None or e['region'] == region
endpoints = [ep for s in service_catalog if service_type_matches(s)
- for ep in s['endpoints'] if region_matches(ep)]
+ for ep in s['endpoints'] if region_matches(ep)]
if len(endpoints) > 1:
raise exception.RegionAmbiguity(region=region)
"tenantName": creds['tenant'],
"passwordCredentials": {
"username": creds['username'],
- "password": creds['password']
- }
- }
- }
+ "password": creds['password']}}}
headers = {}
headers['Content-Type'] = 'application/json'
req_body = json.dumps(creds)
resp, resp_body = self._do_request(
- token_url, 'POST', headers=headers, body=req_body)
+ token_url, 'POST', headers=headers, body=req_body)
if resp.status == 200:
resp_auth = json.loads(resp_body)['access']
'X-Role',
)
LOG.debug('Removing headers from request environment: %s' %
- ','.join(auth_headers))
+ ','.join(auth_headers))
self._remove_headers(env, auth_headers)
def _get_user_token_from_header(self, env):
self.admin_token = None
else:
LOG.error('Bad response code while validating token: %s' %
- response.status)
+ response.status)
if retry:
LOG.info('Retrying validation')
return self._validate_user_token(user_token, False)
raise exception.ClientConnectionError(msg)
if (self.key_file is not None and
- not os.path.exists(self.key_file)):
+ not os.path.exists(self.key_file)):
msg = _("The key file you specified %s does not "
"exist") % self.key_file
raise exception.ClientConnectionError(msg)
connect_kwargs['key_file'] = self.key_file
if (self.cert_file is not None and
- not os.path.exists(self.cert_file)):
+ not os.path.exists(self.cert_file)):
msg = _("The cert file you specified %s does not "
"exist") % self.cert_file
raise exception.ClientConnectionError(msg)
connect_kwargs['cert_file'] = self.cert_file
if (self.ca_file is not None and
- not os.path.exists(self.ca_file)):
+ not os.path.exists(self.ca_file)):
msg = _("The CA file you specified %s does not "
"exist") % self.ca_file
raise exception.ClientConnectionError(msg)
paste_deploy_group = cfg.OptGroup('paste_deploy')
paste_deploy_opts = [
cfg.StrOpt('flavor'),
- cfg.StrOpt('config_file'),
- ]
+ cfg.StrOpt('config_file')]
-bind_opts = [cfg.IntOpt('bind_port', default=8000),
- cfg.StrOpt('bind_host', default='127.0.0.1')]
+bind_opts = [
+ cfg.IntOpt('bind_port', default=8000),
+ cfg.StrOpt('bind_host', default='127.0.0.1')]
service_opts = [
-cfg.IntOpt('report_interval',
- default=10,
- help='seconds between nodes reporting state to datastore'),
-cfg.IntOpt('periodic_interval',
- default=60,
- help='seconds between running periodic tasks'),
-cfg.StrOpt('ec2_listen',
- default="0.0.0.0",
- help='IP address for EC2 API to listen'),
-cfg.IntOpt('ec2_listen_port',
- default=8773,
- help='port for ec2 api to listen'),
-cfg.StrOpt('osapi_compute_listen',
- default="0.0.0.0",
- help='IP address for OpenStack API to listen'),
-cfg.IntOpt('osapi_compute_listen_port',
- default=8774,
- help='list port for osapi compute'),
-cfg.StrOpt('osapi_volume_listen',
- default="0.0.0.0",
- help='IP address for OpenStack Volume API to listen'),
-cfg.IntOpt('osapi_volume_listen_port',
- default=8776,
- help='port for os volume api to listen'),
-cfg.StrOpt('heat_metadata_server_url',
- default="",
- help='URL of the Heat metadata server'),
-cfg.StrOpt('heat_waitcondition_server_url',
- default="",
- help='URL of the Heat waitcondition server'),
-cfg.StrOpt('heat_watch_server_url',
- default="",
- help='URL of the Heat cloudwatch server'),
-cfg.StrOpt('heat_stack_user_role',
- default="heat_stack_user",
- help='Keystone role for heat template-defined users'),
-]
+ cfg.IntOpt('report_interval',
+ default=10,
+ help='seconds between nodes reporting state to datastore'),
+ cfg.IntOpt('periodic_interval',
+ default=60,
+ help='seconds between running periodic tasks'),
+ cfg.StrOpt('ec2_listen',
+ default="0.0.0.0",
+ help='IP address for EC2 API to listen'),
+ cfg.IntOpt('ec2_listen_port',
+ default=8773,
+ help='port for ec2 api to listen'),
+ cfg.StrOpt('osapi_compute_listen',
+ default="0.0.0.0",
+ help='IP address for OpenStack API to listen'),
+ cfg.IntOpt('osapi_compute_listen_port',
+ default=8774,
+               help='port for osapi compute to listen'),
+ cfg.StrOpt('osapi_volume_listen',
+ default="0.0.0.0",
+ help='IP address for OpenStack Volume API to listen'),
+ cfg.IntOpt('osapi_volume_listen_port',
+ default=8776,
+ help='port for os volume api to listen'),
+ cfg.StrOpt('heat_metadata_server_url',
+ default="",
+ help='URL of the Heat metadata server'),
+ cfg.StrOpt('heat_waitcondition_server_url',
+ default="",
+ help='URL of the Heat waitcondition server'),
+ cfg.StrOpt('heat_watch_server_url',
+ default="",
+ help='URL of the Heat cloudwatch server'),
+ cfg.StrOpt('heat_stack_user_role',
+ default="heat_stack_user",
+ help='Keystone role for heat template-defined users')]
+
db_opts = [
-cfg.StrOpt('sql_connection',
- default='mysql://heat:heat@localhost/heat',
- help='The SQLAlchemy connection string used to connect to the '
- 'database'),
-cfg.IntOpt('sql_idle_timeout',
- default=3600,
- help='timeout before idle sql connections are reaped'),
-]
+ cfg.StrOpt('sql_connection',
+ default='mysql://heat:heat@localhost/heat',
+ help='The SQLAlchemy connection string used to connect to the '
+ 'database'),
+ cfg.IntOpt('sql_idle_timeout',
+ default=3600,
+ help='timeout before idle sql connections are reaped')]
+
engine_opts = [
-cfg.StrOpt('instance_driver',
- default='heat.engine.nova',
- help='Driver to use for controlling instances'),
-cfg.ListOpt('plugin_dirs',
- default=['/usr/lib64/heat', '/usr/lib/heat'],
- help='List of directories to search for Plugins'),
-]
+ cfg.StrOpt('instance_driver',
+ default='heat.engine.nova',
+ help='Driver to use for controlling instances'),
+ cfg.ListOpt('plugin_dirs',
+ default=['/usr/lib64/heat', '/usr/lib/heat'],
+ help='List of directories to search for Plugins')]
+
rpc_opts = [
-cfg.StrOpt('host',
- default=socket.gethostname(),
- help='Name of the engine node. This can be an opaque identifier.'
- 'It is not necessarily a hostname, FQDN, or IP address.'),
-cfg.StrOpt('control_exchange',
- default='heat',
- help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
-cfg.StrOpt('engine_topic',
- default='engine',
- help='the topic engine nodes listen on')
-]
+ cfg.StrOpt('host',
+ default=socket.gethostname(),
+               help='Name of the engine node. '
+                    'This can be an opaque identifier. '
+                    'It is not necessarily a hostname, FQDN, or IP address.'),
+ cfg.StrOpt('control_exchange',
+ default='heat',
+ help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
+ cfg.StrOpt('engine_topic',
+ default='engine',
+ help='the topic engine nodes listen on')]
def register_api_opts():
class ContextMiddleware(wsgi.Middleware):
- opts = [
- cfg.BoolOpt('owner_is_tenant', default=True),
- cfg.StrOpt('admin_role', default='admin'),
- ]
+ opts = [cfg.BoolOpt('owner_is_tenant', default=True),
+ cfg.StrOpt('admin_role', default='admin')]
def __init__(self, app, conf, **local_conf):
cfg.CONF.register_opts(self.opts)
# deployed on an instance (hence are implicitly untrusted)
roles = self.client.roles.list()
stack_user_role = [r.id for r in roles
- if r.name == cfg.CONF.heat_stack_user_role]
+ if r.name == cfg.CONF.heat_stack_user_role]
if len(stack_user_role) == 1:
role_id = stack_user_role[0]
logger.debug("Adding user %s to role %s" % (user.id, role_id))
# Sanity check the URL
urlp = urlparse.urlparse(url)
if (urlp.scheme not in ('http', 'https') or
- not urlp.netloc or not urlp.path):
+ not urlp.netloc or not urlp.path):
raise ValueError('"%s" is not a valid URL' % url)
# Remove any query-string and extract the ARN
except yaml.scanner.ScannerError as e:
raise ValueError(e)
else:
- if tpl == None:
+ if tpl is None:
tpl = {}
default_for_missing(tpl, u'HeatTemplateFormatVersion',
- HEAT_VERSIONS)
+ HEAT_VERSIONS)
return tpl
global key_order
# Replace AWS format version with Heat format version
json_str = re.sub('"AWSTemplateFormatVersion"\s*:\s*"[^"]+"\s*,',
- '', json_str)
+ '', json_str)
# insert a sortable order into the key to preserve file ordering
key_order = 0
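For clarity, a self-contained illustration of the substitution above (the template snippet is invented): the AWS version key is stripped from the raw JSON so the Heat format version can be defaulted in later.

```python
import re

json_str = '{"AWSTemplateFormatVersion": "2010-09-09", "Resources": {}}'
json_str = re.sub(r'"AWSTemplateFormatVersion"\s*:\s*"[^"]+"\s*,',
                  '', json_str)
print(json_str)  # { "Resources": {}}
```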
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0],
- bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
- if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
+ bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
+ if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
conf.register_opts(socket_opts)
eventlet.patcher.monkey_patch(all=False, socket=True)
self.pool = eventlet.GreenPool(size=self.threads)
try:
- eventlet_wsgi_server(self.sock, self.application,
- custom_pool=self.pool,
- url_length_limit=URL_LENGTH_LIMIT,
- log=WritableLogger(self.logger))
+ eventlet_wsgi_server(self.sock,
+ self.application,
+ custom_pool=self.pool,
+ url_length_limit=URL_LENGTH_LIMIT,
+ log=WritableLogger(self.logger))
except socket.error, err:
if err[0] != errno.EINVAL:
raise
db_opts = [
cfg.StrOpt('db_backend',
default='sqlalchemy',
- help='The backend to use for db'),
- ]
+ help='The backend to use for db')]
IMPL = utils.LazyPluggable('db_backend',
sqlalchemy='heat.db.sqlalchemy.api')
def resource_get_by_name_and_stack(context, resource_name, stack_id):
result = model_query(context, models.Resource).\
- filter_by(name=resource_name).\
- filter_by(stack_id=stack_id).first()
+ filter_by(name=resource_name).\
+ filter_by(stack_id=stack_id).first()
return result
.filter_by(nova_instance=physical_resource_id)
.first())
if (result is not None and context is not None and
- result.stack.tenant != context.tenant_id):
+ result.stack.tenant != context.tenant_id):
return None
return result
def resource_get_all_by_stack(context, stack_id):
results = model_query(context, models.Resource).\
- filter_by(stack_id=stack_id).all()
+ filter_by(stack_id=stack_id).all()
if not results:
raise NotFound("no resources for stack_id %s were found" % stack_id)
def stack_get_by_name(context, stack_name, owner_id=None):
query = model_query(context, models.Stack).\
- filter_by(tenant=context.tenant_id).\
- filter_by(name=stack_name).\
- filter_by(owner_id=owner_id)
+ filter_by(tenant=context.tenant_id).\
+ filter_by(name=stack_name).\
+ filter_by(owner_id=owner_id)
return query.first()
return result
if (result is not None and context is not None and
- result.tenant != context.tenant_id):
+ result.tenant != context.tenant_id):
return None
return result
def stack_get_all(context):
results = model_query(context, models.Stack).\
- filter_by(owner_id=None).all()
+ filter_by(owner_id=None).all()
return results
def stack_get_all_by_tenant(context):
results = model_query(context, models.Stack).\
- filter_by(owner_id=None).\
- filter_by(tenant=context.tenant_id).all()
+ filter_by(owner_id=None).\
+ filter_by(tenant=context.tenant_id).all()
return results
if not stack:
raise NotFound('Attempt to update a stack with id: %s %s' %
- (stack_id, 'that does not exist'))
+ (stack_id, 'that does not exist'))
old_template_id = stack.raw_template_id
s = stack_get(context, stack_id)
if not s:
raise NotFound('Attempt to delete a stack with id: %s %s' %
- (stack_id, 'that does not exist'))
+ (stack_id, 'that does not exist'))
session = Session.object_session(s)
def event_get_all_by_tenant(context):
stacks = model_query(context, models.Stack).\
- filter_by(tenant=context.tenant_id).all()
+ filter_by(tenant=context.tenant_id).all()
results = []
for stack in stacks:
results.extend(model_query(context, models.Event).
- filter_by(stack_id=stack.id).all())
+ filter_by(stack_id=stack.id).all())
return results
def event_get_all_by_stack(context, stack_id):
results = model_query(context, models.Event).\
- filter_by(stack_id=stack_id).all()
+ filter_by(stack_id=stack_id).all()
return results
def watch_rule_get(context, watch_rule_id):
result = model_query(context, models.WatchRule).\
- filter_by(id=watch_rule_id).first()
+ filter_by(id=watch_rule_id).first()
return result
def watch_rule_get_by_name(context, watch_rule_name):
result = model_query(context, models.WatchRule).\
- filter_by(name=watch_rule_name).first()
+ filter_by(name=watch_rule_name).first()
return result
def watch_rule_get_all_by_stack(context, stack_id):
results = model_query(context, models.WatchRule).\
- filter_by(stack_id=stack_id).all()
+ filter_by(stack_id=stack_id).all()
return results
if not wr:
raise NotFound('Attempt to update a watch with id: %s %s' %
- (watch_id, 'that does not exist'))
+ (watch_id, 'that does not exist'))
wr.update(values)
wr.save(_session(context))
def watch_rule_delete(context, watch_name):
wr = model_query(context, models.WatchRule).\
- filter_by(name=watch_name).first()
+ filter_by(name=watch_name).first()
if not wr:
raise NotFound('Attempt to delete a watch_rule with name: %s %s' %
- (watch_name, 'that does not exist'))
+ (watch_name, 'that does not exist'))
session = Session.object_session(wr)
def watch_data_delete(context, watch_name):
ds = model_query(context, models.WatchRule).\
- filter_by(name=watch_name).all()
+ filter_by(name=watch_name).all()
if not ds:
raise NotFound('Attempt to delete watch_data with name: %s %s' %
- (watch_name, 'that does not exist'))
+ (watch_name, 'that does not exist'))
session = Session.object_session(ds)
for d in ds:
Column('id', Integer, primary_key=True),
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
- Column('name', String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
+ Column('name', String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
Column('raw_template_id', Integer, ForeignKey("raw_template.id"),
nullable=False),
)
Column('stack_id', Integer, ForeignKey("stack.id"), nullable=False),
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
- Column('name', String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
+ Column('name', String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
)
resource = Table(
'resource', meta,
Column('id', Integer, primary_key=True),
- Column('nova_instance', String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('name', String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
-
+ Column('nova_instance', String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column('name', String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
- Column('state', String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None,
- _warn_on_bytestring=False)),
+ Column('state', String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
Column('state_description', String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None,
Column('id', Integer, primary_key=True),
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
- Column('stack_name', String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
+ Column('stack_name', String(length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
Column('name', String(length=255, convert_unicode=False,
assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('auth_url', Text()),
Column('aws_auth_url', Text()),
Column('tenant_id', String(length=256, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None,
- _warn_on_bytestring=False)),
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
Column('aws_creds', Text())
)
fkeys = list(event.c.stack_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[event.c.stack_id],
- refcolumns=[stack.c.id],
- name=fkey_name).drop()
+ ForeignKeyConstraint(
+ columns=[event.c.stack_id],
+ refcolumns=[stack.c.id],
+ name=fkey_name).drop()
fkeys = list(resource.c.stack_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[resource.c.stack_id],
- refcolumns=[stack.c.id],
- name=fkey_name).drop()
+ ForeignKeyConstraint(
+ columns=[resource.c.stack_id],
+ refcolumns=[stack.c.id],
+ name=fkey_name).drop()
- stack.c.id.alter(String(36), primary_key=True,
+ stack.c.id.alter(
+ String(36), primary_key=True,
default=uuidutils.generate_uuid)
event.c.stack_id.alter(String(36), nullable=False)
resource.c.stack_id.alter(String(36), nullable=False)
fkeys = list(event.c.stack_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[event.c.stack_id],
- refcolumns=[stack.c.id],
- name=fkey_name).create()
+ ForeignKeyConstraint(
+ columns=[event.c.stack_id],
+ refcolumns=[stack.c.id],
+ name=fkey_name).create()
fkeys = list(resource.c.stack_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[resource.c.stack_id],
- refcolumns=[stack.c.id],
- name=fkey_name).create()
+ ForeignKeyConstraint(
+ columns=[resource.c.stack_id],
+ refcolumns=[stack.c.id],
+ name=fkey_name).create()
def downgrade(migrate_engine):
fkeys = list(event.c.stack_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[event.c.stack_id],
- refcolumns=[stack.c.id],
- name=fkey_name).drop()
+ ForeignKeyConstraint(
+ columns=[event.c.stack_id],
+ refcolumns=[stack.c.id],
+ name=fkey_name).drop()
fkeys = list(resource.c.stack_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[resource.c.stack_id],
- refcolumns=[stack.c.id],
- name=fkey_name).drop()
+ ForeignKeyConstraint(
+ columns=[resource.c.stack_id],
+ refcolumns=[stack.c.id],
+ name=fkey_name).drop()
- stack.c.id.alter(Integer, primary_key=True,
+ stack.c.id.alter(
+ Integer, primary_key=True,
default=utils.generate_uuid)
event.c.stack_id.alter(Integer, nullable=False)
resource.c.stack_id.alter(Integer, nullable=False)
fkeys = list(event.c.stack_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[event.c.stack_id],
- refcolumns=[stack.c.id],
- name=fkey_name).create()
+ ForeignKeyConstraint(
+ columns=[event.c.stack_id],
+ refcolumns=[stack.c.id],
+ name=fkey_name).create()
fkeys = list(resource.c.stack_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[resource.c.stack_id],
- refcolumns=[stack.c.id],
- name=fkey_name).create()
+ ForeignKeyConstraint(
+ columns=[resource.c.stack_id],
+ refcolumns=[stack.c.id],
+ name=fkey_name).create()
fkeys = list(stack.c.owner_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[stack.c.owner_id],
- refcolumns=[stack.c.id],
- name=fkey_name).drop()
+        ForeignKeyConstraint(
+            columns=[stack.c.owner_id],
+            refcolumns=[stack.c.id],
+            name=fkey_name).drop()
stack.c.owner_id.alter(String(36), nullable=True)
fkeys = list(stack.c.owner_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[stack.c.owner_id],
- refcolumns=[stack.c.id],
- name=fkey_name).create()
+ ForeignKeyConstraint(
+ columns=[stack.c.owner_id],
+ refcolumns=[stack.c.id],
+ name=fkey_name).create()
def downgrade(migrate_engine):
fkeys = list(stack.c.owner_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[stack.c.owner_id],
- refcolumns=[stack.c.id],
- name=fkey_name).drop()
+ ForeignKeyConstraint(
+ columns=[stack.c.owner_id],
+ refcolumns=[stack.c.id],
+ name=fkey_name).drop()
stack.c.owner_id.alter(Integer, nullable=True)
fkeys = list(event.c.stack_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[event.c.stack_id],
- refcolumns=[stack.c.id],
- name=fkey_name).create()
+ ForeignKeyConstraint(
+ columns=[event.c.stack_id],
+ refcolumns=[stack.c.id],
+ name=fkey_name).create()
fkeys = list(stack.c.owner_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[stack.c.owner_id],
- refcolumns=[stack.c.id],
- name=fkey_name).create()
+ ForeignKeyConstraint(
+ columns=[stack.c.owner_id],
+ refcolumns=[stack.c.id],
+ name=fkey_name).create()
watch_rule = Table('watch_rule', meta, autoload=True)
Column('stack_id', String(length=36), ForeignKey("stack.id"),
- nullable=False).create(watch_rule)
+ nullable=False).create(watch_rule)
watch_rule.c.stack_name.drop()
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
- dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
- migrate_util.with_engine = patched_with_engine
+ dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
+ migrate_util.with_engine = patched_with_engine
# NOTE(jkoelker) Delay importing migrate until we are patched
meta.reflect(bind=engine)
try:
for table in ('stack', 'resource', 'event',
- 'parsed_template', 'raw_template'):
+ 'parsed_template', 'raw_template'):
assert table in meta.tables
return db_version_control(1)
except AssertionError:
__tablename__ = 'stack'
- id = Column(String, primary_key=True,
- default=uuidutils.generate_uuid)
+ id = Column(String, primary_key=True, default=uuidutils.generate_uuid)
name = Column(String)
- raw_template_id = Column(Integer, ForeignKey('raw_template.id'),
- nullable=False)
- raw_template = relationship(RawTemplate,
- backref=backref('stack'))
+ raw_template_id = Column(
+ Integer,
+ ForeignKey('raw_template.id'),
+ nullable=False)
+ raw_template = relationship(RawTemplate, backref=backref('stack'))
username = Column(String)
tenant = Column(String)
status = Column('status', String)
status_reason = Column('status_reason', String)
parameters = Column('parameters', Json)
- user_creds_id = Column(Integer, ForeignKey('user_creds.id'),
- nullable=False)
+ user_creds_id = Column(
+ Integer,
+ ForeignKey('user_creds.id'),
+ nullable=False)
owner_id = Column(Integer, nullable=True)
timeout = Column(Integer)
aws_auth_url = Column(String)
tenant_id = Column(String)
aws_creds = Column(String)
- stack = relationship(Stack,
- backref=backref('user_creds'))
+ stack = relationship(Stack, backref=backref('user_creds'))
class Event(BASE, HeatBase):
__tablename__ = 'event'
id = Column(Integer, primary_key=True)
- stack_id = Column(String, ForeignKey('stack.id'),
- nullable=False)
- stack = relationship(Stack,
- backref=backref('events'))
+ stack_id = Column(String, ForeignKey('stack.id'), nullable=False)
+ stack = relationship(Stack, backref=backref('events'))
name = Column(String)
logical_resource_id = Column(String)
# odd name as "metadata" is reserved
rsrc_metadata = Column('rsrc_metadata', Json)
- stack_id = Column(String, ForeignKey('stack.id'),
- nullable=False)
+ stack_id = Column(String, ForeignKey('stack.id'), nullable=False)
stack = relationship(Stack, backref=backref('resources'))
state = Column('state', String)
last_evaluated = Column(DateTime, default=timeutils.utcnow)
- stack_id = Column(String, ForeignKey('stack.id'),
- nullable=False)
+ stack_id = Column(String, ForeignKey('stack.id'), nullable=False)
stack = relationship(Stack, backref=backref('watch_rule'))
id = Column(Integer, primary_key=True)
data = Column('data', Json)
- watch_rule_id = Column(Integer, ForeignKey('watch_rule.id'),
- nullable=False)
+ watch_rule_id = Column(
+ Integer,
+ ForeignKey('watch_rule.id'),
+ nullable=False)
watch_rule = relationship(WatchRule, backref=backref('watch_data'))
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
- ses = sqlalchemy.orm.sessionmaker(bind=engine,
- autocommit=autocommit,
- expire_on_commit=expire_on_commit)
+ ses = sqlalchemy.orm.sessionmaker(
+ bind=engine,
+ autocommit=autocommit,
+ expire_on_commit=expire_on_commit)
return sqlalchemy.orm.scoped_session(ses)
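A hedged usage sketch of the scoped sessionmaker above (the in-memory engine URL is an assumption, and `autocommit=True` mirrors the legacy SQLAlchemy 0.x arguments used by this code): `scoped_session` hands back the same session to repeated calls within one thread.

```python
import sqlalchemy
import sqlalchemy.orm

engine = sqlalchemy.create_engine('sqlite://')
maker = sqlalchemy.orm.sessionmaker(bind=engine, autocommit=True,
                                    expire_on_commit=False)
Session = sqlalchemy.orm.scoped_session(maker)
s1, s2 = Session(), Session()
assert s1 is s2  # same thread -> same session
```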
WATCH_STATE_REASON: watch.rule.get(RULE_STATE_REASON),
WATCH_STATE_REASON_DATA: watch.rule.get(RULE_STATE_REASON_DATA),
WATCH_STATE_UPDATED_TIME: timeutils.isotime(
- watch.rule.get(RULE_STATE_UPDATED_TIME)),
+ watch.rule.get(RULE_STATE_UPDATED_TIME)),
WATCH_STATE_VALUE: watch.state,
WATCH_STATISTIC: watch.rule.get(RULE_STATISTIC),
WATCH_THRESHOLD: watch.rule.get(RULE_THRESHOLD),
# Lookup endpoint for object-store service type
service_type = 'object-store'
endpoints = self.keystone().service_catalog.get_endpoints(
- service_type=service_type)
+ service_type=service_type)
if len(endpoints[service_type]) == 1:
args['preauthurl'] = endpoints[service_type][0]['publicURL']
else:
args['token'] = con.auth_token
else:
logger.error("Quantum connection failed, "
- "no password or auth_token!")
+ "no password or auth_token!")
return None
logger.debug('quantum args %s', args)
Map the supplied filter function onto each Parameter (with an
optional filter function) and return the resulting dictionary.
'''
- return dict((n, func(p)) for n, p in self.params.iteritems()
- if filter_func(p))
+ return dict((n, func(p))
+ for n, p in self.params.iteritems() if filter_func(p))
def user_parameters(self):
'''
for res in reversed(self):
if not res.name in newstack.keys():
logger.debug("resource %s not found in updated stack"
- % res.name + " definition, deleting")
+ % res.name + " definition, deleting")
result = res.destroy()
if result:
failures.append('Resource %s delete failed'
for res in newstack:
if not res.name in self.keys():
logger.debug("resource %s not found in current stack"
- % res.name + " definition, adding")
+ % res.name + " definition, adding")
res.stack = self
self[res.name] = res
result = self[res.name].create()
# Currently all resource have a default handle_update method
# which returns "requires replacement" (res.UPDATE_REPLACE)
for res in newstack:
- if self.resolve_runtime_data(
- self[res.name].t) != self.resolve_runtime_data(res.t):
+ if self.resolve_runtime_data(self[res.name].t) !=\
+ self.resolve_runtime_data(res.t):
# Can fail if underlying resource class does not
# implement update logic or update requires replacement
% res.name)
else:
logger.warning("Cannot update resource %s," %
- res.name + " reason %s" % retval)
+ res.name + " reason %s" % retval)
failures.append('Resource %s update failed'
% res.name)
for key in self.schema:
assert key in SCHEMA_KEYS, 'Unknown schema key "%s"' % key
- assert self.type() in SCHEMA_TYPES, \
- 'Unknown property type "%s"' % self.type()
+ assert self.type() in SCHEMA_TYPES,\
+ 'Unknown property type "%s"' % self.type()
def required(self):
return self.schema.get(REQUIRED, False)
def _validate_list(self, value):
if (not isinstance(value, collections.Sequence) or
- isinstance(value, basestring)):
+ isinstance(value, basestring)):
raise TypeError('"%s" is not a list' % repr(value))
for v in value:
logger.info(_('Registering resource type %s') % resource_type)
if resource_type in _resource_classes:
logger.warning(_('Replacing existing resource type %s') %
- resource_type)
+ resource_type)
_resource_classes[resource_type] = resource_class
'AllowedValues': ['GreaterThanOrEqualToThreshold',
'GreaterThanThreshold', 'LessThanThreshold',
'LessThanOrEqualToThreshold']},
- 'AlarmDescription': {'Type': 'String'},
- 'EvaluationPeriods': {'Type': 'String'},
- 'MetricName': {'Type': 'String'},
- 'Namespace': {'Type': 'String'},
- 'Period': {'Type': 'String'},
- 'Statistic': {'Type': 'String',
- 'AllowedValues': ['SampleCount', 'Average', 'Sum',
- 'Minimum', 'Maximum']},
- 'AlarmActions': {'Type': 'List'},
- 'OKActions': {'Type': 'List'},
- 'Dimensions': {'Type': 'List'},
- 'InsufficientDataActions': {'Type': 'List'},
- 'Threshold': {'Type': 'String'},
- 'Units': {'Type': 'String',
- 'AllowedValues': ['Seconds', 'Microseconds', 'Milliseconds',
- 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes',
- 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits',
- 'Terabits', 'Percent', 'Count', 'Bytes/Second',
- 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second',
- 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second',
- 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second',
- 'Count/Second', None]}}
+ 'AlarmDescription': {'Type': 'String'},
+ 'EvaluationPeriods': {'Type': 'String'},
+ 'MetricName': {'Type': 'String'},
+ 'Namespace': {'Type': 'String'},
+ 'Period': {'Type': 'String'},
+ 'Statistic': {'Type': 'String',
+ 'AllowedValues': ['SampleCount',
+ 'Average',
+ 'Sum',
+ 'Minimum',
+ 'Maximum']},
+ 'AlarmActions': {'Type': 'List'},
+ 'OKActions': {'Type': 'List'},
+ 'Dimensions': {'Type': 'List'},
+ 'InsufficientDataActions': {'Type': 'List'},
+ 'Threshold': {'Type': 'String'},
+ 'Units': {'Type': 'String',
+ 'AllowedValues': ['Seconds',
+ 'Microseconds',
+ 'Milliseconds',
+ 'Bytes',
+ 'Kilobytes',
+ 'Megabytes',
+ 'Gigabytes',
+ 'Terabytes',
+ 'Bits',
+ 'Kilobits',
+ 'Megabits',
+ 'Gigabits',
+ 'Terabits',
+ 'Percent',
+ 'Count',
+ 'Bytes/Second',
+ 'Kilobytes/Second',
+ 'Megabytes/Second',
+ 'Gigabytes/Second',
+ 'Terabytes/Second',
+ 'Bits/Second',
+ 'Kilobits/Second',
+ 'Megabits/Second',
+ 'Gigabits/Second',
+ 'Terabits/Second',
+ 'Count/Second', None]}}
strict_dependency = False
if victim is None:
logger.info('%s Alarm, can not find instance %s' %
- (self.name, self.properties['InstanceId']))
+ (self.name, self.properties['InstanceId']))
return
logger.info('%s Alarm, restarting resource: %s' %
'Required': True}}
properties_schema = {'ImageId': {'Type': 'String',
- 'Required': True},
+ 'Required': True},
'InstanceType': {'Type': 'String',
- 'Required': True},
+ 'Required': True},
'KeyName': {'Type': 'String',
'Required': True},
'AvailabilityZone': {'Type': 'String',
'SourceDestCheck': {'Type': 'Boolean',
'Implemented': False},
'SubnetId': {'Type': 'String',
- 'Implemented': False},
+ 'Implemented': False},
'Tags': {'Type': 'List',
'Schema': {'Type': 'Map',
'Schema': tags_schema}},
return self.mime_string
def handle_create(self):
- if self.properties.get('SecurityGroups') == None:
+ if self.properties.get('SecurityGroups') is None:
security_groups = None
else:
- security_groups = [self.physical_resource_name_find(sg) for sg in
- self.properties.get('SecurityGroups')]
+ security_groups = [self.physical_resource_name_find(sg)
+ for sg in self.properties.get('SecurityGroups')]
userdata = self.properties['UserData'] or ''
userdata += '\ntouch /var/lib/cloud/instance/provision-finished\n'
if res:
return res
- #check validity of key
+ # check validity of key
try:
key_name = self.properties['KeyName']
except ValueError:
'Timeout': {'Type': 'Number',
'Required': True},
'UnhealthyThreshold': {'Type': 'Number',
- 'Required': True},
+ 'Required': True},
}
properties_schema = {
class FloatingIP(quantum.QuantumResource):
properties_schema = {'floating_network_id': {'Type': 'String',
- 'Required': True},
- 'value_specs': {'Type': 'Map',
- 'Default': {}},
- 'port_id': {'Type': 'String'},
- 'fixed_ip_address': {'Type': 'String'},
- }
+ 'Required': True},
+ 'value_specs': {'Type': 'Map',
+ 'Default': {}},
+ 'port_id': {'Type': 'String'},
+ 'fixed_ip_address': {'Type': 'String'}}
def handle_create(self):
props = self.prepare_properties(self.properties, self.name)
class FloatingIPAssociation(quantum.QuantumResource):
properties_schema = {'floatingip_id': {'Type': 'String',
- 'Required': True},
- 'port_id': {'Type': 'String',
- 'Required': True},
- 'fixed_ip_address': {'Type': 'String'}
- }
+ 'Required': True},
+ 'port_id': {'Type': 'String',
+ 'Required': True},
+ 'fixed_ip_address': {'Type': 'String'}}
def __init__(self, name, json_snippet, stack):
super(FloatingIPAssociation, self).__init__(name, json_snippet, stack)
client = self.quantum()
(floatingip_id, port_id) = self.resource_id.split(':')
client.update_floatingip(floatingip_id,
- {'floatingip': {'port_id': None}})
+ {'floatingip': {'port_id': None}})
def resource_mapping():
class Net(quantum.QuantumResource):
properties_schema = {'name': {'Type': 'String'},
- 'value_specs': {'Type': 'Map',
- 'Default': {}},
- 'admin_state_up': {'Default': True,
- 'Type': 'Boolean'},
- }
+ 'value_specs': {'Type': 'Map',
+ 'Default': {}},
+ 'admin_state_up': {'Default': True,
+ 'Type': 'Boolean'}}
def __init__(self, name, json_snippet, stack):
super(Net, self).__init__(name, json_snippet, stack)
class Port(quantum.QuantumResource):
fixed_ip_schema = {'subnet_id': {'Type': 'String',
- 'Required': True},
- 'ip_address': {'Type': 'String',
- 'Required': True}}
+ 'Required': True},
+ 'ip_address': {'Type': 'String',
+ 'Required': True}}
properties_schema = {'network_id': {'Type': 'String',
- 'Required': True},
- 'name': {'Type': 'String'},
- 'value_specs': {'Type': 'Map',
- 'Default': {}},
- 'admin_state_up': {'Default': True,
- 'Type': 'Boolean'},
- 'fixed_ips': {'Type': 'List',
- 'Schema': {'Type': 'Map',
- 'Schema': fixed_ip_schema}},
- 'mac_address': {'Type': 'String'},
- 'device_id': {'Type': 'String'},
- }
+ 'Required': True},
+ 'name': {'Type': 'String'},
+ 'value_specs': {'Type': 'Map',
+ 'Default': {}},
+ 'admin_state_up': {'Default': True,
+ 'Type': 'Boolean'},
+ 'fixed_ips': {'Type': 'List',
+ 'Schema': {'Type': 'Map',
+ 'Schema': fixed_ip_schema}},
+ 'mac_address': {'Type': 'String'},
+ 'device_id': {'Type': 'String'}}
def __init__(self, name, json_snippet, stack):
super(Port, self).__init__(name, json_snippet, stack)
values.
'''
props = dict((k, v) for k, v in properties.items()
- if v is not None and k != 'value_specs')
+ if v is not None and k != 'value_specs')
if 'name' in properties.keys():
props.setdefault('name', name)
if key in attributes.keys():
return attributes[key]
- raise exception.InvalidTemplateAttribute(resource=name,
- key=key)
+ raise exception.InvalidTemplateAttribute(resource=name, key=key)
def handle_update(self):
return self.UPDATE_REPLACE
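Most of the schema reindentation in this patch moves continuation lines to one
of the two layouts PEP 8 accepts: visual alignment under the opening
delimiter, or a hanging indent with nothing after the opening delimiter. A
short sketch of both, using an invented schema:

    # Visual indent: continuations align under the opening brace.
    schema = {'network_id': {'Type': 'String',
                             'Required': True},
              'name': {'Type': 'String'}}

    # Hanging indent: the opening line carries no items.
    schema = {
        'network_id': {'Type': 'String', 'Required': True},
        'name': {'Type': 'String'},
    }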
class Router(quantum.QuantumResource):
properties_schema = {'name': {'Type': 'String'},
- 'value_specs': {'Type': 'Map',
- 'Default': {}},
- 'admin_state_up': {'Type': 'Boolean',
- 'Default': True},
- }
+ 'value_specs': {'Type': 'Map',
+ 'Default': {}},
+ 'admin_state_up': {'Type': 'Boolean',
+ 'Default': True}}
def __init__(self, name, json_snippet, stack):
super(Router, self).__init__(name, json_snippet, stack)
class RouterInterface(quantum.QuantumResource):
properties_schema = {'router_id': {'Type': 'String',
- 'Required': True},
- 'subnet_id': {'Type': 'String',
- 'Required': True},
- }
+ 'Required': True},
+ 'subnet_id': {'Type': 'String',
+ 'Required': True}}
def __init__(self, name, json_snippet, stack):
super(RouterInterface, self).__init__(name, json_snippet, stack)
router_id = self.properties.get('router_id')
subnet_id = self.properties.get('subnet_id')
self.quantum().add_interface_router(router_id,
- {'subnet_id': subnet_id})
+ {'subnet_id': subnet_id})
self.resource_id_set('%s:%s' % (router_id, subnet_id))
def handle_delete(self):
client = self.quantum()
(router_id, subnet_id) = self.resource_id.split(':')
client.remove_interface_router(router_id,
- {'subnet_id': subnet_id})
+ {'subnet_id': subnet_id})
class RouterGateway(quantum.QuantumResource):
properties_schema = {'router_id': {'Type': 'String',
- 'Required': True},
- 'network_id': {'Type': 'String',
- 'Required': True},
- }
+ 'Required': True},
+ 'network_id': {'Type': 'String',
+ 'Required': True}}
def __init__(self, name, json_snippet, stack):
super(RouterGateway, self).__init__(name, json_snippet, stack)
router_id = self.properties.get('router_id')
network_id = self.properties.get('network_id')
self.quantum().add_gateway_router(router_id,
- {'network_id': network_id})
+ {'network_id': network_id})
self.resource_id_set('%s:%s' % (router_id, network_id))
def handle_delete(self):
class Subnet(quantum.QuantumResource):
allocation_schema = {'start': {'Type': 'String',
- 'Required': True},
- 'end': {'Type': 'String',
- 'Required': True}}
+ 'Required': True},
+ 'end': {'Type': 'String',
+ 'Required': True}}
properties_schema = {'network_id': {'Type': 'String',
- 'Required': True},
- 'cidr': {'Type': 'String',
- 'Required': True},
- 'value_specs': {'Type': 'Map',
- 'Default': {}},
- 'name': {'Type': 'String'},
- 'admin_state_up': {'Default': True,
- 'Type': 'Boolean'},
- 'ip_version': {'Type': 'Integer',
- 'AllowedValues': [4, 6],
- 'Default': 4},
- 'gateway_ip': {'Type': 'String'},
- 'allocation_pools': {'Type': 'List',
- 'Schema': {
- 'Type': 'Map',
- 'Schema': allocation_schema
- }}
- }
+ 'Required': True},
+ 'cidr': {'Type': 'String',
+ 'Required': True},
+ 'value_specs': {'Type': 'Map',
+ 'Default': {}},
+ 'name': {'Type': 'String'},
+ 'admin_state_up': {'Default': True,
+ 'Type': 'Boolean'},
+ 'ip_version': {'Type': 'Integer',
+ 'AllowedValues': [4, 6],
+ 'Default': 4},
+ 'gateway_ip': {'Type': 'String'},
+ 'allocation_pools': {'Type': 'List',
+ 'Schema': {
+ 'Type': 'Map',
+ 'Schema': allocation_schema
+ }}}
def __init__(self, name, json_snippet, stack):
super(Subnet, self).__init__(name, json_snippet, stack)
website_schema = {'IndexDocument': {'Type': 'String'},
'ErrorDocument': {'Type': 'String'}}
properties_schema = {'AccessControl': {
- 'Type': 'String',
- 'AllowedValues': ['Private',
- 'PublicRead',
- 'PublicReadWrite',
- 'AuthenticatedRead',
- 'BucketOwnerRead',
- 'BucketOwnerFullControl']},
- 'DeletionPolicy': {
- 'Type': 'String',
- 'AllowedValues': ['Delete',
- 'Retain']},
- 'WebsiteConfiguration': {'Type': 'Map',
- 'Schema': website_schema}}
+ 'Type': 'String',
+ 'AllowedValues': ['Private',
+ 'PublicRead',
+ 'PublicReadWrite',
+ 'AuthenticatedRead',
+ 'BucketOwnerRead',
+ 'BucketOwnerFullControl']},
+ 'DeletionPolicy': {
+ 'Type': 'String',
+ 'AllowedValues': ['Delete', 'Retain']},
+ 'WebsiteConfiguration': {'Type': 'Map',
+ 'Schema': website_schema}}
def __init__(self, name, json_snippet, stack):
super(S3Bucket, self).__init__(name, json_snippet, stack)
def handle_create(self):
"""Create a bucket."""
container = S3Bucket._create_container_name(
- self.physical_resource_name())
+ self.physical_resource_name())
headers = {}
logger.debug('S3Bucket create container %s with headers %s' %
(container, headers))
return parsed[1].split(':')[0]
elif key == 'WebsiteURL':
return '%s://%s%s/%s' % (parsed[0], parsed[1], parsed[2],
- self.resource_id)
+ self.resource_id)
else:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=key)
'Implemented': False},
'SecurityGroupIngress': {'Type': 'List'},
'SecurityGroupEgress': {'Type': 'List',
- 'Implemented': False}}
+ 'Implemented': False}}
def __init__(self, name, json_snippet, stack):
super(SecurityGroup, self).__init__(name, json_snippet, stack)
if not sec:
sec = self.nova().security_groups.create(
- self.physical_resource_name(),
- self.properties['GroupDescription'])
+ self.physical_resource_name(),
+ self.properties['GroupDescription'])
self.resource_id_set(sec.id)
if self.properties['SecurityGroupIngress']:
stack = self.nested()
if op not in stack.outputs:
raise exception.InvalidTemplateAttribute(
- resource=self.physical_resource_name(), key=key)
+ resource=self.physical_resource_name(), key=key)
return stack.output(op)
def FnGetAtt(self, key):
if not key.startswith('Outputs.'):
raise exception.InvalidTemplateAttribute(
- resource=self.physical_resource_name(), key=key)
+ resource=self.physical_resource_name(), key=key)
prefix, dot, op = key.partition('.')
return self.get_output(op)
'LoginProfile': {'Type': 'Map',
'Schema': {
'Password': {'Type': 'String'}
- }},
+ }},
'Policies': {'Type': 'List'}}
def __init__(self, name, json_snippet, stack):
def handle_create(self):
passwd = ''
if self.properties['LoginProfile'] and \
- 'Password' in self.properties['LoginProfile']:
- passwd = self.properties['LoginProfile']['Password']
+ 'Password' in self.properties['LoginProfile']:
+ passwd = self.properties['LoginProfile']['Password']
uid = self.keystone().create_stack_user(self.physical_resource_name(),
- passwd)
+ passwd)
self.resource_id_set(uid)
def handle_update(self):
def FnGetAtt(self, key):
-        #TODO Implement Arn attribute
+        # TODO Implement Arn attribute
raise exception.InvalidTemplateAttribute(
- resource=self.physical_resource_name(), key=key)
+ resource=self.physical_resource_name(), key=key)
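The comment fixes here and earlier (#check -> # check, #TODO -> # TODO)
satisfy pep8 E262/E265, which want a single space after the hash. Sketch:

    x = 0
    x = x + 1  #increment (flagged: no space after the hash)
    x = x + 1  # increment (accepted)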
class AccessKey(resource.Resource):
log_res = "<SANITIZED>"
else:
raise exception.InvalidTemplateAttribute(
- resource=self.physical_resource_name(), key=key)
+ resource=self.physical_resource_name(), key=key)
logger.info('%s.GetAtt(%s) == %s' % (self.physical_resource_name(),
key, log_res))
super(Volume, self).__init__(name, json_snippet, stack)
def handle_create(self):
- vol = self.nova('volume').volumes.create(self.properties['Size'],
- display_name=self.physical_resource_name(),
- display_description=self.physical_resource_name())
+ vol = self.nova('volume').volumes.create(
+ self.properties['Size'],
+ display_name=self.physical_resource_name(),
+ display_description=self.physical_resource_name())
while vol.status == 'creating':
eventlet.sleep(1)
vol.get()
except clients.novaclient.exceptions.NotFound as e:
logger.warning('Deleting VolumeAttachment %s %s - not found' %
- (server_id, volume_id))
+ (server_id, volume_id))
def resource_mapping():
class VPC(resource.Resource):
properties_schema = {'CidrBlock': {'Type': 'String'},
'InstanceTenancy': {'Type': 'String',
- 'AllowedValues': ['default', 'dedicated'],
- 'Default': 'default',
- 'Implemented': False}
- }
+ 'AllowedValues': ['default',
+ 'dedicated'],
+ 'Default': 'default',
+ 'Implemented': False}}
def __init__(self, name, json_snippet, stack):
super(VPC, self).__init__(name, json_snippet, stack)
def handle_create(self):
# Create a keystone user so we can create a signed URL via FnGetRefId
user_id = self.keystone().create_stack_user(
- self.physical_resource_name())
+ self.physical_resource_name())
kp = self.keystone().get_ec2_keypair(user_id)
if not kp:
raise exception.Error("Error creating ec2 keypair for user %s" %
properties_schema = {'Handle': {'Type': 'String',
'Required': True},
'Timeout': {'Type': 'Number',
- 'Required': True,
- 'MinValue': '1'},
+ 'Required': True,
+ 'MinValue': '1'},
'Count': {'Type': 'Number',
'MinValue': '1'}}
for s in stacks:
try:
stack = parser.Stack.load(context, stack=s,
- resolve_data=False)
+ resolve_data=False)
except exception.NotFound:
# The stack may have been deleted between listing
# and formatting
if stack_identity is not None:
s = self._get_stack(context, stack_identity)
else:
- rs = db_api.resource_get_by_physical_resource_id(context,
- physical_resource_id)
+ rs = db_api.resource_get_by_physical_resource_id(
+ context, physical_resource_id)
if not rs:
msg = "The specified PhysicalResourceId doesn't exist"
raise AttributeError(msg)
name_match = lambda r: True
return [api.format_stack_resource(resource)
- for resource in stack if resource.id is not None and
- name_match(resource)]
+ for resource in stack
+ if resource.id is not None and name_match(resource)]
@request_context
def list_stack_resources(self, context, stack_identity):
stack = db_api.stack_get(admin_context, sid, admin=True)
if not stack:
logger.error("Unable to retrieve stack %s for periodic task" %
- sid)
+ sid)
return
user_creds = db_api.user_creds_get(stack.user_creds_id)
stack_context = context.RequestContext.from_dict(user_creds)
-        # DB API and schema does not yet allow us to easily query by
-        # namespace/metric, but we will want this at some point
+        # DB API and schema do not yet allow us to easily query by
+        # namespace/metric, but we will want this at some point;
# for now, the API can query all metric data and filter locally
- if namespace != None or metric_name != None:
+ if namespace is not None or metric_name is not None:
logger.error("Filtering by namespace/metric not yet supported")
return
'''
Load the watchrule object, either by name or via an existing DB object
'''
- if watch == None:
+ if watch is None:
try:
watch = db_api.watch_rule_get_by_name(context, watch_name)
except Exception as ex:
logger.warn('WatchRule.load (%s) db error %s' %
(watch_name, str(ex)))
- if watch == None:
+ if watch is None:
raise AttributeError('Unknown watch name %s' % watch_name)
else:
return cls(context=context,
else:
s = db_api.stack_get(self.context, self.stack_id)
if s and s.status in (parser.Stack.CREATE_COMPLETE,
- parser.Stack.UPDATE_COMPLETE):
+ parser.Stack.UPDATE_COMPLETE):
stack = parser.Stack.load(self.context, stack=s)
for a in self.rule[self.ACTION_MAP[new_state]]:
greenpool.spawn_n(stack[a].alarm)
if state != self.state:
if self.rule_action(state):
logger.debug("Overriding state %s for watch %s with %s" %
- (self.state, self.name, state))
+ (self.state, self.name, state))
else:
logger.warning("Unable to override state %s for watch %s" %
- (self.state, self.name))
+ (self.state, self.name))
value = value.strip()
if ((value and value[0] == value[-1]) and
- (value[0] == "\"" or value[0] == "'")):
+ (value[0] == "\"" or value[0] == "'")):
value = value[1:-1]
return key.strip(), [value]
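The extra indentation level added to the wrapped conditions here, and in
PublishErrorsHandler and the log formatter below, keeps a continued condition
visually distinct from the block it guards (pep8 E129). A sketch of the
ambiguity being avoided:

    value = '"quoted'

    # Ambiguous: the continuation lines up with the body.
    if (value and
        value[0] == '"'):
        value = value[1:]

    # Distinct: the continuation takes one extra indent level.
    if (value and
            value[0] == '"'):
        value = value[1:]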
class PublishErrorsHandler(logging.Handler):
def emit(self, record):
if ('heat.openstack.common.notifier.log_notifier' in
- CONF.notification_driver):
+ CONF.notification_driver):
return
notifier.api.notify(None, 'error.publisher',
'error_notification',
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
- CONF.logging_debug_format_suffix):
+ CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
-        # Cache this on the record, Logger will respect our formated copy
+        # Cache this on the record; Logger will respect our formatted copy
from heat.openstack.common import log as logging
-list_notifier_drivers_opt = cfg.MultiStrOpt('list_notifier_drivers',
- default=['heat.openstack.common.notifier.no_op_notifier'],
- help='List of drivers to send notifications')
+list_notifier_drivers_opt = cfg.MultiStrOpt(
+ 'list_notifier_drivers',
+ default=['heat.openstack.common.notifier.no_op_notifier'],
+ help='List of drivers to send notifications')
CONF = cfg.CONF
CONF.register_opt(list_notifier_drivers_opt)
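When aligning arguments under the opening parenthesis would still overflow 79
columns, as with the MultiStrOpt call above, the patch breaks directly after
the opening parenthesis and gives every argument a hanging indent. A
self-contained sketch (register_opt here is a stand-in, not the real oslo
API):

    def register_opt(name, default=None, help=None):
        # Stand-in for the real option constructor (illustrative only).
        return (name, default, help)

    opt = register_opt(
        'list_notifier_drivers',
        default=['heat.openstack.common.notifier.no_op_notifier'],
        help='List of drivers to send notifications')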
def __init__(self):
super(EngineClient, self).__init__(
- topic=FLAGS.engine_topic,
- default_version=self.BASE_RPC_API_VERSION)
+ topic=FLAGS.engine_topic,
+ default_version=self.BASE_RPC_API_VERSION)
def identify_stack(self, ctxt, stack_name):
"""
:param params: Params passed from API.
"""
rpc_method = self.cast if cast else self.call
- return rpc_method(ctxt, self.make_msg('delete_stack',
- stack_identity=stack_identity),
- topic=_engine_topic(self.topic, ctxt, None))
+ return rpc_method(ctxt,
+ self.make_msg('delete_stack',
+ stack_identity=stack_identity),
+ topic=_engine_topic(self.topic, ctxt, None))
def list_resource_types(self, ctxt):
"""
def __init__(self, stream):
import win32console as win
red, green, blue, bold = (win.FOREGROUND_RED, win.FOREGROUND_GREEN,
- win.FOREGROUND_BLUE, win.FOREGROUND_INTENSITY)
+                                  win.FOREGROUND_BLUE,
+                                  win.FOREGROUND_INTENSITY)
self.stream = stream
self.screenBuffer = win.GetStdHandle(win.STD_OUT_HANDLE)
- self._colors = {
- 'normal': red | green | blue,
- 'red': red | bold,
- 'green': green | bold,
- 'blue': blue | bold,
- 'yellow': red | green | bold,
- 'magenta': red | blue | bold,
- 'cyan': green | blue | bold,
- 'white': red | green | blue | bold
- }
+ self._colors = {'normal': red | green | blue,
+ 'red': red | bold,
+ 'green': green | bold,
+ 'blue': blue | bold,
+ 'yellow': red | green | bold,
+ 'magenta': red | blue | bold,
+ 'cyan': green | blue | bold,
+ 'white': red | green | blue | bold}
def supported(cls, stream=sys.stdout):
try:
def get_distro():
- if os.path.exists('/etc/fedora-release') or \
- os.path.exists('/etc/redhat-release'):
+ if (os.path.exists('/etc/fedora-release') or
+ os.path.exists('/etc/redhat-release')):
return Fedora()
else:
return Distro()
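The get_distro() change swaps backslash continuation for a parenthesized
condition, which PEP 8 prefers: a stray space after a backslash is a syntax
error, while a break inside parentheses is always safe. Sketch:

    import os

    # Fragile: whitespace after the backslash breaks the continuation.
    if os.path.exists('/etc/fedora-release') or \
       os.path.exists('/etc/redhat-release'):
        pass

    # Robust: implicit continuation inside parentheses.
    if (os.path.exists('/etc/fedora-release') or
            os.path.exists('/etc/redhat-release')):
        pass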
"""Parse command-line arguments"""
parser = optparse.OptionParser()
parser.add_option("-n", "--no-site-packages", dest="no_site_packages",
- default=False, action="store_true",
- help="Do not inherit packages from global Python install")
+ default=False, action="store_true",
+                      help="Do not inherit packages from global Python "
+                           "install")
return parser.parse_args()