try:
parameters['StackName'] = arguments.pop(0)
except IndexError:
- print "Describing all stacks"
+ print "Please specify the stack name you wish to describe "
+ print "as the first argument"
+ return FAILURE
c = get_client(options)
result = c.describe_stacks(**parameters)
--- /dev/null
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Heat Engine Server
+"""
+
+import gettext
+import os
+import sys
+
+# If ../heat/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'heat', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+gettext.install('heat', unicode=1)
+
+from heat.common import config
+from heat.common import wsgi
+
+
+if __name__ == '__main__':
+ try:
+ conf = config.HeatConfigOpts()
+ conf()
+
+ app = config.load_paste_app(conf)
+
+ server = wsgi.Server()
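+ # The engine listens one port above config.DEFAULT_PORT (bind_port is 8001 in the sample heat-engine.conf)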
+ server.start(app, conf, default_port=config.DEFAULT_PORT+1)
+ server.wait()
+ except RuntimeError as e:
+ sys.exit("ERROR: %s" % e)
[app:apiv1app]
paste.app_factory = heat.common.wsgi:app_factory
-heat.app_factory = heat.api.v1.router:API
+heat.app_factory = heat.api.v1:API
[filter:versionnegotiation]
paste.filter_factory = heat.common.wsgi:filter_factory
--- /dev/null
+# Default minimal pipeline
+[pipeline:heat-engine]
+pipeline = context engineapp
+
+# Use the following pipeline for keystone auth
+# i.e. in heat-engine.conf:
+# [paste_deploy]
+# flavor = keystone
+#
+[pipeline:heat-engine-keystone]
+pipeline = authtoken auth-context engineapp
+
+[app:engineapp]
+paste.app_factory = heat.common.wsgi:app_factory
+heat.app_factory = heat.engine.api.v1:API
+
+[filter:context]
+paste.filter_factory = heat.common.wsgi:filter_factory
+heat.filter_factory = heat.common.context:ContextMiddleware
+
+[filter:authtoken]
+paste.filter_factory = keystone.middleware.auth_token:filter_factory
+service_protocol = http
+service_host = 127.0.0.1
+service_port = 5000
+auth_host = 127.0.0.1
+auth_port = 35357
+auth_protocol = http
+auth_uri = http://127.0.0.1:5000/
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
+
+[filter:auth-context]
+paste.filter_factory = heat.common.wsgi:filter_factory
+heat.filter_factory = keystone.middleware.heat_auth_token:KeystoneContextMiddleware
--- /dev/null
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose = True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug = True
+
+# Address to bind the server to
+bind_host = 0.0.0.0
+
+# Port to bind the server to
+bind_port = 8001
+
+# Log to this file. Make sure the user running heat-engine has
+# permissions to write to this file!
+log_file = /var/log/heat/engine.log
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+# syslog_log_facility = LOG_LOCAL0
# License for the specific language governing permissions and limitations
# under the License.
-SUPPORTED_PARAMS = ('StackName', 'TemplateBody', 'TemplateUrl','NotificationARNs', 'Parameters',
- 'Version', 'SignatureVersion', 'Timestamp', 'AWSAccessKeyId',
- 'Signature')
+import logging
+import routes
+from heat.api.v1 import stacks
+from heat.common import wsgi
+
+logger = logging.getLogger(__name__)
+
+class API(wsgi.Router):
+
+ """WSGI router for Heat v1 API requests."""
+ #TODO GetTemplate, ValidateTemplate
+
+ def __init__(self, conf, **local_conf):
+ self.conf = conf
+ mapper = routes.Mapper()
+
+ stacks_resource = stacks.create_resource(conf)
+
+ mapper.resource("stack", "stacks", controller=stacks_resource,
+ collection={'detail': 'GET'})
+
+ mapper.connect("/CreateStack", controller=stacks_resource,
+ action="create", conditions=dict(method=["POST"]))
+ mapper.connect("/", controller=stacks_resource, action="index")
+ mapper.connect("/ListStacks", controller=stacks_resource,
+ action="list", conditions=dict(method=["GET"]))
+ mapper.connect("/DescribeStacks", controller=stacks_resource,
+ action="describe", conditions=dict(method=["GET"]))
+ mapper.connect("/DeleteStack", controller=stacks_resource,
+ action="delete", conditions=dict(method=["DELETE"]))
+ mapper.connect("/UpdateStack", controller=stacks_resource,
+ action="update", conditions=dict(method=["PUT"]))
+
+ super(API, self).__init__(mapper)
"""
/stack endpoint for heat v1 API
"""
-import dbus
-import errno
-import eventlet
-from eventlet.green import socket
-import fcntl
import httplib
import json
-import libxml2
import logging
import os
-import stat
import sys
import urlparse
HTTPConflict,
HTTPBadRequest)
-from heat.common import exception
-from heat.common import utils
from heat.common import wsgi
+from heat.engine import client as engine
logger = logging.getLogger('heat.api.v1.stacks')
-stack_db = {}
-
-
-class Json2CapeXml:
- def __init__(self, template, stack_name):
-
- self.t = template
- self.parms = self.t['Parameters']
- self.maps = self.t['Mappings']
- self.res = {}
-
- self.parms['AWS::Region'] = {"Description" : "AWS Regions", "Type" : "String", "Default" : "ap-southeast-1",
- "AllowedValues" : ["us-east-1","us-west-1","us-west-2","sa-east-1","eu-west-1","ap-southeast-1","ap-northeast-1"],
- "ConstraintDescription" : "must be a valid EC2 instance type." }
-
- # expected user parameters
- self.parms['AWS::StackName'] = {'Default': stack_name}
- self.parms['KeyName'] = {'Default': 'harry-45-5-34-5'}
-
- for r in self.t['Resources']:
- # fake resource instance references
- self.parms[r] = {'Default': utils.generate_uuid()}
-
- self.resolve_static_refs(self.t['Resources'])
- self.resolve_find_in_map(self.t['Resources'])
- #self.resolve_attributes(self.t['Resources'])
- self.resolve_joins(self.t['Resources'])
- self.resolve_base64(self.t['Resources'])
- #print json.dumps(self.t['Resources'], indent=2)
-
-
- def convert_and_write(self):
-
- name = self.parms['AWS::StackName']['Default']
-
- doc = libxml2.newDoc("1.0")
- dep = doc.newChild(None, "deployable", None)
- dep.setProp("name", name)
- dep.setProp("uuid", 'bogus')
- dep.setProp("monitor", 'active')
- dep.setProp("username", 'nobody-yet')
- n_asses = dep.newChild(None, "assemblies", None)
-
- for r in self.t['Resources']:
- type = self.t['Resources'][r]['Type']
- if type != 'AWS::EC2::Instance':
- print 'ignoring Resource %s (%s)' % (r, type)
- continue
-
- n_ass = n_asses.newChild(None, 'assembly', None)
- n_ass.setProp("name", r)
- n_ass.setProp("uuid", self.parms[r]['Default'])
- props = self.t['Resources'][r]['Properties']
- for p in props:
- if p == 'ImageId':
- n_ass.setProp("image_name", props[p])
- elif p == 'UserData':
- new_script = []
- script_lines = props[p].split('\n')
- for l in script_lines:
- if '#!/' in l:
- new_script.append(l)
- self.insert_package_and_services(self.t['Resources'][r], new_script)
- else:
- new_script.append(l)
-
- startup = n_ass.newChild(None, 'startup', '\n'.join(new_script))
-
-
- try:
- con = self.t['Resources'][r]['Metadata']["AWS::CloudFormation::Init"]['config']
- n_services = n_ass.newChild(None, 'services', None)
- for st in con['services']:
- for s in con['services'][st]:
- n_service = n_services.newChild(None, 'service', None)
- n_service.setProp("name", '%s_%s' % (r, s))
- n_service.setProp("type", s)
- n_service.setProp("provider", 'pacemaker')
- n_service.setProp("class", 'lsb')
- n_service.setProp("monitor_interval", '30s')
- n_service.setProp("escalation_period", '1000')
- n_service.setProp("escalation_failures", '3')
- except KeyError as e:
- # if there is no config then no services.
- pass
-
- try:
- filename = '/var/run/%s.xml' % name
- open(filename, 'w').write(doc.serialize(None, 1))
- doc.freeDoc()
- except IOError as e:
- logger.error('couldn\'t write to /var/run/ error %s' % e)
-
- def insert_package_and_services(self, r, new_script):
-
- try:
- con = r['Metadata']["AWS::CloudFormation::Init"]['config']
- except KeyError as e:
- return
-
- for pt in con['packages']:
- if pt == 'yum':
- for p in con['packages']['yum']:
- new_script.append('yum install -y %s' % p)
- for st in con['services']:
- if st == 'systemd':
- for s in con['services']['systemd']:
- v = con['services']['systemd'][s]
- if v['enabled'] == 'true':
- new_script.append('systemctl enable %s.service' % s)
- if v['ensureRunning'] == 'true':
- new_script.append('systemctl start %s.service' % s)
- elif st == 'sysvinit':
- for s in con['services']['sysvinit']:
- v = con['services']['systemd'][s]
- if v['enabled'] == 'true':
- new_script.append('chkconfig %s on' % s)
- if v['ensureRunning'] == 'true':
- new_script.append('/etc/init.d/start %s' % s)
-
- def resolve_static_refs(self, s):
- '''
- looking for { "Ref": "str" }
- '''
- if isinstance(s, dict):
- for i in s:
- if i == 'Ref' and isinstance(s[i], (basestring, unicode)) and \
- self.parms.has_key(s[i]):
- if self.parms[s[i]] == None:
- print 'None Ref: %s' % str(s[i])
- elif self.parms[s[i]].has_key('Default'):
- # note the "ref: values" are in a dict of
- # size one, so return is fine.
- #print 'Ref: %s == %s' % (s[i], self.parms[s[i]]['Default'])
- return self.parms[s[i]]['Default']
- else:
- print 'missing Ref: %s' % str(s[i])
- else:
- s[i] = self.resolve_static_refs(s[i])
- elif isinstance(s, list):
- for index, item in enumerate(s):
- #print 'resolve_static_refs %d %s' % (index, item)
- s[index] = self.resolve_static_refs(item)
- return s
-
- def resolve_find_in_map(self, s):
- '''
- looking for { "Ref": "str" }
- '''
- if isinstance(s, dict):
- for i in s:
- if i == 'Fn::FindInMap':
- obj = self.maps
- if isinstance(s[i], list):
- #print 'map list: %s' % s[i]
- for index, item in enumerate(s[i]):
- if isinstance(item, dict):
- item = self.resolve_find_in_map(item)
- #print 'map item dict: %s' % (item)
- else:
- pass
- #print 'map item str: %s' % (item)
- obj = obj[item]
- else:
- obj = obj[s[i]]
- return obj
- else:
- s[i] = self.resolve_find_in_map(s[i])
- elif isinstance(s, list):
- for index, item in enumerate(s):
- s[index] = self.resolve_find_in_map(item)
- return s
-
-
- def resolve_joins(self, s):
- '''
- looking for { "Fn::join": [] }
- '''
- if isinstance(s, dict):
- for i in s:
- if i == 'Fn::Join':
- return s[i][0].join(s[i][1])
- else:
- s[i] = self.resolve_joins(s[i])
- elif isinstance(s, list):
- for index, item in enumerate(s):
- s[index] = self.resolve_joins(item)
- return s
-
-
- def resolve_base64(self, s):
- '''
- looking for { "Fn::join": [] }
- '''
- if isinstance(s, dict):
- for i in s:
- if i == 'Fn::Base64':
- return s[i]
- else:
- s[i] = self.resolve_base64(s[i])
- elif isinstance(s, list):
- for index, item in enumerate(s):
- s[index] = self.resolve_base64(item)
- return s
-
-def systemctl(method, name, instance=None):
-
- bus = dbus.SystemBus()
-
- sysd = bus.get_object('org.freedesktop.systemd1',
- '/org/freedesktop/systemd1')
-
- actual_method = ''
- if method == 'start':
- actual_method = 'StartUnit'
- elif method == 'stop':
- actual_method = 'StopUnit'
- else:
- raise
-
- m = sysd.get_dbus_method(actual_method, 'org.freedesktop.systemd1.Manager')
-
- if instance == None:
- service = '%s.service' % (name)
- else:
- service = '%s@%s.service' % (name, instance)
-
- try:
- result = m(service, 'replace')
- except dbus.DBusException as e:
- logger.error('couldn\'t %s %s error: %s' % (method, name, e))
- return None
- return result
-
-
-class CapeEventListener:
-
- def __init__(self):
- self.backlog = 50
- self.file = 'pacemaker-cloud-cped'
-
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- flags = fcntl.fcntl(sock, fcntl.F_GETFD)
- fcntl.fcntl(sock, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- try:
- st = os.stat(self.file)
- except OSError, err:
- if err.errno != errno.ENOENT:
- raise
- else:
- if stat.S_ISSOCK(st.st_mode):
- os.remove(self.file)
- else:
- raise ValueError("File %s exists and is not a socket", self.file)
- sock.bind(self.file)
- sock.listen(self.backlog)
- os.chmod(self.file, 0600)
-
- eventlet.spawn_n(self.cape_event_listner, sock)
-
- def cape_event_listner(self, sock):
- eventlet.serve(sock, self.cape_event_handle)
-
- def cape_event_handle(self, sock, client_addr):
- while True:
- x = sock.recv(4096)
- # TODO(asalkeld) format this event "nicely"
- logger.info('%s' % x.strip('\n'))
- if not x: break
-
class StackController(object):
def __init__(self, options):
self.options = options
- self.stack_id = 1
- self.event_listener = CapeEventListener()
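+ # The API no longer manages stacks locally; configure the client used to forward requests to heat-engine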
+ engine.configure_engine_client(options)
def list(self, req):
"""
Returns the following information for all stacks:
"""
+ c = engine.get_engine_client(req.context)
+ stack_list = c.get_stacks(**req.params)
+
res = {'ListStacksResponse': {'ListStacksResult': {'StackSummaries': [] } } }
summaries = res['ListStacksResponse']['ListStacksResult']['StackSummaries']
- for s in stack_db:
- mem = {}
- mem['StackId'] = stack_db[s]['StackId']
- mem['StackName'] = s
- mem['CreationTime'] = 'now'
- try:
- mem['TemplateDescription'] = stack_db[s]['Description']
- mem['StackStatus'] = stack_db[s]['StackStatus']
- except:
- mem['TemplateDescription'] = 'No description'
- mem['StackStatus'] = 'unknown'
- summaries.append(mem)
+ for s in stack_list:
+ summaries.append(s)
return res
def describe(self, req):
+ """
+ Returns detailed information for the stack named in the request
+ """
+ c = engine.get_engine_client(req.context)
- stack_name = None
- if req.params.has_key('StackName'):
- stack_name = req.params['StackName']
- if not stack_db.has_key(stack_name):
- msg = _("Stack does not exist with that name.")
- return webob.exc.HTTPNotFound(msg)
-
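+ # Ask the engine for the details of the requested stack; StackName is a required query parameter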
+ stack_list = c.show_stack(req.params['StackName'])
res = {'DescribeStacksResult': {'Stacks': [] } }
- summaries = res['DescribeStacksResult']['Stacks']
- for s in stack_db:
- if stack_name is None or s == stack_name:
- mem = {}
- mem['StackId'] = stack_db[s]['StackId']
- mem['StackStatus'] = stack_db[s]['StackStatus']
- mem['StackName'] = s
- mem['CreationTime'] = 'now'
- mem['DisableRollback'] = 'false'
- mem['Outputs'] = '[]'
- summaries.append(mem)
+ stacks = res['DescribeStacksResult']['Stacks']
+ for s in stack_list:
+ mem = {'member': s}
+ stacks.append(mem)
return res
return None
- def _apply_user_parameters(self, req, stack):
- # TODO
- pass
def create(self, req):
"""
- :param req: The WSGI/Webob Request object
-
- :raises HttpBadRequest if not template is given
- :raises HttpConflict if object already exists
+ Creates a new stack from the template supplied in the request
"""
- if stack_db.has_key(req.params['StackName']):
- msg = _("Stack already exists with that name.")
- return webob.exc.HTTPConflict(msg)
+ c = engine.get_engine_client(req.context)
templ = self._get_template(req)
if templ is None:
return webob.exc.HTTPBadRequest(explanation=msg)
stack = json.loads(templ)
- my_id = '%s-%d' % (req.params['StackName'], self.stack_id)
- self.stack_id = self.stack_id + 1
- stack['StackId'] = my_id
- stack['StackStatus'] = 'CREATE_COMPLETE'
- self._apply_user_parameters(req, stack)
- stack_db[req.params['StackName']] = stack
-
- cape_transformer = Json2CapeXml(stack, req.params['StackName'])
- cape_transformer.convert_and_write()
-
- systemctl('start', 'pcloud-cape-sshd', req.params['StackName'])
-
- return {'CreateStackResult': {'StackId': my_id}}
-
- def update(self, req):
- """
- :param req: The WSGI/Webob Request object
-
- :raises HttpNotFound if object is not available
- """
- if not stack_db.has_key(req.params['StackName']):
- msg = _("Stack does not exist with that name.")
- return webob.exc.HTTPNotFound(msg)
-
- stack = stack_db[req.params['StackName']]
- my_id = stack['StackId']
- templ = self._get_template(req)
- if templ:
- stack = json.loads(templ)
- stack['StackId'] = my_id
- stack_db[req.params['StackName']] = stack
-
- self._apply_user_parameters(req, stack)
- stack['StackStatus'] = 'UPDATE_COMPLETE'
-
- return {'UpdateStackResult': {'StackId': my_id}}
+ stack['StackName'] = req.params['StackName']
+ return c.create_stack(stack)
def delete(self, req):
"""
- Deletes the object and all its resources
-
- :param req: The WSGI/Webob Request object
-
- :raises HttpBadRequest if the request is invalid
- :raises HttpNotFound if object is not available
- :raises HttpNotAuthorized if object is not
- deleteable by the requesting user
+ Deletes the stack named in the request
"""
- logger.info('in delete %s ' % req.params['StackName'])
- if not stack_db.has_key(req.params['StackName']):
- msg = _("Stack does not exist with that name.")
- return webob.exc.HTTPNotFound(msg)
-
- del stack_db[req.params['StackName']]
+ logger.info('Deleting stack %s' % req.params['StackName'])
+ c = engine.get_engine_client(req.context)
+ res = c.delete_stack(req.params['StackName'])
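+ # delete_stack returns the raw engine response; map its HTTP status onto the CloudFormation-style result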
+ if res.status == 200:
+ return {'DeleteStackResult': ''}
+ else:
+ return webob.exc.HTTPNotFound()
- systemctl('stop', 'pcloud-cape-sshd', req.params['StackName'])
def create_resource(options):
- """Stacks resource factory method"""
+ """Stacks resource factory method."""
deserializer = wsgi.JSONRequestDeserializer()
serializer = wsgi.JSONResponseSerializer()
return wsgi.Resource(StackController(options), deserializer, serializer)
Client classes for callers of a heat system
"""
-import errno
-import httplib
import json
import logging
import os
-import socket
-import sys
-import heat.api.v1
from heat.common import client as base_client
from heat.common import exception
-from heat.common import utils
+
+from heat.cloudformations import *
logger = logging.getLogger(__name__)
-SUPPORTED_PARAMS = heat.api.v1.SUPPORTED_PARAMS
class V1Client(base_client.BaseClient):
def delete_stack(self, **kwargs):
params = self._extract_params(kwargs, SUPPORTED_PARAMS)
self._insert_common_parameters(params)
- self.do_request("DELETE", "/DeleteStack", params=params)
- return True
+ res = self.do_request("DELETE", "/DeleteStack", params=params)
+ data = json.loads(res.read())
+ return data
Client = V1Client
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
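+# Query parameters accepted by the CloudFormation-compatible API and forwarded by the clients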
+SUPPORTED_PARAMS = ('StackName', 'TemplateBody', 'TemplateUrl',
+ 'NotificationARNs', 'Parameters', 'Version',
+ 'SignatureVersion', 'Timestamp', 'AWSAccessKeyId',
+ 'Signature')
+
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
import routes
-from heat.api.v1 import stacks
from heat.common import wsgi
-
-logger = logging.getLogger(__name__)
+from heat.engine.api.v1 import stacks
class API(wsgi.Router):
-
- """WSGI router for Heat v1 API requests."""
- #TODO GetTemplate, ValidateTemplate
+ """WSGI entry point for all stac requests."""
def __init__(self, conf, **local_conf):
- self.conf = conf
mapper = routes.Mapper()
stacks_resource = stacks.create_resource(conf)
-
mapper.resource("stack", "stacks", controller=stacks_resource,
collection={'detail': 'GET'})
-
- mapper.connect("/CreateStack", controller=stacks_resource,
- action="create", conditions=dict(method=["POST"]))
mapper.connect("/", controller=stacks_resource, action="index")
- mapper.connect("/ListStacks", controller=stacks_resource,
- action="list", conditions=dict(method=["GET"]))
- mapper.connect("/DescribeStacks", controller=stacks_resource,
- action="describe", conditions=dict(method=["GET"]))
- mapper.connect("/DeleteStack", controller=stacks_resource,
- action="delete", conditions=dict(method=["DELETE"]))
- mapper.connect("/UpdateStack", controller=stacks_resource,
- action="update", conditions=dict(method=["PUT"]))
super(API, self).__init__(mapper)
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Reference implementation stacks server WSGI controller
+"""
+import json
+import logging
+
+import webob
+from webob.exc import (HTTPNotFound,
+ HTTPConflict,
+ HTTPBadRequest)
+
+from heat.common import exception
+from heat.common import wsgi
+
+from heat.engine import capelistener
+from heat.engine import json2capexml
+from heat.engine import systemctl
+
+
+logger = logging.getLogger('heat.engine.api.v1.stacks')
+
+stack_db = {}
+
+class Controller(object):
+ '''
+ WSGI controller for stack resources in the heat engine API
+ '''
+
+ def __init__(self, conf):
+ self.conf = conf
+ self.listener = capelistener.CapeEventListener()
+
+
+ def index(self, req, format='json'):
+ logger.info('format is %s' % format)
+ res = {'stacks': [] }
+ for s in stack_db:
+ mem = {}
+ mem['StackId'] = stack_db[s]['StackId']
+ mem['StackName'] = s
+ mem['CreationTime'] = 'now'
+ try:
+ mem['TemplateDescription'] = stack_db[s]['Description']
+ mem['StackStatus'] = stack_db[s]['StackStatus']
+ except:
+ mem['TemplateDescription'] = 'No description'
+ mem['StackStatus'] = 'unknown'
+ res['stacks'].append(mem)
+
+ return res
+
+ def show(self, req, id):
+ res = {'stacks': [] }
+ if stack_db.has_key(id):
+ mem = {}
+ mem['StackId'] = stack_db[id]['StackId']
+ mem['StackName'] = id
+ mem['CreationTime'] = 'TODO'
+ mem['LastUpdatedTime'] = 'TODO'
+ mem['NotificationARNs'] = 'TODO'
+ mem['Outputs'] = [{'Description': 'TODO', 'OutputKey': 'TODO', 'OutputValue': 'TODO' }]
+ mem['Parameters'] = stack_db[id]['Parameters']
+ mem['StackStatusReason'] = 'TODO'
+ mem['TimeoutInMinutes'] = 'TODO'
+ try:
+ mem['TemplateDescription'] = stack_db[id]['Description']
+ mem['StackStatus'] = stack_db[id]['StackStatus']
+ except:
+ mem['TemplateDescription'] = 'No description'
+ mem['StackStatus'] = 'unknown'
+ res['stacks'].append(mem)
+ else:
+ return webob.exc.HTTPNotFound('No stack by that name')
+
+ return res
+
+ def create(self, req, body=None):
+
+ if body is None:
+ msg = _("TemplateBody or TemplateUrl were not given.")
+ return webob.exc.HTTPBadRequest(explanation=msg)
+
+ if stack_db.has_key(body['StackName']):
+ msg = _("Stack already exists with that name.")
+ return webob.exc.HTTPConflict(msg)
+
+ stack = body
+ stack['StackId'] = body['StackName']
+ stack['StackStatus'] = 'CREATE_COMPLETE'
+ # TODO self._apply_user_parameters(req, stack)
+ stack_db[body['StackName']] = stack
+
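+ # Transform the CloudFormation template into the pacemaker-cloud XML deployable and start the per-stack sshd unit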
+ cape_transformer = json2capexml.Json2CapeXml(stack, body['StackName'])
+ cape_transformer.convert_and_write()
+
+ systemctl.systemctl('start', 'pcloud-cape-sshd', body['StackName'])
+
+ return {'stack': {'id': body['StackName']}}
+
+ def delete(self, req, id):
+ if not stack_db.has_key(id):
+ return webob.exc.HTTPNotFound('No stack by that name')
+
+ logger.info('deleting stack %s' % id)
+ systemctl.systemctl('stop', 'pcloud-cape-sshd', id)
+ del stack_db[id]
+ return None
+
+def create_resource(conf):
+ """Stacks resource factory method."""
+ deserializer = wsgi.JSONRequestDeserializer()
+ serializer = wsgi.JSONResponseSerializer()
+ return wsgi.Resource(Controller(conf), deserializer, serializer)
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import eventlet
+from eventlet.green import socket
+import fcntl
+import logging
+import os
+import stat
+
+logger = logging.getLogger('heat.engine.capelistener')
+
+class CapeEventListener:
+
+ def __init__(self):
+ self.backlog = 50
+ self.file = 'pacemaker-cloud-cped'
+
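+ # Listen on a UNIX domain socket; mark it close-on-exec and clean up any stale socket file from a previous run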
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ flags = fcntl.fcntl(sock, fcntl.F_GETFD)
+ fcntl.fcntl(sock, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ st = os.stat(self.file)
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ else:
+ if stat.S_ISSOCK(st.st_mode):
+ os.remove(self.file)
+ else:
+ raise ValueError("File %s exists and is not a socket", self.file)
+ sock.bind(self.file)
+ sock.listen(self.backlog)
+ os.chmod(self.file, 0600)
+
+ eventlet.spawn_n(self.cape_event_listner, sock)
+
+ def cape_event_listner(self, sock):
+ eventlet.serve(sock, self.cape_event_handle)
+
+ def cape_event_handle(self, sock, client_addr):
+ while True:
+ x = sock.recv(4096)
+ # TODO(asalkeld) format this event "nicely"
+ logger.info('%s' % x.strip('\n'))
+ if not x: break
+
+
+
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Simple client class to speak with any RESTful service that implements
+the heat Engine API
+"""
+
+import json
+import logging
+
+from heat.common.client import BaseClient
+from heat.common import config
+from heat.common import crypt
+from heat.common import exception
+from openstack.common import cfg
+
+from heat.cloudformations import *
+
+logger = logging.getLogger(__name__)
+
+_CLIENT_CREDS = None
+_CLIENT_HOST = None
+_CLIENT_PORT = None
+_CLIENT_KWARGS = {}
+# AES key used to encrypt 'location' metadata
+_METADATA_ENCRYPTION_KEY = None
+
+
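+# Where to find the engine service; the API registers these options to locate heat-engine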
+engine_addr_opts = [
+ cfg.StrOpt('engine_host', default='0.0.0.0'),
+ cfg.IntOpt('engine_port', default=8001),
+ ]
+engine_client_opts = [
+ cfg.StrOpt('engine_client_protocol', default='http'),
+ cfg.StrOpt('engine_client_key_file'),
+ cfg.StrOpt('engine_client_cert_file'),
+ cfg.StrOpt('engine_client_ca_file'),
+ cfg.StrOpt('metadata_encryption_key'),
+ ]
+
+class EngineClient(BaseClient):
+
+ """A client for the Engine stack metadata service"""
+
+ DEFAULT_PORT = 8001
+
+ def __init__(self, host=None, port=None, metadata_encryption_key=None,
+ **kwargs):
+ """
+ :param metadata_encryption_key: Key used to encrypt 'location' metadata
+ """
+ self.metadata_encryption_key = metadata_encryption_key
+ # NOTE (dprince): by default base client overwrites host and port
+ # settings when using keystone. configure_via_auth=False disables
+ # this behaviour to ensure we still send requests to the Engine API
+ BaseClient.__init__(self, host, port, configure_via_auth=False,
+ **kwargs)
+
+ def get_stacks(self, **kwargs):
+ """
+ Returns a list of stack id/name mappings from Engine
+
+ :param filters: dict of keys & expected values to filter results
+ :param marker: stack id after which to start page
+ :param limit: max number of stacks to return
+ :param sort_key: results will be ordered by this stack attribute
+ :param sort_dir: direction in which to order results (asc, desc)
+ """
+ params = self._extract_params(kwargs, SUPPORTED_PARAMS)
+ res = self.do_request("GET", "/stacks", params=params)
+ return json.loads(res.read())['stacks']
+
+ def show_stack(self, stack_id):
+ """Returns a mapping of stack metadata from Engine"""
+ res = self.do_request("GET", "/stacks/%s" % stack_id)
+ data = json.loads(res.read())['stacks']
+ return data
+
+
+ def create_stack(self, template):
+ """
+ Tells Engine about a stack's metadata
+ """
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ res = self.do_request("POST", "/stacks", json.dumps(template), headers=headers)
+ data = json.loads(res.read())
+ return data
+
+ def update_stack(self, stack_id, template):
+ """
+ Updates Engine's information about a stack
+ """
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ res = self.do_request("PUT", "/stacks/%s" % (stack_id), json.dumps(template), headers)
+ data = json.loads(res.read())
+ stack = data['stack']
+ return stack
+
+ def delete_stack(self, stack_name):
+ """
+ Deletes Engine's information about a stack
+ """
+ res = self.do_request("DELETE", "/stacks/%s" % stack_name)
+ return res
+
+def get_engine_addr(conf):
+ conf.register_opts(engine_addr_opts)
+ return (conf.engine_host, conf.engine_port)
+
+
+def configure_engine_client(conf):
+ """
+ Sets up an engine client for use in engine lookups
+
+ :param conf: Configuration options coming from controller
+ """
+ global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT, _METADATA_ENCRYPTION_KEY
+ try:
+ host, port = get_engine_addr(conf)
+ except cfg.ConfigFileValueError:
+ msg = _("Configuration option was not valid")
+ logger.error(msg)
+ raise exception.BadEngineConnectionConfiguration(msg)
+ except IndexError:
+ msg = _("Could not find required configuration option")
+ logger.error(msg)
+ raise exception.BadEngineConnectionConfiguration(msg)
+
+ conf.register_opts(engine_client_opts)
+
+ _CLIENT_HOST = host
+ _CLIENT_PORT = port
+ _METADATA_ENCRYPTION_KEY = conf.metadata_encryption_key
+ _CLIENT_KWARGS = {
+ 'use_ssl': conf.engine_client_protocol.lower() == 'https',
+ 'key_file': conf.engine_client_key_file,
+ 'cert_file': conf.engine_client_cert_file,
+ 'ca_file': conf.engine_client_ca_file
+ }
+
+
+
+def get_engine_client(cxt):
+ global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
+ global _METADATA_ENCRYPTION_KEY
+ kwargs = _CLIENT_KWARGS.copy()
+ kwargs['auth_tok'] = cxt.auth_tok
+ if _CLIENT_CREDS:
+ kwargs['creds'] = _CLIENT_CREDS
+ return EngineClient(_CLIENT_HOST, _CLIENT_PORT,
+ _METADATA_ENCRYPTION_KEY, **kwargs)
+
+
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import libxml2
+import logging
+
+from heat.common import utils
+
+logger = logging.getLogger('heat.engine.json2capexml')
+
+class Json2CapeXml:
+ def __init__(self, template, stack_name):
+
+ self.t = template
+ self.parms = self.t['Parameters']
+ self.maps = self.t['Mappings']
+ self.res = {}
+ self.doc = None
+ self.name = stack_name
+
+ self.parms['AWS::Region'] = {"Description" : "AWS Regions", "Type" : "String", "Default" : "ap-southeast-1",
+ "AllowedValues" : ["us-east-1","us-west-1","us-west-2","sa-east-1","eu-west-1","ap-southeast-1","ap-northeast-1"],
+ "ConstraintDescription" : "must be a valid EC2 instance type." }
+
+ # expected user parameters
+ self.parms['AWS::StackName'] = {'Default': stack_name}
+ self.parms['KeyName'] = {'Default': 'harry-45-5-34-5'}
+
+ for r in self.t['Resources']:
+ # fake resource instance references
+ self.parms[r] = {'Default': utils.generate_uuid()}
+
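+ # Resolve the intrinsic functions (Ref, Fn::FindInMap, Fn::Join, Fn::Base64) up front so resources carry plain values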
+ self.resolve_static_refs(self.t['Resources'])
+ self.resolve_find_in_map(self.t['Resources'])
+ #self.resolve_attributes(self.t['Resources'])
+ self.resolve_joins(self.t['Resources'])
+ self.resolve_base64(self.t['Resources'])
+ #print json.dumps(self.t['Resources'], indent=2)
+
+
+ def convert(self):
+
+ self.doc = libxml2.newDoc("1.0")
+ dep = self.doc.newChild(None, "deployable", None)
+ dep.setProp("name", self.name)
+ dep.setProp("uuid", 'bogus')
+ dep.setProp("username", 'nobody-yet')
+ n_asses = dep.newChild(None, "assemblies", None)
+
+ for r in self.t['Resources']:
+ type = self.t['Resources'][r]['Type']
+ if type != 'AWS::EC2::Instance':
+ print 'ignoring Resource %s (%s)' % (r, type)
+ continue
+
+ n_ass = n_asses.newChild(None, 'assembly', None)
+ n_ass.setProp("name", r)
+ n_ass.setProp("uuid", self.parms[r]['Default'])
+ props = self.t['Resources'][r]['Properties']
+ for p in props:
+ if p == 'ImageId':
+ n_ass.setProp("image_name", props[p])
+ elif p == 'UserData':
+ new_script = []
+ script_lines = props[p].split('\n')
+ for l in script_lines:
+ if '#!/' in l:
+ new_script.append(l)
+ self.insert_package_and_services(self.t['Resources'][r], new_script)
+ else:
+ new_script.append(l)
+
+ startup = n_ass.newChild(None, 'startup', '\n'.join(new_script))
+
+
+ try:
+ con = self.t['Resources'][r]['Metadata']["AWS::CloudFormation::Init"]['config']
+ n_services = n_ass.newChild(None, 'services', None)
+ for st in con['services']:
+ for s in con['services'][st]:
+ n_service = n_services.newChild(None, 'service', None)
+ n_service.setProp("name", '%s_%s' % (r, s))
+ n_service.setProp("type", s)
+ n_service.setProp("provider", 'pacemaker')
+ n_service.setProp("class", 'lsb')
+ n_service.setProp("monitor_interval", '30s')
+ n_service.setProp("escalation_period", '1000')
+ n_service.setProp("escalation_failures", '3')
+ except KeyError as e:
+ # if there is no config then no services.
+ pass
+
+ def get_xml(self):
+ xml_str = self.doc.serialize(None, 1)
+ self.doc.freeDoc()
+ self.doc = None
+ return xml_str
+
+ def convert_and_write(self):
+ self.convert()
+ try:
+ filename = '/var/run/%s.xml' % self.name
+ open(filename, 'w').write(self.doc.serialize(None, 1))
+ self.doc.freeDoc()
+ self.doc = None
+ except IOError as e:
+ logger.error('couldn\'t write to /var/run/ error %s' % e)
+
+ def insert_package_and_services(self, r, new_script):
+
+ try:
+ con = r['Metadata']["AWS::CloudFormation::Init"]['config']
+ except KeyError as e:
+ return
+
+ for pt in con['packages']:
+ if pt == 'yum':
+ for p in con['packages']['yum']:
+ new_script.append('yum install -y %s' % p)
+ for st in con['services']:
+ if st == 'systemd':
+ for s in con['services']['systemd']:
+ v = con['services']['systemd'][s]
+ if v['enabled'] == 'true':
+ new_script.append('systemctl enable %s.service' % s)
+ if v['ensureRunning'] == 'true':
+ new_script.append('systemctl start %s.service' % s)
+ elif st == 'sysvinit':
+ for s in con['services']['sysvinit']:
+ v = con['services']['sysvinit'][s]
+ if v['enabled'] == 'true':
+ new_script.append('chkconfig %s on' % s)
+ if v['ensureRunning'] == 'true':
+ new_script.append('/etc/init.d/%s start' % s)
+
+ def resolve_static_refs(self, s):
+ '''
+ looking for { "Ref": "str" }
+ '''
+ if isinstance(s, dict):
+ for i in s:
+ if i == 'Ref' and isinstance(s[i], (basestring, unicode)) and \
+ self.parms.has_key(s[i]):
+ if self.parms[s[i]] == None:
+ print 'None Ref: %s' % str(s[i])
+ elif self.parms[s[i]].has_key('Default'):
+ # note the "ref: values" are in a dict of
+ # size one, so return is fine.
+ #print 'Ref: %s == %s' % (s[i], self.parms[s[i]]['Default'])
+ return self.parms[s[i]]['Default']
+ else:
+ print 'missing Ref: %s' % str(s[i])
+ else:
+ s[i] = self.resolve_static_refs(s[i])
+ elif isinstance(s, list):
+ for index, item in enumerate(s):
+ #print 'resolve_static_refs %d %s' % (index, item)
+ s[index] = self.resolve_static_refs(item)
+ return s
+
+ def resolve_find_in_map(self, s):
+ '''
+ looking for { "Ref": "str" }
+ '''
+ if isinstance(s, dict):
+ for i in s:
+ if i == 'Fn::FindInMap':
+ obj = self.maps
+ if isinstance(s[i], list):
+ #print 'map list: %s' % s[i]
+ for index, item in enumerate(s[i]):
+ if isinstance(item, dict):
+ item = self.resolve_find_in_map(item)
+ #print 'map item dict: %s' % (item)
+ else:
+ pass
+ #print 'map item str: %s' % (item)
+ obj = obj[item]
+ else:
+ obj = obj[s[i]]
+ return obj
+ else:
+ s[i] = self.resolve_find_in_map(s[i])
+ elif isinstance(s, list):
+ for index, item in enumerate(s):
+ s[index] = self.resolve_find_in_map(item)
+ return s
+
+
+ def resolve_joins(self, s):
+ '''
+ looking for { "Fn::join": [] }
+ '''
+ if isinstance(s, dict):
+ for i in s:
+ if i == 'Fn::Join':
+ return s[i][0].join(s[i][1])
+ else:
+ s[i] = self.resolve_joins(s[i])
+ elif isinstance(s, list):
+ for index, item in enumerate(s):
+ s[index] = self.resolve_joins(item)
+ return s
+
+
+ def resolve_base64(self, s):
+ '''
+ looking for { "Fn::join": [] }
+ '''
+ if isinstance(s, dict):
+ for i in s:
+ if i == 'Fn::Base64':
+ return s[i]
+ else:
+ s[i] = self.resolve_base64(s[i])
+ elif isinstance(s, list):
+ for index, item in enumerate(s):
+ s[index] = self.resolve_base64(item)
+ return s
+
+
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Start and Stop systemd services
+"""
+import dbus
+import logging
+
+logger = logging.getLogger('heat.engine.systemctl')
+
+def systemctl(method, name, instance=None):
+
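+ # Drive systemd over the system D-Bus: StartUnit/StopUnit on the org.freedesktop.systemd1.Manager interface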
+ bus = dbus.SystemBus()
+
+ sysd = bus.get_object('org.freedesktop.systemd1',
+ '/org/freedesktop/systemd1')
+
+ actual_method = ''
+ if method == 'start':
+ actual_method = 'StartUnit'
+ elif method == 'stop':
+ actual_method = 'StopUnit'
+ else:
+ raise
+
+ m = sysd.get_dbus_method(actual_method, 'org.freedesktop.systemd1.Manager')
+
+ if instance == None:
+ service = '%s.service' % (name)
+ else:
+ service = '%s@%s.service' % (name, instance)
+
+ try:
+ result = m(service, 'replace')
+ except dbus.DBusException as e:
+ logger.error('couldn\'t %s %s error: %s' % (method, name, e))
+ return None
+ return result
+
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# See http://code.google.com/p/python-nose/issues/detail?id=373
+# The code below enables nosetests to work with i18n _() blocks
+import __builtin__
+setattr(__builtin__, '_', lambda x: x)
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import unittest
+
+from heat.engine.json2capexml import *
+
+class ParseTestCase(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_01(self):
+ done=False
+
+ with open('templates/WordPress_Single_Instance.template') as f:
+ blob = json.load(f)
+ cape_transformer = Json2CapeXml(blob, 'WordPress_Single_Instance')
+ cape_transformer.convert()
+ print cape_transformer.get_xml()
+ done=True
+
+ self.assertTrue(done)
+
--- /dev/null
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Colorizer Code is borrowed from Twisted:
+# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""
+Unittest runner for heat
+
+To run all test::
+ python run_tests.py
+
+To run a single test::
+ python run_tests.py test_stores:TestSwiftBackend.test_get
+
+To run a single test module::
+ python run_tests.py test_stores
+"""
+
+import gettext
+import logging
+import os
+import unittest
+import sys
+
+gettext.install('heat', unicode=1)
+
+from nose import config
+from nose import result
+from nose import core
+
+
+class _AnsiColorizer(object):
+ """
+ A colorizer is an object that loosely wraps around a stream, allowing
+ callers to write text to the stream in a particular color.
+
+ Colorizer classes must implement C{supported()} and C{write(text, color)}.
+ """
+ _colors = dict(black=30, red=31, green=32, yellow=33,
+ blue=34, magenta=35, cyan=36, white=37)
+
+ def __init__(self, stream):
+ self.stream = stream
+
+ def supported(cls, stream=sys.stdout):
+ """
+ A class method that returns True if the current platform supports
+ coloring terminal output using this method. Returns False otherwise.
+ """
+ if not stream.isatty():
+ return False # auto color only on TTYs
+ try:
+ import curses
+ except ImportError:
+ return False
+ else:
+ try:
+ try:
+ return curses.tigetnum("colors") > 2
+ except curses.error:
+ curses.setupterm()
+ return curses.tigetnum("colors") > 2
+ except:
+ raise
+ # guess false in case of error
+ return False
+ supported = classmethod(supported)
+
+ def write(self, text, color):
+ """
+ Write the given text to the stream in the given color.
+
+ @param text: Text to be written to the stream.
+
+ @param color: A string label for a color. e.g. 'red', 'white'.
+ """
+ color = self._colors[color]
+ self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
+
+
+class _Win32Colorizer(object):
+ """
+ See _AnsiColorizer docstring.
+ """
+ def __init__(self, stream):
+ from win32console import GetStdHandle, STD_OUT_HANDLE, \
+ FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \
+ FOREGROUND_INTENSITY
+ red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN,
+ FOREGROUND_BLUE, FOREGROUND_INTENSITY)
+ self.stream = stream
+ self.screenBuffer = GetStdHandle(STD_OUT_HANDLE)
+ self._colors = {
+ 'normal': red | green | blue,
+ 'red': red | bold,
+ 'green': green | bold,
+ 'blue': blue | bold,
+ 'yellow': red | green | bold,
+ 'magenta': red | blue | bold,
+ 'cyan': green | blue | bold,
+ 'white': red | green | blue | bold}
+
+ def supported(cls, stream=sys.stdout):
+ try:
+ import win32console
+ screenBuffer = win32console.GetStdHandle(
+ win32console.STD_OUT_HANDLE)
+ except ImportError:
+ return False
+ import pywintypes
+ try:
+ screenBuffer.SetConsoleTextAttribute(
+ win32console.FOREGROUND_RED |
+ win32console.FOREGROUND_GREEN |
+ win32console.FOREGROUND_BLUE)
+ except pywintypes.error:
+ return False
+ else:
+ return True
+ supported = classmethod(supported)
+
+ def write(self, text, color):
+ color = self._colors[color]
+ self.screenBuffer.SetConsoleTextAttribute(color)
+ self.stream.write(text)
+ self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
+
+
+class _NullColorizer(object):
+ """
+ See _AnsiColorizer docstring.
+ """
+ def __init__(self, stream):
+ self.stream = stream
+
+ def supported(cls, stream=sys.stdout):
+ return True
+ supported = classmethod(supported)
+
+ def write(self, text, color):
+ self.stream.write(text)
+
+
+class HeatTestResult(result.TextTestResult):
+ def __init__(self, *args, **kw):
+ result.TextTestResult.__init__(self, *args, **kw)
+ self._last_case = None
+ self.colorizer = None
+ # NOTE(vish, tfukushima): reset stdout for the terminal check
+ stdout = sys.stdout
+ sys.stdout = sys.__stdout__
+ for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
+ if colorizer.supported():
+ self.colorizer = colorizer(self.stream)
+ break
+ sys.stdout = stdout
+
+ def getDescription(self, test):
+ return str(test)
+
+ # NOTE(vish, tfukushima): copied from unittest with edit to add color
+ def addSuccess(self, test):
+ unittest.TestResult.addSuccess(self, test)
+ if self.showAll:
+ self.colorizer.write("OK", 'green')
+ self.stream.writeln()
+ elif self.dots:
+ self.stream.write('.')
+ self.stream.flush()
+
+ # NOTE(vish, tfukushima): copied from unittest with edit to add color
+ def addFailure(self, test, err):
+ unittest.TestResult.addFailure(self, test, err)
+ if self.showAll:
+ self.colorizer.write("FAIL", 'red')
+ self.stream.writeln()
+ elif self.dots:
+ self.stream.write('F')
+ self.stream.flush()
+
+ # NOTE(vish, tfukushima): copied from unittest with edit to add color
+ def addError(self, test, err):
+ """
+ Overrides normal addError to add support for errorClasses.
+ If the exception is a registered class, the error will be added
+ to the list for that class, not errors.
+ """
+ stream = getattr(self, 'stream', None)
+ ec, ev, tb = err
+ try:
+ exc_info = self._exc_info_to_string(err, test)
+ except TypeError:
+ # This is for compatibility with Python 2.3.
+ exc_info = self._exc_info_to_string(err)
+ for cls, (storage, label, isfail) in self.errorClasses.items():
+ if result.isclass(ec) and issubclass(ec, cls):
+ if isfail:
+ test.passwd = False
+ storage.append((test, exc_info))
+ # Might get patched into a streamless result
+ if stream is not None:
+ if self.showAll:
+ message = [label]
+ detail = result._exception_detail(err[1])
+ if detail:
+ message.append(detail)
+ stream.writeln(": ".join(message))
+ elif self.dots:
+ stream.write(label[:1])
+ return
+ self.errors.append((test, exc_info))
+ test.passed = False
+ if stream is not None:
+ if self.showAll:
+ self.colorizer.write("ERROR", 'red')
+ self.stream.writeln()
+ elif self.dots:
+ stream.write('E')
+
+ def startTest(self, test):
+ unittest.TestResult.startTest(self, test)
+ current_case = test.test.__class__.__name__
+
+ if self.showAll:
+ if current_case != self._last_case:
+ self.stream.writeln(current_case)
+ self._last_case = current_case
+
+ self.stream.write(
+ ' %s' % str(test.test._testMethodName).ljust(60))
+ self.stream.flush()
+
+
+class HeatTestRunner(core.TextTestRunner):
+ def _makeResult(self):
+ return HeatTestResult(self.stream,
+ self.descriptions,
+ self.verbosity,
+ self.config)
+
+
+if __name__ == '__main__':
+ logger = logging.getLogger()
+ hdlr = logging.StreamHandler()
+ formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+ hdlr.setFormatter(formatter)
+ logger.addHandler(hdlr)
+ logger.setLevel(logging.DEBUG)
+
+ c = config.Config(stream=sys.stdout,
+ env=os.environ,
+ verbosity=3,
+ plugins=core.DefaultPluginManager())
+
+ runner = HeatTestRunner(stream=c.stream,
+ verbosity=c.verbosity,
+ config=c)
+ sys.exit(not core.run(config=c, testRunner=runner))
--- /dev/null
+#!/bin/bash
+
+function usage {
+ echo "Usage: $0 [OPTION]..."
+ echo "Run Heat's test suite(s)"
+ echo ""
+ echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
+ echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
+ echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
+ echo " --unittests-only Run unit tests only, exclude functional tests."
+ echo " -p, --pep8 Just run pep8"
+ echo " -h, --help Print this usage message"
+ echo ""
+ echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
+ echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
+ echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
+ exit
+}
+
+function process_option {
+ case "$1" in
+ -h|--help) usage;;
+ -V|--virtual-env) let always_venv=1; let never_venv=0;;
+ -N|--no-virtual-env) let always_venv=0; let never_venv=1;;
+ -f|--force) let force=1;;
+ -p|--pep8) let just_pep8=1;;
+ --unittests-only) noseargs="$noseargs --exclude-dir=heat/tests/functional";;
+ *) noseargs="$noseargs $1"
+ esac
+}
+
+venv=.venv
+with_venv=tools/with_venv.sh
+always_venv=0
+never_venv=1
+force=0
+noseargs=
+wrapper=""
+just_pep8=0
+
+for arg in "$@"; do
+ process_option $arg
+done
+
+function run_tests {
+ # Just run the test suites in current environment
+ ${wrapper} rm -f tests.sqlite
+ ${wrapper} $NOSETESTS 2> run_tests.err.log
+}
+
+NOSETESTS="python run_tests.py $noseargs"
+
+if [ $never_venv -eq 0 ]
+then
+ # Remove the virtual environment if --force used
+ if [ $force -eq 1 ]; then
+ echo "Cleaning virtualenv..."
+ rm -rf ${venv}
+ fi
+ if [ -e ${venv} ]; then
+ wrapper="${with_venv}"
+ else
+ if [ $always_venv -eq 1 ]; then
+ # Automatically install the virtualenv
+ python tools/install_venv.py
+ wrapper="${with_venv}"
+ else
+ echo -e "No virtual environment found...create one? (Y/n) \c"
+ read use_ve
+ if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
+ # Install the virtualenv and run the test suite in it
+ python tools/install_venv.py
+ wrapper=${with_venv}
+ fi
+ fi
+ fi
+fi
+
+if [ $just_pep8 -eq 1 ]; then
+ # -p/--pep8 was given: only run the style checks
+ ${wrapper} pep8 --repeat --show-source heat setup.py
+ exit
+fi
+
+run_tests || exit
+
}
""" % (branch_nick, revid, revno))
-
-class local_sdist(sdist):
- """Customized sdist hook - builds the ChangeLog file from VC first"""
-
- def run(self):
- if os.path.isdir('.bzr'):
- # We're in a bzr branch
-
- log_cmd = subprocess.Popen(["bzr", "log", "--gnu"],
- stdout=subprocess.PIPE)
- changelog = log_cmd.communicate()[0]
- with open("ChangeLog", "w") as changelog_file:
- changelog_file.write(changelog)
- sdist.run(self)
-
-cmdclass = {'sdist': local_sdist}
+cmdclass = {}
# If Sphinx is installed on the box running setup.py,
# enable setup.py to build the documentation, otherwise,
'Environment :: No Input/Output (Daemon)',
],
scripts=['bin/heat',
- 'bin/heat-api'],
- data_files=[('/etc/heat', ['etc/heat-api.conf', 'etc/heat-api-paste.ini'])],
+ 'bin/heat-api',
+ 'bin/heat-engine'],
+ data_files=[('/etc/heat', ['etc/heat-api.conf',
+ 'etc/heat-api-paste.ini',
+ 'etc/heat-engine.conf',
+ 'etc/heat-engine-paste.ini'])],
py_modules=[])