--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+try:
+    from pyrax.exceptions import NotFound
+except ImportError:
+    # Fallback so this module can be imported (and unit tested) in
+    # environments where pyrax is not installed.
+    class NotFound(Exception):
+        pass
+
+from heat.openstack.common import log as logging
+from heat.openstack.common.exception import OpenstackException
+from heat.openstack.common.gettextutils import _
+from heat.engine import scheduler
+from heat.engine.properties import Properties
+from heat.engine.resources.rackspace import rackspace_resource
+from heat.common import exception
+
+logger = logging.getLogger(__name__)
+
+
+class LoadbalancerBuildError(OpenstackException):
+    """Exception raised when a load balancer fails to build."""
+    message = _("There was an error building the loadbalancer:%(lb_name)s.")
+
+
+class CloudLoadBalancer(rackspace_resource.RackspaceResource):
+    """Heat resource wrapping a Rackspace Cloud Load Balancer (via pyrax)."""
+
+    # Protocols the API accepts for the 'protocol' property.
+    protocol_values = ["DNS_TCP", "DNS_UDP", "FTP", "HTTP", "HTTPS", "IMAPS",
+                       "IMAPv4", "LDAP", "LDAPS", "MYSQL", "POP3", "POP3S",
+                       "SMTP", "TCP", "TCP_CLIENT_FIRST", "UDP", "UDP_STREAM",
+                       "SFTP"]
+
+    # Balancing algorithms accepted for the 'algorithm' property.
+    # NOTE(review): 'algorithm' in properties_schema below does not actually
+    # reference this list as AllowedValues — confirm whether that is intended.
+    algorithm_values = ["LEAST_CONNECTIONS", "RANDOM", "ROUND_ROBIN",
+                        "WEIGHTED_LEAST_CONNECTIONS", "WEIGHTED_ROUND_ROBIN"]
+
+    # Backend node definition; 'address' may be omitted when 'ref' is used
+    # to point at another stack resource (resolved in handle_create).
+    nodes_schema = {
+        'address': {'Type': 'String', 'Required': False},
+        'ref': {'Type': 'String', 'Required': False},
+        'port': {'Type': 'Number', 'Required': True},
+        'condition': {'Type': 'String', 'Required': True,
+                      'AllowedValues': ['ENABLED', 'DISABLED'],
+                      'Default': 'ENABLED'},
+        'type': {'Type': 'String', 'Required': False,
+                 'AllowedValues': ['PRIMARY', 'SECONDARY']},
+        'weight': {'Type': 'Number', 'MinValue': 1, 'MaxValue': 100}
+    }
+
+    access_list_schema = {
+        'address': {'Type': 'String', 'Required': True},
+        'type': {'Type': 'String', 'Required': True,
+                 'AllowedValues': ['ALLOW', 'DENY']}
+    }
+
+    connection_logging_schema = {
+        'enabled': {'Type': 'String', 'Required': True,
+                    'AllowedValues': ["true", "false"]}
+    }
+
+    connection_throttle_schema = {
+        'maxConnectionRate': {'Type': 'Number', 'Required': False,
+                              'MinValue': 0, 'MaxValue': 100000},
+        'minConnections': {'Type': 'Number', 'Required': False, 'MinValue': 1,
+                           'MaxValue': 1000},
+        'maxConnections': {'Type': 'Number', 'Required': False, 'MinValue': 1,
+                           'MaxValue': 100000},
+        'rateInterval': {'Type': 'Number', 'Required': False, 'MinValue': 1,
+                         'MaxValue': 3600}
+    }
+
+    virtualip_schema = {
+        'type': {'Type': 'String', 'Required': True,
+                 'AllowedValues': ['SERVICENET', 'PUBLIC']},
+        'ipVersion': {'Type': 'String', 'Required': False,
+                      'AllowedValues': ['IPV6', 'IPV4'],
+                      'Default': 'IPV6'}
+    }
+
+    # Base health-monitor schema used by properties_schema; validate()
+    # re-validates against the CONNECT- or HTTP-specific schema below.
+    health_monitor_base_schema = {
+        'attemptsBeforeDeactivation': {'Type': 'Number', 'MinValue': 1,
+                                       'MaxValue': 10, 'Required': True},
+        'delay': {'Type': 'Number', 'MinValue': 1, 'MaxValue': 3600,
+                  'Required': True},
+        'timeout': {'Type': 'Number', 'MinValue': 1, 'MaxValue': 300,
+                    'Required': True},
+        'type': {'Type': 'String',
+                 'AllowedValues': ['CONNECT', 'HTTP', 'HTTPS'],
+                 'Required': True},
+        'bodyRegex': {'Type': 'String', 'Required': False},
+        'hostHeader': {'Type': 'String', 'Required': False},
+        'path': {'Type': 'String', 'Required': False},
+        'statusRegex': {'Type': 'String', 'Required': False},
+    }
+
+    health_monitor_connect_schema = {
+        'attemptsBeforeDeactivation': {'Type': 'Number', 'MinValue': 1,
+                                       'MaxValue': 10, 'Required': True},
+        'delay': {'Type': 'Number', 'MinValue': 1, 'MaxValue': 3600,
+                  'Required': True},
+        'timeout': {'Type': 'Number', 'MinValue': 1, 'MaxValue': 300,
+                    'Required': True},
+        'type': {'Type': 'String', 'AllowedValues': ['CONNECT'],
+                 'Required': True}
+    }
+
+    health_monitor_http_schema = {
+        'attemptsBeforeDeactivation': {'Type': 'Number', 'Required': True,
+                                       'MaxValue': 10, 'MinValue': 1},
+        'bodyRegex': {'Type': 'String', 'Required': True},
+        'delay': {'Type': 'Number', 'Required': True,
+                  'MaxValue': 3600, 'MinValue': 1},
+        'hostHeader': {'Type': 'String', 'Required': False},
+        'path': {'Type': 'String', 'Required': True},
+        'statusRegex': {'Type': 'String', 'Required': True},
+        'timeout': {'Type': 'Number', 'Required': True,
+                    'MaxValue': 300, 'MinValue': 1},
+        'type': {'Type': 'String', 'Required': True,
+                 'AllowedValues': ['HTTP', 'HTTPS']}
+    }
+
+    # Base SSL-termination schema; the stricter "enabled" variant below is
+    # applied in validate() when enabled is truthy.
+    ssl_termination_base_schema = {
+        "enabled": {'Type': 'Boolean', 'Required': True},
+        "securePort": {'Type': 'Number', 'Required': False},
+        "privatekey": {'Type': 'String', 'Required': False},
+        "certificate": {'Type': 'String', 'Required': False},
+        #only required if configuring intermediate ssl termination
+        #add to custom validation
+        "intermediateCertificate": {'Type': 'String', 'Required': False},
+        #pyrax will default to false
+        "secureTrafficOnly": {'Type': 'Boolean', 'Required': False}
+    }
+
+    ssl_termination_enabled_schema = {
+        "securePort": {'Type': 'Number', 'Required': True},
+        "privatekey": {'Type': 'String', 'Required': True},
+        "certificate": {'Type': 'String', 'Required': True},
+        "intermediateCertificate": {'Type': 'String', 'Required': False},
+        "enabled": {'Type': 'Boolean', 'Required': True,
+                    'AllowedValues': [True]},
+        "secureTrafficOnly": {'Type': 'Boolean', 'Required': False}
+    }
+
+    properties_schema = {
+        'name': {'Type': 'String', 'Required': False},
+        'nodes': {'Type': 'List', 'Required': True,
+                  'Schema': {'Type': 'Map', 'Schema': nodes_schema}},
+        'protocol': {'Type': 'String', 'Required': True,
+                     'AllowedValues': protocol_values},
+        'accessList': {'Type': 'List', 'Required': False,
+                       'Schema': {'Type': 'Map',
+                                  'Schema': access_list_schema}},
+        'halfClosed': {'Type': 'Boolean', 'Required': False},
+        'algorithm': {'Type': 'String', 'Required': False},
+        'connectionLogging': {'Type': 'Boolean', 'Required': False},
+        'metadata': {'Type': 'Map', 'Required': False},
+        'port': {'Type': 'Number', 'Required': True},
+        'timeout': {'Type': 'Number', 'Required': False, 'MinValue': 1,
+                    'MaxValue': 120},
+        'connectionThrottle': {'Type': 'Map', 'Required': False,
+                               'Schema': connection_throttle_schema},
+        'sessionPersistence': {'Type': 'String', 'Required': False,
+                               'AllowedValues': ['HTTP_COOKIE', 'SOURCE_IP']},
+        'virtualIps': {'Type': 'List', 'Required': True,
+                       'Schema': {'Type': 'Map', 'Schema': virtualip_schema}},
+        'contentCaching': {'Type': 'String', 'Required': False,
+                           'AllowedValues': ['ENABLED', 'DISABLED']},
+        'healthMonitor': {'Type': 'Map', 'Required': False,
+                          'Schema': health_monitor_base_schema},
+        'sslTermination': {'Type': 'Map', 'Required': False,
+                           'Schema': ssl_termination_base_schema},
+        'errorPage': {'Type': 'String', 'Required': False}
+    }
+
+    attributes_schema = {
+        'PublicIp': ('Public IP address of the specified '
+                     'instance.')}
+
+    # Only the 'nodes' property may change without replacing the resource.
+    update_allowed_keys = ('Properties',)
+    update_allowed_properties = ('nodes',)
+
+    def __init__(self, name, json_snippet, stack):
+        super(CloudLoadBalancer, self).__init__(name, json_snippet, stack)
+        # Cache the cloud load balancer client supplied by the base class.
+        self.clb = self.cloud_lb()
+
+ def _setup_properties(self, properties, function):
+ """Use defined schema properties as kwargs for loadbalancer objects."""
+ if properties and function:
+ return [function(**item_dict) for item_dict in properties]
+ elif function:
+ return [function()]
+
+ def _alter_properties_for_api(self):
+ """The following properties have usless key/value pairs which must
+ be passed into the api. Set them up to make template definition easier.
+ """
+ session_persistence = None
+ if'sessionPersistence' in self.properties.data:
+ session_persistence = {'persistenceType':
+ self.properties['sessionPersistence']}
+ connection_logging = None
+ if 'connectionLogging' in self.properties.data:
+ connection_logging = {'enabled':
+ self.properties['connectionLogging']}
+ metadata = None
+ if 'metadata' in self.properties.data:
+ metadata = [{'key': k, 'value': v}
+ for k, v in self.properties['metadata'].iteritems()]
+
+ return (session_persistence, connection_logging, metadata)
+
+ def _check_status(self, loadbalancer, status_list):
+ """Update the loadbalancer state, check the status."""
+ loadbalancer.get()
+ if loadbalancer.status in status_list:
+ return True
+ else:
+ return False
+
+    def _configure_post_creation(self, loadbalancer):
+        """Configure all load balancer properties that must be done post
+        creation.
+
+        Generator driven by scheduler.TaskRunner: before each configuration
+        step it yields until the load balancer reports ACTIVE, since the API
+        rejects changes while a previous operation is still in progress.
+        """
+        if self.properties['accessList']:
+            while not self._check_status(loadbalancer, ['ACTIVE']):
+                yield
+            loadbalancer.add_access_list(self.properties['accessList'])
+
+        if self.properties['errorPage']:
+            while not self._check_status(loadbalancer, ['ACTIVE']):
+                yield
+            loadbalancer.set_error_page(self.properties['errorPage'])
+
+        if self.properties['sslTermination']:
+            while not self._check_status(loadbalancer, ['ACTIVE']):
+                yield
+            loadbalancer.add_ssl_termination(
+                self.properties['sslTermination']['securePort'],
+                self.properties['sslTermination']['privatekey'],
+                self.properties['sslTermination']['certificate'],
+                intermediateCertificate=
+                self.properties['sslTermination']
+                ['intermediateCertificate'],
+                enabled=self.properties['sslTermination']['enabled'],
+                secureTrafficOnly=self.properties['sslTermination']
+                ['secureTrafficOnly'])
+
+        # NOTE(review): this tests membership on self.properties while the
+        # checks in _alter_properties_for_api use self.properties.data —
+        # confirm both membership tests behave identically here.
+        if 'contentCaching' in self.properties:
+            enabled = True if self.properties['contentCaching'] == 'ENABLED'\
+                else False
+            while not self._check_status(loadbalancer, ['ACTIVE']):
+                yield
+            loadbalancer.content_caching = enabled
+
+    def handle_create(self):
+        """Create the load balancer and run post-creation configuration.
+
+        Returns the created pyrax load balancer object, which Heat later
+        passes to check_create_complete().
+        """
+        node_list = []
+        for node in self.properties['nodes']:
+            # resolve references to stack resource IP's
+            # NOTE(review): this mutates the property dicts in place
+            # (del node['ref']) — presumably safe for a one-shot create,
+            # but confirm nothing re-reads the original property data.
+            if node.get('ref'):
+                node['address'] = (self.stack
+                                   .resource_by_refid(node['ref'])
+                                   .FnGetAtt('PublicIp'))
+                del node['ref']
+            node_list.append(node)
+
+        nodes = self._setup_properties(node_list, self.clb.Node)
+        virtual_ips = self._setup_properties(self.properties.get('virtualIps'),
+                                             self.clb.VirtualIP)
+
+        (session_persistence, connection_logging, metadata) = \
+            self._alter_properties_for_api()
+
+        lb_body = {
+            'port': self.properties['port'],
+            'protocol': self.properties['protocol'],
+            'nodes': nodes,
+            'virtual_ips': virtual_ips,
+            'algorithm': self.properties.get('algorithm'),
+            'halfClosed': self.properties.get('halfClosed'),
+            'connectionThrottle': self.properties.get('connectionThrottle'),
+            'metadata': metadata,
+            'healthMonitor': self.properties.get('healthMonitor'),
+            'sessionPersistence': session_persistence,
+            'timeout': self.properties.get('timeout'),
+            'connectionLogging': connection_logging,
+        }
+
+        # Fall back to the Heat-generated physical name when 'name' is unset.
+        lb_name = self.properties.get('name') or self.physical_resource_name()
+        logger.debug('Creating loadbalancer: %s' % {lb_name: lb_body})
+        loadbalancer = self.clb.create(lb_name, **lb_body)
+        self.resource_id_set(str(loadbalancer.id))
+
+        # Access list, error page, SSL termination and content caching can
+        # only be configured after creation; drive that generator here.
+        post_create = scheduler.TaskRunner(self._configure_post_creation,
+                                           loadbalancer)
+        post_create(timeout=600)
+        return loadbalancer
+
+    def check_create_complete(self, loadbalancer):
+        # Creation is complete once the API reports the LB as ACTIVE.
+        return self._check_status(loadbalancer, ['ACTIVE'])
+
+    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+        """
+        Add and remove nodes specified in the prop_diff.
+        """
+        loadbalancer = self.clb.get(self.resource_id)
+        if 'nodes' in prop_diff:
+            current_nodes = loadbalancer.nodes
+            # Nodes are uniquely identified by address and port; the "old"
+            # dict maps that key to each node the LB currently knows about.
+            for node in prop_diff['nodes']:
+                # resolve references to stack resource IP's
+                if node.get('ref'):
+                    node['address'] = (self.stack
+                                       .resource_by_refid(node['ref'])
+                                       .FnGetAtt('PublicIp'))
+                    del node['ref']
+            old = dict(("{0.address}{0.port}".format(node), node)
+                       for node in current_nodes)
+            #New is a dict of the nodes the loadbalancer will know about after
+            #this update.
+            new = dict(("%s%s" % (node['address'], node['port']), node)
+                       for node in prop_diff['nodes'])
+
+            old_set = set(old.keys())
+            new_set = set(new.keys())
+
+            deleted = old_set.difference(new_set)
+            added = new_set.difference(old_set)
+            updated = new_set.intersection(old_set)
+
+            # The API refuses to leave a load balancer with zero nodes.
+            if len(current_nodes) + len(added) - len(deleted) < 1:
+                raise ValueError("The loadbalancer:%s requires at least one "
+                                 "node." % self.name)
+            """
+            Add loadbalancers in the new map that are not in the old map.
+            Add before delete to avoid deleting the last node and getting in
+            an invalid state.
+            """
+            new_nodes = [self.clb.Node(**new[lb_node])
+                         for lb_node in added]
+            if new_nodes:
+                loadbalancer.add_nodes(new_nodes)
+
+            #Delete loadbalancers in the old dict that are not in the new dict.
+            for node in deleted:
+                old[node].delete()
+
+            # Update nodes present in both sets whose attributes changed;
+            # write the new values onto the existing node and push once.
+            for node in updated:
+                node_changed = False
+                for attribute in new[node].keys():
+                    if new[node][attribute] != getattr(old[node], attribute):
+                        node_changed = True
+                        setattr(old[node], attribute, new[node][attribute])
+                if node_changed:
+                    old[node].update()
+
+ def handle_delete(self):
+ if self.resource_id is None:
+ return
+ try:
+ loadbalancer = self.clb.get(self.resource_id)
+ except NotFound:
+ pass
+ else:
+ if loadbalancer.status != 'DELETED':
+ loadbalancer.delete()
+ self.resource_id_set(None)
+
+ def _remove_none(self, property_dict):
+ '''
+ Remove values that may be initialized to None and would cause problems
+ during schema validation.
+ '''
+ return dict((key, value)
+ for (key, value) in property_dict.iteritems()
+ if value)
+
+    def validate(self):
+        """
+        Validate any of the provided params
+        """
+        res = super(CloudLoadBalancer, self).validate()
+        if res:
+            return res
+
+        # halfClosed only makes sense for raw TCP-style protocols.
+        if self.properties.get('halfClosed'):
+            if not (self.properties['protocol'] == 'TCP' or
+                    self.properties['protocol'] == 'TCP_CLIENT_FIRST'):
+                return {'Error':
+                        'The halfClosed property is only available for the '
+                        'TCP or TCP_CLIENT_FIRST protocols'}
+
+        #health_monitor connect and http types require completely different
+        #schema
+        if self.properties.get('healthMonitor'):
+            health_monitor = \
+                self._remove_none(self.properties['healthMonitor'])
+
+            if health_monitor['type'] == 'CONNECT':
+                schema = CloudLoadBalancer.health_monitor_connect_schema
+            else:
+                schema = CloudLoadBalancer.health_monitor_http_schema
+            try:
+                Properties(schema,
+                           health_monitor,
+                           self.stack.resolve_runtime_data,
+                           self.name).validate()
+            except exception.StackValidationFailed as svf:
+                return {'Error': str(svf)}
+
+        # When SSL termination is enabled, re-validate against the stricter
+        # schema that makes securePort/privatekey/certificate mandatory.
+        # NOTE(review): this lookup relies on the 'enabled' key surviving
+        # _remove_none — verify behaviour when templates set enabled: false.
+        if self.properties.get('sslTermination'):
+            ssl_termination = self._remove_none(
+                self.properties['sslTermination'])
+
+            if ssl_termination['enabled']:
+                try:
+                    Properties(CloudLoadBalancer.
+                               ssl_termination_enabled_schema,
+                               ssl_termination,
+                               self.stack.resolve_runtime_data,
+                               self.name).validate()
+                except exception.StackValidationFailed as svf:
+                    return {'Error': str(svf)}
+
+    def FnGetRefId(self):
+        # This resource is referenced by its stack-local name.
+        return unicode(self.name)
+
+ def _public_ip(self):
+ #TODO(andrew-plunk) return list here and let caller choose ip
+ for ip in self.clb.get(self.resource_id).virtual_ips:
+ if ip.type == 'PUBLIC':
+ return ip.address
+
+ def _resolve_attribute(self, key):
+ attribute_function = {
+ 'PublicIp': self._public_ip()
+ }
+ if key not in attribute_function:
+ raise exception.InvalidTemplateAttribute(resource=self.name,
+ key=key)
+ function = attribute_function[key]
+ logger.info('%s.GetAtt(%s) == %s' % (self.name, key, function))
+ return unicode(function)
+
+
+def resource_mapping():
+ if rackspace_resource.PYRAX_INSTALLED:
+ return {
+ 'Rackspace::Cloud::LoadBalancer': CloudLoadBalancer
+ }
+ else:
+ return {}
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import uuid
+import json
+import copy
+import random
+import string
+
+from heat.common import template_format
+from heat.engine import scheduler
+from heat.engine import resource
+from heat.engine.resources.rackspace import cloud_loadbalancer as lb
+from heat.tests.common import HeatTestCase
+from heat.tests.utils import setup_dummy_db
+from heat.tests.utils import parse_stack
+
+# The following fakes are for pyrax
+
+
+class FakeClient(object):
+    """Minimal stand-in for a pyrax API client."""
+    user_agent = "Fake"
+    USER_AGENT = "Fake"
+
+
+class FakeManager(object):
+ api = FakeClient()
+
+ def list(self):
+ pass
+
+ def get(self, item):
+ pass
+
+ def delete(self, item):
+ pass
+
+ def create(self, *args, **kwargs):
+ pass
+
+ def find(self, *args, **kwargs):
+ pass
+
+ def action(self, item, action_type, body={}):
+ pass
+
+
+class FakeLoadBalancerManager(object):
+    """No-op load balancer manager used by FakeLoadBalancer."""
+    def __init__(self, api=None, *args, **kwargs):
+        pass
+
+    def set_content_caching(self, *args, **kwargs):
+        pass
+
+
+class FakeNode(object):
+ def __init__(self, address="0.0.0.0", port=80, condition=None, weight=None,
+ status=None, parent=None, type=None, id=None):
+ self.address = address
+ self.port = port
+ self.condition = condition
+ self.weight = weight
+ self.status = status
+ self.parent = parent
+ self.type = type
+ self.id = id
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+
+class FakeVirtualIP(object):
+ def __init__(self, address=None, port=None, condition=None,
+ ipVersion=None, type=None):
+ self.address = address
+ self.port = port
+ self.condition = condition
+ self.ipVersion = ipVersion
+ self.type = type
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+
+class FakeLoadBalancerClient(object):
+ def __init__(self, *args, **kwargs):
+ self.Node = FakeNode
+ self.VirtualIP = FakeVirtualIP
+ pass
+
+ def get(*args, **kwargs):
+ pass
+
+ def create(*args, **kwargs):
+ pass
+
+
+class FakeLoadBalancer(object):
+    """Stub load balancer instance used by the mox-based tests."""
+    def __init__(self, name=None, info=None, *args, **kwargs):
+        # NOTE(review): name/info are normalized into locals but never
+        # stored on self — presumably irrelevant to these tests; confirm
+        # before relying on .name/.info attributes.
+        name = name or uuid.uuid4()
+        info = info or {"fake": "fake"}
+        self.id = uuid.uuid4()
+        self.manager = FakeLoadBalancerManager()
+        self.Node = FakeNode
+        self.VirtualIP = FakeVirtualIP
+        self.nodes = []
+
+    def get(*args, **kwargs):
+        pass
+
+    def add_nodes(*args, **kwargs):
+        pass
+
+    def add_ssl_termination(*args, **kwargs):
+        pass
+
+    def set_error_page(*args, **kwargs):
+        pass
+
+    def add_access_list(*args, **kwargs):
+        pass
+
+
+class LoadBalancerWithFakeClient(lb.CloudLoadBalancer):
+    """CloudLoadBalancer wired to the fake pyrax client for testing."""
+    def cloud_lb(self):
+        return FakeLoadBalancerClient()
+
+
+def override_resource():
+    """Resource mapping that substitutes the fake-client LB class."""
+    return {
+        'Rackspace::Cloud::LoadBalancer': LoadBalancerWithFakeClient
+    }
+
+
+class LoadBalancerTest(HeatTestCase):
+
+    def setUp(self):
+        """Build a template plus the pyrax body its creation should send."""
+        super(LoadBalancerTest, self).setUp()
+
+        self.lb_template = {
+            "AWSTemplateFormatVersion": "2010-09-09",
+            "Description": "fawef",
+            "Resources": {
+                self._get_lb_resource_name(): {
+                    "Type": "Rackspace::Cloud::LoadBalancer",
+                    "Properties": {
+                        "name": "test-clb",
+                        "nodes": [{"address": "166.78.103.141", "port": 80,
+                                   "condition": "ENABLED"}],
+                        "protocol": "HTTP",
+                        "port": 80,
+                        "virtualIps": [
+                            {"type": "PUBLIC", "ipVersion": "IPV6"}],
+                        "algorithm": 'LEAST_CONNECTIONS',
+                        "connectionThrottle": {'maxConnectionRate': 1000},
+                        'timeout': 110,
+                        'contentCaching': 'DISABLED'
+                    }
+                }
+            }
+        }
+
+        self.lb_name = 'test-clb'
+        self.expected_body = {
+            "nodes": [FakeNode(address=u"166.78.103.141", port=80,
+                               condition=u"ENABLED")],
+            "protocol": u'HTTP',
+            "port": 80,
+            "virtual_ips": [FakeVirtualIP(type=u"PUBLIC", ipVersion=u"IPV6")],
+            "halfClosed": None,
+            "algorithm": u'LEAST_CONNECTIONS',
+            "connectionThrottle": {'maxConnectionRate': 1000,
+                                   'maxConnections': None,
+                                   'rateInterval': None,
+                                   'minConnections': None},
+            "connectionLogging": None,
+            # NOTE(review): "halfClosed" is a duplicate key (also set above);
+            # harmless in a dict literal but probably unintended.
+            "halfClosed": None,
+            "healthMonitor": None,
+            "metadata": None,
+            "sessionPersistence": None,
+            "timeout": 110
+        }
+
+        # NOTE(review): module-level monkeypatch with no teardown restore.
+        lb.resource_mapping = override_resource
+        setup_dummy_db()
+        resource._register_class("Rackspace::Cloud::LoadBalancer",
+                                 LoadBalancerWithFakeClient)
+
+ def _get_lb_resource_name(self):
+ return "lb-" + str(uuid.uuid4())
+
+    def __getattribute__(self, name):
+        # Hand out deep copies of the shared template/expected-body fixtures
+        # so individual tests can mutate them without affecting each other.
+        if name == 'expected_body' or name == 'lb_template':
+            return copy.deepcopy(super(LoadBalancerTest, self)
+                                 .__getattribute__(name))
+        return super(LoadBalancerTest, self).__getattribute__(name)
+
+    def _mock_create(self, t, stack, resource_name, lb_name, lb_body):
+        """Build the resource and record an expected clb.create() call
+        (mox) that returns a FakeLoadBalancer.
+        """
+        rsrc = LoadBalancerWithFakeClient(resource_name,
+                                          t['Resources'][resource_name],
+                                          stack)
+        self.m.StubOutWithMock(rsrc.clb, 'create')
+        fake_loadbalancer = FakeLoadBalancer(name=lb_name)
+        rsrc.clb.create(lb_name, **lb_body).AndReturn(fake_loadbalancer)
+        return (rsrc, fake_loadbalancer)
+
+ def _get_first_resource_name(self, templ):
+ return next(k for k in templ['Resources'])
+
+ def _random_name(self):
+ return ''.join(random.choice(string.ascii_uppercase)
+ for x in range(10))
+
+    def _mock_loadbalancer(self, lb_template, expected_name, expected_body):
+        """Parse the template into a stack and mock creation plus status
+        polling; the fake LB always reports ACTIVE.
+        """
+        t = template_format.parse(json.dumps(lb_template))
+        s = parse_stack(t, stack_name=self._random_name())
+
+        rsrc, fake_loadbalancer = self._mock_create(t, s,
+                                                    self.
+                                                    _get_first_resource_name(
+                                                        lb_template),
+                                                    expected_name,
+                                                    expected_body)
+        self.m.StubOutWithMock(fake_loadbalancer, 'get')
+        fake_loadbalancer.get().MultipleTimes().AndReturn(None)
+
+        fake_loadbalancer.status = 'ACTIVE'
+
+        return (rsrc, fake_loadbalancer)
+
+ def _set_template(self, templ, **kwargs):
+ for k, v in kwargs.iteritems():
+ templ['Resources'][self._get_first_resource_name(templ)][
+ 'Properties'][k] = v
+ return templ
+
+ def _set_expected(self, expected, **kwargs):
+ for k, v in kwargs.iteritems():
+ expected[k] = v
+ return expected
+
+    def test_alter_properties(self):
+        """Session persistence, logging and metadata properties must be
+        wrapped into their API structures on create.
+        """
+        #test alter properties functions
+        template = self._set_template(self.lb_template,
+                                      sessionPersistence='HTTP_COOKIE',
+                                      connectionLogging=True,
+                                      metadata={'yolo': 'heeyyy_gurl'})
+
+        expected = self._set_expected(self.expected_body,
+                                      sessionPersistence=
+                                      {'persistenceType': 'HTTP_COOKIE'},
+                                      connectionLogging={'enabled': True},
+                                      metadata=[
+                                          {'key': 'yolo',
+                                           'value': 'heeyyy_gurl'}])
+
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          expected)
+
+        self.m.ReplayAll()
+        scheduler.TaskRunner(rsrc.create)()
+        self.m.VerifyAll()
+
+    def test_validate_half_closed(self):
+        """halfClosed must only validate with TCP/TCP_CLIENT_FIRST."""
+        #test failure (invalid protocol)
+        template = self._set_template(self.lb_template, halfClosed=True)
+        expected = self._set_expected(self.expected_body, halfClosed=True)
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          expected)
+        self.assertEquals(rsrc.validate(), {
+            'Error':
+            'The halfClosed property is only available for the '
+            'TCP or TCP_CLIENT_FIRST protocols'})
+
+        #test TCP protocol
+        template = self._set_template(template, protocol='TCP')
+        expected = self._set_expected(expected, protocol='TCP')
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          expected)
+        self.assertEquals(rsrc.validate(), None)
+
+        #test TCP_CLIENT_FIRST protocol
+        template = self._set_template(template,
+                                      protocol='TCP_CLIENT_FIRST')
+        expected = self._set_expected(expected,
+                                      protocol='TCP_CLIENT_FIRST')
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          expected)
+        self.assertEquals(rsrc.validate(), None)
+
+    def test_validate_health_monitor(self):
+        """CONNECT and HTTP(S) health monitors validate against different
+        schemas; CONNECT must reject HTTP-only fields.
+        """
+        #test connect success
+        health_monitor = {
+            'type': 'CONNECT',
+            'attemptsBeforeDeactivation': 1,
+            'delay': 1,
+            'timeout': 1
+        }
+        template = self._set_template(self.lb_template,
+                                      healthMonitor=health_monitor)
+        expected = self._set_expected(self.expected_body,
+                                      healthMonitor=health_monitor)
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          expected)
+
+        self.assertEquals(rsrc.validate(), None)
+
+        #test connect failure
+        #bodyRegex is only valid for type 'HTTP(S)'
+        health_monitor['bodyRegex'] = 'dfawefawe'
+        template = self._set_template(template,
+                                      healthMonitor=health_monitor)
+        expected = self._set_expected(expected,
+                                      healthMonitor=health_monitor)
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          expected)
+        self.assertEquals(rsrc.validate(),
+                          {'Error': 'Unknown Property bodyRegex'})
+
+        #test http fields
+        health_monitor['type'] = 'HTTP'
+        health_monitor['bodyRegex'] = 'bodyRegex'
+        health_monitor['statusRegex'] = 'statusRegex'
+        health_monitor['hostHeader'] = 'hostHeader'
+        health_monitor['path'] = 'path'
+
+        template = self._set_template(template,
+                                      healthMonitor=health_monitor)
+        expected = self._set_expected(expected,
+                                      healthMonitor=health_monitor)
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          expected)
+        self.assertEquals(rsrc.validate(), None)
+
+    def test_validate_ssl_termination(self):
+        """Enabled SSL termination must require securePort (stricter
+        schema); once provided, validation passes.
+        """
+        ssl_termination = {
+            'enabled': True,
+            'privatekey': 'ewfawe',
+            'certificate': 'dfaewfwef',
+            'intermediateCertificate': 'fwaefawe',
+            'secureTrafficOnly': True
+        }
+
+        #test ssl termination enabled without required fields failure
+        template = self._set_template(self.lb_template,
+                                      sslTermination=ssl_termination)
+        expected = self._set_expected(self.expected_body,
+                                      sslTermination=ssl_termination)
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          expected)
+        self.assertEquals(rsrc.validate(),
+                          {'Error':
+                          'Property error : %s: Property securePort not '
+                          'assigned' % rsrc.name})
+
+        ssl_termination['securePort'] = 443
+        template = self._set_template(template,
+                                      sslTermination=ssl_termination)
+        expected = self._set_expected(expected,
+                                      sslTermination=ssl_termination)
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          expected)
+        self.assertEquals(rsrc.validate(), None)
+
+    def test_post_creation_access_list(self):
+        """An accessList property must trigger add_access_list() after
+        creation.
+        """
+        access_list = [{"address": '192.168.1.1/0',
+                        'type': 'ALLOW'},
+                       {'address': '172.165.3.43',
+                        'type': 'DENY'}]
+
+        template = self._set_template(self.lb_template,
+                                      accessList=access_list)
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          self.expected_body)
+        self.m.StubOutWithMock(fake_loadbalancer, 'add_access_list')
+        fake_loadbalancer.add_access_list(access_list)
+
+        self.m.ReplayAll()
+        scheduler.TaskRunner(rsrc.create)()
+        self.m.VerifyAll()
+
+    def test_post_creation_error_page(self):
+        """An errorPage property must trigger set_error_page() after
+        creation.
+        """
+        error_page = "REALLY BIG ERROR"
+
+        template = self._set_template(self.lb_template,
+                                      errorPage=error_page)
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          self.expected_body)
+        self.m.StubOutWithMock(fake_loadbalancer, 'set_error_page')
+        fake_loadbalancer.set_error_page(error_page)
+
+        self.m.ReplayAll()
+        scheduler.TaskRunner(rsrc.create)()
+        self.m.VerifyAll()
+
+    def test_post_creation_ssl_termination(self):
+        """An sslTermination property must trigger add_ssl_termination()
+        with the expected positional and keyword arguments.
+        """
+        ssl_termination = {
+            'securePort': 443,
+            'privatekey': 'afwefawe',
+            'certificate': 'fawefwea',
+            'intermediateCertificate': "intermediate_certificate",
+            'enabled': True,
+            'secureTrafficOnly': False
+        }
+
+        template = self._set_template(self.lb_template,
+                                      sslTermination=ssl_termination)
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          self.expected_body)
+        self.m.StubOutWithMock(fake_loadbalancer, 'add_ssl_termination')
+        fake_loadbalancer.add_ssl_termination(
+            ssl_termination['securePort'],
+            ssl_termination['privatekey'],
+            ssl_termination['certificate'],
+            intermediateCertificate=ssl_termination['intermediateCertificate'],
+            enabled=ssl_termination['enabled'],
+            secureTrafficOnly=ssl_termination['secureTrafficOnly'])
+
+        self.m.ReplayAll()
+        scheduler.TaskRunner(rsrc.create)()
+        self.m.VerifyAll()
+
+    def test_post_creation_content_caching(self):
+        """Creation with contentCaching=ENABLED must complete cleanly
+        (the setting is applied via attribute assignment, so no extra
+        mock expectations are needed).
+        """
+        template = self._set_template(self.lb_template,
+                                      contentCaching='ENABLED')
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
+                                                          self.lb_name,
+                                                          self.expected_body)
+        self.m.ReplayAll()
+        scheduler.TaskRunner(rsrc.create)()
+        self.m.VerifyAll()
+
+    def test_update_add_node_by_ref(self):
+        """handle_update must resolve a node 'ref' to the referenced
+        resource's PublicIp and add the resulting node.
+        """
+        added_node = {'nodes': [
+            {"address": "166.78.103.141", "port": 80, "condition": "ENABLED"},
+            {"ref": "TEST_NODE_REF", "port": 80, "condition": "ENABLED"}]}
+        expected_ip = '172.168.1.4'
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(self.lb_template,
+                                                          self.lb_name,
+                                                          self.expected_body)
+        fake_loadbalancer.nodes = self.expected_body['nodes']
+        self.m.ReplayAll()
+        scheduler.TaskRunner(rsrc.create)()
+        self.m.VerifyAll()
+
+        self.m.StubOutWithMock(rsrc.clb, 'get')
+        rsrc.clb.get(rsrc.resource_id).AndReturn(fake_loadbalancer)
+
+        self.m.StubOutWithMock(rsrc.stack, 'resource_by_refid')
+
+        # Minimal stand-in for a stack resource exposing FnGetAtt.
+        class FakeFn(object):
+            def FnGetAtt(self, attr):
+                return expected_ip
+
+        rsrc.stack.resource_by_refid('TEST_NODE_REF').AndReturn(FakeFn())
+
+        self.m.StubOutWithMock(fake_loadbalancer, 'add_nodes')
+        fake_loadbalancer.add_nodes([
+            fake_loadbalancer.Node(address=expected_ip,
+                                   port=80,
+                                   condition='ENABLED')])
+
+        self.m.ReplayAll()
+        rsrc.handle_update({}, {}, added_node)
+        self.m.VerifyAll()
+
+    def test_update_add_node_by_address(self):
+        """handle_update must add a node given directly by address."""
+        expected_ip = '172.168.1.4'
+        added_node = {'nodes': [
+            {"address": "166.78.103.141", "port": 80, "condition": "ENABLED"},
+            {"address": expected_ip, "port": 80, "condition": "ENABLED"}]}
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(self.lb_template,
+                                                          self.lb_name,
+                                                          self.expected_body)
+        fake_loadbalancer.nodes = self.expected_body['nodes']
+        self.m.ReplayAll()
+        scheduler.TaskRunner(rsrc.create)()
+        self.m.VerifyAll()
+
+        self.m.StubOutWithMock(rsrc.clb, 'get')
+        rsrc.clb.get(rsrc.resource_id).AndReturn(fake_loadbalancer)
+
+        self.m.StubOutWithMock(fake_loadbalancer, 'add_nodes')
+        fake_loadbalancer.add_nodes([
+            fake_loadbalancer.Node(address=expected_ip,
+                                   port=80,
+                                   condition='ENABLED')])
+
+        self.m.ReplayAll()
+        rsrc.handle_update({}, {}, added_node)
+        self.m.VerifyAll()
+
+    def test_update_delete_node_failed(self):
+        """Removing the last node must raise ValueError (a load balancer
+        needs at least one node).
+        """
+        deleted_node = {'nodes': []}
+        rsrc, fake_loadbalancer = self._mock_loadbalancer(self.lb_template,
+                                                          self.lb_name,
+                                                          self.expected_body)
+        fake_loadbalancer.nodes = self.expected_body['nodes']
+        self.m.ReplayAll()
+        scheduler.TaskRunner(rsrc.create)()
+        self.m.VerifyAll()
+
+        self.m.StubOutWithMock(rsrc.clb, 'get')
+        rsrc.clb.get(rsrc.resource_id).AndReturn(fake_loadbalancer)
+
+        self.m.ReplayAll()
+        self.assertRaises(ValueError, rsrc.handle_update, {}, {}, deleted_node)
+        self.m.VerifyAll()