from heat.common import exception
from heat.engine import resource
+from heat.engine import signal_responder
from heat.engine import scheduler
from heat.openstack.common import log as logging
}
-class ScalingPolicy(resource.Resource, CooldownMixin):
+class ScalingPolicy(signal_responder.SignalResponder, CooldownMixin):
properties_schema = {
'AutoScalingGroupName': {'Type': 'String',
'Required': True},
update_allowed_keys = ('Properties',)
update_allowed_properties = ('ScalingAdjustment', 'AdjustmentType',
'Cooldown',)
+ attributes_schema = {
+ "AlarmUrl": ("A signed url to handle the alarm. "
+ "(Heat extension)")
+ }
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
# If Properties has changed, update self.properties, so we
(self.name, self.properties['Cooldown']))
return
- group = self.stack.resources[self.properties['AutoScalingGroupName']]
+ group = self.stack[self.properties['AutoScalingGroupName']]
logger.info('%s Alarm, adjusting Group %s by %s' %
(self.name, group.name,
(self.properties['AdjustmentType'],
self.properties['ScalingAdjustment']))
+    def _resolve_attribute(self, name):
+        '''
+        heat extension: "AlarmUrl" returns the url to post to the policy
+        when there is an alarm.
+        '''
+        # _get_signed_url() is presumably provided by the new
+        # signal_responder.SignalResponder base (see class declaration in
+        # this patch) -- confirm against heat/engine/signal_responder.py.
+        # The URL is only meaningful once the resource has been created
+        # (resource_id is set); any other attribute name, or an uncreated
+        # resource, falls through and implicitly returns None.
+        if name == 'AlarmUrl' and self.resource_id is not None:
+            return unicode(self._get_signed_url())
+
+    def FnGetRefId(self):
+        # {"Ref": <policy>} in a template resolves to the resource's
+        # logical name (as unicode) rather than a physical resource id.
+        return unicode(self.name)
+
def resource_mapping():
return {
from oslo.config import cfg
+from heat.engine import signal_responder
from heat.engine import clients
from heat.engine import resource
from heat.engine import scheduler
logger = logging.getLogger(__name__)
-class Restarter(resource.Resource):
+class Restarter(signal_responder.SignalResponder):
properties_schema = {'InstanceId': {'Type': 'String',
'Required': True}}
+ attributes_schema = {
+ "AlarmUrl": ("A signed url to handle the alarm. "
+ "(Heat extension)")
+ }
def _find_resource(self, resource_id):
'''
(self.name, victim.name))
self.stack.restart_resource(victim.name)
+    def _resolve_attribute(self, name):
+        '''
+        heat extension: "AlarmUrl" returns the url to post to the policy
+        when there is an alarm.
+        '''
+        # Mirrors ScalingPolicy's attribute handling: the signed alarm URL
+        # (from the SignalResponder base added in this patch) is exposed
+        # only after creation (resource_id set). Other attribute names, or
+        # an uncreated resource, implicitly return None.
+        if name == 'AlarmUrl' and self.resource_id is not None:
+            return unicode(self._get_signed_url())
+
class Instance(resource.Resource):
# AWS does not require InstanceType but Heat does because the nova
import mox
+from oslo.config import cfg
+
from heat.common import template_format
from heat.common import exception
from heat.engine.resources import autoscaling as asc
from heat.engine.resource import Metadata
from heat.openstack.common import timeutils
from heat.tests.common import HeatTestCase
+from heat.tests import fakes
from heat.tests.utils import setup_dummy_db
from heat.tests.utils import parse_stack
def setUp(self):
super(AutoScalingTest, self).setUp()
setup_dummy_db()
+ cfg.CONF.set_default('heat_waitcondition_server_url',
+ 'http://127.0.0.1:8000/v1/waitcondition')
+ self.fc = fakes.FakeKeystoneClient()
def create_scaling_group(self, t, stack, resource_name):
rsrc = asc.AutoScalingGroup(resource_name,
rsrc = asc.ScalingPolicy(resource_name,
t['Resources'][resource_name],
stack)
-
self.assertEqual(None, rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
+
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
self._stub_lb_reload(2)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
+
+ self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
+ asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
+ self.fc)
+
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
+
+ alarm_url = up_policy.FnGetAtt('AlarmUrl')
+ self.assertNotEqual(None, alarm_url)
up_policy.signal()
self.assertEqual('WebServerGroup-0,WebServerGroup-1',
rsrc.resource_id)
# Scale down one
self._stub_lb_reload(1)
self._stub_meta_expected(now, 'ChangeInCapacity : -1', 2)
+
+ self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
+ asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
+ self.fc)
+
self.m.ReplayAll()
down_policy = self.create_scaling_policy(t, stack,
'WebServerScaleDownPolicy')
self._stub_lb_reload(2)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
+
+ self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
+ asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
+ self.fc)
+
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
self._stub_lb_reload(2)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
+
+ self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
+ asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
+ self.fc)
+
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
self._stub_lb_reload(2)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
+
+ self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
+ asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
+ self.fc)
+
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
+
+ self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
+ asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
+ self.fc)
+
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
now = timeutils.utcnow()
self._stub_meta_expected(now, 'ExactCapacity : 1')
self._stub_create(1)
+
+ self.m.StubOutWithMock(asc.ScalingPolicy, 'keystone')
+ asc.ScalingPolicy.keystone().MultipleTimes().AndReturn(
+ self.fc)
+
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc