logger = logging.getLogger(__name__)
+class CooldownMixin(object):
+ '''
+ Utility class encapsulating the Cooldown-related logic shared
+ between AutoScalingGroup and ScalingPolicy
+ '''
+ def _cooldown_inprogress(self):
+ inprogress = False
+ try:
+ # Negative values don't make sense, so they are clamped to zero
+ cooldown = max(0, int(self.properties['Cooldown']))
+ except TypeError:
+ # If Cooldown is not specified the property is None, which we
+ # treat the same as cooldown == 0
+ cooldown = 0
+
+ metadata = self.metadata
+ if metadata and cooldown != 0:
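+ # _cooldown_timestamp stores a single-entry dict, so the only key
+ # is the timestamp of the most recent adjustment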
+ last_adjust = metadata.keys()[0]
+ if not timeutils.is_older_than(last_adjust, cooldown):
+ inprogress = True
+ return inprogress
+
+ def _cooldown_timestamp(self, reason):
+ # Save resource metadata with a timestamp and reason
+ # If we wanted to implement the AutoScaling API like AWS does,
+ # we could maintain event history here, but since we only need
+ # the latest event for cooldown, just store that for now
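+ # e.g. (illustrative) {'2013-06-19T20:30:45.123456': 'ChangeInCapacity : 1'}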
+ metadata = {timeutils.strtime(): reason}
+ self.metadata = metadata
+
+
class InstanceGroup(resource.Resource):
tags_schema = {'Key': {'Type': 'String',
'Required': True},
return unicode(self.name)
-class AutoScalingGroup(InstanceGroup):
+class AutoScalingGroup(InstanceGroup, CooldownMixin):
tags_schema = {'Key': {'Type': 'String',
'Required': True},
'Value': {'Type': 'String',
else:
num_to_create = int(self.properties['MinSize'])
- self.resize(num_to_create)
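+ # using adjust() rather than resize() here also records an initial
+ # cooldown timestamp in the resource metadata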
+ self.adjust(num_to_create, adjustment_type='ExactCapacity')
def handle_update(self):
return self.UPDATE_REPLACE
def adjust(self, adjustment, adjustment_type='ChangeInCapacity'):
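+ # adjustment_type is one of ChangeInCapacity, ExactCapacity or
+ # PercentChangeInCapacity (the AWS AutoScaling AdjustmentType values)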
+ if self._cooldown_inprogress():
+ logger.info("%s NOT performing scaling adjustment, cooldown %s" %
+ (self.name, self.properties['Cooldown']))
+ return
+
inst_list = []
if self.resource_id is not None:
inst_list = sorted(self.resource_id.split(','))
logger.warn('can not be less than %s' % self.properties['MinSize'])
return
+ if new_capacity == capacity:
+ logger.debug('no change in capacity %d' % capacity)
+ return
+
self.resize(new_capacity)
+ self._cooldown_timestamp("%s : %s" % (adjustment_type, adjustment))
+
def FnGetRefId(self):
return unicode(self.name)
super(LaunchConfiguration, self).__init__(name, json_snippet, stack)
-class ScalingPolicy(resource.Resource):
+class ScalingPolicy(resource.Resource, CooldownMixin):
properties_schema = {
'AutoScalingGroupName': {'Type': 'String',
'Required': True},
super(ScalingPolicy, self).__init__(name, json_snippet, stack)
def alarm(self):
- try:
- # Negative values don't make sense, so they are clamped to zero
- cooldown = max(0, int(self.properties['Cooldown']))
- except TypeError:
- # If not specified, it will be None, same as cooldown == 0
- cooldown = 0
-
- metadata = self.metadata
- if metadata and cooldown != 0:
- last_adjust = metadata.keys()[0]
- if not timeutils.is_older_than(last_adjust, cooldown):
- logger.info("%s NOT performing scaling action, cooldown %s" %
- (self.name, cooldown))
- return
+ if self._cooldown_inprogress():
+ logger.info("%s NOT performing scaling action, cooldown %s" %
+ (self.name, self.properties['Cooldown']))
+ return
group = self.stack.resources[self.properties['AutoScalingGroupName']]
group.adjust(int(self.properties['ScalingAdjustment']),
self.properties['AdjustmentType'])
- # Save resource metadata with a timestamp and reason
- # If we wanted to implement the AutoScaling API like AWS does,
- # we could maintain event history here, but since we only need
- # the latest event for cooldown, just store that for now
- metadata = {timeutils.strtime(): "%s : %s" % (
- self.properties['AdjustmentType'],
- self.properties['ScalingAdjustment'])}
- self.metadata = metadata
+ self._cooldown_timestamp("%s : %s" %
+ (self.properties['AdjustmentType'],
+ self.properties['ScalingAdjustment']))
def resource_mapping():
class AutoScalingTest(unittest.TestCase):
def setUp(self):
self.m = mox.Mox()
- self.m.StubOutWithMock(loadbalancer.LoadBalancer, 'reload')
def tearDown(self):
self.m.UnsetStubs()
self.m.StubOutWithMock(loadbalancer.LoadBalancer, 'reload')
loadbalancer.LoadBalancer.reload(expected_list).AndReturn(None)
- def _stub_meta_expected(self, now, data):
+ def _stub_meta_expected(self, now, data, nmeta=1):
# Stop time at now
self.m.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().MultipleTimes().AndReturn(now)
# expected based on the timestamp and data
self.m.StubOutWithMock(Metadata, '__set__')
expected = {timeutils.strtime(now): data}
- Metadata.__set__(mox.IgnoreArg(), expected).AndReturn(None)
+ # Note for ScalingPolicy, we expect to get a metadata
+ # update for the policy and autoscaling group, so pass nmeta=2
+ for x in range(nmeta):
+ Metadata.__set__(mox.IgnoreArg(), expected).AndReturn(None)
def test_scaling_group_update(self):
t = self.load_template()
stack = self.parse_stack(t)
self._stub_lb_reload(['WebServerGroup-0'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 1')
self.m.ReplayAll()
resource = self.create_scaling_group(t, stack, 'WebServerGroup')
properties['DesiredCapacity'] = '3'
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 3')
self.m.ReplayAll()
resource = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual('WebServerGroup-0,WebServerGroup-1,WebServerGroup-2',
# reduce to 1
self._stub_lb_reload(['WebServerGroup-0'])
+ self._stub_meta_expected(now, 'ChangeInCapacity : -2')
self.m.ReplayAll()
resource.adjust(-2)
self.assertEqual('WebServerGroup-0', resource.resource_id)
# raise to 3
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'])
+ self._stub_meta_expected(now, 'ChangeInCapacity : 2')
self.m.ReplayAll()
resource.adjust(2)
self.assertEqual('WebServerGroup-0,WebServerGroup-1,WebServerGroup-2',
# set to 2
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1'])
+ self._stub_meta_expected(now, 'ExactCapacity : 2')
self.m.ReplayAll()
resource.adjust(2, 'ExactCapacity')
self.assertEqual('WebServerGroup-0,WebServerGroup-1',
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '2'
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 2')
self.m.ReplayAll()
resource = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = resource
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '2'
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 2')
self.m.ReplayAll()
resource = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = resource
# reduce by 50%
self._stub_lb_reload(['WebServerGroup-0'])
+ self._stub_meta_expected(now, 'PercentChangeInCapacity : -50')
self.m.ReplayAll()
resource.adjust(-50, 'PercentChangeInCapacity')
self.assertEqual('WebServerGroup-0',
# raise by 200%
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'])
+ self._stub_meta_expected(now, 'PercentChangeInCapacity : 200')
+ self.m.ReplayAll()
+ resource.adjust(200, 'PercentChangeInCapacity')
+ self.assertEqual('WebServerGroup-0,WebServerGroup-1,WebServerGroup-2',
+ resource.resource_id)
+
+ resource.delete()
+
+ def test_scaling_group_cooldown_toosoon(self):
+ t = self.load_template()
+ stack = self.parse_stack(t)
+
+ # Create initial group, 2 instances, Cooldown 60s
+ properties = t['Resources']['WebServerGroup']['Properties']
+ properties['DesiredCapacity'] = '2'
+ properties['Cooldown'] = '60'
+ self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 2')
+ self.m.ReplayAll()
+ resource = self.create_scaling_group(t, stack, 'WebServerGroup')
+ stack.resources['WebServerGroup'] = resource
+ self.assertEqual('WebServerGroup-0,WebServerGroup-1',
+ resource.resource_id)
+
+ # reduce by 50%
+ self._stub_lb_reload(['WebServerGroup-0'])
+ self._stub_meta_expected(now, 'PercentChangeInCapacity : -50')
+ self.m.ReplayAll()
+ resource.adjust(-50, 'PercentChangeInCapacity')
+ self.assertEqual('WebServerGroup-0',
+ resource.resource_id)
+
+ # Now advance time by 10 seconds - Cooldown in template is 60,
+ # so this should not update the group metadata, and the
+ # scaling group instances should be unchanged
+ # Note we have to stub Metadata.__get__ since the group resource isn't
+ # stored in the DB (because the stack hasn't really been created)
+ previous_meta = {timeutils.strtime(now):
+ 'PercentChangeInCapacity : -50'}
+
+ self.m.VerifyAll()
+ self.m.UnsetStubs()
+
+ now = now + datetime.timedelta(seconds=10)
+ self.m.StubOutWithMock(timeutils, 'utcnow')
+ timeutils.utcnow().AndReturn(now)
+
+ self.m.StubOutWithMock(Metadata, '__get__')
+ Metadata.__get__(mox.IgnoreArg(), resource, mox.IgnoreArg()
+ ).AndReturn(previous_meta)
+
+ self.m.ReplayAll()
+
+ # raise by 200%, too soon for Cooldown so there should be no change
+ resource.adjust(200, 'PercentChangeInCapacity')
+ self.assertEqual('WebServerGroup-0', resource.resource_id)
+
+ resource.delete()
+
+ def test_scaling_group_cooldown_ok(self):
+ t = self.load_template()
+ stack = self.parse_stack(t)
+
+ # Create initial group, 2 instances, Cooldown 60s
+ properties = t['Resources']['WebServerGroup']['Properties']
+ properties['DesiredCapacity'] = '2'
+ properties['Cooldown'] = '60'
+ self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 2')
+ self.m.ReplayAll()
+ resource = self.create_scaling_group(t, stack, 'WebServerGroup')
+ stack.resources['WebServerGroup'] = resource
+ self.assertEqual('WebServerGroup-0,WebServerGroup-1',
+ resource.resource_id)
+
+ # reduce by 50%
+ self._stub_lb_reload(['WebServerGroup-0'])
+ self._stub_meta_expected(now, 'PercentChangeInCapacity : -50')
+ self.m.ReplayAll()
+ resource.adjust(-50, 'PercentChangeInCapacity')
+ self.assertEqual('WebServerGroup-0',
+ resource.resource_id)
+
+ # Now advance time by 61 seconds - Cooldown in template is 60,
+ # so this should update the group metadata, and the
+ # scaling group instances should be updated
+ previous_meta = {timeutils.strtime(now):
+ 'PercentChangeInCapacity : -50'}
+
+ self.m.VerifyAll()
+ self.m.UnsetStubs()
+
+ now = now + datetime.timedelta(seconds=61)
+
+ self.m.StubOutWithMock(Metadata, '__get__')
+ Metadata.__get__(mox.IgnoreArg(), resource, mox.IgnoreArg()
+ ).AndReturn(previous_meta)
+
+ # raise by 200%, should work
+ self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1',
+ 'WebServerGroup-2'], unset=False)
+ self._stub_meta_expected(now, 'PercentChangeInCapacity : 200')
+ self.m.ReplayAll()
+ resource.adjust(200, 'PercentChangeInCapacity')
+ self.assertEqual('WebServerGroup-0,WebServerGroup-1,WebServerGroup-2',
+ resource.resource_id)
+
+ resource.delete()
+
+ def test_scaling_group_cooldown_zero(self):
+ t = self.load_template()
+ stack = self.parse_stack(t)
+
+ # Create initial group, 2 instances, Cooldown 0
+ properties = t['Resources']['WebServerGroup']['Properties']
+ properties['DesiredCapacity'] = '2'
+ properties['Cooldown'] = '0'
+ self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 2')
+ self.m.ReplayAll()
+ resource = self.create_scaling_group(t, stack, 'WebServerGroup')
+ stack.resources['WebServerGroup'] = resource
+ self.assertEqual('WebServerGroup-0,WebServerGroup-1',
+ resource.resource_id)
+
+ # reduce by 50%
+ self._stub_lb_reload(['WebServerGroup-0'])
+ self._stub_meta_expected(now, 'PercentChangeInCapacity : -50')
+ self.m.ReplayAll()
+ resource.adjust(-50, 'PercentChangeInCapacity')
+ self.assertEqual('WebServerGroup-0',
+ resource.resource_id)
+
+ # Don't advance time; since cooldown is zero, the adjustment should proceed
+ previous_meta = {timeutils.strtime(now):
+ 'PercentChangeInCapacity : -50'}
+
+ self.m.VerifyAll()
+ self.m.UnsetStubs()
+
+ self.m.StubOutWithMock(Metadata, '__get__')
+ Metadata.__get__(mox.IgnoreArg(), resource, mox.IgnoreArg()
+ ).AndReturn(previous_meta)
+
+ # raise by 200%, should work
+ self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1',
+ 'WebServerGroup-2'], unset=False)
+ self._stub_meta_expected(now, 'PercentChangeInCapacity : 200')
self.m.ReplayAll()
resource.adjust(200, 'PercentChangeInCapacity')
self.assertEqual('WebServerGroup-0,WebServerGroup-1,WebServerGroup-2',
# Create initial group
self._stub_lb_reload(['WebServerGroup-0'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 1')
self.m.ReplayAll()
resource = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = resource
# Scale up one
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1'])
- now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ChangeInCapacity : 1')
+ self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
properties = t['Resources']['WebServerGroup']['Properties']
properties['DesiredCapacity'] = '2'
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 2')
self.m.ReplayAll()
resource = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = resource
# Scale down one
self._stub_lb_reload(['WebServerGroup-0'])
- now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ChangeInCapacity : -1')
+ self._stub_meta_expected(now, 'ChangeInCapacity : -1', 2)
self.m.ReplayAll()
down_policy = self.create_scaling_policy(t, stack,
'WebServerScaleDownPolicy')
# Create initial group
self._stub_lb_reload(['WebServerGroup-0'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 1')
self.m.ReplayAll()
resource = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = resource
# Scale up one
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1'])
- now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ChangeInCapacity : 1')
+ self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
now = now + datetime.timedelta(seconds=10)
self.m.StubOutWithMock(timeutils, 'utcnow')
- timeutils.utcnow().MultipleTimes().AndReturn(now)
+ timeutils.utcnow().AndReturn(now)
self.m.StubOutWithMock(Metadata, '__get__')
Metadata.__get__(mox.IgnoreArg(), up_policy, mox.IgnoreArg()
# Create initial group
self._stub_lb_reload(['WebServerGroup-0'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 1')
self.m.ReplayAll()
resource = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = resource
# Scale up one
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1'])
- now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ChangeInCapacity : 1')
+ self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
self.m.StubOutWithMock(Metadata, '__get__')
Metadata.__get__(mox.IgnoreArg(), up_policy, mox.IgnoreArg()
).AndReturn(previous_meta)
+ Metadata.__get__(mox.IgnoreArg(), resource, mox.IgnoreArg()
+ ).AndReturn(previous_meta)
now = now + datetime.timedelta(seconds=61)
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'], unset=False)
- self._stub_meta_expected(now, 'ChangeInCapacity : 1')
+ self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self.m.ReplayAll()
up_policy.alarm()
# Create initial group
self._stub_lb_reload(['WebServerGroup-0'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 1')
self.m.ReplayAll()
resource = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = resource
properties = t['Resources']['WebServerScaleUpPolicy']['Properties']
properties['Cooldown'] = '0'
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1'])
- now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ChangeInCapacity : 1')
+ self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
self.m.StubOutWithMock(Metadata, '__get__')
Metadata.__get__(mox.IgnoreArg(), up_policy, mox.IgnoreArg()
).AndReturn(previous_meta)
+ Metadata.__get__(mox.IgnoreArg(), resource, mox.IgnoreArg()
+ ).AndReturn(previous_meta)
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'], unset=False)
- self._stub_meta_expected(now, 'ChangeInCapacity : 1')
+ self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self.m.ReplayAll()
up_policy.alarm()
# Create initial group
self._stub_lb_reload(['WebServerGroup-0'])
+ now = timeutils.utcnow()
+ self._stub_meta_expected(now, 'ExactCapacity : 1')
self.m.ReplayAll()
resource = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = resource
del(properties['Cooldown'])
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1'])
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ChangeInCapacity : 1')
+ self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
self.m.StubOutWithMock(Metadata, '__get__')
Metadata.__get__(mox.IgnoreArg(), up_policy, mox.IgnoreArg()
).AndReturn(previous_meta)
+ Metadata.__get__(mox.IgnoreArg(), resource, mox.IgnoreArg()
+ ).AndReturn(previous_meta)
self._stub_lb_reload(['WebServerGroup-0', 'WebServerGroup-1',
'WebServerGroup-2'], unset=False)
- self._stub_meta_expected(now, 'ChangeInCapacity : 1')
+ self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self.m.ReplayAll()
up_policy.alarm()