except clients.novaclient.exceptions.NotFound:
break
+ def _detach_volumes_task(self):
+ '''
+ Detach volumes from the instance
+ '''
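+ # One VolumeDetachTask per (volume_id, device) pair reported by
+ # self.volumes(); PollingTaskGroup steps them all concurrently
+ # when the returned group is run.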
+ detach_tasks = (volume.VolumeDetachTask(self.stack,
+ self.resource_id,
+ volume_id)
+ for volume_id, device in self.volumes())
+ return scheduler.PollingTaskGroup(detach_tasks)
+
def handle_delete(self):
'''
Delete an instance, blocking until it is disposed by OpenStack
'''
if self.resource_id is None:
return
- detach_tasks = (volume.VolumeDetachTask(self.stack,
- self.resource_id,
- volume_id)
- for volume_id, device in self.volumes())
- scheduler.TaskRunner(scheduler.PollingTaskGroup(detach_tasks))()
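+ # Calling the runner directly blocks until every detach task has
+ # finished, so the delete below cannot race the detachment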
+ scheduler.TaskRunner(self._detach_volumes_task())()
try:
server = self.nova().servers.get(self.resource_id)
image_id = image_names.popitem()[0]
return image_id
+ def handle_suspend(self):
+ '''
+ Suspend an instance - note we do not wait for the SUSPENDED state;
+ this is polled for by check_suspend_complete in a similar way to the
+ create logic, so we can take advantage of coroutines
+ '''
+ if self.resource_id is None:
+ raise exception.Error(_('Cannot suspend %s, resource_id not set') %
+ self.name)
+
+ try:
+ server = self.nova().servers.get(self.resource_id)
+ except clients.novaclient.exceptions.NotFound:
+ raise exception.NotFound(_('Failed to find instance %s') %
+ self.resource_id)
+ else:
+ logger.debug("suspending instance %s" % self.resource_id)
+ # We want the server.suspend to happen after the volume
+ # detachment has finished, so pass both tasks and the server
+ suspend_runner = scheduler.TaskRunner(server.suspend)
+ volumes_runner = scheduler.TaskRunner(self._detach_volumes_task())
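+ # Neither runner is started here; check_suspend_complete drives
+ # both via the returned cookie, detaching volumes first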
+ return server, suspend_runner, volumes_runner
+
+ def check_suspend_complete(self, cookie):
+ server, suspend_runner, volumes_runner = cookie
+
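+ # Run the volume detach tasks to completion before asking nova
+ # to suspend the server, per the ordering noted in handle_suspend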
+ if not volumes_runner.started():
+ volumes_runner.start()
+
+ if volumes_runner.done():
+ if not suspend_runner.started():
+ suspend_runner.start()
+
+ if suspend_runner.done():
+ if server.status == 'SUSPENDED':
+ return True
+
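+ # Re-fetch the server record so the status check below sees
+ # nova's latest view of the instance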
+ server.get()
+ logger.debug("%s check_suspend_complete status = %s" %
+ (self.name, server.status))
+ if server.status in (self._deferred_server_statuses +
+                      ['ACTIVE']):
+ return server.status == 'SUSPENDED'
+ else:
+ raise exception.Error(_('nova reported unexpected '
+ 'instance[%(instance)s] '
+ 'status[%(status)s]') %
+ {'instance': self.name,
+ 'status': server.status})
+ else:
+ suspend_runner.step()
+ else:
+ return volumes_runner.step()
+
def resource_mapping():
return {
# License for the specific language governing permissions and limitations
# under the License.

+ import copy
import mox
scheduler.TaskRunner(instance.create)()
self.assertEqual(instance.state, (instance.CREATE, instance.COMPLETE))
+ def test_instance_status_suspend_immediate(self):
+ return_server = self.fc.servers.list()[1]
+ instance = self._create_test_instance(return_server,
+ 'test_instance_suspend')
+
+ instance.resource_id = 1234
+ self.m.ReplayAll()
+
+ # Override the get_servers_1234 handler status to SUSPENDED
+ d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
+ d['server']['status'] = 'SUSPENDED'
+ self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
+ get = self.fc.client.get_servers_1234
+ get().AndReturn((200, d))
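+ # self.m.ReplayAll() has already run above, so put only this
+ # newly stubbed mock into replay mode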
+ mox.Replay(get)
+
+ scheduler.TaskRunner(instance.suspend)()
+ self.assertEqual(instance.state, (instance.SUSPEND, instance.COMPLETE))
+
+ self.m.VerifyAll()
+
+ def test_instance_status_suspend_wait(self):
+ return_server = self.fc.servers.list()[1]
+ instance = self._create_test_instance(return_server,
+ 'test_instance_suspend')
+
+ instance.resource_id = 1234
+ self.m.ReplayAll()
+
+ # Override the get_servers_1234 handler status to SUSPENDED, but
+ # return the ACTIVE state first (twice, so we sleep)
+ d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
+ d2 = copy.deepcopy(d1)
+ d1['server']['status'] = 'ACTIVE'
+ d2['server']['status'] = 'SUSPENDED'
+ self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
+ get = self.fc.client.get_servers_1234
+ get().AndReturn((200, d1))
+ get().AndReturn((200, d1))
+ self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
+ scheduler.TaskRunner._sleep(mox.IsA(int)).AndReturn(None)
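+ # The GET after the stubbed sleep finally reports SUSPENDED,
+ # letting the poll loop finish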
+ get().AndReturn((200, d2))
+ self.m.ReplayAll()
+
+ scheduler.TaskRunner(instance.suspend)()
+ self.assertEqual(instance.state, (instance.SUSPEND, instance.COMPLETE))
+
+ self.m.VerifyAll()
+
+ def test_instance_suspend_volumes_step(self):
+ return_server = self.fc.servers.list()[1]
+ instance = self._create_test_instance(return_server,
+ 'test_instance_suspend')
+
+ instance.resource_id = 1234
+ self.m.ReplayAll()
+
+ # Override the get_servers_1234 handler status to SUSPENDED
+ d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
+ d['server']['status'] = 'SUSPENDED'
+
+ # Return a dummy PollingTaskGroup to make check_suspend_complete step
+ def dummy_detach():
+ yield
+ dummy_tg = scheduler.PollingTaskGroup([dummy_detach, dummy_detach])
+ self.m.StubOutWithMock(instance, '_detach_volumes_task')
+ instance._detach_volumes_task().AndReturn(dummy_tg)
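+ # The dummy tasks force check_suspend_complete to step the
+ # volume group at least once before the nova suspend is started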
+
+ self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
+ get = self.fc.client.get_servers_1234
+ get().AndReturn((200, d))
+ self.m.ReplayAll()
+
+ scheduler.TaskRunner(instance.suspend)()
+ self.assertEqual(instance.state, (instance.SUSPEND, instance.COMPLETE))
+
+ self.m.VerifyAll()
+
def test_instance_status_build_spawning(self):
self._test_instance_status_not_build_active('BUILD(SPAWNING)')