]> review.fuel-infra Code Review - openstack-build/heat-build.git/commitdiff
Implement native nova server resource
author: Steve Baker <sbaker@redhat.com>
Sun, 18 Aug 2013 23:12:59 +0000 (11:12 +1200)
committer: Steve Baker <sbaker@redhat.com>
Wed, 28 Aug 2013 22:23:34 +0000 (10:23 +1200)
Some notes on the implementation:
- extends Resource rather than Instance since most of the
  complexity of Instance is in the handling of the Volumes
  attribute
- exposed properties and attributes map as closely as possible
  to the underlying v1 nova API (rather than the nova client library)
- property or attribute key namespaces have been stripped to provide
  a cleaner API and avoid YAML quoting.
  (e.g. the API's 'OS-EXT-SRV-ATTR:instance_name' is exposed as 'instance_name')
- an assumption is currently made that any volumes specified
  in block_device_mapping should not be detached on suspend
- Network address properties attempt to cater for simple and
  advanced use-cases:
  - 'first_private_address' and 'first_public_address' for simple
    template attribute scenarios, and to aid with AWS::EC2::Instance
    transition
  - 'networks' for a simple data structure which provides all addresses
  - 'addresses' for the raw data structure returned by the API

Implements blueprint native-nova-instance
Change-Id: I4136da22961c8aa90e3fc8a9411457622f1909fb

heat/engine/resources/server.py [new file with mode: 0644]
heat/tests/test_server.py [new file with mode: 0644]
heat/tests/v1_1/fakes.py

diff --git a/heat/engine/resources/server.py b/heat/engine/resources/server.py
new file mode 100644 (file)
index 0000000..f8f37b9
--- /dev/null
@@ -0,0 +1,455 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from heat.common import exception
+from heat.engine import clients
+from heat.engine import scheduler
+from heat.engine.resources import nova_utils
+from heat.engine import resource
+from heat.openstack.common.gettextutils import _
+from heat.openstack.common import log as logging
+
+logger = logging.getLogger(__name__)
+
+
+class Server(resource.Resource):
+    '''
+    Native OS::Nova::Server resource.
+
+    Extends Resource rather than Instance; properties and attributes map
+    as closely as possible to the underlying Nova server API, with key
+    namespaces stripped (e.g. 'OS-EXT-SRV-ATTR:instance_name' is exposed
+    as 'instance_name').
+    '''
+
+    # Schema for one entry of the 'block_device_mapping' list property.
+    block_mapping_schema = {
+        'device_name': {
+            'Type': 'String',
+            'Required': True,
+            'Description': _('A device name where the volume will be '
+                             'attached in the system at /dev/device_name. '
+                             'This value is typically vda')},
+        'volume_id': {
+            'Type': 'String',
+            'Description': _('The ID of the volume to boot from. Only one of '
+                             'volume_id or snapshot_id should be provided')},
+        'snapshot_id': {
+            'Type': 'String',
+            'Description': _('The ID of the snapshot to create a volume '
+                             'from')},
+        'volume_size': {
+            'Type': 'String',
+            'Description': _('The size of the volume, in GB. It is safe to '
+                             'leave this blank and have the Compute service '
+                             'infer the size')},
+        'delete_on_termination': {
+            'Type': 'Boolean',
+            'Description': _('Indicate whether the volume should be deleted '
+                             'when the server is terminated')}
+    }
+
+    # Schema for one entry of the 'networks' list property.
+    networks_schema = {
+        'uuid': {
+            'Type': 'String',
+            'Description': _('ID of network to create a port on')},
+        'fixed_ip': {
+            'Type': 'String',
+            'Description': _('Fixed IP address to specify for the port '
+                             'created on the requested network')},
+        'port': {
+            'Type': 'String',
+            'Description': _('ID of an existing port to associate with '
+                             'this server')},
+    }
+
+    properties_schema = {
+        'name': {
+            'Type': 'String',
+            'Description': _('Optional server name')},
+        'image': {
+            'Type': 'String',
+            'Description': _('The ID or name of the image to boot with')},
+        'block_device_mapping': {
+            'Type': 'List',
+            'Description': _('Block device mappings for this server'),
+            'Schema': {
+                'Type': 'Map',
+                'Schema': block_mapping_schema
+            }
+        },
+        'flavor': {
+            'Type': 'String',
+            'Description': _('The ID or name of the flavor to boot onto'),
+            'Required': True},
+        'flavor_update_policy': {
+            'Type': 'String',
+            'Description': _('Policy on how to apply a flavor update; either '
+                             'by requesting a server resize or by replacing '
+                             'the entire server'),
+            'Default': 'RESIZE',
+            'AllowedValues': ['RESIZE', 'REPLACE']},
+        'key_name': {
+            'Type': 'String',
+            'Description': _('Name of keypair to inject into the server')},
+        'availability_zone': {
+            'Type': 'String',
+            'Description': _('Name of the availability zone for server '
+                             'placement')},
+        'security_groups': {
+            'Type': 'List',
+            'Description': _('List of security group names')},
+        'networks': {
+            'Type': 'List',
+            'Description': _('An ordered list of nics to be '
+                             'added to this server, with information about '
+                             'connected networks, fixed ips, port etc'),
+            'Schema': {
+                'Type': 'Map',
+                'Schema': networks_schema
+            }
+        },
+        'scheduler_hints': {
+            'Type': 'Map',
+            'Description': _('Arbitrary key-value pairs specified by the '
+                             'client to help boot a server')},
+        'metadata': {
+            'Type': 'Map',
+            'Description': _('Arbitrary key/value metadata to store for this '
+                             'server. A maximum of five entries is allowed, '
+                             'and both keys and values must be 255 characters '
+                             'or less')},
+        'user_data': {
+            'Type': 'String',
+            'Description': _('User data script to be executed by cloud-init')},
+        'reservation_id': {
+            'Type': 'String',
+            'Description': _('A UUID for the set of servers being requested'),
+            'Implemented': False},
+        'config_drive': {
+            'Type': 'String',
+            'Description': _('value for config drive either boolean, or '
+                             'volume-id'),
+            'Implemented': False},
+        # diskConfig translates to API attribute OS-DCF:diskConfig
+        # hence the camel case instead of underscore to separate the words
+        'diskConfig': {
+            'Type': 'String',
+            'Description': _('Control how the disk is partitioned when the '
+                             'server is created'),
+            'AllowedValues': ['AUTO', 'MANUAL']}
+    }
+
+    attributes_schema = {
+        'show': _('A dict of all server details as returned by the API'),
+        'addresses': _('A dict of all network addresses as returned by '
+                       'the API'),
+        'networks': _('A dict of assigned network addresses of the form: '
+                      '{"public": [ip1, ip2...], "private": [ip3, ip4]}'),
+        'first_private_address': _('Convenience attribute to fetch the first '
+                                   'assigned private network address, or an '
+                                   'empty string if nothing has been assigned '
+                                   'at this time'),
+        'first_public_address': _('Convenience attribute to fetch the first '
+                                  'assigned public network address, or an '
+                                  'empty string if nothing has been assigned '
+                                  'at this time'),
+        'instance_name': _('AWS compatible instance name'),
+        'accessIPv4': _('The manually assigned alternative public IPv4 '
+                        'address of the server'),
+        'accessIPv6': _('The manually assigned alternative public IPv6 '
+                        'address of the server'),
+    }
+
+    # Only metadata and flavor changes are handled in-place; any other
+    # property change replaces the server.
+    update_allowed_keys = ('Metadata', 'Properties')
+    update_allowed_properties = ('flavor', 'flavor_update_policy')
+
+    def __init__(self, name, json_snippet, stack):
+        super(Server, self).__init__(name, json_snippet, stack)
+        # Lazily built MIME-wrapped userdata; see get_mime_string().
+        self.mime_string = None
+
+    def get_mime_string(self, userdata):
+        '''
+        Return the MIME-wrapped userdata, building and caching it on
+        first use.
+        '''
+        if not self.mime_string:
+            self.mime_string = nova_utils.build_userdata(self, userdata)
+        return self.mime_string
+
+    def handle_create(self):
+        '''
+        Start creation of the nova server.
+
+        Returns the server object, which check_create_complete() polls
+        until the server reaches ACTIVE.  The resource id is stored in a
+        finally block so the server is not orphaned if the thread is
+        cancelled mid-create.
+        '''
+        security_groups = self.properties.get('security_groups', [])
+        userdata = self.properties.get('user_data', '')
+        flavor = self.properties['flavor']
+        availability_zone = self.properties['availability_zone']
+
+        key_name = self.properties['key_name']
+        if key_name:
+            # confirm keypair exists
+            nova_utils.get_keypair(self.nova(), key_name)
+
+        image = self.properties.get('image')
+        if image:
+            # resolve an image name to its id if necessary
+            image = nova_utils.get_image_id(self.nova(), image)
+
+        flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)
+        instance_meta = self.properties.get('metadata')
+        scheduler_hints = self.properties.get('scheduler_hints')
+        nics = self._build_nics(self.properties.get('networks'))
+        block_device_mapping = self._build_block_device_mapping(
+            self.properties.get('block_device_mapping'))
+        reservation_id = self.properties.get('reservation_id')
+        config_drive = self.properties.get('config_drive')
+        disk_config = self.properties.get('diskConfig')
+
+        server = None
+        try:
+            server = self.nova().servers.create(
+                name=self.physical_resource_name(),
+                image=image,
+                flavor=flavor_id,
+                key_name=key_name,
+                security_groups=security_groups,
+                userdata=self.get_mime_string(userdata),
+                meta=instance_meta,
+                scheduler_hints=scheduler_hints,
+                nics=nics,
+                availability_zone=availability_zone,
+                block_device_mapping=block_device_mapping,
+                reservation_id=reservation_id,
+                config_drive=config_drive,
+                disk_config=disk_config)
+        finally:
+            # Avoid a race condition where the thread could be cancelled
+            # before the ID is stored
+            if server is not None:
+                self.resource_id_set(server.id)
+
+        return server
+
+    def check_create_complete(self, server):
+        '''Poll the created server until it becomes ACTIVE.'''
+        return self._check_active(server)
+
+    def _check_active(self, server):
+        '''
+        Check whether the server has reached ACTIVE.
+
+        Returns False while the status is still one of the deferred
+        states, True on ACTIVE, and raises Error on ERROR or any
+        unrecognised status.
+        '''
+
+        # Only refresh from the API while not yet ACTIVE
+        if server.status != 'ACTIVE':
+            server.get()
+
+        # Some clouds append extra (STATUS) strings to the status
+        short_server_status = server.status.split('(')[0]
+        if short_server_status in nova_utils.deferred_server_statuses:
+            return False
+        elif server.status == 'ACTIVE':
+            return True
+        elif server.status == 'ERROR':
+            exc = exception.Error(_('Creation of server %s failed.') %
+                                  server.name)
+            raise exc
+        else:
+            exc = exception.Error(_('Creation of server %(server)s failed '
+                                    'with unknown status: %(status)s') %
+                                  dict(server=server.name,
+                                       status=server.status))
+            raise exc
+
+    @staticmethod
+    def _build_block_device_mapping(bdm):
+        '''
+        Convert the block_device_mapping property into the dict form the
+        nova client expects: device_name mapped to a list of
+        [source_id, 'snap' or '', volume_size, delete_on_termination],
+        with trailing entries only present when set.
+        '''
+        if not bdm:
+            return None
+        bdm_dict = {}
+        for mapping in bdm:
+            mapping_parts = []
+            if mapping.get('snapshot_id'):
+                mapping_parts.append(mapping.get('snapshot_id'))
+                mapping_parts.append('snap')
+            else:
+                mapping_parts.append(mapping.get('volume_id'))
+                mapping_parts.append('')
+            # size must be present whenever delete_on_termination is,
+            # to keep the positional list well-formed
+            if (mapping.get('volume_size') or
+                    mapping.get('delete_on_termination')):
+
+                mapping_parts.append(mapping.get('volume_size', 0))
+            if mapping.get('delete_on_termination'):
+                mapping_parts.append(mapping.get('delete_on_termination'))
+            bdm_dict[mapping.get('device_name')] = mapping_parts
+
+        return bdm_dict
+
+    @staticmethod
+    def _build_nics(networks):
+        '''
+        Translate the networks property into the nics argument expected
+        by the nova client ('net-id', 'v4-fixed-ip' and 'port-id' keys).
+        '''
+        if not networks:
+            return None
+
+        nics = []
+
+        for net_data in networks:
+            nic_info = {}
+            if net_data.get('uuid'):
+                nic_info['net-id'] = net_data['uuid']
+            if net_data.get('fixed_ip'):
+                nic_info['v4-fixed-ip'] = net_data['fixed_ip']
+            if net_data.get('port'):
+                nic_info['port-id'] = net_data['port']
+            nics.append(nic_info)
+        return nics
+
+    def _resolve_attribute(self, name):
+        '''
+        Fetch the live server from the API and resolve the requested
+        attribute; implicitly returns None for unhandled names.
+        '''
+        server = self.nova().servers.get(self.resource_id)
+        if name == 'addresses':
+            return server.addresses
+        if name == 'networks':
+            return server.networks
+        if name == 'first_private_address':
+            private = server.networks.get('private', [])
+            if len(private) > 0:
+                return private[0]
+            return ''
+        if name == 'first_public_address':
+            public = server.networks.get('public', [])
+            if len(public) > 0:
+                return public[0]
+            return ''
+        if name == 'instance_name':
+            return server._info.get('OS-EXT-SRV-ATTR:instance_name')
+        if name == 'accessIPv4':
+            return server.accessIPv4
+        if name == 'accessIPv6':
+            return server.accessIPv6
+        if name == 'show':
+            return server._info
+
+    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+        '''
+        Apply a metadata update and/or a flavor change.
+
+        A flavor change with flavor_update_policy REPLACE raises
+        UpdateReplace so the whole server is rebuilt; with RESIZE the
+        server is resized in place and the resize polled to completion.
+        '''
+        if 'Metadata' in tmpl_diff:
+            self.metadata = tmpl_diff['Metadata']
+
+        if 'flavor' in prop_diff:
+
+            # the policy may itself be updated in the same change
+            flavor_update_policy = (
+                prop_diff.get('flavor_update_policy') or
+                self.properties.get('flavor_update_policy'))
+
+            if flavor_update_policy == 'REPLACE':
+                raise resource.UpdateReplace(self.name)
+
+            flavor = prop_diff['flavor']
+            flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)
+            server = self.nova().servers.get(self.resource_id)
+            server.resize(flavor_id)
+            scheduler.TaskRunner(nova_utils.check_resize, server, flavor)()
+
+    def metadata_update(self, new_metadata=None):
+        '''
+        Refresh the metadata if new_metadata is None
+        '''
+        if new_metadata is None:
+            self.metadata = self.parsed_template('Metadata')
+
+    def validate(self):
+        '''
+        Validate any of the provided params
+        '''
+        super(Server, self).validate()
+
+        # check validity of key
+        key_name = self.properties.get('key_name', None)
+        if key_name:
+            nova_utils.get_keypair(self.nova(), key_name)
+
+        # make sure the image exists if specified.
+        image = self.properties.get('image', None)
+        if image:
+            nova_utils.get_image_id(self.nova(), image)
+        else:
+            # TODO(sbaker) confirm block_device_mapping is populated
+            # for boot-by-volume (see LP bug #1215267)
+            pass
+
+    def handle_delete(self):
+        '''
+        Delete a server, blocking until it is disposed by OpenStack
+        '''
+        if self.resource_id is None:
+            return
+
+        try:
+            server = self.nova().servers.get(self.resource_id)
+        except clients.novaclient.exceptions.NotFound:
+            # already gone; nothing to do
+            pass
+        else:
+            delete = scheduler.TaskRunner(nova_utils.delete_server, server)
+            delete(wait_time=0.2)
+
+        self.resource_id = None
+
+    def handle_suspend(self):
+        '''
+        Suspend a server - note we do not wait for the SUSPENDED state,
+        this is polled for by check_suspend_complete in a similar way to the
+        create logic so we can take advantage of coroutines
+        '''
+        if self.resource_id is None:
+            raise exception.Error(_('Cannot suspend %s, resource_id not set') %
+                                  self.name)
+
+        try:
+            server = self.nova().servers.get(self.resource_id)
+        except clients.novaclient.exceptions.NotFound:
+            raise exception.NotFound(_('Failed to find server %s') %
+                                     self.resource_id)
+        else:
+            logger.debug('suspending server %s' % self.resource_id)
+            # We want the server.suspend to happen after the volume
+            # detachment has finished, so pass both tasks and the server
+            # NOTE(review): no detach task is actually created here; only
+            # the suspend runner is returned -- confirm this comment
+            # against the resource it was derived from
+            suspend_runner = scheduler.TaskRunner(server.suspend)
+            return server, suspend_runner
+
+    def check_suspend_complete(self, cookie):
+        '''
+        Start (on first call) and then poll the suspend task; True once
+        the server status reaches SUSPENDED, raising Error on any
+        unexpected status.
+        '''
+        server, suspend_runner = cookie
+
+        if not suspend_runner.started():
+            suspend_runner.start()
+
+        if suspend_runner.done():
+            if server.status == 'SUSPENDED':
+                return True
+
+            server.get()
+            logger.debug('%s check_suspend_complete status = %s' %
+                         (self.name, server.status))
+            # deferred statuses and ACTIVE mean keep polling; presumably
+            # SUSPENDED is in deferred_server_statuses so the comparison
+            # below returns True once it is reached -- confirm
+            if server.status in list(nova_utils.deferred_server_statuses +
+                                     ['ACTIVE']):
+                return server.status == 'SUSPENDED'
+            else:
+                exc = exception.Error(_('Suspend of server %(server)s failed '
+                                        'with unknown status: %(status)s') %
+                                      dict(server=server.name,
+                                           status=server.status))
+                raise exc
+
+    def handle_resume(self):
+        '''
+        Resume a server - note we do not wait for the ACTIVE state,
+        this is polled for by check_resume_complete in a similar way to the
+        create logic so we can take advantage of coroutines
+        '''
+        if self.resource_id is None:
+            raise exception.Error(_('Cannot resume %s, resource_id not set') %
+                                  self.name)
+
+        try:
+            server = self.nova().servers.get(self.resource_id)
+        except clients.novaclient.exceptions.NotFound:
+            raise exception.NotFound(_('Failed to find server %s') %
+                                     self.resource_id)
+        else:
+            logger.debug('resuming server %s' % self.resource_id)
+            server.resume()
+            return server
+
+    def check_resume_complete(self, server):
+        '''Poll until the resumed server is ACTIVE again.'''
+        return self._check_active(server)
+
+
+def resource_mapping():
+    '''Map the OS::Nova::Server resource type to its implementation.'''
+    return {
+        'OS::Nova::Server': Server,
+    }
diff --git a/heat/tests/test_server.py b/heat/tests/test_server.py
new file mode 100644 (file)
index 0000000..6e9ca75
--- /dev/null
@@ -0,0 +1,801 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+import mox
+
+from heat.engine import environment
+from heat.tests.v1_1 import fakes
+from heat.common import exception
+from heat.common import template_format
+from heat.engine import parser
+from heat.engine import resource
+from heat.engine import scheduler
+from heat.engine.resources import server as servers
+from heat.openstack.common import uuidutils
+from heat.tests.common import HeatTestCase
+from heat.tests import utils
+
+
+# Minimal CFN-style template with a single OS::Nova::Server resource,
+# used as the fixture for every test below.
+wp_template = '''
+{
+  "AWSTemplateFormatVersion" : "2010-09-09",
+  "Description" : "WordPress",
+  "Parameters" : {
+    "key_name" : {
+      "Description" : "key_name",
+      "Type" : "String",
+      "Default" : "test"
+    }
+  },
+  "Resources" : {
+    "WebServer": {
+      "Type": "OS::Nova::Server",
+      "Properties": {
+        "image" : "F17-x86_64-gold",
+        "flavor"   : "m1.large",
+        "key_name"        : "test",
+        "user_data"       : "wordpress"
+      }
+    }
+  }
+}
+'''
+
+
+class ServersTest(HeatTestCase):
+    def setUp(self):
+        '''Create a fake nova client and a dummy database.'''
+        super(ServersTest, self).setUp()
+        self.fc = fakes.FakeClient()
+        utils.setup_dummy_db()
+
+    def _setup_test_stack(self, stack_name):
+        '''Parse wp_template into a Stack; returns (template, stack).'''
+        t = template_format.parse(wp_template)
+        template = parser.Template(t)
+        stack = parser.Stack(utils.dummy_context(), stack_name, template,
+                             environment.Environment({'key_name': 'test'}),
+                             stack_id=uuidutils.generate_uuid())
+        return (t, stack)
+
+    def _setup_test_server(self, return_server, name, image_id=None):
+        '''Build a Server resource with nova() and servers.create stubbed
+        to return return_server; ReplayAll is left to the caller.'''
+        stack_name = '%s_stack' % name
+        (t, stack) = self._setup_test_stack(stack_name)
+
+        t['Resources']['WebServer']['Properties']['image'] = \
+            image_id or 'CentOS 5.2'
+        t['Resources']['WebServer']['Properties']['flavor'] = \
+            '256 MB Server'
+        server = servers.Server('%s_name' % name,
+                                t['Resources']['WebServer'], stack)
+
+        self.m.StubOutWithMock(server, 'nova')
+        server.nova().MultipleTimes().AndReturn(self.fc)
+
+        server.t = server.stack.resolve_runtime_data(server.t)
+
+        # need to resolve the template functions
+        #server_userdata = nova_utils.build_userdata(
+        #    server,
+        #    server.t['Properties']['user_data'])
+        #server.mime_string = server_userdata
+        self.m.StubOutWithMock(self.fc.servers, 'create')
+        self.fc.servers.create(
+            image=1, flavor=1, key_name='test',
+            name=utils.PhysName(stack_name, server.name),
+            security_groups=None,
+            userdata=mox.IgnoreArg(), scheduler_hints=None,
+            meta=None, nics=None, availability_zone=None,
+            block_device_mapping=None, config_drive=None,
+            disk_config=None, reservation_id=None).AndReturn(
+                return_server)
+
+        return server
+
+    def _create_test_server(self, return_server, name):
+        '''Set up a stubbed Server resource and run its create task.'''
+        server = self._setup_test_server(return_server, name)
+        self.m.ReplayAll()
+        scheduler.TaskRunner(server.create)()
+        return server
+
+    def test_server_create(self):
+        '''Creation succeeds and the address/info attributes resolve.'''
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_create')
+        # this makes sure the auto increment worked on server creation
+        self.assertTrue(server.id > 0)
+
+        public_ip = return_server.networks['public'][0]
+        self.assertEqual(
+            server.FnGetAtt('addresses')['public'][0]['addr'], public_ip)
+        self.assertEqual(
+            server.FnGetAtt('networks')['public'][0], public_ip)
+        self.assertEqual(
+            server.FnGetAtt('first_public_address'), public_ip)
+
+        private_ip = return_server.networks['private'][0]
+        self.assertEqual(
+            server.FnGetAtt('addresses')['private'][0]['addr'], private_ip)
+        self.assertEqual(
+            server.FnGetAtt('networks')['private'][0], private_ip)
+        self.assertEqual(
+            server.FnGetAtt('first_private_address'), private_ip)
+
+        self.assertEqual(return_server._info, server.FnGetAtt('show'))
+        self.assertEqual('sample-server2', server.FnGetAtt('instance_name'))
+        self.assertEqual('192.0.2.0', server.FnGetAtt('accessIPv4'))
+        self.assertEqual('::babe:4317:0A83', server.FnGetAtt('accessIPv6'))
+        self.m.VerifyAll()
+
+    def test_server_create_with_image_id(self):
+        '''Creation with an image id (not a name) also succeeds.'''
+        return_server = self.fc.servers.list()[1]
+        server = self._setup_test_server(return_server,
+                                         'test_server_create_image_id',
+                                         image_id='1')
+        self.m.StubOutWithMock(uuidutils, "is_uuid_like")
+        uuidutils.is_uuid_like('1').AndReturn(True)
+
+        self.m.ReplayAll()
+        scheduler.TaskRunner(server.create)()
+
+        # this makes sure the auto increment worked on server creation
+        self.assertTrue(server.id > 0)
+
+        public_ip = return_server.networks['public'][0]
+        self.assertEqual(
+            server.FnGetAtt('addresses')['public'][0]['addr'], public_ip)
+        self.assertEqual(
+            server.FnGetAtt('networks')['public'][0], public_ip)
+        self.assertEqual(
+            server.FnGetAtt('first_public_address'), public_ip)
+
+        private_ip = return_server.networks['private'][0]
+        self.assertEqual(
+            server.FnGetAtt('addresses')['private'][0]['addr'], private_ip)
+        self.assertEqual(
+            server.FnGetAtt('networks')['private'][0], private_ip)
+        self.assertEqual(
+            server.FnGetAtt('first_private_address'), private_ip)
+
+        self.m.VerifyAll()
+
+    def test_server_create_image_name_err(self):
+        '''An unknown image name raises ImageNotFound.'''
+        stack_name = 'test_server_create_image_name_err_stack'
+        (t, stack) = self._setup_test_stack(stack_name)
+
+        # create a server with a non-existent image name
+        t['Resources']['WebServer']['Properties']['image'] = 'Slackware'
+        server = servers.Server('server_create_image_err',
+                                t['Resources']['WebServer'], stack)
+
+        self.m.StubOutWithMock(server, 'nova')
+        server.nova().MultipleTimes().AndReturn(self.fc)
+        self.m.ReplayAll()
+
+        self.assertRaises(exception.ImageNotFound, server.handle_create)
+
+        self.m.VerifyAll()
+
+    def test_server_create_duplicate_image_name_err(self):
+        '''An ambiguous image name raises NoUniqueImageFound.'''
+        stack_name = 'test_server_create_image_name_err_stack'
+        (t, stack) = self._setup_test_stack(stack_name)
+
+        # create a server with a non-unique image name
+        t['Resources']['WebServer']['Properties']['image'] = 'CentOS 5.2'
+        server = servers.Server('server_create_image_err',
+                                t['Resources']['WebServer'], stack)
+
+        self.m.StubOutWithMock(server, 'nova')
+        server.nova().MultipleTimes().AndReturn(self.fc)
+        self.m.StubOutWithMock(self.fc.client, "get_images_detail")
+        self.fc.client.get_images_detail().AndReturn((
+            200, {'images': [{'id': 1, 'name': 'CentOS 5.2'},
+                             {'id': 4, 'name': 'CentOS 5.2'}]}))
+        self.m.ReplayAll()
+
+        self.assertRaises(exception.NoUniqueImageFound, server.handle_create)
+
+        self.m.VerifyAll()
+
+    def test_server_create_image_id_err(self):
+        '''An unknown image id raises ImageNotFound.'''
+        stack_name = 'test_server_create_image_id_err_stack'
+        (t, stack) = self._setup_test_stack(stack_name)
+
+        # create a server with a non-existent image id
+        t['Resources']['WebServer']['Properties']['image'] = '1'
+        server = servers.Server('server_create_image_err',
+                                t['Resources']['WebServer'], stack)
+
+        self.m.StubOutWithMock(server, 'nova')
+        server.nova().MultipleTimes().AndReturn(self.fc)
+        self.m.StubOutWithMock(uuidutils, "is_uuid_like")
+        uuidutils.is_uuid_like('1').AndReturn(True)
+        self.m.StubOutWithMock(self.fc.client, "get_images_1")
+        self.fc.client.get_images_1().AndRaise(
+            servers.clients.novaclient.exceptions.NotFound(404))
+        self.m.ReplayAll()
+
+        self.assertRaises(exception.ImageNotFound, server.handle_create)
+
+        self.m.VerifyAll()
+
+    def test_server_create_unexpected_status(self):
+        '''An unrecognised server status makes create raise Error.'''
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_create')
+        return_server.get = lambda: None
+        return_server.status = 'BOGUS'
+        self.assertRaises(exception.Error,
+                          server.check_create_complete,
+                          return_server)
+
+    def test_server_create_error_status(self):
+        '''A server in ERROR status makes create raise Error.'''
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_create')
+        return_server.status = 'ERROR'
+        return_server.fault = {
+            'message': 'NoValidHost',
+            'code': 500,
+            'created': '2013-08-14T03:12:10Z'
+        }
+        self.m.StubOutWithMock(return_server, 'get')
+        return_server.get()
+        self.m.ReplayAll()
+
+        self.assertRaises(exception.Error,
+                          server.check_create_complete,
+                          return_server)
+
+        self.m.VerifyAll()
+
+    def test_server_validate(self):
+        '''validate() passes when key and image resolve.'''
+        stack_name = 'test_server_validate_stack'
+        (t, stack) = self._setup_test_stack(stack_name)
+
+        # use an image id rather than a name
+        t['Resources']['WebServer']['Properties']['image'] = '1'
+        server = servers.Server('server_create_image_err',
+                                t['Resources']['WebServer'], stack)
+
+        self.m.StubOutWithMock(server, 'nova')
+        server.nova().MultipleTimes().AndReturn(self.fc)
+
+        self.m.StubOutWithMock(uuidutils, "is_uuid_like")
+        uuidutils.is_uuid_like('1').AndReturn(True)
+        self.m.ReplayAll()
+
+        self.assertEqual(server.validate(), None)
+
+        self.m.VerifyAll()
+
+    def test_server_validate_delete_policy(self):
+        '''validate() rejects an invalid DeletionPolicy value.'''
+        stack_name = 'test_server_validate_stack'
+        (t, stack) = self._setup_test_stack(stack_name)
+
+        # create a server with an invalid DeletionPolicy
+        t['Resources']['WebServer']['DeletionPolicy'] = 'SelfDestruct'
+        server = servers.Server('server_create_image_err',
+                                t['Resources']['WebServer'], stack)
+
+        self.m.ReplayAll()
+
+        ex = self.assertRaises(exception.StackValidationFailed,
+                               server.validate)
+        self.assertEqual('Invalid DeletionPolicy SelfDestruct',
+                         str(ex))
+
+        self.m.VerifyAll()
+
+    def test_server_delete(self):
+        '''Delete polls until nova reports the server gone.'''
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_create_delete')
+        server.resource_id = 1234
+
+        # this makes sure the auto increment worked on server creation
+        self.assertTrue(server.id > 0)
+
+        # first GET succeeds, second raises NotFound (server deleted)
+        server_get = self.fc.client.get_servers_1234()
+        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
+        get = self.fc.client.get_servers_1234
+        get().AndReturn(server_get)
+        get().AndRaise(servers.clients.novaclient.exceptions.NotFound(404))
+        mox.Replay(get)
+        self.m.ReplayAll()
+
+        scheduler.TaskRunner(server.delete)()
+        self.assertTrue(server.resource_id is None)
+        self.assertEqual(server.state, (server.DELETE, server.COMPLETE))
+        self.m.VerifyAll()
+
+    def test_server_delete_notfound(self):
+        """Delete succeeds when nova reports the server already gone."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_create_delete')
+        server.resource_id = 1234
+
+        # this makes sure the auto increment worked on server creation
+        self.assertTrue(server.id > 0)
+
+        # the very first poll raises NotFound; delete must still complete
+        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
+        get = self.fc.client.get_servers_1234
+        get().AndRaise(servers.clients.novaclient.exceptions.NotFound(404))
+        mox.Replay(get)
+
+        server.delete()
+        self.assertTrue(server.resource_id is None)
+        self.assertEqual(server.state, (server.DELETE, server.COMPLETE))
+        self.m.VerifyAll()
+
+        # delete again after resetting state: resource_id is already None,
+        # so no further nova calls are recorded or expected
+        server.state_set(server.CREATE, server.COMPLETE, 'to delete again')
+        server.delete()
+        self.assertEqual(server.state, (server.DELETE, server.COMPLETE))
+        self.m.VerifyAll()
+
+    def test_server_update_metadata(self):
+        """Metadata changes are applied in place, both via update() and
+        via metadata_update(), without replacing the resource."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_update')
+
+        update_template = copy.deepcopy(server.t)
+        update_template['Metadata'] = {'test': 123}
+        self.assertEqual(None, server.update(update_template))
+        self.assertEqual(server.metadata, {'test': 123})
+
+        # metadata_update() re-reads Metadata straight from the template
+        server.t['Metadata'] = {'test': 456}
+        server.metadata_update()
+        self.assertEqual(server.metadata, {'test': 456})
+
+    def test_server_update_server_flavor(self):
+        """
+        Server.handle_update supports changing the flavor, and applies
+        the change by making a resize API call against Nova.
+        """
+        return_server = self.fc.servers.list()[1]
+        return_server.id = 1234
+        server = self._create_test_server(return_server,
+                                          'test_server_update')
+
+        update_template = copy.deepcopy(server.t)
+        update_template['Properties']['flavor'] = 'm1.small'
+
+        self.m.StubOutWithMock(self.fc.servers, 'get')
+        self.fc.servers.get(1234).AndReturn(return_server)
+
+        # VERIFY_RESIZE is the status that triggers the confirmResize call
+        def activate_status(server):
+            server.status = 'VERIFY_RESIZE'
+        return_server.get = activate_status.__get__(return_server)
+
+        # expect the resize request followed by its confirmation
+        self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
+        self.fc.client.post_servers_1234_action(
+            body={'resize': {'flavorRef': 2}}).AndReturn((202, None))
+        self.fc.client.post_servers_1234_action(
+            body={'confirmResize': None}).AndReturn((202, None))
+        self.m.ReplayAll()
+
+        self.assertEqual(None, server.update(update_template))
+        self.assertEqual(server.state, (server.UPDATE, server.COMPLETE))
+        self.m.VerifyAll()
+
+    def test_server_update_server_flavor_failed(self):
+        """
+        If the status after a resize is not VERIFY_RESIZE, it means the resize
+        call failed, so we raise an explicit error.
+        """
+        return_server = self.fc.servers.list()[1]
+        return_server.id = 1234
+        server = self._create_test_server(return_server,
+                                          'test_server_update')
+
+        update_template = copy.deepcopy(server.t)
+        update_template['Properties']['flavor'] = 'm1.small'
+
+        self.m.StubOutWithMock(self.fc.servers, 'get')
+        self.fc.servers.get(1234).AndReturn(return_server)
+
+        # ACTIVE (instead of VERIFY_RESIZE) after the resize call means
+        # the resize did not take effect
+        def activate_status(server):
+            server.status = 'ACTIVE'
+        return_server.get = activate_status.__get__(return_server)
+
+        # only the resize request is expected; no confirmResize follows
+        self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
+        self.fc.client.post_servers_1234_action(
+            body={'resize': {'flavorRef': 2}}).AndReturn((202, None))
+        self.m.ReplayAll()
+
+        error = self.assertRaises(exception.ResourceFailure,
+                                  server.update, update_template)
+        self.assertEqual(
+            "Error: Resizing to 'm1.small' failed, status 'ACTIVE'",
+            str(error))
+        self.assertEqual(server.state, (server.UPDATE, server.FAILED))
+        self.m.VerifyAll()
+
+    def test_server_update_server_flavor_replace(self):
+        """With flavor_update_policy REPLACE, a flavor change must force
+        replacement of the resource rather than an in-place resize."""
+        stack_name = 'test_server_update_flavor_replace'
+        (t, stack) = self._setup_test_stack(stack_name)
+
+        t['Resources']['WebServer']['Properties'][
+            'flavor_update_policy'] = 'REPLACE'
+        server = servers.Server('server_server_update_flavor_replace',
+                                t['Resources']['WebServer'], stack)
+
+        update_template = copy.deepcopy(server.t)
+        update_template['Properties']['flavor'] = 'm1.smigish'
+        self.assertRaises(resource.UpdateReplace,
+                          server.update, update_template)
+
+    def test_server_update_server_flavor_policy_update(self):
+        """A flavor_update_policy changed in the same update is honoured
+        for the accompanying flavor change."""
+        stack_name = 'test_server_update_flavor_replace'
+        (t, stack) = self._setup_test_stack(stack_name)
+
+        server = servers.Server('server_server_update_flavor_replace',
+                                t['Resources']['WebServer'], stack)
+
+        update_template = copy.deepcopy(server.t)
+        # confirm that when flavor_update_policy is changed during
+        # the update then the updated policy is followed for a flavor
+        # update
+        update_template['Properties']['flavor_update_policy'] = 'REPLACE'
+        update_template['Properties']['flavor'] = 'm1.smigish'
+        self.assertRaises(resource.UpdateReplace,
+                          server.update, update_template)
+
+    def test_server_update_replace(self):
+        """An unrecognised top-level template key forces replacement."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_update')
+
+        update_template = copy.deepcopy(server.t)
+        update_template['Notallowed'] = {'test': 123}
+        self.assertRaises(resource.UpdateReplace,
+                          server.update, update_template)
+
+    def test_server_update_properties(self):
+        """Changing a non-updatable property (key_name) forces replacement."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_update')
+
+        update_template = copy.deepcopy(server.t)
+        update_template['Properties']['key_name'] = 'mustreplace'
+        self.assertRaises(resource.UpdateReplace,
+                          server.update, update_template)
+
+    def test_server_status_build(self):
+        """Create completes once the polled server status becomes ACTIVE."""
+        return_server = self.fc.servers.list()[0]
+        server = self._setup_test_server(return_server,
+                                         'test_server_status_build')
+        server.resource_id = 1234
+
+        # Bind fake get method which Server.check_create_complete will call
+        def activate_status(server):
+            server.status = 'ACTIVE'
+        return_server.get = activate_status.__get__(return_server)
+        self.m.ReplayAll()
+
+        scheduler.TaskRunner(server.create)()
+        self.assertEqual(server.state, (server.CREATE, server.COMPLETE))
+
+    def test_server_status_suspend_no_resource_id(self):
+        """Suspend fails cleanly when the resource has no nova server id."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_suspend')
+
+        server.resource_id = None
+        self.m.ReplayAll()
+
+        ex = self.assertRaises(exception.ResourceFailure,
+                               scheduler.TaskRunner(server.suspend))
+        self.assertEqual('Error: Cannot suspend test_server_suspend_name, '
+                         'resource_id not set',
+                         str(ex))
+        self.assertEqual(server.state, (server.SUSPEND, server.FAILED))
+
+        self.m.VerifyAll()
+
+    def test_server_status_suspend_not_found(self):
+        """Suspend fails when nova no longer knows the server id."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_suspend')
+
+        server.resource_id = 1234
+        # the lookup raises NotFound, so suspend must fail the resource
+        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
+        get = self.fc.client.get_servers_1234
+        get().AndRaise(servers.clients.novaclient.exceptions.NotFound(404))
+        mox.Replay(get)
+        self.m.ReplayAll()
+
+        ex = self.assertRaises(exception.ResourceFailure,
+                               scheduler.TaskRunner(server.suspend))
+        self.assertEqual('NotFound: Failed to find server 1234',
+                         str(ex))
+        self.assertEqual(server.state, (server.SUSPEND, server.FAILED))
+
+        self.m.VerifyAll()
+
+    def test_server_status_suspend_immediate(self):
+        """Suspend completes in one poll when the server is already
+        reported as SUSPENDED."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_suspend')
+
+        server.resource_id = 1234
+        self.m.ReplayAll()
+
+        # Override the get_servers_1234 handler status to SUSPENDED
+        d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
+        d['server']['status'] = 'SUSPENDED'
+        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
+        get = self.fc.client.get_servers_1234
+        get().AndReturn((200, d))
+        mox.Replay(get)
+
+        scheduler.TaskRunner(server.suspend)()
+        self.assertEqual(server.state, (server.SUSPEND, server.COMPLETE))
+
+        self.m.VerifyAll()
+
+    def test_server_status_resume_immediate(self):
+        """Resume completes in one poll when the server is already
+        reported as ACTIVE."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_resume')
+
+        server.resource_id = 1234
+        self.m.ReplayAll()
+
+        # Override the get_servers_1234 handler status to ACTIVE
+        d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
+        d['server']['status'] = 'ACTIVE'
+        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
+        get = self.fc.client.get_servers_1234
+        get().AndReturn((200, d))
+        mox.Replay(get)
+        # resume is only valid from a completed SUSPEND state
+        server.state_set(server.SUSPEND, server.COMPLETE)
+
+        scheduler.TaskRunner(server.resume)()
+        self.assertEqual(server.state, (server.RESUME, server.COMPLETE))
+
+        self.m.VerifyAll()
+
+    def test_server_status_suspend_wait(self):
+        """Suspend keeps polling while the server is still ACTIVE and
+        completes once SUSPENDED is reported."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_suspend')
+
+        server.resource_id = 1234
+        self.m.ReplayAll()
+
+        # Override the get_servers_1234 handler status to SUSPENDED, but
+        # return the ACTIVE state first (twice, so we sleep)
+        d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
+        d2 = copy.deepcopy(d1)
+        d1['server']['status'] = 'ACTIVE'
+        d2['server']['status'] = 'SUSPENDED'
+        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
+        get = self.fc.client.get_servers_1234
+        get().AndReturn((200, d1))
+        get().AndReturn((200, d1))
+        get().AndReturn((200, d2))
+        self.m.ReplayAll()
+
+        scheduler.TaskRunner(server.suspend)()
+        self.assertEqual(server.state, (server.SUSPEND, server.COMPLETE))
+
+        self.m.VerifyAll()
+
+    def test_server_status_suspend_unknown_status(self):
+        """Suspend fails explicitly if polling yields a status that is
+        neither ACTIVE nor SUSPENDED."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_suspend')
+
+        server.resource_id = 1234
+        self.m.ReplayAll()
+
+        # Override the get_servers_1234 handler to return ACTIVE twice
+        # (so we sleep) and then an unknown TRANSMOGRIFIED status
+        d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
+        d2 = copy.deepcopy(d1)
+        d1['server']['status'] = 'ACTIVE'
+        d2['server']['status'] = 'TRANSMOGRIFIED'
+        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
+        get = self.fc.client.get_servers_1234
+        get().AndReturn((200, d1))
+        get().AndReturn((200, d1))
+        get().AndReturn((200, d2))
+        self.m.ReplayAll()
+
+        ex = self.assertRaises(exception.ResourceFailure,
+                               scheduler.TaskRunner(server.suspend))
+        self.assertEqual('Error: Suspend of server sample-server failed '
+                         'with unknown status: TRANSMOGRIFIED',
+                         str(ex))
+        self.assertEqual(server.state, (server.SUSPEND, server.FAILED))
+
+        self.m.VerifyAll()
+
+    def test_server_status_resume_wait(self):
+        """Resume keeps polling while the server is still SUSPENDED and
+        completes once ACTIVE is reported."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_resume')
+
+        server.resource_id = 1234
+        self.m.ReplayAll()
+
+        # Override the get_servers_1234 handler status to ACTIVE, but
+        # return the SUSPENDED state first (twice, so we sleep)
+        d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
+        d2 = copy.deepcopy(d1)
+        d1['server']['status'] = 'SUSPENDED'
+        d2['server']['status'] = 'ACTIVE'
+        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
+        get = self.fc.client.get_servers_1234
+        get().AndReturn((200, d1))
+        get().AndReturn((200, d1))
+        get().AndReturn((200, d2))
+        self.m.ReplayAll()
+
+        # resume is only valid from a completed SUSPEND state
+        server.state_set(server.SUSPEND, server.COMPLETE)
+
+        scheduler.TaskRunner(server.resume)()
+        self.assertEqual(server.state, (server.RESUME, server.COMPLETE))
+
+        self.m.VerifyAll()
+
+    def test_server_status_resume_no_resource_id(self):
+        """Resume fails cleanly when the resource has no nova server id."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_suspend')
+
+        server.resource_id = None
+        self.m.ReplayAll()
+
+        server.state_set(server.SUSPEND, server.COMPLETE)
+        ex = self.assertRaises(exception.ResourceFailure,
+                               scheduler.TaskRunner(server.resume))
+        self.assertEqual('Error: Cannot resume test_server_suspend_name, '
+                         'resource_id not set',
+                         str(ex))
+        self.assertEqual(server.state, (server.RESUME, server.FAILED))
+
+        self.m.VerifyAll()
+
+    def test_server_status_resume_not_found(self):
+        """Resume fails when nova no longer knows the server id."""
+        return_server = self.fc.servers.list()[1]
+        server = self._create_test_server(return_server,
+                                          'test_server_resume')
+
+        server.resource_id = 1234
+        self.m.ReplayAll()
+
+        # the lookup raises NotFound, so resume must fail the resource
+        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
+        get = self.fc.client.get_servers_1234
+        get().AndRaise(servers.clients.novaclient.exceptions.NotFound(404))
+        self.m.ReplayAll()
+
+        server.state_set(server.SUSPEND, server.COMPLETE)
+
+        ex = self.assertRaises(exception.ResourceFailure,
+                               scheduler.TaskRunner(server.resume))
+        self.assertEqual('NotFound: Failed to find server 1234',
+                         str(ex))
+        self.assertEqual(server.state, (server.RESUME, server.FAILED))
+
+        self.m.VerifyAll()
+
+    # Each status below is a transient (non-terminal) nova state: create
+    # must keep polling through it until the server reaches ACTIVE.
+    # See _test_server_status_not_build_active for the shared logic.
+    def test_server_status_build_spawning(self):
+        self._test_server_status_not_build_active('BUILD(SPAWNING)')
+
+    def test_server_status_hard_reboot(self):
+        self._test_server_status_not_build_active('HARD_REBOOT')
+
+    def test_server_status_password(self):
+        self._test_server_status_not_build_active('PASSWORD')
+
+    def test_server_status_reboot(self):
+        self._test_server_status_not_build_active('REBOOT')
+
+    def test_server_status_rescue(self):
+        self._test_server_status_not_build_active('RESCUE')
+
+    def test_server_status_resize(self):
+        self._test_server_status_not_build_active('RESIZE')
+
+    def test_server_status_revert_resize(self):
+        self._test_server_status_not_build_active('REVERT_RESIZE')
+
+    def test_server_status_shutoff(self):
+        self._test_server_status_not_build_active('SHUTOFF')
+
+    def test_server_status_suspended(self):
+        self._test_server_status_not_build_active('SUSPENDED')
+
+    def test_server_status_verify_resize(self):
+        self._test_server_status_not_build_active('VERIFY_RESIZE')
+
+    def _test_server_status_not_build_active(self, uncommon_status):
+        """Create must tolerate `uncommon_status` while polling and still
+        complete once the server finally reports ACTIVE.
+
+        :param uncommon_status: a transient nova status returned by the
+            first poll before the server goes ACTIVE.
+        """
+        return_server = self.fc.servers.list()[0]
+        server = self._setup_test_server(return_server,
+                                         'test_server_status_build')
+        server.resource_id = 1234
+
+        # mutable cell so the closure below can count invocations
+        check_iterations = [0]
+
+        # Bind fake get method which Server.check_create_complete will call
+        def activate_status(server):
+            check_iterations[0] += 1
+            if check_iterations[0] == 1:
+                server.status = uncommon_status
+            if check_iterations[0] > 2:
+                server.status = 'ACTIVE'
+        return_server.get = activate_status.__get__(return_server)
+        self.m.ReplayAll()
+
+        scheduler.TaskRunner(server.create)()
+        self.assertEqual(server.state, (server.CREATE, server.COMPLETE))
+
+        self.m.VerifyAll()
+
+    def test_build_nics(self):
+        """_build_nics maps template network keys (uuid/fixed_ip/port) to
+        the novaclient nic keys (net-id/v4-fixed-ip/port-id), and returns
+        None for empty input."""
+        self.assertEqual(None, servers.Server._build_nics([]))
+        self.assertEqual(None, servers.Server._build_nics(None))
+        self.assertEqual([
+            {'net-id': '1234abcd'},
+            {'v4-fixed-ip': '192.0.2.0'},
+            {'port-id': 'aaaabbbb'}
+        ], servers.Server._build_nics([
+            {'uuid': '1234abcd'},
+            {'fixed_ip': '192.0.2.0'},
+            {'port': 'aaaabbbb'}
+        ]))
+
+    def test_server_without_ip_address(self):
+        """Address attributes degrade gracefully for a server whose
+        network has no addresses (fake server 'server-with-no-ip')."""
+        return_server = self.fc.servers.list()[3]
+        server = self._create_test_server(return_server,
+                                          'test_without_ip_address')
+
+        self.assertEqual(server.FnGetAtt('addresses'), {'empty_net': []})
+        self.assertEqual(server.FnGetAtt('networks'), {'empty_net': []})
+        # the first_*_address attributes fall back to an empty string
+        self.assertEqual(server.FnGetAtt('first_private_address'), '')
+        self.assertEqual(server.FnGetAtt('first_public_address'), '')
+
+    def test_build_block_device_mapping(self):
+        """_build_block_device_mapping converts the template list into the
+        nova mapping dict of device_name -> [id, snapshot-suffix, size,
+        delete_on_termination], returning None for empty input."""
+        self.assertEqual(
+            None, servers.Server._build_block_device_mapping([]))
+        self.assertEqual(
+            None, servers.Server._build_block_device_mapping(None))
+
+        # volume ids map to ['<id>', ''], snapshot ids to ['<id>', 'snap']
+        self.assertEqual({
+            'vda': ['1234', ''],
+            'vdb': ['1234', 'snap'],
+        }, servers.Server._build_block_device_mapping([
+            {'device_name': 'vda', 'volume_id': '1234'},
+            {'device_name': 'vdb', 'snapshot_id': '1234'},
+        ]))
+
+        # optional volume_size and delete_on_termination are appended
+        self.assertEqual({
+            'vdc': ['1234', '', 10],
+            'vdd': ['1234', 'snap', 0, True]
+        }, servers.Server._build_block_device_mapping([
+            {
+                'device_name': 'vdc',
+                'volume_id': '1234',
+                'volume_size': 10
+            },
+            {
+                'device_name': 'vdd',
+                'snapshot_id': '1234',
+                'delete_on_termination': True
+            }
+        ]))
index 4aaef6db75a64784a900ce2c87c5a55f1cf0e75b..57a07484b217f0490dc8f9fea3f45fad9cd0a4c8 100644 (file)
@@ -121,6 +121,8 @@ class FakeHTTPClient(base_client.HTTPClient):
     def get_servers_detail(self, **kw):
         return (200, {"servers": [{"id": 1234,
                                    "name": "sample-server",
+                                   "OS-EXT-SRV-ATTR:instance_name":
+                                   "sample-server",
                                    "image": {"id": 2,
                                              "name": "sample image"},
                                    "flavor": {"id": 1,
@@ -137,10 +139,14 @@ class FakeHTTPClient(base_client.HTTPClient):
                                                              "5.6.7.8"}],
                                    "private": [{"version": 4,
                                                 "addr": "10.11.12.13"}]},
+                                   "accessIPv4": "",
+                                   "accessIPv6": "",
                                    "metadata": {"Server Label": "Web Head 1",
                                                 "Image Version": "2.1"}},
                                   {"id": 5678,
                                    "name": "sample-server2",
+                                   "OS-EXT-SRV-ATTR:instance_name":
+                                   "sample-server2",
                                    "image": {"id": 2,
                                              "name": "sample image"},
                                    "flavor": {"id": 1,
@@ -148,6 +154,8 @@ class FakeHTTPClient(base_client.HTTPClient):
                                    "hostId":
                                    "9e107d9d372bb6826bd81d3542a419d6",
                                    "status": "ACTIVE",
+                                   "accessIPv4": "192.0.2.0",
+                                   "accessIPv6": "::babe:4317:0A83",
                                    "addresses": {"public": [{"version": 4,
                                                              "addr":
                                                              "4.5.6.7"},
@@ -159,6 +167,8 @@ class FakeHTTPClient(base_client.HTTPClient):
                                    "metadata": {"Server Label": "DB 1"}},
                                   {"id": 9101,
                                    "name": "hard-reboot",
+                                   "OS-EXT-SRV-ATTR:instance_name":
+                                   "hard-reboot",
                                    "image": {"id": 2,
                                              "name": "sample image"},
                                    "flavor": {"id": 1,
@@ -166,6 +176,8 @@ class FakeHTTPClient(base_client.HTTPClient):
                                    "hostId":
                                    "9e44d8d435c43dd8d96bb63ed995605f",
                                    "status": "HARD_REBOOT",
+                                   "accessIPv4": "",
+                                   "accessIPv6": "",
                                    "addresses": {"public": [{"version": 4,
                                                              "addr":
                                                              "172.17.1.2"},
@@ -177,6 +189,8 @@ class FakeHTTPClient(base_client.HTTPClient):
                                    "metadata": {"Server Label": "DB 1"}},
                                   {"id": 9102,
                                    "name": "server-with-no-ip",
+                                   "OS-EXT-SRV-ATTR:instance_name":
+                                   "server-with-no-ip",
                                    "image": {"id": 2,
                                              "name": "sample image"},
                                    "flavor": {"id": 1,
@@ -184,11 +198,15 @@ class FakeHTTPClient(base_client.HTTPClient):
                                    "hostId":
                                    "c1365ba78c624df9b2ff446515a682f5",
                                    "status": "ACTIVE",
+                                   "accessIPv4": "",
+                                   "accessIPv6": "",
                                    "addresses": {
                                        "empty_net": []},
                                    "metadata": {"Server Label": "DB 1"}},
                                   {"id": 9999,
                                    "name": "sample-server3",
+                                   "OS-EXT-SRV-ATTR:instance_name":
+                                   "sample-server3",
                                    "image": {"id": 3,
                                              "name": "sample image"},
                                    "flavor": {"id": 3,
@@ -196,6 +214,8 @@ class FakeHTTPClient(base_client.HTTPClient):
                                    "hostId":
                                    "9e107d9d372bb6826bd81d3542a419d6",
                                    "status": "ACTIVE",
+                                   "accessIPv4": "",
+                                   "accessIPv6": "",
                                    "addresses": {
                                    "public": [{"version": 4,
                                                "addr": "4.5.6.7"},