Implements an LBaaS driver for NetScaler devices
author     Youcef Laribi <youcef.laribi@citrix.com>
           Wed, 20 Nov 2013 20:29:01 +0000 (12:29 -0800)
committer  Youcef Laribi <youcef.laribi@citrix.com>
           Thu, 20 Feb 2014 20:14:06 +0000 (12:14 -0800)
This driver for the Neutron LBaaS plugin allows Citrix NetScaler
load balancing devices to be used to provide Neutron LBaaS
functionality in OpenStack.

Change-Id: Ibfeb54c4402943fb3696a1c599fa373e42e520d4
Implements: blueprint netscaler-lbaas-driver
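
Once the provider is enabled (see the configuration hunks below), the driver
is exercised through the regular LBaaS v1 API. A minimal, illustrative sketch
using python-neutronclient; the credentials, subnet id and pool attributes are
placeholders and are not part of this change:

    from neutronclient.v2_0 import client as neutron_client

    # Placeholder credentials for an illustrative client.
    neutron = neutron_client.Client(username='admin', password='secret',
                                    tenant_name='demo',
                                    auth_url='http://127.0.0.1:5000/v2.0')
    # Selecting provider='netscaler' routes the pool to this driver.
    neutron.create_pool({'pool': {'name': 'pool1',
                                  'protocol': 'HTTP',
                                  'lb_method': 'ROUND_ROBIN',
                                  'subnet_id': 'SUBNET-UUID',
                                  'provider': 'netscaler'}})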

etc/neutron.conf
etc/services.conf
neutron/services/loadbalancer/drivers/netscaler/__init__.py [new file with mode: 0644]
neutron/services/loadbalancer/drivers/netscaler/ncc_client.py [new file with mode: 0644]
neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py [new file with mode: 0644]
neutron/tests/unit/services/loadbalancer/drivers/netscaler/__init__.py [new file with mode: 0644]
neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client.py [new file with mode: 0644]
neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_driver.py [new file with mode: 0644]

index 1e2226fc3e2c7891b915a6b774059713b6b13561..a8dabdcc357ae4fba89d470e62dd60ca6492b2d9 100644 (file)
@@ -394,3 +394,5 @@ service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.hapr
 # If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
 # Otherwise comment the HA Proxy line
 #service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# Uncomment the following line to make the 'netscaler' LBaaS provider available.
+#service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
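
For illustration only, uncommenting that line (and, if NetScaler should become
the default provider, appending ':default' as described in the comments above)
would give an entry like the following; making it the default is an operator
choice, not something this change requires:

    service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver:default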
index 32d1029ac4ff131c675ba07f0e46613af18c3557..23ee9be8b546f0d6e8e55a285cf501531dfd19d5 100644 (file)
@@ -18,3 +18,8 @@
 #l4_workflow_name = openstack_l4
 #l2_l3_ctor_params = service: _REPLACE_, ha_network_name: HA-Network, ha_ip_pool_name: default, allocate_ha_vrrp: True, allocate_ha_ips: True
 #l2_l3_setup_params = data_port: 1, data_ip_address: 192.168.200.99, data_ip_mask: 255.255.255.0, gateway: 192.168.200.1, ha_port: 2
+
+[netscaler_driver]
+#netscaler_ncc_uri = https://ncc_server.acme.org/ncc/v1/api
+#netscaler_ncc_username = admin
+#netscaler_ncc_password = secret
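
With the options uncommented, the section would read roughly as below; the URI
and credentials are the sample placeholders from the lines above and must be
replaced with the real NetScaler Control Center endpoint and account:

    [netscaler_driver]
    netscaler_ncc_uri = https://ncc_server.acme.org/ncc/v1/api
    netscaler_ncc_username = admin
    netscaler_ncc_password = secret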
diff --git a/neutron/services/loadbalancer/drivers/netscaler/__init__.py b/neutron/services/loadbalancer/drivers/netscaler/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/neutron/services/loadbalancer/drivers/netscaler/ncc_client.py b/neutron/services/loadbalancer/drivers/netscaler/ncc_client.py
new file mode 100644 (file)
index 0000000..98c8a35
--- /dev/null
@@ -0,0 +1,182 @@
+# Copyright 2014 Citrix Systems
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import base64
+import requests
+
+from neutron.common import exceptions as n_exc
+from neutron.openstack.common import jsonutils
+from neutron.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+CONTENT_TYPE_HEADER = 'Content-type'
+ACCEPT_HEADER = 'Accept'
+AUTH_HEADER = 'Authorization'
+DRIVER_HEADER = 'X-OpenStack-LBaaS'
+TENANT_HEADER = 'X-Tenant-ID'
+JSON_CONTENT_TYPE = 'application/json'
+DRIVER_HEADER_VALUE = 'netscaler-openstack-lbaas'
+
+
+class NCCException(n_exc.NeutronException):
+
+    """Represents exceptions thrown by NSClient."""
+
+    CONNECTION_ERROR = 1
+    REQUEST_ERROR = 2
+    RESPONSE_ERROR = 3
+    UNKNOWN_ERROR = 4
+
+    def __init__(self, error):
+        self.message = _("NCC Error %d") % error
+        super(NCCException, self).__init__()
+        self.error = error
+
+
+class NSClient(object):
+
+    """Client to operate on REST resources of NetScaler Control Center."""
+
+    def __init__(self, service_uri, username, password):
+        if not service_uri:
+            msg = _("No NetScaler Control Center URI specified. "
+                    "Cannot connect.")
+            LOG.exception(msg)
+            raise NCCException(NCCException.CONNECTION_ERROR)
+        self.service_uri = service_uri.strip('/')
+        self.auth = None
+        if username and password:
+            base64string = base64.encodestring("%s:%s" % (username, password))
+            base64string = base64string[:-1]
+            self.auth = 'Basic %s' % base64string
+
+    def create_resource(self, tenant_id, resource_path, object_name,
+                        object_data):
+        """Create a resource on the NetScaler Control Center."""
+        return self._resource_operation('POST', tenant_id,
+                                        resource_path,
+                                        object_name=object_name,
+                                        object_data=object_data)
+
+    def retrieve_resource(self, tenant_id, resource_path, parse_response=True):
+        """Retrieve a resource from the NetScaler Control Center."""
+        return self._resource_operation('GET', tenant_id, resource_path)
+
+    def update_resource(self, tenant_id, resource_path, object_name,
+                        object_data):
+        """Update a resource on the NetScaler Control Center."""
+        return self._resource_operation('PUT', tenant_id,
+                                        resource_path,
+                                        object_name=object_name,
+                                        object_data=object_data)
+
+    def remove_resource(self, tenant_id, resource_path, parse_response=True):
+        """Remove a resource from the NetScaler Control Center."""
+        return self._resource_operation('DELETE', tenant_id, resource_path)
+
+    def _resource_operation(self, method, tenant_id, resource_path,
+                            object_name=None, object_data=None):
+        resource_uri = "%s/%s" % (self.service_uri, resource_path)
+        headers = self._setup_req_headers(tenant_id)
+        request_body = None
+        if object_data:
+            if isinstance(object_data, str):
+                request_body = object_data
+            else:
+                obj_dict = {object_name: object_data}
+                request_body = jsonutils.dumps(obj_dict)
+
+        response_status, resp_dict = self._execute_request(method,
+                                                           resource_uri,
+                                                           headers,
+                                                           body=request_body)
+        return response_status, resp_dict
+
+    def _is_valid_response(self, response_status):
+        # when status is less than 400, the response is fine
+        return response_status < requests.codes.bad_request
+
+    def _setup_req_headers(self, tenant_id):
+        headers = {ACCEPT_HEADER: JSON_CONTENT_TYPE,
+                   CONTENT_TYPE_HEADER: JSON_CONTENT_TYPE,
+                   DRIVER_HEADER: DRIVER_HEADER_VALUE,
+                   TENANT_HEADER: tenant_id,
+                   AUTH_HEADER: self.auth}
+        return headers
+
+    def _get_response_dict(self, response):
+        response_dict = {'status': response.status_code,
+                         'body': response.text,
+                         'headers': response.headers}
+        if self._is_valid_response(response.status_code):
+            if response.text:
+                response_dict['dict'] = response.json()
+        return response_dict
+
+    def _execute_request(self, method, resource_uri, headers, body=None):
+        try:
+            response = requests.request(method, url=resource_uri,
+                                        headers=headers, data=body)
+        except requests.exceptions.ConnectionError:
+            msg = (_("Connection error occurred while connecting to %s") %
+                   self.service_uri)
+            LOG.exception(msg)
+            raise NCCException(NCCException.CONNECTION_ERROR)
+        except requests.exceptions.SSLError:
+            msg = (_("SSL error occurred while connecting to %s") %
+                   self.service_uri)
+            LOG.exception(msg)
+            raise NCCException(NCCException.CONNECTION_ERROR)
+        except requests.exceptions.Timeout:
+            msg = _("Request to %s timed out") % self.service_uri
+            LOG.exception(msg)
+            raise NCCException(NCCException.CONNECTION_ERROR)
+        except (requests.exceptions.URLRequired,
+                requests.exceptions.InvalidURL,
+                requests.exceptions.MissingSchema,
+                requests.exceptions.InvalidSchema):
+            msg = _("Request did not specify a valid URL")
+            LOG.exception(msg)
+            raise NCCException(NCCException.REQUEST_ERROR)
+        except requests.exceptions.TooManyRedirects:
+            msg = (_("Too many redirects occurred for request to %s") %
+                   self.service_uri)
+            LOG.exception(msg)
+            raise NCCException(NCCException.REQUEST_ERROR)
+        except requests.exceptions.RequestException:
+            msg = (_("A request error occurred while connecting to %s") %
+                   self.service_uri)
+            LOG.exception(msg)
+            raise NCCException(NCCException.REQUEST_ERROR)
+        except Exception:
+            msg = (_("An unknown error occurred during the request to %s") %
+                   self.service_uri)
+            LOG.exception(msg)
+            raise NCCException(NCCException.UNKNOWN_ERROR)
+        resp_dict = self._get_response_dict(response)
+        LOG.debug(_("Response: %s"), resp_dict['body'])
+        response_status = resp_dict['status']
+        if response_status == requests.codes.unauthorized:
+            LOG.exception(_("Unable to login. Invalid credentials passed "
+                            "for: %s"), self.service_uri)
+            raise NCCException(NCCException.RESPONSE_ERROR)
+        if not self._is_valid_response(response_status):
+            msg = (_("Failed %(method)s operation on %(url)s "
+                   "status code: %(response_status)s") %
+                   {"method": method,
+                    "url": resource_uri,
+                    "response_status": response_status})
+            LOG.exception(msg)
+            raise NCCException(NCCException.RESPONSE_ERROR)
+        return response_status, resp_dict
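
For reviewers, a minimal sketch of how NSClient is meant to be used; the URI,
credentials, tenant id and payload are placeholders, and the real caller is
the plugin driver added in the next file:

    from neutron.services.loadbalancer.drivers.netscaler import ncc_client

    client = ncc_client.NSClient('https://ncc_server.acme.org/ncc/v1/api',
                                 'admin', 'secret')
    vip_data = {'id': 'VIP-UUID', 'name': 'vip1', 'protocol': 'HTTP',
                'address': '10.0.0.3', 'protocol_port': 80}
    # POSTs {"vip": vip_data} to <ncc_uri>/vips with the tenant, auth and
    # driver headers built by _setup_req_headers().
    status, resp = client.create_resource('TENANT-UUID', 'vips', 'vip',
                                          vip_data)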
diff --git a/neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py b/neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py
new file mode 100644 (file)
index 0000000..9f74d6a
--- /dev/null
@@ -0,0 +1,489 @@
+# Copyright 2014 Citrix Systems, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo.config import cfg
+
+from neutron.api.v2 import attributes
+from neutron.db.loadbalancer import loadbalancer_db
+from neutron.openstack.common import log as logging
+from neutron.plugins.common import constants
+from neutron.services.loadbalancer.drivers import abstract_driver
+from neutron.services.loadbalancer.drivers.netscaler import ncc_client
+
+LOG = logging.getLogger(__name__)
+
+NETSCALER_CC_OPTS = [
+    cfg.StrOpt(
+        'netscaler_ncc_uri',
+        help=_('The URL to reach the NetScaler Control Center Server.'),
+    ),
+    cfg.StrOpt(
+        'netscaler_ncc_username',
+        help=_('Username to login to the NetScaler Control Center Server.'),
+    ),
+    cfg.StrOpt(
+        'netscaler_ncc_password',
+        help=_('Password to login to the NetScaler Control Center Server.'),
+    )
+]
+
+cfg.CONF.register_opts(NETSCALER_CC_OPTS, 'netscaler_driver')
+
+VIPS_RESOURCE = 'vips'
+VIP_RESOURCE = 'vip'
+POOLS_RESOURCE = 'pools'
+POOL_RESOURCE = 'pool'
+POOLMEMBERS_RESOURCE = 'members'
+POOLMEMBER_RESOURCE = 'member'
+MONITORS_RESOURCE = 'healthmonitors'
+MONITOR_RESOURCE = 'healthmonitor'
+POOLSTATS_RESOURCE = 'statistics'
+PROV_SEGMT_ID = 'provider:segmentation_id'
+PROV_NET_TYPE = 'provider:network_type'
+DRIVER_NAME = 'netscaler_driver'
+
+
+class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
+
+    """NetScaler LBaaS Plugin driver class."""
+
+    def __init__(self, plugin):
+        self.plugin = plugin
+        ncc_uri = cfg.CONF.netscaler_driver.netscaler_ncc_uri
+        ncc_username = cfg.CONF.netscaler_driver.netscaler_ncc_username
+        ncc_password = cfg.CONF.netscaler_driver.netscaler_ncc_password
+        self.client = ncc_client.NSClient(ncc_uri,
+                                          ncc_username,
+                                          ncc_password)
+
+    def create_vip(self, context, vip):
+        """Create a vip on a NetScaler device."""
+        network_info = self._get_vip_network_info(context, vip)
+        ncc_vip = self._prepare_vip_for_creation(vip)
+        ncc_vip = dict(ncc_vip.items() + network_info.items())
+        msg = _("NetScaler driver vip creation: %s") % repr(ncc_vip)
+        LOG.debug(msg)
+        status = constants.ACTIVE
+        try:
+            self.client.create_resource(context.tenant_id, VIPS_RESOURCE,
+                                        VIP_RESOURCE, ncc_vip)
+        except ncc_client.NCCException:
+            status = constants.ERROR
+        self.plugin.update_status(context, loadbalancer_db.Vip, vip["id"],
+                                  status)
+
+    def update_vip(self, context, old_vip, vip):
+        """Update a vip on a NetScaler device."""
+        update_vip = self._prepare_vip_for_update(vip)
+        resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
+        msg = (_("NetScaler driver vip %(vip_id)s update: %(vip_obj)s") %
+               {"vip_id": vip["id"], "vip_obj": repr(vip)})
+        LOG.debug(msg)
+        status = constants.ACTIVE
+        try:
+            self.client.update_resource(context.tenant_id, resource_path,
+                                        VIP_RESOURCE, update_vip)
+        except ncc_client.NCCException:
+            status = constants.ERROR
+        self.plugin.update_status(context, loadbalancer_db.Vip, old_vip["id"],
+                                  status)
+
+    def delete_vip(self, context, vip):
+        """Delete a vip on a NetScaler device."""
+        resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
+        msg = _("NetScaler driver vip removal: %s") % vip["id"]
+        LOG.debug(msg)
+        try:
+            self.client.remove_resource(context.tenant_id, resource_path)
+        except ncc_client.NCCException:
+            self.plugin.update_status(context, loadbalancer_db.Vip,
+                                      vip["id"],
+                                      constants.ERROR)
+        else:
+            self.plugin._delete_db_vip(context, vip['id'])
+
+    def create_pool(self, context, pool):
+        """Create a pool on a NetScaler device."""
+        network_info = self._get_pool_network_info(context, pool)
+        # allocate a SNAT port/IP address on the subnet if one doesn't exist
+        self._create_snatport_for_subnet_if_not_exists(context,
+                                                       pool['tenant_id'],
+                                                       pool['subnet_id'],
+                                                       network_info)
+        ncc_pool = self._prepare_pool_for_creation(pool)
+        ncc_pool = dict(ncc_pool.items() + network_info.items())
+        msg = _("NetScaler driver pool creation: %s") % repr(ncc_pool)
+        LOG.debug(msg)
+        status = constants.ACTIVE
+        try:
+            self.client.create_resource(context.tenant_id, POOLS_RESOURCE,
+                                        POOL_RESOURCE, ncc_pool)
+        except ncc_client.NCCException:
+            status = constants.ERROR
+        self.plugin.update_status(context, loadbalancer_db.Pool,
+                                  ncc_pool["id"], status)
+
+    def update_pool(self, context, old_pool, pool):
+        """Update a pool on a NetScaler device."""
+        ncc_pool = self._prepare_pool_for_update(pool)
+        resource_path = "%s/%s" % (POOLS_RESOURCE, old_pool["id"])
+        msg = (_("NetScaler driver pool %(pool_id)s update: %(pool_obj)s") %
+               {"pool_id": old_pool["id"], "pool_obj": repr(ncc_pool)})
+        LOG.debug(msg)
+        status = constants.ACTIVE
+        try:
+            self.client.update_resource(context.tenant_id, resource_path,
+                                        POOL_RESOURCE, ncc_pool)
+        except ncc_client.NCCException:
+            status = constants.ERROR
+        self.plugin.update_status(context, loadbalancer_db.Pool,
+                                  old_pool["id"], status)
+
+    def delete_pool(self, context, pool):
+        """Delete a pool on a NetScaler device."""
+        resource_path = "%s/%s" % (POOLS_RESOURCE, pool['id'])
+        msg = _("NetScaler driver pool removal: %s") % pool["id"]
+        LOG.debug(msg)
+        try:
+            self.client.remove_resource(context.tenant_id, resource_path)
+        except ncc_client.NCCException:
+            self.plugin.update_status(context, loadbalancer_db.Pool,
+                                      pool["id"],
+                                      constants.ERROR)
+        else:
+            self.plugin._delete_db_pool(context, pool['id'])
+            self._remove_snatport_for_subnet_if_not_used(context,
+                                                         pool['tenant_id'],
+                                                         pool['subnet_id'])
+
+    def create_member(self, context, member):
+        """Create a pool member on a NetScaler device."""
+        ncc_member = self._prepare_member_for_creation(member)
+        msg = (_("NetScaler driver poolmember creation: %s") %
+               repr(ncc_member))
+        LOG.info(msg)
+        status = constants.ACTIVE
+        try:
+            self.client.create_resource(context.tenant_id,
+                                        POOLMEMBERS_RESOURCE,
+                                        POOLMEMBER_RESOURCE,
+                                        ncc_member)
+        except ncc_client.NCCException:
+            status = constants.ERROR
+        self.plugin.update_status(context, loadbalancer_db.Member,
+                                  member["id"], status)
+
+    def update_member(self, context, old_member, member):
+        """Update a pool member on a NetScaler device."""
+        ncc_member = self._prepare_member_for_update(member)
+        resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, old_member["id"])
+        msg = (_("NetScaler driver poolmember %(member_id)s update:"
+                 " %(member_obj)s") %
+               {"member_id": old_member["id"],
+                "member_obj": repr(ncc_member)})
+        LOG.debug(msg)
+        status = constants.ACTIVE
+        try:
+            self.client.update_resource(context.tenant_id, resource_path,
+                                        POOLMEMBER_RESOURCE, ncc_member)
+        except ncc_client.NCCException:
+            status = constants.ERROR
+        self.plugin.update_status(context, loadbalancer_db.Member,
+                                  old_member["id"], status)
+
+    def delete_member(self, context, member):
+        """Delete a pool member on a NetScaler device."""
+        resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, member['id'])
+        msg = (_("NetScaler driver poolmember removal: %s") %
+               member["id"])
+        LOG.debug(msg)
+        try:
+            self.client.remove_resource(context.tenant_id, resource_path)
+        except ncc_client.NCCException:
+            self.plugin.update_status(context, loadbalancer_db.Member,
+                                      member["id"],
+                                      constants.ERROR)
+        else:
+            self.plugin._delete_db_member(context, member['id'])
+
+    def create_pool_health_monitor(self, context, health_monitor, pool_id):
+        """Create a pool health monitor on a NetScaler device."""
+        ncc_hm = self._prepare_healthmonitor_for_creation(health_monitor,
+                                                          pool_id)
+        resource_path = "%s/%s/%s" % (POOLS_RESOURCE, pool_id,
+                                      MONITORS_RESOURCE)
+        msg = (_("NetScaler driver healthmonitor creation for pool %(pool_id)s"
+                 ": %(monitor_obj)s") %
+               {"pool_id": pool_id,
+                "monitor_obj": repr(ncc_hm)})
+        LOG.debug(msg)
+        status = constants.ACTIVE
+        try:
+            self.client.create_resource(context.tenant_id, resource_path,
+                                        MONITOR_RESOURCE,
+                                        ncc_hm)
+        except ncc_client.NCCException:
+            status = constants.ERROR
+        self.plugin.update_pool_health_monitor(context,
+                                               health_monitor['id'],
+                                               pool_id,
+                                               status, "")
+
+    def update_pool_health_monitor(self, context, old_health_monitor,
+                                   health_monitor, pool_id):
+        """Update a pool health monitor on a NetScaler device."""
+        ncc_hm = self._prepare_healthmonitor_for_update(health_monitor)
+        resource_path = "%s/%s" % (MONITORS_RESOURCE,
+                                   old_health_monitor["id"])
+        msg = (_("NetScaler driver healthmonitor %(monitor_id)s update: "
+                 "%(monitor_obj)s") %
+               {"monitor_id": old_health_monitor["id"],
+                "monitor_obj": repr(ncc_hm)})
+        LOG.debug(msg)
+        status = constants.ACTIVE
+        try:
+            self.client.update_resource(context.tenant_id, resource_path,
+                                        MONITOR_RESOURCE, ncc_hm)
+        except ncc_client.NCCException:
+            status = constants.ERROR
+        self.plugin.update_pool_health_monitor(context,
+                                               old_health_monitor['id'],
+                                               pool_id,
+                                               status, "")
+
+    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
+        """Delete a pool health monitor on a NetScaler device."""
+        resource_path = "%s/%s/%s/%s" % (POOLS_RESOURCE, pool_id,
+                                         MONITORS_RESOURCE,
+                                         health_monitor["id"])
+        msg = (_("NetScaler driver healthmonitor %(monitor_id)s "
+                 "removal for pool %(pool_id)s") %
+               {"monitor_id": health_monitor["id"],
+                "pool_id": pool_id})
+        LOG.debug(msg)
+        try:
+            self.client.remove_resource(context.tenant_id, resource_path)
+        except ncc_client.NCCException:
+            self.plugin.update_pool_health_monitor(context,
+                                                   health_monitor['id'],
+                                                   pool_id,
+                                                   constants.ERROR, "")
+        else:
+            self.plugin._delete_db_pool_health_monitor(context,
+                                                       health_monitor['id'],
+                                                       pool_id)
+
+    def stats(self, context, pool_id):
+        """Retrieve pool statistics from the NetScaler device."""
+        resource_path = "%s/%s" % (POOLSTATS_RESOURCE, pool_id)
+        msg = _("NetScaler driver pool stats retrieval: %s") % pool_id
+        LOG.debug(msg)
+        try:
+            stats = self.client.retrieve_resource(context.tenant_id,
+                                                  resource_path)[1]
+        except ncc_client.NCCException:
+            self.plugin.update_status(context, loadbalancer_db.Pool,
+                                      pool_id, constants.ERROR)
+        else:
+            return stats
+
+    def _prepare_vip_for_creation(self, vip):
+        creation_attrs = {
+            'id': vip['id'],
+            'tenant_id': vip['tenant_id'],
+            'protocol': vip['protocol'],
+            'address': vip['address'],
+            'protocol_port': vip['protocol_port'],
+        }
+        if 'session_persistence' in vip:
+            creation_attrs['session_persistence'] = vip['session_persistence']
+        update_attrs = self._prepare_vip_for_update(vip)
+        creation_attrs.update(update_attrs)
+        return creation_attrs
+
+    def _prepare_vip_for_update(self, vip):
+        return {
+            'name': vip['name'],
+            'description': vip['description'],
+            'pool_id': vip['pool_id'],
+            'connection_limit': vip['connection_limit'],
+            'admin_state_up': vip['admin_state_up']
+        }
+
+    def _prepare_pool_for_creation(self, pool):
+        creation_attrs = {
+            'id': pool['id'],
+            'tenant_id': pool['tenant_id'],
+            'vip_id': pool['vip_id'],
+            'protocol': pool['protocol'],
+            'subnet_id': pool['subnet_id'],
+        }
+        update_attrs = self._prepare_pool_for_update(pool)
+        creation_attrs.update(update_attrs)
+        return creation_attrs
+
+    def _prepare_pool_for_update(self, pool):
+        return {
+            'name': pool['name'],
+            'description': pool['description'],
+            'lb_method': pool['lb_method'],
+            'admin_state_up': pool['admin_state_up']
+        }
+
+    def _prepare_member_for_creation(self, member):
+        creation_attrs = {
+            'id': member['id'],
+            'tenant_id': member['tenant_id'],
+            'address': member['address'],
+            'protocol_port': member['protocol_port'],
+        }
+        update_attrs = self._prepare_member_for_update(member)
+        creation_attrs.update(update_attrs)
+        return creation_attrs
+
+    def _prepare_member_for_update(self, member):
+        return {
+            'pool_id': member['pool_id'],
+            'weight': member['weight'],
+            'admin_state_up': member['admin_state_up']
+        }
+
+    def _prepare_healthmonitor_for_creation(self, health_monitor, pool_id):
+        creation_attrs = {
+            'id': health_monitor['id'],
+            'tenant_id': health_monitor['tenant_id'],
+            'type': health_monitor['type'],
+        }
+        update_attrs = self._prepare_healthmonitor_for_update(health_monitor)
+        creation_attrs.update(update_attrs)
+        return creation_attrs
+
+    def _prepare_healthmonitor_for_update(self, health_monitor):
+        ncc_hm = {
+            'delay': health_monitor['delay'],
+            'timeout': health_monitor['timeout'],
+            'max_retries': health_monitor['max_retries'],
+            'admin_state_up': health_monitor['admin_state_up']
+        }
+        if health_monitor['type'] in ['HTTP', 'HTTPS']:
+            ncc_hm['http_method'] = health_monitor['http_method']
+            ncc_hm['url_path'] = health_monitor['url_path']
+            ncc_hm['expected_codes'] = health_monitor['expected_codes']
+        return ncc_hm
+
+    def _get_network_info(self, context, entity):
+        network_info = {}
+        subnet_id = entity['subnet_id']
+        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
+        network_id = subnet['network_id']
+        network = self.plugin._core_plugin.get_network(context, network_id)
+        network_info['network_id'] = network_id
+        network_info['subnet_id'] = subnet_id
+        if PROV_NET_TYPE in network:
+            network_info['network_type'] = network[PROV_NET_TYPE]
+        if PROV_SEGMT_ID in network:
+            network_info['segmentation_id'] = network[PROV_SEGMT_ID]
+        return network_info
+
+    def _get_vip_network_info(self, context, vip):
+        network_info = self._get_network_info(context, vip)
+        network_info['port_id'] = vip['port_id']
+        return network_info
+
+    def _get_pool_network_info(self, context, pool):
+        return self._get_network_info(context, pool)
+
+    def _get_pools_on_subnet(self, context, tenant_id, subnet_id):
+        filter_dict = {'subnet_id': [subnet_id], 'tenant_id': [tenant_id]}
+        return self.plugin.get_pools(context, filters=filter_dict)
+
+    def _get_snatport_for_subnet(self, context, tenant_id, subnet_id):
+        device_id = '_lb-snatport-' + subnet_id
+        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
+        network_id = subnet['network_id']
+        msg = (_("Filtering ports based on network_id=%(network_id)s, "
+                 "tenant_id=%(tenant_id)s, device_id=%(device_id)s") %
+               {'network_id': network_id,
+                'tenant_id': tenant_id,
+                'device_id': device_id})
+        LOG.debug(msg)
+        filter_dict = {
+            'network_id': [network_id],
+            'tenant_id': [tenant_id],
+            'device_id': [device_id],
+            'device_owner': [DRIVER_NAME]
+        }
+        ports = self.plugin._core_plugin.get_ports(context,
+                                                   filters=filter_dict)
+        if ports:
+            msg = _("Found an existing SNAT port for subnet %s") % subnet_id
+            LOG.info(msg)
+            return ports[0]
+        msg = _("Found no SNAT ports for subnet %s") % subnet_id
+        LOG.info(msg)
+
+    def _create_snatport_for_subnet(self, context, tenant_id, subnet_id,
+                                    ip_address):
+        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
+        fixed_ip = {'subnet_id': subnet['id']}
+        if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED:
+            fixed_ip['ip_address'] = ip_address
+        port_data = {
+            'tenant_id': tenant_id,
+            'name': '_lb-snatport-' + subnet_id,
+            'network_id': subnet['network_id'],
+            'mac_address': attributes.ATTR_NOT_SPECIFIED,
+            'admin_state_up': False,
+            'device_id': '_lb-snatport-' + subnet_id,
+            'device_owner': DRIVER_NAME,
+            'fixed_ips': [fixed_ip],
+        }
+        port = self.plugin._core_plugin.create_port(context,
+                                                    {'port': port_data})
+        msg = _("Created SNAT port: %s") % repr(port)
+        LOG.info(msg)
+        return port
+
+    def _remove_snatport_for_subnet(self, context, tenant_id, subnet_id):
+        port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
+        if port:
+            self.plugin._core_plugin.delete_port(context, port['id'])
+            msg = _("Removed SNAT port: %s") % repr(port)
+            LOG.info(msg)
+
+    def _create_snatport_for_subnet_if_not_exists(self, context, tenant_id,
+                                                  subnet_id, network_info):
+        port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
+        if not port:
+            msg = _("No SNAT port found for subnet %s."
+                    " Creating one...") % subnet_id
+            LOG.info(msg)
+            port = self._create_snatport_for_subnet(context, tenant_id,
+                                                    subnet_id,
+                                                    ip_address=None)
+        network_info['port_id'] = port['id']
+        network_info['snat_ip'] = port['fixed_ips'][0]['ip_address']
+        msg = _("SNAT port: %s") % repr(port)
+        LOG.info(msg)
+
+    def _remove_snatport_for_subnet_if_not_used(self, context, tenant_id,
+                                                subnet_id):
+        pools = self._get_pools_on_subnet(context, tenant_id, subnet_id)
+        if not pools:
+            # No pools are left on the subnet, so the SNAT port/IP address
+            # can be removed.
+            self._remove_snatport_for_subnet(context, tenant_id, subnet_id)
+            msg = _("Removed SNAT port for subnet %s "
+                    "after the last pool using it was deleted") % subnet_id
+            LOG.info(msg)
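
For reviewers, a sketch of how the pieces fit together: the service_provider
entry in neutron.conf points the LBaaS plugin at NetScalerPluginDriver, which
reads the [netscaler_driver] options and builds an NSClient. The option
overrides and the mock plugin below are placeholders for illustration only:

    import mock
    from oslo.config import cfg

    from neutron.services.loadbalancer.drivers.netscaler import (
        netscaler_driver
    )

    # Placeholder values; in a deployment these come from services.conf.
    cfg.CONF.set_override('netscaler_ncc_uri',
                          'https://ncc_server.acme.org/ncc/v1/api',
                          'netscaler_driver')
    cfg.CONF.set_override('netscaler_ncc_username', 'admin',
                          'netscaler_driver')
    cfg.CONF.set_override('netscaler_ncc_password', 'secret',
                          'netscaler_driver')

    # A mock stands in for the LBaaS plugin; normally the plugin constructs
    # the driver itself based on the service_provider entry.
    lbaas_plugin = mock.Mock()
    driver = netscaler_driver.NetScalerPluginDriver(lbaas_plugin)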
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/netscaler/__init__.py b/neutron/tests/unit/services/loadbalancer/drivers/netscaler/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client.py b/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client.py
new file mode 100644 (file)
index 0000000..d5eb6a7
--- /dev/null
@@ -0,0 +1,206 @@
+# Copyright 2014 Citrix Systems
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+import requests
+
+from neutron.services.loadbalancer.drivers.netscaler import (
+    ncc_client, netscaler_driver
+)
+from neutron.tests.unit import testlib_api
+
+NCC_CLIENT_CLASS = ('neutron.services.loadbalancer.drivers'
+                    '.netscaler.ncc_client.NSClient')
+
+TESTURI_SCHEME = 'http'
+TESTURI_HOSTNAME = '1.1.1.1'
+TESTURI_PORT = 4433
+TESTURI_PATH = '/ncc_service/1.0'
+TESTURI = '%s://%s:%s%s' % (TESTURI_SCHEME, TESTURI_HOSTNAME,
+                            TESTURI_PORT, TESTURI_PATH)
+TEST_USERNAME = 'user211'
+TEST_PASSWORD = '@30xHl5cT'
+TEST_TENANT_ID = '9c5245a2-0432-9d4c-4829-9bd7028603a1'
+TESTVIP_ID = '52ab5d71-6bb2-457f-8414-22a4ba55efec'
+
+
+class TestNSClient(testlib_api.WebTestCase):
+
+    """A Unit test for the NetScaler NCC client module."""
+
+    def setUp(self):
+        self.log = mock.patch.object(ncc_client, 'LOG').start()
+        super(TestNSClient, self).setUp()
+        # mock the requests.request function call so that it is restored
+        # by mock.patch.stopall() during cleanup
+        self.request_method_mock = mock.patch.object(requests,
+                                                     'request').start()
+        self.testclient = self._get_nsclient()
+        self.addCleanup(mock.patch.stopall)
+
+    def test_instantiate_nsclient_with_empty_uri(self):
+        """Asserts that a call with empty URI will raise an exception."""
+        self.assertRaises(ncc_client.NCCException, ncc_client.NSClient,
+                          '', TEST_USERNAME, TEST_PASSWORD)
+
+    def test_create_resource_with_no_connection(self):
+        """Asserts that a call with no connection will raise an exception."""
+        # mock a connection object that fails to establish a connection
+        self.request_method_mock.side_effect = (
+            requests.exceptions.ConnectionError())
+        resource_path = netscaler_driver.VIPS_RESOURCE
+        resource_name = netscaler_driver.VIP_RESOURCE
+        resource_body = self._get_testvip_httpbody_for_create()
+        # call method under test: create_resource() and assert that
+        # it raises an exception
+        self.assertRaises(ncc_client.NCCException,
+                          self.testclient.create_resource,
+                          TEST_TENANT_ID, resource_path,
+                          resource_name, resource_body)
+
+    def test_create_resource_with_error(self):
+        """Asserts that a failed create call raises an exception."""
+        # create a mock object to represent a valid http response
+        # with a failure status code.
+        fake_response = requests.Response()
+        fake_response.status_code = requests.codes.unauthorized
+        fake_response.headers = []
+        self.request_method_mock.return_value = fake_response
+        resource_path = netscaler_driver.VIPS_RESOURCE
+        resource_name = netscaler_driver.VIP_RESOURCE
+        resource_body = self._get_testvip_httpbody_for_create()
+        # call method under test: create_resource
+        # and assert that it raises the expected exception.
+        self.assertRaises(ncc_client.NCCException,
+                          self.testclient.create_resource,
+                          TEST_TENANT_ID, resource_path,
+                          resource_name, resource_body)
+
+    def test_create_resource(self):
+        """Asserts that a correct call will succeed."""
+        # create a valid http response with a successful status code.
+        fake_response = requests.Response()
+        fake_response.status_code = requests.codes.created
+        fake_response.headers = []
+        # obtain the mock object that corresponds to the call of request()
+        self.request_method_mock.return_value = fake_response
+        resource_path = netscaler_driver.VIPS_RESOURCE
+        resource_name = netscaler_driver.VIP_RESOURCE
+        resource_body = self._get_testvip_httpbody_for_create()
+        # call method under test: create_resource()
+        self.testclient.create_resource(TEST_TENANT_ID, resource_path,
+                                        resource_name, resource_body)
+        # assert that request() was called
+        # with the expected params.
+        resource_url = "%s/%s" % (self.testclient.service_uri, resource_path)
+        self.request_method_mock.assert_called_once_with(
+            'POST',
+            url=resource_url,
+            headers=mock.ANY,
+            data=mock.ANY)
+
+    def test_update_resource_with_error(self):
+        """Asserts that a failed update call raises an exception."""
+        # create a valid http response with a failure status code.
+        fake_response = requests.Response()
+        fake_response.status_code = requests.codes.unauthorized
+        fake_response.headers = []
+        # obtain the mock object that corresponds to the call of request()
+        self.request_method_mock.return_value = fake_response
+        resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
+                                   TESTVIP_ID)
+        resource_name = netscaler_driver.VIP_RESOURCE
+        resource_body = self._get_testvip_httpbody_for_update()
+        # call method under test: update_resource() and
+        # assert that it raises the expected exception.
+        self.assertRaises(ncc_client.NCCException,
+                          self.testclient.update_resource,
+                          TEST_TENANT_ID, resource_path,
+                          resource_name, resource_body)
+
+    def test_update_resource(self):
+        """Asserts that a correct update call will succeed."""
+        # create a valid http response with a successful status code.
+        fake_response = requests.Response()
+        fake_response.status_code = requests.codes.ok
+        fake_response.headers = []
+        # obtain the mock object that corresponds to the call of request()
+        self.request_method_mock.return_value = fake_response
+        resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
+                                   TESTVIP_ID)
+        resource_name = netscaler_driver.VIP_RESOURCE
+        resource_body = self._get_testvip_httpbody_for_update()
+        # call method under test: update_resource.
+        self.testclient.update_resource(TEST_TENANT_ID, resource_path,
+                                        resource_name, resource_body)
+        resource_url = "%s/%s" % (self.testclient.service_uri, resource_path)
+        # assert that requests.request() was called with the
+        # expected params.
+        self.request_method_mock.assert_called_once_with(
+            'PUT',
+            url=resource_url,
+            headers=mock.ANY,
+            data=mock.ANY)
+
+    def test_delete_resource_with_error(self):
+        """Asserts that a failed delete call raises an exception."""
+        # create a valid http response with a failure status code.
+        fake_response = requests.Response()
+        fake_response.status_code = requests.codes.unauthorized
+        fake_response.headers = []
+        # obtain the mock object that corresponds to the call of request()
+        self.request_method_mock.return_value = fake_response
+        resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
+                                   TESTVIP_ID)
+        # call method under test: remove_resource() and
+        # assert that it raises the expected exception.
+        self.assertRaises(ncc_client.NCCException,
+                          self.testclient.remove_resource,
+                          TEST_TENANT_ID, resource_path)
+
+    def test_delete_resource(self):
+        """Asserts that a correct delete call will succeed."""
+        # create a valid http response with a successful status code.
+        fake_response = requests.Response()
+        fake_response.status_code = requests.codes.ok
+        fake_response.headers = []
+        # obtain the mock object that corresponds to the call of request()
+        self.request_method_mock.return_value = fake_response
+        resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
+                                   TESTVIP_ID)
+        resource_url = "%s/%s" % (self.testclient.service_uri, resource_path)
+        # call method under test: remove_resource()
+        self.testclient.remove_resource(TEST_TENANT_ID, resource_path)
+        # assert that requests.request() was called with the
+        # expected params.
+        self.request_method_mock.assert_called_once_with(
+            'DELETE',
+            url=resource_url,
+            headers=mock.ANY,
+            data=mock.ANY)
+
+    def _get_nsclient(self):
+        return ncc_client.NSClient(TESTURI, TEST_USERNAME, TEST_PASSWORD)
+
+    def _get_testvip_httpbody_for_create(self):
+        body = {
+            'name': 'vip1',
+            'address': '10.0.0.3',
+            'pool_id': 'da477c13-24cd-4c9f-8c19-757a61ef3b9d',
+            'protocol': 'HTTP',
+            'protocol_port': 80,
+            'admin_state_up': True,
+        }
+        return body
+
+    def _get_testvip_httpbody_for_update(self):
+        body = {}
+        body['name'] = 'updated vip1'
+        body['admin_state_up'] = False
+        return body
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_driver.py
new file mode 100644 (file)
index 0000000..c8962e1
--- /dev/null
@@ -0,0 +1,803 @@
+# Copyright 2014 Citrix Systems
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import contextlib
+
+import mock
+
+from neutron.common import exceptions
+from neutron import context
+from neutron.db.loadbalancer import loadbalancer_db
+from neutron import manager
+from neutron.plugins.common import constants
+from neutron.services.loadbalancer.drivers.netscaler import ncc_client
+from neutron.services.loadbalancer.drivers.netscaler import netscaler_driver
+from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
+
+
+LBAAS_DRIVER_CLASS = ('neutron.services.loadbalancer.drivers'
+                      '.netscaler.netscaler_driver'
+                      '.NetScalerPluginDriver')
+
+NCC_CLIENT_CLASS = ('neutron.services.loadbalancer.drivers'
+                    '.netscaler.ncc_client'
+                    '.NSClient')
+
+LBAAS_PROVIDER_NAME = 'netscaler'
+LBAAS_PROVIDER = ('LOADBALANCER:%s:%s:default' %
+                  (LBAAS_PROVIDER_NAME, LBAAS_DRIVER_CLASS))
+
+#Test data
+TESTVIP_ID = '52ab5d71-6bb2-457f-8414-22a4ba55efec'
+TESTPOOL_ID = 'da477c13-24cd-4c9f-8c19-757a61ef3b9d'
+TESTMEMBER_ID = '84dea8bc-3416-4fb0-83f9-2ca6e7173bee'
+TESTMONITOR_ID = '9b9245a2-0413-4f15-87ef-9a41ef66048c'
+
+TESTVIP_PORT_ID = '327d9662-ade9-4c74-aaf6-c76f145c1180'
+TESTPOOL_PORT_ID = '132c1dbb-d3d8-45aa-96e3-71f2ea51651e'
+TESTPOOL_SNATIP_ADDRESS = '10.0.0.50'
+TESTPOOL_SNAT_PORT = {
+    'id': TESTPOOL_PORT_ID,
+    'fixed_ips': [{'ip_address': TESTPOOL_SNATIP_ADDRESS}]
+}
+TESTVIP_IP = '10.0.1.100'
+TESTMEMBER_IP = '10.0.0.5'
+
+
+class TestLoadBalancerPluginBase(test_db_loadbalancer
+                                 .LoadBalancerPluginDbTestCase):
+
+    def setUp(self):
+        super(TestLoadBalancerPluginBase, self).setUp(
+            lbaas_provider=LBAAS_PROVIDER)
+        loaded_plugins = manager.NeutronManager().get_service_plugins()
+        self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
+
+
+class TestNetScalerPluginDriver(TestLoadBalancerPluginBase):
+
+    """Unit tests for the NetScaler LBaaS driver module."""
+
+    def setUp(self):
+        mock.patch.object(netscaler_driver, 'LOG').start()
+
+        # mock the NSClient class (REST client)
+        client_mock_cls = mock.patch(NCC_CLIENT_CLASS).start()
+
+        #mock the REST methods of the NSClient class
+        self.client_mock_instance = client_mock_cls.return_value
+        self.create_resource_mock = self.client_mock_instance.create_resource
+        self.create_resource_mock.side_effect = mock_create_resource_func
+        self.update_resource_mock = self.client_mock_instance.update_resource
+        self.update_resource_mock.side_effect = mock_update_resource_func
+        self.retrieve_resource_mock = (self.client_mock_instance
+                                           .retrieve_resource)
+        self.retrieve_resource_mock.side_effect = mock_retrieve_resource_func
+        self.remove_resource_mock = self.client_mock_instance.remove_resource
+        self.remove_resource_mock.side_effect = mock_remove_resource_func
+        super(TestNetScalerPluginDriver, self).setUp()
+        self.plugin_instance.drivers[LBAAS_PROVIDER_NAME] = (
+            netscaler_driver.NetScalerPluginDriver(self.plugin_instance))
+        self.driver = self.plugin_instance.drivers[LBAAS_PROVIDER_NAME]
+        self.context = context.get_admin_context()
+        self.addCleanup(mock.patch.stopall)
+
+    def test_create_vip(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
+        ) as (subnet, mock_get_subnet):
+            mock_get_subnet.return_value = subnet['subnet']
+            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
+                testvip = self._build_testvip_contents(subnet['subnet'],
+                                                       pool['pool'])
+                expectedvip = self._build_expectedvip_contents(
+                    testvip,
+                    subnet['subnet'])
+                # mock the LBaaS plugin update_status().
+                self._mock_update_status()
+                # reset the create_resource() mock
+                self.create_resource_mock.reset_mock()
+                # execute the method under test
+                self.driver.create_vip(self.context, testvip)
+                # First, assert that create_resource was called once
+                # with expected params.
+                self.create_resource_mock.assert_called_once_with(
+                    None,
+                    netscaler_driver.VIPS_RESOURCE,
+                    netscaler_driver.VIP_RESOURCE,
+                    expectedvip)
+                #Finally, assert that the vip object is now ACTIVE
+                self.mock_update_status_obj.assert_called_once_with(
+                    mock.ANY,
+                    loadbalancer_db.Vip,
+                    expectedvip['id'],
+                    constants.ACTIVE)
+
+    def test_create_vip_without_connection(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
+        ) as (subnet, mock_get_subnet):
+            mock_get_subnet.return_value = subnet['subnet']
+            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
+                testvip = self._build_testvip_contents(subnet['subnet'],
+                                                       pool['pool'])
+                expectedvip = self._build_expectedvip_contents(
+                    testvip,
+                    subnet['subnet'])
+                errorcode = ncc_client.NCCException.CONNECTION_ERROR
+                self.create_resource_mock.side_effect = (
+                    ncc_client.NCCException(errorcode))
+                # mock the plugin's update_status()
+                self._mock_update_status()
+                # reset the create_resource() mock
+                self.create_resource_mock.reset_mock()
+                # execute the method under test.
+                self.driver.create_vip(self.context, testvip)
+                # First, assert that create_resource was called once
+                # with expected params.
+                self.create_resource_mock.assert_called_once_with(
+                    None,
+                    netscaler_driver.VIPS_RESOURCE,
+                    netscaler_driver.VIP_RESOURCE,
+                    expectedvip)
+                #Finally, assert that the vip object is in ERROR state
+                self.mock_update_status_obj.assert_called_once_with(
+                    mock.ANY,
+                    loadbalancer_db.Vip,
+                    testvip['id'],
+                    constants.ERROR)
+
+    def test_update_vip(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
+        ) as (subnet, mock_get_subnet):
+            mock_get_subnet.return_value = subnet['subnet']
+            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
+                with self.vip(pool=pool, subnet=subnet) as vip:
+                    updated_vip = self._build_updated_testvip_contents(
+                        vip['vip'],
+                        subnet['subnet'],
+                        pool['pool'])
+                    expectedvip = self._build_updated_expectedvip_contents(
+                        updated_vip,
+                        subnet['subnet'],
+                        pool['pool'])
+                    # mock the plugin's update_status()
+                    self._mock_update_status()
+                    # reset the update_resource() mock
+                    self.update_resource_mock.reset_mock()
+                    # execute the method under test
+                    self.driver.update_vip(self.context, updated_vip,
+                                           updated_vip)
+                    vip_resource_path = "%s/%s" % (
+                        (netscaler_driver.VIPS_RESOURCE,
+                         vip['vip']['id']))
+                    # First, assert that update_resource was called once
+                    # with expected params.
+                    (self.update_resource_mock
+                         .assert_called_once_with(
+                             None,
+                             vip_resource_path,
+                             netscaler_driver.VIP_RESOURCE,
+                             expectedvip))
+                    #Finally, assert that the vip object is now ACTIVE
+                    self.mock_update_status_obj.assert_called_once_with(
+                        mock.ANY,
+                        loadbalancer_db.Vip,
+                        vip['vip']['id'],
+                        constants.ACTIVE)
+
+    def test_delete_vip(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
+        ) as (subnet, mock_get_subnet):
+            mock_get_subnet.return_value = subnet['subnet']
+            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
+                with contextlib.nested(
+                    self.vip(pool=pool, subnet=subnet),
+                    mock.patch.object(self.driver.plugin, '_delete_db_vip')
+                ) as (vip, mock_delete_db_vip):
+                    mock_delete_db_vip.return_value = None
+                    #reset the remove_resource() mock
+                    self.remove_resource_mock.reset_mock()
+                    # execute the method under test
+                    self.driver.delete_vip(self.context, vip['vip'])
+                    vip_resource_path = "%s/%s" % (
+                                        (netscaler_driver.VIPS_RESOURCE,
+                                         vip['vip']['id']))
+                    # Assert that remove_resource() was called once
+                    # with expected params.
+                    (self.remove_resource_mock
+                         .assert_called_once_with(None, vip_resource_path))
+
+    def test_create_pool(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'),
+            mock.patch.object(self.driver.plugin._core_plugin, 'create_port')
+        ) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port):
+            mock_get_subnet.return_value = subnet['subnet']
+            mock_get_ports.return_value = None
+            mock_create_port.return_value = TESTPOOL_SNAT_PORT
+            testpool = self._build_testpool_contents(subnet['subnet'])
+            expectedpool = self._build_expectedpool_contents(testpool,
+                                                             subnet['subnet'])
+            #reset the create_resource() mock
+            self.create_resource_mock.reset_mock()
+            # mock the plugin's update_status()
+            self._mock_update_status()
+            # execute the method under test
+            self.driver.create_pool(self.context, testpool)
+            # First, assert that create_resource was called once
+            # with expected params.
+            (self.create_resource_mock
+                 .assert_called_once_with(None,
+                                          netscaler_driver.POOLS_RESOURCE,
+                                          netscaler_driver.POOL_RESOURCE,
+                                          expectedpool))
+            #Finally, assert that the pool object is now ACTIVE
+            self.mock_update_status_obj.assert_called_once_with(
+                mock.ANY,
+                loadbalancer_db.Pool,
+                expectedpool['id'],
+                constants.ACTIVE)
+
+    def test_create_pool_with_error(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'),
+            mock.patch.object(self.driver.plugin._core_plugin, 'create_port')
+        ) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port):
+            mock_get_subnet.return_value = subnet['subnet']
+            mock_get_ports.return_value = None
+            mock_create_port.return_value = TESTPOOL_SNAT_PORT
+            errorcode = ncc_client.NCCException.CONNECTION_ERROR
+            self.create_resource_mock.side_effect = (ncc_client
+                                                     .NCCException(errorcode))
+            testpool = self._build_testpool_contents(subnet['subnet'])
+            expectedpool = self._build_expectedpool_contents(testpool,
+                                                             subnet['subnet'])
+            # mock the plugin's update_status()
+            self._mock_update_status()
+            #reset the create_resource() mock
+            self.create_resource_mock.reset_mock()
+            # execute the method under test.
+            self.driver.create_pool(self.context, testpool)
+            # First, assert that create_resource was called once
+            # with expected params.
+            (self.create_resource_mock
+                 .assert_called_once_with(None,
+                                          netscaler_driver.POOLS_RESOURCE,
+                                          netscaler_driver.POOL_RESOURCE,
+                                          expectedpool))
+            #Finally, assert that the pool object is in ERROR state
+            self.mock_update_status_obj.assert_called_once_with(
+                mock.ANY,
+                loadbalancer_db.Pool,
+                expectedpool['id'],
+                constants.ERROR)
+
+    def test_create_pool_with_snatportcreate_failure(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'),
+            mock.patch.object(self.driver.plugin._core_plugin, 'create_port')
+        ) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port):
+            mock_get_subnet.return_value = subnet['subnet']
+            mock_get_ports.return_value = None
+            mock_create_port.side_effect = exceptions.NeutronException()
+            testpool = self._build_testpool_contents(subnet['subnet'])
+            #reset the create_resource() mock
+            self.create_resource_mock.reset_mock()
+            # execute the method under test.
+            self.assertRaises(exceptions.NeutronException,
+                              self.driver.create_pool,
+                              self.context, testpool)
+
+    def test_update_pool(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
+        ) as (subnet, mock_get_subnet):
+            mock_get_subnet.return_value = subnet['subnet']
+            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
+                updated_pool = self._build_updated_testpool_contents(
+                    pool['pool'],
+                    subnet['subnet'])
+                expectedpool = self._build_updated_expectedpool_contents(
+                    updated_pool,
+                    subnet['subnet'])
+                # mock the plugin's update_status()
+                self._mock_update_status()
+                # reset the update_resource() mock
+                self.update_resource_mock.reset_mock()
+                # execute the method under test.
+                self.driver.update_pool(self.context, pool['pool'],
+                                        updated_pool)
+                pool_resource_path = "%s/%s" % (
+                    netscaler_driver.POOLS_RESOURCE,
+                    pool['pool']['id'])
+                # First, assert that update_resource was called once
+                # with expected params.
+                (self.update_resource_mock
+                     .assert_called_once_with(None,
+                                              pool_resource_path,
+                                              netscaler_driver.POOL_RESOURCE,
+                                              expectedpool))
+                #Finally, assert that the pool object is now ACTIVE
+                self.mock_update_status_obj.assert_called_once_with(
+                    mock.ANY,
+                    loadbalancer_db.Pool,
+                    pool['pool']['id'],
+                    constants.ACTIVE)
+
+    def test_delete_pool(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
+        ) as (subnet, mock_get_subnet):
+            mock_get_subnet.return_value = subnet['subnet']
+            with contextlib.nested(
+                self.pool(provider=LBAAS_PROVIDER_NAME),
+                mock.patch.object(self.driver.plugin._core_plugin,
+                                  'delete_port'),
+                mock.patch.object(self.driver.plugin._core_plugin,
+                                  'get_ports'),
+                mock.patch.object(self.driver.plugin,
+                                  'get_pools'),
+                mock.patch.object(self.driver.plugin,
+                                  '_delete_db_pool')
+            ) as (pool, mock_delete_port, mock_get_ports, mock_get_pools,
+                  mock_delete_db_pool):
+                mock_delete_port.return_value = None
+                mock_get_ports.return_value = [{'id': TESTPOOL_PORT_ID}]
+                mock_get_pools.return_value = []
+                mock_delete_db_pool.return_value = None
+                #reset the remove_resource() mock
+                self.remove_resource_mock.reset_mock()
+                # execute the method under test.
+                self.driver.delete_pool(self.context, pool['pool'])
+                pool_resource_path = "%s/%s" % (
+                    netscaler_driver.POOLS_RESOURCE,
+                    pool['pool']['id'])
+                # Assert that remove_resource() was called
+                # once with expected params.
+                (self.remove_resource_mock
+                     .assert_called_once_with(None, pool_resource_path))
+
+    def test_create_member(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin,
+                              'get_subnet')
+        ) as (subnet, mock_get_subnet):
+            mock_get_subnet.return_value = subnet['subnet']
+            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
+                testmember = self._build_testmember_contents(pool['pool'])
+                expectedmember = self._build_expectedmember_contents(
+                    testmember)
+                # mock the plugin's update_status()
+                self._mock_update_status()
+                #reset the create_resource() mock
+                self.create_resource_mock.reset_mock()
+                # execute the method under test.
+                self.driver.create_member(self.context, testmember)
+                # First, assert that create_resource was called once
+                # with expected params.
+                (self.create_resource_mock
+                     .assert_called_once_with(
+                         None,
+                         netscaler_driver.POOLMEMBERS_RESOURCE,
+                         netscaler_driver.POOLMEMBER_RESOURCE,
+                         expectedmember))
+                #Finally, assert that the member object is now ACTIVE
+                self.mock_update_status_obj.assert_called_once_with(
+                    mock.ANY,
+                    loadbalancer_db.Member,
+                    expectedmember['id'],
+                    constants.ACTIVE)
+
+    def test_update_member(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
+        ) as (subnet, mock_get_subnet):
+            mock_get_subnet.return_value = subnet['subnet']
+            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
+                with self.member(pool_id=pool['pool']['id']) as member:
+                    updatedmember = (self._build_updated_testmember_contents(
+                        member['member']))
+                    expectedmember = (
+                        self._build_updated_expectedmember_contents(
+                            updatedmember))
+                    # mock the plugin's update_status()
+                    self._mock_update_status()
+                    # reset the update_resource() mock
+                    self.update_resource_mock.reset_mock()
+                    # execute the method under test
+                    self.driver.update_member(self.context,
+                                              member['member'],
+                                              updatedmember)
+                    member_resource_path = "%s/%s" % (
+                        netscaler_driver.POOLMEMBERS_RESOURCE,
+                        member['member']['id'])
+                    # First, assert that update_resource was called once
+                    # with expected params.
+                    (self.update_resource_mock
+                         .assert_called_once_with(
+                             None,
+                             member_resource_path,
+                             netscaler_driver.POOLMEMBER_RESOURCE,
+                             expectedmember))
+                    #Finally, assert that the member object is now ACTIVE
+                    self.mock_update_status_obj.assert_called_once_with(
+                        mock.ANY,
+                        loadbalancer_db.Member,
+                        member['member']['id'],
+                        constants.ACTIVE)
+
+    def test_delete_member(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
+        ) as (subnet, mock_get_subnet):
+            mock_get_subnet.return_value = subnet['subnet']
+            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
+                with contextlib.nested(
+                    self.member(pool_id=pool['pool']['id']),
+                    mock.patch.object(self.driver.plugin, '_delete_db_member')
+                ) as (member, mock_delete_db_member):
+                    mock_delete_db_member.return_value = None
+                    # reset the remove_resource() mock
+                    self.remove_resource_mock.reset_mock()
+                    # execute the method under test
+                    self.driver.delete_member(self.context,
+                                              member['member'])
+                    member_resource_path = "%s/%s" % (
+                        netscaler_driver.POOLMEMBERS_RESOURCE,
+                        member['member']['id'])
+                    # Assert that remove_resource() was called once
+                    # with expected params.
+                    (self.remove_resource_mock
+                         .assert_called_once_with(None, member_resource_path))
+
+    def test_create_pool_health_monitor(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
+        ) as (subnet, mock_get_subnet):
+            mock_get_subnet.return_value = subnet['subnet']
+            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
+                testhealthmonitor = self._build_testhealthmonitor_contents(
+                    pool['pool'])
+                expectedhealthmonitor = (
+                    self._build_expectedhealthmonitor_contents(
+                        testhealthmonitor))
+                with mock.patch.object(self.driver.plugin,
+                                       'update_pool_health_monitor') as mhm:
+                    # reset the create_resource() mock
+                    self.create_resource_mock.reset_mock()
+                    # execute the method under test.
+                    self.driver.create_pool_health_monitor(self.context,
+                                                           testhealthmonitor,
+                                                           pool['pool']['id'])
+                    # First, assert that create_resource was called once
+                    # with expected params.
+                    resource_path = "%s/%s/%s" % (
+                        netscaler_driver.POOLS_RESOURCE,
+                        pool['pool']['id'],
+                        netscaler_driver.MONITORS_RESOURCE)
+                    (self.create_resource_mock
+                         .assert_called_once_with(
+                             None,
+                             resource_path,
+                             netscaler_driver.MONITOR_RESOURCE,
+                             expectedhealthmonitor))
+                    # Finally, assert that the healthmonitor object is
+                    # now ACTIVE.
+                    (mhm.assert_called_once_with(
+                        mock.ANY,
+                        expectedhealthmonitor['id'],
+                        pool['pool']['id'],
+                        constants.ACTIVE, ""))
+
+    def test_update_pool_health_monitor(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
+        ) as (subnet, mock_get_subnet):
+            mock_get_subnet.return_value = subnet['subnet']
+            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
+                with self.health_monitor(
+                    pool_id=pool['pool']['id']
+                ) as (health_monitor):
+                    updatedhealthmonitor = (
+                        self._build_updated_testhealthmonitor_contents(
+                            health_monitor['health_monitor']))
+                    expectedhealthmonitor = (
+                        self._build_updated_expectedhealthmonitor_contents(
+                            updatedhealthmonitor))
+                    with mock.patch.object(
+                            self.driver.plugin,
+                            'update_pool_health_monitor') as mhm:
+                        # reset the update_resource() mock
+                        self.update_resource_mock.reset_mock()
+                        # execute the method under test.
+                        self.driver.update_pool_health_monitor(
+                            self.context,
+                            health_monitor['health_monitor'],
+                            updatedhealthmonitor,
+                            pool['pool']['id'])
+                        monitor_resource_path = "%s/%s" % (
+                            netscaler_driver.MONITORS_RESOURCE,
+                            health_monitor['health_monitor']['id'])
+                        # First, assert that update_resource was called once
+                        # with expected params.
+                        self.update_resource_mock.assert_called_once_with(
+                            None,
+                            monitor_resource_path,
+                            netscaler_driver.MONITOR_RESOURCE,
+                            expectedhealthmonitor)
+                        # Finally, assert that the healthmonitor object is
+                        # now ACTIVE.
+                        (mhm.assert_called_once_with(
+                            mock.ANY,
+                            health_monitor['health_monitor']['id'],
+                            pool['pool']['id'],
+                            constants.ACTIVE, ""))
+
+    def test_delete_pool_health_monitor(self):
+        with contextlib.nested(
+            self.subnet(),
+            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
+        ) as (subnet, mock_get_subnet):
+            mock_get_subnet.return_value = subnet['subnet']
+            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
+                with contextlib.nested(
+                    self.health_monitor(pool_id=pool['pool']['id']),
+                    mock.patch.object(self.driver.plugin,
+                                      '_delete_db_pool_health_monitor')
+                ) as (health_monitor, mock_delete_db_monitor):
+                    mock_delete_db_monitor.return_value = None
+                    # reset the remove_resource() mock
+                    self.remove_resource_mock.reset_mock()
+                    # execute the method under test.
+                    self.driver.delete_pool_health_monitor(
+                        self.context,
+                        health_monitor['health_monitor'],
+                        pool['pool']['id'])
+                    monitor_resource_path = "%s/%s/%s/%s" % (
+                        netscaler_driver.POOLS_RESOURCE,
+                        pool['pool']['id'],
+                        netscaler_driver.MONITORS_RESOURCE,
+                        health_monitor['health_monitor']['id'])
+                    # Assert that remove_resource() was called once
+                    # with expected params.
+                    self.remove_resource_mock.assert_called_once_with(
+                        None,
+                        monitor_resource_path)
+
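+    # The _build_test*_contents() helpers below fabricate the LBaaS objects
+    # (vip, pool, member, health monitor) fed to the driver under test, and
+    # the matching _build_*expected*_contents() helpers capture the payload
+    # the driver is expected to hand to the mocked NCC client calls.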
+    def _build_testvip_contents(self, subnet, pool):
+        vip_obj = dict(id=TESTVIP_ID,
+                       name='testvip',
+                       description='a test vip',
+                       tenant_id=self._tenant_id,
+                       subnet_id=subnet['id'],
+                       address=TESTVIP_IP,
+                       port_id=TESTVIP_PORT_ID,
+                       pool_id=pool['id'],
+                       protocol='HTTP',
+                       protocol_port=80,
+                       connection_limit=1000,
+                       admin_state_up=True,
+                       status='PENDING_CREATE',
+                       status_description='')
+        return vip_obj
+
+    def _build_expectedvip_contents(self, testvip, subnet):
+        expectedvip = dict(id=testvip['id'],
+                           name=testvip['name'],
+                           description=testvip['description'],
+                           tenant_id=testvip['tenant_id'],
+                           subnet_id=testvip['subnet_id'],
+                           address=testvip['address'],
+                           network_id=subnet['network_id'],
+                           port_id=testvip['port_id'],
+                           pool_id=testvip['pool_id'],
+                           protocol=testvip['protocol'],
+                           protocol_port=testvip['protocol_port'],
+                           connection_limit=testvip['connection_limit'],
+                           admin_state_up=testvip['admin_state_up'])
+        return expectedvip
+
+    def _build_updated_testvip_contents(self, testvip, subnet, pool):
+        # update some updatable fields of the vip
+        testvip['name'] = 'updated testvip'
+        testvip['description'] = 'An updated version of test vip'
+        testvip['connection_limit'] = 2000
+        return testvip
+
+    def _build_updated_expectedvip_contents(self, testvip, subnet, pool):
+        expectedvip = dict(name=testvip['name'],
+                           description=testvip['description'],
+                           connection_limit=testvip['connection_limit'],
+                           admin_state_up=testvip['admin_state_up'],
+                           pool_id=testvip['pool_id'])
+        return expectedvip
+
+    def _build_testpool_contents(self, subnet):
+        pool_obj = dict(id=TESTPOOL_ID,
+                        name='testpool',
+                        description='a test pool',
+                        tenant_id=self._tenant_id,
+                        subnet_id=subnet['id'],
+                        protocol='HTTP',
+                        vip_id=None,
+                        admin_state_up=True,
+                        lb_method='ROUND_ROBIN',
+                        status='PENDING_CREATE',
+                        status_description='',
+                        members=[],
+                        health_monitors=[],
+                        health_monitors_status=None,
+                        provider=LBAAS_PROVIDER_NAME)
+        return pool_obj
+
+    def _build_expectedpool_contents(self, testpool, subnet):
+        expectedpool = dict(id=testpool['id'],
+                            name=testpool['name'],
+                            description=testpool['description'],
+                            tenant_id=testpool['tenant_id'],
+                            subnet_id=testpool['subnet_id'],
+                            network_id=subnet['network_id'],
+                            protocol=testpool['protocol'],
+                            vip_id=testpool['vip_id'],
+                            lb_method=testpool['lb_method'],
+                            snat_ip=TESTPOOL_SNATIP_ADDRESS,
+                            port_id=TESTPOOL_PORT_ID,
+                            admin_state_up=testpool['admin_state_up'])
+        return expectedpool
+
+    def _build_updated_testpool_contents(self, testpool, subnet):
+        updated_pool = dict(testpool.items())
+        updated_pool['name'] = 'updated testpool'
+        updated_pool['description'] = 'An updated version of test pool'
+        updated_pool['lb_method'] = 'LEAST_CONNECTIONS'
+        updated_pool['admin_state_up'] = True
+        updated_pool['provider'] = LBAAS_PROVIDER_NAME
+        updated_pool['status'] = 'PENDING_UPDATE'
+        updated_pool['status_description'] = ''
+        updated_pool['members'] = []
+        updated_pool["health_monitors"] = []
+        updated_pool["health_monitors_status"] = None
+        return updated_pool
+
+    def _build_updated_expectedpool_contents(self, testpool, subnet):
+        expectedpool = dict(name=testpool['name'],
+                            description=testpool['description'],
+                            lb_method=testpool['lb_method'],
+                            admin_state_up=testpool['admin_state_up'])
+        return expectedpool
+
+    def _build_testmember_contents(self, pool):
+        member_obj = dict(
+            id=TESTMEMBER_ID,
+            tenant_id=self._tenant_id,
+            pool_id=pool['id'],
+            address=TESTMEMBER_IP,
+            protocol_port=8080,
+            weight=2,
+            admin_state_up=True,
+            status='PENDING_CREATE',
+            status_description='')
+        return member_obj
+
+    def _build_expectedmember_contents(self, testmember):
+        expectedmember = dict(
+            id=testmember['id'],
+            tenant_id=testmember['tenant_id'],
+            pool_id=testmember['pool_id'],
+            address=testmember['address'],
+            protocol_port=testmember['protocol_port'],
+            weight=testmember['weight'],
+            admin_state_up=testmember['admin_state_up'])
+        return expectedmember
+
+    def _build_updated_testmember_contents(self, testmember):
+        updated_member = dict(testmember.items())
+        updated_member.update(
+            weight=3,
+            admin_state_up=True,
+            status='PENDING_CREATE',
+            status_description=''
+        )
+        return updated_member
+
+    def _build_updated_expectedmember_contents(self, testmember):
+        expectedmember = dict(weight=testmember['weight'],
+                              pool_id=testmember['pool_id'],
+                              admin_state_up=testmember['admin_state_up'])
+        return expectedmember
+
+    def _build_testhealthmonitor_contents(self, pool):
+        monitor_obj = dict(
+            id=TESTMONITOR_ID,
+            tenant_id=self._tenant_id,
+            type='TCP',
+            delay=10,
+            timeout=5,
+            max_retries=3,
+            admin_state_up=True,
+            pools=[])
+        pool_obj = dict(status='PENDING_CREATE',
+                        status_description=None,
+                        pool_id=pool['id'])
+        monitor_obj['pools'].append(pool_obj)
+        return monitor_obj
+
+    def _build_expectedhealthmonitor_contents(self, testhealthmonitor):
+        expectedmonitor = dict(id=testhealthmonitor['id'],
+                               tenant_id=testhealthmonitor['tenant_id'],
+                               type=testhealthmonitor['type'],
+                               delay=testhealthmonitor['delay'],
+                               timeout=testhealthmonitor['timeout'],
+                               max_retries=testhealthmonitor['max_retries'],
+                               admin_state_up=(
+                                   testhealthmonitor['admin_state_up']))
+        return expectedmonitor
+
+    def _build_updated_testhealthmonitor_contents(self, testmonitor):
+        updated_monitor = dict(testmonitor.items())
+        updated_monitor.update(
+            delay=30,
+            timeout=3,
+            max_retries=5,
+            admin_state_up=True
+        )
+        return updated_monitor
+
+    def _build_updated_expectedhealthmonitor_contents(self, testmonitor):
+        expectedmonitor = dict(delay=testmonitor['delay'],
+                               timeout=testmonitor['timeout'],
+                               max_retries=testmonitor['max_retries'],
+                               admin_state_up=testmonitor['admin_state_up'])
+        return expectedmonitor
+
+    def _mock_update_status(self):
+        #patch the plugin's update_status() method with a mock object
+        self.mock_update_status_patcher = mock.patch.object(
+            self.driver.plugin,
+            'update_status')
+        self.mock_update_status_obj = self.mock_update_status_patcher.start()
+
+
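+# Stand-in return values for the mocked NCC client calls: each helper
+# returns the (HTTP status code, response body) pair of a successful
+# request (201 Created, 202 Accepted, 200 OK).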
+def mock_create_resource_func(*args, **kwargs):
+    return 201, {}
+
+
+def mock_update_resource_func(*args, **kwargs):
+    return 202, {}
+
+
+def mock_retrieve_resource_func(*args, **kwargs):
+    return 200, {}
+
+
+def mock_remove_resource_func(*args, **kwargs):
+    return 200, {}