review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
Split services code out of Neutron, pass 1
author    Doug Wiegley <dougw@a10networks.com>
          Mon, 8 Dec 2014 04:21:57 +0000 (21:21 -0700)
committer Doug Wiegley <dougw@a10networks.com>
          Mon, 8 Dec 2014 23:38:56 +0000 (16:38 -0700)
- After l3_agent is refactored, remove services/firewall
- After the vmware plugin moves its services out of the monolithic
  plugin, remove the model copies and services/loadbalancer/constants,
  and re-enable the unit tests.
- After the alembic chain is split into four, tweak models/head and
  fix the heal/current chain.
- Refactor test_routerserviceinsertion into one of the service repos

Partially-Implements: blueprint services-split
Change-Id: I5466984a9e57128266f97e9bd5c265f4dc3cba7b
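
For reference, the transitional import pattern this pass applies to each remaining in-tree consumer of the split-out service code (see the l3_agent.py, service.py, and edge_loadbalancer_driver.py hunks below) looks roughly like the sketch that follows. The package names neutron_fwaas, neutron_lbaas, and neutron_vpnaas are taken from the hunks themselves; note the sketch catches ImportError, while the actual hunks catch a bare Exception and carry TODO(dougw) markers on the fallback branch.

    # Sketch only, not part of the change: prefer the new out-of-tree service
    # package and fall back to the in-tree copy until the split is finished.
    try:
        from neutron_fwaas.services.firewall.agents.l3reference import (
            firewall_l3_agent)
    except ImportError:
        # temporary fallback; goes away once the l3_agent refactor lands
        from neutron.services.firewall.agents.l3reference import (
            firewall_l3_agent)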

114 files changed:
neutron/agent/l3_agent.py
neutron/db/migration/models/head.py
neutron/plugins/vmware/plugins/service.py
neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py
neutron/services/firewall/agents/varmour/__init__.py [deleted file]
neutron/services/firewall/agents/varmour/varmour_api.py [deleted file]
neutron/services/firewall/agents/varmour/varmour_router.py [deleted file]
neutron/services/firewall/agents/varmour/varmour_utils.py [deleted file]
neutron/services/firewall/drivers/__init__.py [deleted file]
neutron/services/firewall/drivers/fwaas_base.py [deleted file]
neutron/services/firewall/drivers/linux/__init__.py [deleted file]
neutron/services/firewall/drivers/linux/iptables_fwaas.py [deleted file]
neutron/services/firewall/drivers/varmour/__init__.py [deleted file]
neutron/services/firewall/drivers/varmour/varmour_fwaas.py [deleted file]
neutron/services/firewall/fwaas_plugin.py [deleted file]
neutron/services/loadbalancer/agent/__init__.py [deleted file]
neutron/services/loadbalancer/agent/agent.py [deleted file]
neutron/services/loadbalancer/agent/agent_api.py [deleted file]
neutron/services/loadbalancer/agent/agent_device_driver.py [deleted file]
neutron/services/loadbalancer/agent/agent_manager.py [deleted file]
neutron/services/loadbalancer/drivers/a10networks/README.txt [deleted file]
neutron/services/loadbalancer/drivers/a10networks/__init__.py [deleted file]
neutron/services/loadbalancer/drivers/a10networks/driver_v1.py [deleted file]
neutron/services/loadbalancer/drivers/common/__init__.py [deleted file]
neutron/services/loadbalancer/drivers/common/agent_driver_base.py [deleted file]
neutron/services/loadbalancer/drivers/driver_base.py [deleted file]
neutron/services/loadbalancer/drivers/driver_mixins.py [deleted file]
neutron/services/loadbalancer/drivers/haproxy/__init__.py [deleted file]
neutron/services/loadbalancer/drivers/haproxy/cfg.py [deleted file]
neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py [deleted file]
neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py [deleted file]
neutron/services/loadbalancer/drivers/logging_noop/__init__.py [deleted file]
neutron/services/loadbalancer/drivers/logging_noop/driver.py [deleted file]
neutron/services/loadbalancer/drivers/netscaler/__init__.py [deleted file]
neutron/services/loadbalancer/drivers/netscaler/ncc_client.py [deleted file]
neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py [deleted file]
neutron/services/loadbalancer/drivers/radware/__init__.py [deleted file]
neutron/services/loadbalancer/drivers/radware/driver.py [deleted file]
neutron/services/loadbalancer/drivers/radware/exceptions.py [deleted file]
neutron/services/loadbalancer/plugin.py [deleted file]
neutron/services/vpn/agent.py [deleted file]
neutron/services/vpn/common/__init__.py [deleted file]
neutron/services/vpn/common/topics.py [deleted file]
neutron/services/vpn/device_drivers/__init__.py [deleted file]
neutron/services/vpn/device_drivers/cisco_csr_rest_client.py [deleted file]
neutron/services/vpn/device_drivers/cisco_ipsec.py [deleted file]
neutron/services/vpn/device_drivers/ipsec.py [deleted file]
neutron/services/vpn/device_drivers/template/openswan/ipsec.conf.template [deleted file]
neutron/services/vpn/device_drivers/template/openswan/ipsec.secret.template [deleted file]
neutron/services/vpn/plugin.py [deleted file]
neutron/services/vpn/service_drivers/__init__.py
neutron/services/vpn/service_drivers/cisco_ipsec.py [deleted file]
neutron/services/vpn/service_drivers/cisco_validator.py [deleted file]
neutron/services/vpn/service_drivers/ipsec.py [deleted file]
neutron/tests/unit/db/firewall/__init__.py [deleted file]
neutron/tests/unit/db/firewall/test_db_firewall.py [deleted file]
neutron/tests/unit/db/loadbalancer/__init__.py [deleted file]
neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py [deleted file]
neutron/tests/unit/db/vpn/__init__.py [deleted file]
neutron/tests/unit/db/vpn/test_db_vpnaas.py [deleted file]
neutron/tests/unit/services/firewall/__init__.py [deleted file]
neutron/tests/unit/services/firewall/agents/__init__.py [deleted file]
neutron/tests/unit/services/firewall/agents/l3reference/__init__.py [deleted file]
neutron/tests/unit/services/firewall/agents/l3reference/test_firewall_l3_agent.py [deleted file]
neutron/tests/unit/services/firewall/agents/test_firewall_agent_api.py [deleted file]
neutron/tests/unit/services/firewall/agents/varmour/__init__.py [deleted file]
neutron/tests/unit/services/firewall/agents/varmour/test_varmour_router.py [deleted file]
neutron/tests/unit/services/firewall/drivers/__init__.py [deleted file]
neutron/tests/unit/services/firewall/drivers/linux/__init__.py [deleted file]
neutron/tests/unit/services/firewall/drivers/linux/test_iptables_fwaas.py [deleted file]
neutron/tests/unit/services/firewall/drivers/varmour/__init__.py [deleted file]
neutron/tests/unit/services/firewall/drivers/varmour/test_varmour_fwaas.py [deleted file]
neutron/tests/unit/services/firewall/test_fwaas_plugin.py [deleted file]
neutron/tests/unit/services/loadbalancer/agent/__init__.py [deleted file]
neutron/tests/unit/services/loadbalancer/agent/test_agent.py [deleted file]
neutron/tests/unit/services/loadbalancer/agent/test_agent_manager.py [deleted file]
neutron/tests/unit/services/loadbalancer/agent/test_api.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/__init__.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/a10networks/__init__.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/a10networks/test_driver_v1.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/embrane/__init__.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/embrane/test_embrane_defaults.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/embrane/test_plugin_driver.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/haproxy/__init__.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/logging_noop/__init__.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/logging_noop/test_logging_noop_driver.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/netscaler/__init__.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_driver.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/radware/__init__.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py [deleted file]
neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py [deleted file]
neutron/tests/unit/services/loadbalancer/test_agent_scheduler.py [deleted file]
neutron/tests/unit/services/loadbalancer/test_loadbalancer_plugin.py [deleted file]
neutron/tests/unit/services/loadbalancer/test_loadbalancer_quota_ext.py [deleted file]
neutron/tests/unit/services/vpn/__init__.py [deleted file]
neutron/tests/unit/services/vpn/device_drivers/__init__.py [deleted file]
neutron/tests/unit/services/vpn/device_drivers/test_cisco_csr_rest.py [deleted file]
neutron/tests/unit/services/vpn/device_drivers/test_cisco_ipsec.py [deleted file]
neutron/tests/unit/services/vpn/device_drivers/test_ipsec.py [deleted file]
neutron/tests/unit/services/vpn/service_drivers/__init__.py [deleted file]
neutron/tests/unit/services/vpn/service_drivers/test_cisco_ipsec.py [deleted file]
neutron/tests/unit/services/vpn/service_drivers/test_ipsec.py [deleted file]
neutron/tests/unit/services/vpn/test_vpn_agent.py [deleted file]
neutron/tests/unit/services/vpn/test_vpnaas_driver_plugin.py [deleted file]
neutron/tests/unit/services/vpn/test_vpnaas_extension.py [deleted file]
neutron/tests/unit/test_routerserviceinsertion.py.skip [moved from neutron/tests/unit/test_routerserviceinsertion.py with 100% similarity]
neutron/tests/unit/vmware/vshield/test_firewall_driver.py.skip [moved from neutron/tests/unit/vmware/vshield/test_firewall_driver.py with 100% similarity]
neutron/tests/unit/vmware/vshield/test_fwaas_plugin.py.skip [moved from neutron/tests/unit/vmware/vshield/test_fwaas_plugin.py with 100% similarity]
neutron/tests/unit/vmware/vshield/test_lbaas_plugin.py.skip [moved from neutron/tests/unit/vmware/vshield/test_lbaas_plugin.py with 100% similarity]
neutron/tests/unit/vmware/vshield/test_loadbalancer_driver.py.skip [moved from neutron/tests/unit/vmware/vshield/test_loadbalancer_driver.py with 100% similarity]
neutron/tests/unit/vmware/vshield/test_vpnaas_plugin.py.skip [moved from neutron/tests/unit/vmware/vshield/test_vpnaas_plugin.py with 100% similarity]

diff --git a/neutron/agent/l3_agent.py b/neutron/agent/l3_agent.py
index 65300481e60d319391599956c3110e39d8bf8a4c..acd4d67b2c6da8398dbb1fda9a3dd3f473239773 100644 (file)
@@ -52,7 +52,12 @@ from neutron.openstack.common import periodic_task
 from neutron.openstack.common import processutils
 from neutron.openstack.common import service
 from neutron import service as neutron_service
-from neutron.services.firewall.agents.l3reference import firewall_l3_agent
+try:
+    from neutron_fwaas.services.firewall.agents.l3reference \
+        import firewall_l3_agent
+except Exception:
+    # TODO(dougw) - REMOVE THIS FROM NEUTRON; during l3_agent refactor only
+    from neutron.services.firewall.agents.l3reference import firewall_l3_agent
 
 LOG = logging.getLogger(__name__)
 NS_PREFIX = 'qrouter-'
diff --git a/neutron/db/migration/models/head.py b/neutron/db/migration/models/head.py
index d7da9e794c930ee059840f10bde683833de53852..70e8c96d75cc77dad1d1930b26ce6c89749865b0 100644 (file)
@@ -28,6 +28,7 @@ from neutron.db import dvr_mac_db  # noqa
 from neutron.db import external_net_db  # noqa
 from neutron.db import extradhcpopt_db  # noqa
 from neutron.db import extraroute_db  # noqa
+# TODO(dougw) - services split, need to complete alembic fixes
 from neutron.db.firewall import firewall_db  # noqa
 from neutron.db import l3_agentschedulers_db  # noqa
 from neutron.db import l3_attrs_db  # noqa
@@ -35,6 +36,7 @@ from neutron.db import l3_db  # noqa
 from neutron.db import l3_dvrscheduler_db  # noqa
 from neutron.db import l3_gwmode_db  # noqa
 from neutron.db import l3_hamode_db  # noqa
+# TODO(dougw) - services split, need to complete alembic fixes
 from neutron.db.loadbalancer import loadbalancer_db  # noqa
 from neutron.db.metering import metering_db  # noqa
 from neutron.db import model_base
@@ -46,6 +48,7 @@ from neutron.db import routedserviceinsertion_db  # noqa
 from neutron.db import routerservicetype_db  # noqa
 from neutron.db import securitygroups_db  # noqa
 from neutron.db import servicetype_db  # noqa
+# TODO(dougw) - services split, need to complete alembic fixes
 from neutron.db.vpn import vpn_db  # noqa
 from neutron.plugins.bigswitch.db import consistency_db  # noqa
 from neutron.plugins.bigswitch import routerrule_db  # noqa
@@ -79,9 +82,12 @@ from neutron.plugins.vmware.dbexts import models as vmware_models  # noqa
 from neutron.plugins.vmware.dbexts import networkgw_db  # noqa
 from neutron.plugins.vmware.dbexts import qos_db  # noqa
 from neutron.plugins.vmware.dbexts import vcns_models  # noqa
+# TODO(dougw) - services split, need to complete alembic fixes
 from neutron.services.loadbalancer import agent_scheduler  # noqa
+# TODO(dougw) - services split, need to complete alembic fixes
 from neutron.services.loadbalancer.drivers.embrane import (  # noqa
     models as embrane_models)
+# TODO(dougw) - services split, need to complete alembic fixes
 from neutron.services.vpn.service_drivers import cisco_csr_db  # noqa
 
 
diff --git a/neutron/plugins/vmware/plugins/service.py b/neutron/plugins/vmware/plugins/service.py
index aede05ef6dde38534cd40403c0756decbb778bf9..57141df2d42b0fc17065b0cba15ce5342675c85f 100644 (file)
@@ -20,11 +20,26 @@ from oslo.utils import excutils
 
 from neutron.common import constants
 from neutron.common import exceptions as n_exc
-from neutron.db.firewall import firewall_db
+try:
+    from neutron_fwaas.db.firewall import firewall_db
+except Exception:
+    print("WARNING: missing neutron-fwaas package")
+    # TODO(dougw) - temporary, this is going away
+    from neutron.db.firewall import firewall_db
 from neutron.db import l3_db
-from neutron.db.loadbalancer import loadbalancer_db
+try:
+    from neutron_lbaas.db.loadbalancer import loadbalancer_db
+except Exception:
+    print("WARNING: missing neutron-lbaas package")
+    # TODO(dougw) - temporary, this is going away
+    from neutron.db.loadbalancer import loadbalancer_db
 from neutron.db import routedserviceinsertion_db as rsi_db
-from neutron.db.vpn import vpn_db
+try:
+    from neutron_vpnaas.db.vpn import vpn_db
+except Exception:
+    print("WARNING: missing neutron-vpnaas package")
+    # TODO(dougw) - temporary, this is going away
+    from neutron.db.vpn import vpn_db
 from neutron.extensions import firewall as fw_ext
 from neutron.extensions import l3
 from neutron.extensions import routedserviceinsertion as rsi
diff --git a/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py b/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py
index d1ce2c9df70ddf5a4c689692561f120b401d2dd8..b163bb494fc19102bbd8931ce79e81e4ac5d0f66 100644 (file)
@@ -21,7 +21,12 @@ from neutron.plugins.vmware.vshield.common import (
     constants as vcns_const)
 from neutron.plugins.vmware.vshield.common import (
     exceptions as vcns_exc)
-from neutron.services.loadbalancer import constants as lb_constants
+try:
+    from neutron_lbaas.services.loadbalancer import constants as lb_constants
+except Exception:
+    print("WARNING: missing neutron-lbaas package")
+    # TODO(dougw) - this is going away
+    from neutron.services.loadbalancer import constants as lb_constants
 
 LOG = logging.getLogger(__name__)
 
diff --git a/neutron/services/firewall/agents/varmour/__init__.py b/neutron/services/firewall/agents/varmour/__init__.py
deleted file mode 100755 (executable)
index e69de29..0000000
diff --git a/neutron/services/firewall/agents/varmour/varmour_api.py b/neutron/services/firewall/agents/varmour/varmour_api.py
deleted file mode 100755 (executable)
index 5cb885e..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright 2013 vArmour Networks Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import base64
-
-import httplib2
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-
-from neutron.i18n import _LE
-from neutron.openstack.common import log as logging
-from neutron.services.firewall.agents.varmour import varmour_utils as va_utils
-
-OPTS = [
-    cfg.StrOpt('director', default='localhost',
-               help=_("vArmour director ip")),
-    cfg.StrOpt('director_port', default='443',
-               help=_("vArmour director port")),
-    cfg.StrOpt('username', default='varmour',
-               help=_("vArmour director username")),
-    cfg.StrOpt('password', default='varmour', secret=True,
-               help=_("vArmour director password")), ]
-
-cfg.CONF.register_opts(OPTS, "vArmour")
-
-LOG = logging.getLogger(__name__)
-
-REST_URL_PREFIX = '/api/v1.0'
-
-
-class vArmourAPIException(Exception):
-    message = _("An unknown exception.")
-
-    def __init__(self, **kwargs):
-        try:
-            self.err = self.message % kwargs
-
-        except Exception:
-            self.err = self.message
-
-    def __str__(self):
-        return self.err
-
-
-class AuthenticationFailure(vArmourAPIException):
-    message = _("Invalid login credential.")
-
-
-class vArmourRestAPI(object):
-
-    def __init__(self):
-        LOG.debug('vArmourRestAPI: started')
-        self.user = cfg.CONF.vArmour.username
-        self.passwd = cfg.CONF.vArmour.password
-        self.server = cfg.CONF.vArmour.director
-        self.port = cfg.CONF.vArmour.director_port
-        self.timeout = 3
-        self.key = ''
-
-    def auth(self):
-        headers = {}
-        enc = base64.b64encode(self.user + ':' + self.passwd)
-        headers['Authorization'] = 'Basic ' + enc
-        resp = self.rest_api('POST', va_utils.REST_URL_AUTH, None, headers)
-        if resp and resp['status'] == 200:
-            self.key = resp['body']['auth']
-            return True
-        else:
-            raise AuthenticationFailure()
-
-    def commit(self):
-        self.rest_api('POST', va_utils.REST_URL_COMMIT)
-
-    def rest_api(self, method, url, body=None, headers=None):
-        url = REST_URL_PREFIX + url
-        if body:
-            body_data = jsonutils.dumps(body)
-        else:
-            body_data = ''
-        if not headers:
-            headers = {}
-            enc = base64.b64encode('%s:%s' % (self.user, self.key))
-            headers['Authorization'] = 'Basic ' + enc
-
-        LOG.debug("vArmourRestAPI: %(server)s %(port)s",
-                  {'server': self.server, 'port': self.port})
-
-        try:
-            action = "https://" + self.server + ":" + self.port + url
-
-            LOG.debug("vArmourRestAPI Sending: "
-                      "%(method)s %(action)s %(headers)s %(body_data)s",
-                      {'method': method, 'action': action,
-                       'headers': headers, 'body_data': body_data})
-
-            h = httplib2.Http(timeout=3,
-                              disable_ssl_certificate_validation=True)
-            resp, resp_str = h.request(action, method,
-                                       body=body_data,
-                                       headers=headers)
-
-            LOG.debug("vArmourRestAPI Response: %(status)s %(resp_str)s",
-                      {'status': resp.status, 'resp_str': resp_str})
-
-            if resp.status == 200:
-                return {'status': resp.status,
-                        'reason': resp.reason,
-                        'body': jsonutils.loads(resp_str)}
-        except Exception:
-            LOG.error(_LE('vArmourRestAPI: Could not establish HTTP '
-                          'connection'))
-
-    def del_cfg_objs(self, url, prefix):
-        resp = self.rest_api('GET', url)
-        if resp and resp['status'] == 200:
-            olist = resp['body']['response']
-            if not olist:
-                return
-
-            for o in olist:
-                if o.startswith(prefix):
-                    self.rest_api('DELETE', url + '/"name:%s"' % o)
-            self.commit()
-
-    def count_cfg_objs(self, url, prefix):
-        count = 0
-        resp = self.rest_api('GET', url)
-        if resp and resp['status'] == 200:
-            for o in resp['body']['response']:
-                if o.startswith(prefix):
-                    count += 1
-
-        return count
diff --git a/neutron/services/firewall/agents/varmour/varmour_router.py b/neutron/services/firewall/agents/varmour/varmour_router.py
deleted file mode 100755 (executable)
index 59cb524..0000000
+++ /dev/null
@@ -1,348 +0,0 @@
-# Copyright 2013 vArmour Networks Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-import eventlet
-eventlet.monkey_patch()
-
-import netaddr
-from oslo.config import cfg
-
-from neutron.agent.common import config
-from neutron.agent import l3_agent
-from neutron.agent import l3_ha_agent
-from neutron.agent.linux import external_process
-from neutron.agent.linux import interface
-from neutron.agent.linux import ip_lib
-from neutron.common import config as common_config
-from neutron.common import constants as l3_constants
-from neutron.common import topics
-from neutron.i18n import _LW
-from neutron.openstack.common import log as logging
-from neutron.openstack.common import service
-from neutron import service as neutron_service
-from neutron.services.firewall.agents.l3reference import firewall_l3_agent
-from neutron.services.firewall.agents.varmour import varmour_api
-from neutron.services.firewall.agents.varmour import varmour_utils as va_utils
-
-
-LOG = logging.getLogger(__name__)
-
-
-class vArmourL3NATAgent(l3_agent.L3NATAgent,
-                        firewall_l3_agent.FWaaSL3AgentRpcCallback):
-    def __init__(self, host, conf=None):
-        LOG.debug('vArmourL3NATAgent: __init__')
-        self.rest = varmour_api.vArmourRestAPI()
-        super(vArmourL3NATAgent, self).__init__(host, conf)
-
-    def _destroy_router_namespaces(self, only_router_id=None):
-        return
-
-    def _destroy_router_namespace(self, namespace):
-        return
-
-    def _create_router_namespace(self, ri):
-        return
-
-    def _router_added(self, router_id, router):
-        LOG.debug("_router_added: %s", router_id)
-        ri = l3_agent.RouterInfo(router_id, self.root_helper, router)
-        self.router_info[router_id] = ri
-        super(vArmourL3NATAgent, self).process_router_add(ri)
-
-    def _router_removed(self, router_id):
-        LOG.debug("_router_removed: %s", router_id)
-
-        ri = self.router_info[router_id]
-        if ri:
-            ri.router['gw_port'] = None
-            ri.router[l3_constants.INTERFACE_KEY] = []
-            ri.router[l3_constants.FLOATINGIP_KEY] = []
-            self.process_router(ri)
-
-            name = va_utils.get_snat_rule_name(ri)
-            self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, name)
-
-            name = va_utils.get_dnat_rule_name(ri)
-            self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, name)
-
-            name = va_utils.get_trusted_zone_name(ri)
-            self._va_unset_zone_interfaces(name, True)
-
-            name = va_utils.get_untrusted_zone_name(ri)
-            self._va_unset_zone_interfaces(name, True)
-
-            del self.router_info[router_id]
-
-    def _spawn_metadata_proxy(self, router_id, ns_name):
-        return
-
-    def _destroy_metadata_proxy(self, router_id, ns_name):
-        return
-
-    def _set_subnet_info(self, port):
-        ips = port['fixed_ips']
-        if not ips:
-            raise Exception(_("Router port %s has no IP address") % port['id'])
-        if len(ips) > 1:
-            LOG.warn(_LW("Ignoring multiple IPs on router port %s"),
-                     port['id'])
-        prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
-        port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
-
-    def _va_unset_zone_interfaces(self, zone_name, remove_zone=False):
-        # return True if zone exists; otherwise, return False
-        LOG.debug("_va_unset_zone_interfaces: %s", zone_name)
-        resp = self.rest.rest_api('GET', va_utils.REST_URL_CONF_ZONE)
-        if resp and resp['status'] == 200:
-            zlist = resp['body']['response']
-            for zn in zlist:
-                if zn == zone_name:
-                    commit = False
-
-                    if 'interface' in zlist[zn]:
-                        for intf in zlist[zn]['interface']:
-                            self.rest.rest_api('DELETE',
-                                               va_utils.REST_URL_CONF +
-                                               va_utils.REST_ZONE_NAME % zn +
-                                               va_utils.REST_INTF_NAME % intf)
-                            commit = True
-                    if remove_zone:
-                        self.rest.rest_api('DELETE',
-                                           va_utils.REST_URL_CONF +
-                                           va_utils.REST_ZONE_NAME % zn)
-                        commit = True
-
-                    if commit:
-                        self.rest.commit()
-
-                    return True
-
-        return False
-
-    def _va_pif_2_lif(self, pif):
-        return pif + '.0'
-
-    def _va_set_interface_ip(self, pif, cidr):
-        LOG.debug("_va_set_interface_ip: %(pif)s %(cidr)s",
-                  {'pif': pif, 'cidr': cidr})
-
-        lif = self._va_pif_2_lif(pif)
-        obj = va_utils.REST_INTF_NAME % pif + va_utils.REST_LOGIC_NAME % lif
-        body = {
-            'name': lif,
-            'family': 'ipv4',
-            'address': cidr
-        }
-        self.rest.rest_api('PUT', va_utils.REST_URL_CONF + obj, body)
-
-    def _va_get_port_name(self, port_list, name):
-        if name:
-            for p in port_list:
-                if p['VM name'] == name:
-                    return p['name']
-
-    def _va_config_trusted_zone(self, ri, plist):
-        zone = va_utils.get_trusted_zone_name(ri)
-        LOG.debug("_va_config_trusted_zone: %s", zone)
-
-        body = {
-            'name': zone,
-            'type': 'L3',
-            'interface': []
-        }
-
-        if not self._va_unset_zone_interfaces(zone):
-            # if zone doesn't exist, create it
-            self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body)
-            self.rest.commit()
-
-        # add new internal ports to trusted zone
-        for p in ri.internal_ports:
-            if p['admin_state_up']:
-                dev = self.get_internal_device_name(p['id'])
-                pif = self._va_get_port_name(plist, dev)
-                if pif:
-                    lif = self._va_pif_2_lif(pif)
-                    if lif not in body['interface']:
-                        body['interface'].append(lif)
-
-                        self._va_set_interface_ip(pif, p['ip_cidr'])
-
-        if body['interface']:
-            self.rest.rest_api('PUT', va_utils.REST_URL_CONF_ZONE, body)
-            self.rest.commit()
-
-    def _va_config_untrusted_zone(self, ri, plist):
-        zone = va_utils.get_untrusted_zone_name(ri)
-        LOG.debug("_va_config_untrusted_zone: %s", zone)
-
-        body = {
-            'name': zone,
-            'type': 'L3',
-            'interface': []
-        }
-
-        if not self._va_unset_zone_interfaces(zone):
-            # if zone doesn't exist, create it
-            self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body)
-            self.rest.commit()
-
-        # add new gateway ports to untrusted zone
-        if ri.ex_gw_port:
-            LOG.debug("_va_config_untrusted_zone: gw=%r", ri.ex_gw_port)
-            dev = self.get_external_device_name(ri.ex_gw_port['id'])
-            pif = self._va_get_port_name(plist, dev)
-            if pif:
-                lif = self._va_pif_2_lif(pif)
-
-                self._va_set_interface_ip(pif, ri.ex_gw_port['ip_cidr'])
-
-                body['interface'].append(lif)
-                self.rest.rest_api('PUT', va_utils.REST_URL_CONF_ZONE, body)
-                self.rest.commit()
-
-    def _va_config_router_snat_rules(self, ri, plist):
-        LOG.debug('_va_config_router_snat_rules: %s', ri.router['id'])
-
-        prefix = va_utils.get_snat_rule_name(ri)
-        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, prefix)
-
-        if not ri.enable_snat:
-            return
-
-        for idx, p in enumerate(ri.internal_ports):
-            if p['admin_state_up']:
-                dev = self.get_internal_device_name(p['id'])
-                pif = self._va_get_port_name(plist, dev)
-                if pif:
-                    net = netaddr.IPNetwork(p['ip_cidr'])
-                    body = {
-                        'name': '%s_%d' % (prefix, idx),
-                        'ingress-context-type': 'interface',
-                        'ingress-index': self._va_pif_2_lif(pif),
-                        'source-address': [
-                            [str(netaddr.IPAddress(net.first + 2)),
-                             str(netaddr.IPAddress(net.last - 1))]
-                        ],
-                        'flag': 'interface translate-source'
-                    }
-                    self.rest.rest_api('POST',
-                                       va_utils.REST_URL_CONF_NAT_RULE,
-                                       body)
-
-        if ri.internal_ports:
-            self.rest.commit()
-
-    def _va_config_floating_ips(self, ri):
-        LOG.debug('_va_config_floating_ips: %s', ri.router['id'])
-
-        prefix = va_utils.get_dnat_rule_name(ri)
-        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, prefix)
-
-        # add new dnat rules
-        for idx, fip in enumerate(ri.floating_ips):
-            body = {
-                'name': '%s_%d' % (prefix, idx),
-                'ingress-context-type': 'zone',
-                'ingress-index': va_utils.get_untrusted_zone_name(ri),
-                'destination-address': [[fip['floating_ip_address'],
-                                         fip['floating_ip_address']]],
-                'static': [fip['fixed_ip_address'], fip['fixed_ip_address']],
-                'flag': 'translate-destination'
-            }
-            self.rest.rest_api('POST', va_utils.REST_URL_CONF_NAT_RULE, body)
-
-        if ri.floating_ips:
-            self.rest.commit()
-
-    def process_router(self, ri):
-        LOG.debug("process_router: %s", ri.router['id'])
-        super(vArmourL3NATAgent, self).process_router(ri)
-
-        self.rest.auth()
-
-        # read internal port name and configuration port name map
-        resp = self.rest.rest_api('GET', va_utils.REST_URL_INTF_MAP)
-        if resp and resp['status'] == 200:
-            try:
-                plist = resp['body']['response']
-            except ValueError:
-                LOG.warn(_LW("Unable to parse interface mapping."))
-                return
-        else:
-            LOG.warn(_LW("Unable to read interface mapping."))
-            return
-
-        if ri.ex_gw_port:
-            self._set_subnet_info(ri.ex_gw_port)
-        self._va_config_trusted_zone(ri, plist)
-        self._va_config_untrusted_zone(ri, plist)
-        self._va_config_router_snat_rules(ri, plist)
-        self._va_config_floating_ips(ri)
-
-    def _handle_router_snat_rules(self, ri, ex_gw_port,
-                                  interface_name, action):
-        return
-
-    def _send_gratuitous_arp_packet(self, ri, interface_name, ip_address):
-        return
-
-    def external_gateway_added(self, ri, ex_gw_port,
-                               interface_name, internal_cidrs):
-        LOG.debug("external_gateway_added: %s", ri.router['id'])
-
-        if not ip_lib.device_exists(interface_name,
-                                    root_helper=self.root_helper,
-                                    namespace=ri.ns_name):
-            self.driver.plug(ex_gw_port['network_id'],
-                             ex_gw_port['id'], interface_name,
-                             ex_gw_port['mac_address'],
-                             bridge=self.conf.external_network_bridge,
-                             namespace=ri.ns_name,
-                             prefix=l3_agent.EXTERNAL_DEV_PREFIX)
-        self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']],
-                            namespace=ri.ns_name)
-
-    def _update_routing_table(self, ri, operation, route):
-        return
-
-
-class vArmourL3NATAgentWithStateReport(vArmourL3NATAgent,
-                                       l3_agent.L3NATAgentWithStateReport):
-    pass
-
-
-def main():
-    conf = cfg.CONF
-    conf.register_opts(vArmourL3NATAgent.OPTS)
-    conf.register_opts(l3_ha_agent.OPTS)
-    config.register_interface_driver_opts_helper(conf)
-    config.register_use_namespaces_opts_helper(conf)
-    config.register_agent_state_opts_helper(conf)
-    config.register_root_helper(conf)
-    conf.register_opts(interface.OPTS)
-    conf.register_opts(external_process.OPTS)
-    common_config.init(sys.argv[1:])
-    config.setup_logging()
-    server = neutron_service.Service.create(
-        binary='neutron-l3-agent',
-        topic=topics.L3_AGENT,
-        report_interval=cfg.CONF.AGENT.report_interval,
-        manager='neutron.services.firewall.agents.varmour.varmour_router.'
-                'vArmourL3NATAgentWithStateReport')
-    service.launch(server).wait()
diff --git a/neutron/services/firewall/agents/varmour/varmour_utils.py b/neutron/services/firewall/agents/varmour/varmour_utils.py
deleted file mode 100755 (executable)
index 1bf5072..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2013 vArmour Networks Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-ROUTER_OBJ_PREFIX = 'r-'
-OBJ_PREFIX_LEN = 8
-TRUST_ZONE = '_z_trust'
-UNTRUST_ZONE = '_z_untrust'
-SNAT_RULE = '_snat'
-DNAT_RULE = '_dnat'
-ROUTER_POLICY = '_p'
-
-REST_URL_CONF = '/config'
-REST_URL_AUTH = '/auth'
-REST_URL_COMMIT = '/commit'
-REST_URL_INTF_MAP = '/operation/interface/mapping'
-
-REST_URL_CONF_NAT_RULE = REST_URL_CONF + '/nat/rule'
-REST_URL_CONF_ZONE = REST_URL_CONF + '/zone'
-REST_URL_CONF_POLICY = REST_URL_CONF + '/policy'
-REST_URL_CONF_ADDR = REST_URL_CONF + '/address'
-REST_URL_CONF_SERVICE = REST_URL_CONF + '/service'
-
-REST_ZONE_NAME = '/zone/"name:%s"'
-REST_INTF_NAME = '/interface/"name:%s"'
-REST_LOGIC_NAME = '/logical/"name:%s"'
-REST_SERVICE_NAME = '/service/"name:%s"/rule'
-
-
-def get_router_object_prefix(ri):
-    return ROUTER_OBJ_PREFIX + ri.router['id'][:OBJ_PREFIX_LEN]
-
-
-def get_firewall_object_prefix(ri, fw):
-    return get_router_object_prefix(ri) + '-' + fw['id'][:OBJ_PREFIX_LEN]
-
-
-def get_trusted_zone_name(ri):
-    return get_router_object_prefix(ri) + TRUST_ZONE
-
-
-def get_untrusted_zone_name(ri):
-    return get_router_object_prefix(ri) + UNTRUST_ZONE
-
-
-def get_snat_rule_name(ri):
-    return get_router_object_prefix(ri) + SNAT_RULE
-
-
-def get_dnat_rule_name(ri):
-    return get_router_object_prefix(ri) + DNAT_RULE
-
-
-def get_router_policy_name(ri):
-    return get_router_object_prefix(ri) + ROUTER_POLICY
-
-
-def get_firewall_policy_name(ri, fw, rule):
-    return get_firewall_object_prefix(ri, fw) + rule['id'][:OBJ_PREFIX_LEN]
diff --git a/neutron/services/firewall/drivers/__init__.py b/neutron/services/firewall/drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/firewall/drivers/fwaas_base.py b/neutron/services/firewall/drivers/fwaas_base.py
deleted file mode 100644 (file)
index ed70c53..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2013 Dell Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class FwaasDriverBase(object):
-    """Firewall as a Service Driver base class.
-
-    Using FwaasDriver Class, an instance of L3 perimeter Firewall
-    can be created. The firewall co-exists with the L3 agent.
-
-    One instance is created for each tenant. One firewall policy
-    is associated with each tenant (in the Havana release).
-
-    The Firewall can be visualized as having two zones (in Havana
-    release), trusted and untrusted.
-
-    All the 'internal' interfaces of Neutron Router is treated as trusted. The
-    interface connected to 'external network' is treated as untrusted.
-
-    The policy is applied on traffic ingressing/egressing interfaces on
-    the trusted zone. This implies that policy will be applied for traffic
-    passing from
-        - trusted to untrusted zones
-        - untrusted to trusted zones
-        - trusted to trusted zones
-
-    Policy WILL NOT be applied for traffic from untrusted to untrusted zones.
-    This is not a problem in Havana release as there is only one interface
-    connected to external network.
-
-    Since the policy is applied on the internal interfaces, the traffic
-    will be not be NATed to floating IP. For incoming traffic, the
-    traffic will get NATed to internal IP address before it hits
-    the firewall rules. So, while writing the rules, care should be
-    taken if using rules based on floating IP.
-
-    The firewall rule addition/deletion/insertion/update are done by the
-    management console. When the policy is sent to the driver, the complete
-    policy is sent and the whole policy has to be applied atomically. The
-    firewall rules will not get updated individually. This is to avoid problems
-    related to out-of-order notifications or inconsistent behaviour by partial
-    application of rules.
-    """
-
-    @abc.abstractmethod
-    def create_firewall(self, apply_list, firewall):
-        """Create the Firewall with default (drop all) policy.
-
-        The default policy will be applied on all the interfaces of
-        trusted zone.
-        """
-        pass
-
-    @abc.abstractmethod
-    def delete_firewall(self, apply_list, firewall):
-        """Delete firewall.
-
-        Removes all policies created by this instance and frees up
-        all the resources.
-        """
-        pass
-
-    @abc.abstractmethod
-    def update_firewall(self, apply_list, firewall):
-        """Apply the policy on all trusted interfaces.
-
-        Remove previous policy and apply the new policy on all trusted
-        interfaces.
-        """
-        pass
-
-    @abc.abstractmethod
-    def apply_default_policy(self, apply_list, firewall):
-        """Apply the default policy on all trusted interfaces.
-
-        Remove current policy and apply the default policy on all trusted
-        interfaces.
-        """
-        pass
diff --git a/neutron/services/firewall/drivers/linux/__init__.py b/neutron/services/firewall/drivers/linux/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/firewall/drivers/linux/iptables_fwaas.py b/neutron/services/firewall/drivers/linux/iptables_fwaas.py
deleted file mode 100644 (file)
index 4e0dea7..0000000
+++ /dev/null
@@ -1,311 +0,0 @@
-# Copyright 2013 Dell Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.agent.linux import iptables_manager
-from neutron.extensions import firewall as fw_ext
-from neutron.i18n import _LE
-from neutron.openstack.common import log as logging
-from neutron.services.firewall.drivers import fwaas_base
-
-LOG = logging.getLogger(__name__)
-FWAAS_DRIVER_NAME = 'Fwaas iptables driver'
-FWAAS_DEFAULT_CHAIN = 'fwaas-default-policy'
-INGRESS_DIRECTION = 'ingress'
-EGRESS_DIRECTION = 'egress'
-CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i',
-                     EGRESS_DIRECTION: 'o'}
-
-""" Firewall rules are applied on internal-interfaces of Neutron router.
-    The packets ingressing tenant's network will be on the output
-    direction on internal-interfaces.
-"""
-IPTABLES_DIR = {INGRESS_DIRECTION: '-o',
-                EGRESS_DIRECTION: '-i'}
-IPV4 = 'ipv4'
-IPV6 = 'ipv6'
-IP_VER_TAG = {IPV4: 'v4',
-              IPV6: 'v6'}
-
-INTERNAL_DEV_PREFIX = 'qr-'
-SNAT_INT_DEV_PREFIX = 'sg-'
-ROUTER_2_FIP_DEV_PREFIX = 'rfp-'
-
-
-class IptablesFwaasDriver(fwaas_base.FwaasDriverBase):
-    """IPTables driver for Firewall As A Service."""
-
-    def __init__(self):
-        LOG.debug("Initializing fwaas iptables driver")
-
-    def create_firewall(self, agent_mode, apply_list, firewall):
-        LOG.debug('Creating firewall %(fw_id)s for tenant %(tid)s)',
-                  {'fw_id': firewall['id'], 'tid': firewall['tenant_id']})
-        try:
-            if firewall['admin_state_up']:
-                self._setup_firewall(agent_mode, apply_list, firewall)
-            else:
-                self.apply_default_policy(agent_mode, apply_list, firewall)
-        except (LookupError, RuntimeError):
-            # catch known library exceptions and raise Fwaas generic exception
-            LOG.exception(_LE("Failed to create firewall: %s"), firewall['id'])
-            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)
-
-    def _get_ipt_mgrs_with_if_prefix(self, agent_mode, router_info):
-        """Gets the iptables manager along with the if prefix to apply rules.
-
-        With DVR we can have differing namespaces depending on which agent
-        (on Network or Compute node). Also, there is an associated i/f for
-        each namespace. The iptables on the relevant namespace and matching
-        i/f are provided. On the Network node we could have both the snat
-        namespace and a fip so this is provided back as a list - so in that
-        scenario rules can be applied on both.
-        """
-        if not router_info.router['distributed']:
-            return [{'ipt': router_info.iptables_manager,
-                     'if_prefix': INTERNAL_DEV_PREFIX}]
-        ipt_mgrs = []
-        # TODO(sridar): refactor to get strings to a common location.
-        if agent_mode == 'dvr_snat':
-            if router_info.snat_iptables_manager:
-                ipt_mgrs.append({'ipt': router_info.snat_iptables_manager,
-                                 'if_prefix': SNAT_INT_DEV_PREFIX})
-        if router_info.dist_fip_count:
-            # handle the fip case on n/w or compute node.
-            ipt_mgrs.append({'ipt': router_info.iptables_manager,
-                             'if_prefix': ROUTER_2_FIP_DEV_PREFIX})
-        return ipt_mgrs
-
-    def delete_firewall(self, agent_mode, apply_list, firewall):
-        LOG.debug('Deleting firewall %(fw_id)s for tenant %(tid)s)',
-                  {'fw_id': firewall['id'], 'tid': firewall['tenant_id']})
-        fwid = firewall['id']
-        try:
-            for router_info in apply_list:
-                ipt_if_prefix_list = self._get_ipt_mgrs_with_if_prefix(
-                    agent_mode, router_info)
-                for ipt_if_prefix in ipt_if_prefix_list:
-                    ipt_mgr = ipt_if_prefix['ipt']
-                    self._remove_chains(fwid, ipt_mgr)
-                    self._remove_default_chains(ipt_mgr)
-                    # apply the changes immediately (no defer in firewall path)
-                    ipt_mgr.defer_apply_off()
-        except (LookupError, RuntimeError):
-            # catch known library exceptions and raise Fwaas generic exception
-            LOG.exception(_LE("Failed to delete firewall: %s"), fwid)
-            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)
-
-    def update_firewall(self, agent_mode, apply_list, firewall):
-        LOG.debug('Updating firewall %(fw_id)s for tenant %(tid)s)',
-                  {'fw_id': firewall['id'], 'tid': firewall['tenant_id']})
-        try:
-            if firewall['admin_state_up']:
-                self._setup_firewall(agent_mode, apply_list, firewall)
-            else:
-                self.apply_default_policy(agent_mode, apply_list, firewall)
-        except (LookupError, RuntimeError):
-            # catch known library exceptions and raise Fwaas generic exception
-            LOG.exception(_LE("Failed to update firewall: %s"), firewall['id'])
-            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)
-
-    def apply_default_policy(self, agent_mode, apply_list, firewall):
-        LOG.debug('Applying firewall %(fw_id)s for tenant %(tid)s)',
-                  {'fw_id': firewall['id'], 'tid': firewall['tenant_id']})
-        fwid = firewall['id']
-        try:
-            for router_info in apply_list:
-                ipt_if_prefix_list = self._get_ipt_mgrs_with_if_prefix(
-                    agent_mode, router_info)
-                for ipt_if_prefix in ipt_if_prefix_list:
-                    # the following only updates local memory; no hole in FW
-                    ipt_mgr = ipt_if_prefix['ipt']
-                    self._remove_chains(fwid, ipt_mgr)
-                    self._remove_default_chains(ipt_mgr)
-
-                    # create default 'DROP ALL' policy chain
-                    self._add_default_policy_chain_v4v6(ipt_mgr)
-                    self._enable_policy_chain(fwid, ipt_if_prefix)
-
-                    # apply the changes immediately (no defer in firewall path)
-                    ipt_mgr.defer_apply_off()
-        except (LookupError, RuntimeError):
-            # catch known library exceptions and raise Fwaas generic exception
-            LOG.exception(
-                _LE("Failed to apply default policy on firewall: %s"), fwid)
-            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)
-
-    def _setup_firewall(self, agent_mode, apply_list, firewall):
-        fwid = firewall['id']
-        for router_info in apply_list:
-            ipt_if_prefix_list = self._get_ipt_mgrs_with_if_prefix(
-                agent_mode, router_info)
-            for ipt_if_prefix in ipt_if_prefix_list:
-                ipt_mgr = ipt_if_prefix['ipt']
-                # the following only updates local memory; no hole in FW
-                self._remove_chains(fwid, ipt_mgr)
-                self._remove_default_chains(ipt_mgr)
-
-                # create default 'DROP ALL' policy chain
-                self._add_default_policy_chain_v4v6(ipt_mgr)
-                #create chain based on configured policy
-                self._setup_chains(firewall, ipt_if_prefix)
-
-                # apply the changes immediately (no defer in firewall path)
-                ipt_mgr.defer_apply_off()
-
-    def _get_chain_name(self, fwid, ver, direction):
-        return '%s%s%s' % (CHAIN_NAME_PREFIX[direction],
-                           IP_VER_TAG[ver],
-                           fwid)
-
-    def _setup_chains(self, firewall, ipt_if_prefix):
-        """Create Fwaas chain using the rules in the policy
-        """
-        fw_rules_list = firewall['firewall_rule_list']
-        fwid = firewall['id']
-        ipt_mgr = ipt_if_prefix['ipt']
-
-        #default rules for invalid packets and established sessions
-        invalid_rule = self._drop_invalid_packets_rule()
-        est_rule = self._allow_established_rule()
-
-        for ver in [IPV4, IPV6]:
-            if ver == IPV4:
-                table = ipt_mgr.ipv4['filter']
-            else:
-                table = ipt_mgr.ipv6['filter']
-            ichain_name = self._get_chain_name(fwid, ver, INGRESS_DIRECTION)
-            ochain_name = self._get_chain_name(fwid, ver, EGRESS_DIRECTION)
-            for name in [ichain_name, ochain_name]:
-                table.add_chain(name)
-                table.add_rule(name, invalid_rule)
-                table.add_rule(name, est_rule)
-
-        for rule in fw_rules_list:
-            if not rule['enabled']:
-                continue
-            iptbl_rule = self._convert_fwaas_to_iptables_rule(rule)
-            if rule['ip_version'] == 4:
-                ver = IPV4
-                table = ipt_mgr.ipv4['filter']
-            else:
-                ver = IPV6
-                table = ipt_mgr.ipv6['filter']
-            ichain_name = self._get_chain_name(fwid, ver, INGRESS_DIRECTION)
-            ochain_name = self._get_chain_name(fwid, ver, EGRESS_DIRECTION)
-            table.add_rule(ichain_name, iptbl_rule)
-            table.add_rule(ochain_name, iptbl_rule)
-        self._enable_policy_chain(fwid, ipt_if_prefix)
-
-    def _remove_default_chains(self, nsid):
-        """Remove fwaas default policy chain."""
-        self._remove_chain_by_name(IPV4, FWAAS_DEFAULT_CHAIN, nsid)
-        self._remove_chain_by_name(IPV6, FWAAS_DEFAULT_CHAIN, nsid)
-
-    def _remove_chains(self, fwid, ipt_mgr):
-        """Remove fwaas policy chain."""
-        for ver in [IPV4, IPV6]:
-            for direction in [INGRESS_DIRECTION, EGRESS_DIRECTION]:
-                chain_name = self._get_chain_name(fwid, ver, direction)
-                self._remove_chain_by_name(ver, chain_name, ipt_mgr)
-
-    def _add_default_policy_chain_v4v6(self, ipt_mgr):
-        ipt_mgr.ipv4['filter'].add_chain(FWAAS_DEFAULT_CHAIN)
-        ipt_mgr.ipv4['filter'].add_rule(FWAAS_DEFAULT_CHAIN, '-j DROP')
-        ipt_mgr.ipv6['filter'].add_chain(FWAAS_DEFAULT_CHAIN)
-        ipt_mgr.ipv6['filter'].add_rule(FWAAS_DEFAULT_CHAIN, '-j DROP')
-
-    def _remove_chain_by_name(self, ver, chain_name, ipt_mgr):
-        if ver == IPV4:
-            ipt_mgr.ipv4['filter'].remove_chain(chain_name)
-        else:
-            ipt_mgr.ipv6['filter'].remove_chain(chain_name)
-
-    def _add_rules_to_chain(self, ipt_mgr, ver, chain_name, rules):
-        if ver == IPV4:
-            table = ipt_mgr.ipv4['filter']
-        else:
-            table = ipt_mgr.ipv6['filter']
-        for rule in rules:
-            table.add_rule(chain_name, rule)
-
-    def _enable_policy_chain(self, fwid, ipt_if_prefix):
-        bname = iptables_manager.binary_name
-        ipt_mgr = ipt_if_prefix['ipt']
-        if_prefix = ipt_if_prefix['if_prefix']
-
-        for (ver, tbl) in [(IPV4, ipt_mgr.ipv4['filter']),
-                           (IPV6, ipt_mgr.ipv6['filter'])]:
-            for direction in [INGRESS_DIRECTION, EGRESS_DIRECTION]:
-                chain_name = self._get_chain_name(fwid, ver, direction)
-                chain_name = iptables_manager.get_chain_name(chain_name)
-                if chain_name in tbl.chains:
-                    jump_rule = ['%s %s+ -j %s-%s' % (IPTABLES_DIR[direction],
-                        if_prefix, bname, chain_name)]
-                    self._add_rules_to_chain(ipt_mgr,
-                        ver, 'FORWARD', jump_rule)
-
-        #jump to DROP_ALL policy
-        chain_name = iptables_manager.get_chain_name(FWAAS_DEFAULT_CHAIN)
-        jump_rule = ['-o %s+ -j %s-%s' % (if_prefix, bname, chain_name)]
-        self._add_rules_to_chain(ipt_mgr, IPV4, 'FORWARD', jump_rule)
-        self._add_rules_to_chain(ipt_mgr, IPV6, 'FORWARD', jump_rule)
-
-        #jump to DROP_ALL policy
-        chain_name = iptables_manager.get_chain_name(FWAAS_DEFAULT_CHAIN)
-        jump_rule = ['-i %s+ -j %s-%s' % (if_prefix, bname, chain_name)]
-        self._add_rules_to_chain(ipt_mgr, IPV4, 'FORWARD', jump_rule)
-        self._add_rules_to_chain(ipt_mgr, IPV6, 'FORWARD', jump_rule)
-
-    def _convert_fwaas_to_iptables_rule(self, rule):
-        action = 'ACCEPT' if rule.get('action') == 'allow' else 'DROP'
-        args = [self._protocol_arg(rule.get('protocol')),
-                self._port_arg('dport',
-                               rule.get('protocol'),
-                               rule.get('destination_port')),
-                self._port_arg('sport',
-                               rule.get('protocol'),
-                               rule.get('source_port')),
-                self._ip_prefix_arg('s', rule.get('source_ip_address')),
-                self._ip_prefix_arg('d', rule.get('destination_ip_address')),
-                self._action_arg(action)]
-
-        iptables_rule = ' '.join(args)
-        return iptables_rule
-
-    def _drop_invalid_packets_rule(self):
-        return '-m state --state INVALID -j DROP'
-
-    def _allow_established_rule(self):
-        return '-m state --state ESTABLISHED,RELATED -j ACCEPT'
-
-    def _action_arg(self, action):
-        if action:
-            return '-j %s' % action
-        return ''
-
-    def _protocol_arg(self, protocol):
-        if protocol:
-            return '-p %s' % protocol
-        return ''
-
-    def _port_arg(self, direction, protocol, port):
-        if not (protocol in ['udp', 'tcp'] and port):
-            return ''
-        return '--%s %s' % (direction, port)
-
-    def _ip_prefix_arg(self, direction, ip_prefix):
-        if ip_prefix:
-            return '-%s %s' % (direction, ip_prefix)
-        return ''
diff --git a/neutron/services/firewall/drivers/varmour/__init__.py b/neutron/services/firewall/drivers/varmour/__init__.py
deleted file mode 100755 (executable)
index e69de29..0000000
diff --git a/neutron/services/firewall/drivers/varmour/varmour_fwaas.py b/neutron/services/firewall/drivers/varmour/varmour_fwaas.py
deleted file mode 100755 (executable)
index d0ecba8..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2013 vArmour Networks Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.i18n import _LW
-from neutron.openstack.common import log as logging
-from neutron.services.firewall.agents.varmour import varmour_api
-from neutron.services.firewall.agents.varmour import varmour_utils as va_utils
-from neutron.services.firewall.drivers import fwaas_base
-
-LOG = logging.getLogger(__name__)
-
-
-class vArmourFwaasDriver(fwaas_base.FwaasDriverBase):
-    def __init__(self):
-        LOG.debug("Initializing fwaas vArmour driver")
-
-        self.rest = varmour_api.vArmourRestAPI()
-
-    def create_firewall(self, apply_list, firewall):
-        LOG.debug('create_firewall (%s)', firewall['id'])
-
-        return self.update_firewall(apply_list, firewall)
-
-    def update_firewall(self, apply_list, firewall):
-        LOG.debug("update_firewall (%s)", firewall['id'])
-
-        if firewall['admin_state_up']:
-            return self._update_firewall(apply_list, firewall)
-        else:
-            return self.apply_default_policy(apply_list, firewall)
-
-    def delete_firewall(self, apply_list, firewall):
-        LOG.debug("delete_firewall (%s)", firewall['id'])
-
-        return self.apply_default_policy(apply_list, firewall)
-
-    def apply_default_policy(self, apply_list, firewall):
-        LOG.debug("apply_default_policy (%s)", firewall['id'])
-
-        self.rest.auth()
-
-        for ri in apply_list:
-            self._clear_policy(ri, firewall)
-
-        return True
-
-    def _update_firewall(self, apply_list, firewall):
-        LOG.debug("Updating firewall (%s)", firewall['id'])
-
-        self.rest.auth()
-
-        for ri in apply_list:
-            self._clear_policy(ri, firewall)
-            self._setup_policy(ri, firewall)
-
-        return True
-
-    def _setup_policy(self, ri, fw):
-        # create zones no matter if they exist. Interfaces are added by router
-        body = {
-            'type': 'L3',
-            'interface': []
-        }
-
-        body['name'] = va_utils.get_trusted_zone_name(ri)
-        self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body)
-        body['name'] = va_utils.get_untrusted_zone_name(ri)
-        self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body)
-        self.rest.commit()
-
-        servs = dict()
-        addrs = dict()
-        for rule in fw['firewall_rule_list']:
-            if not rule['enabled']:
-                continue
-
-            if rule['ip_version'] == 4:
-                service = self._make_service(ri, fw, rule, servs)
-                s_addr = self._make_address(ri, fw, rule, addrs, True)
-                d_addr = self._make_address(ri, fw, rule, addrs, False)
-
-                policy = va_utils.get_firewall_policy_name(ri, fw, rule)
-                z0 = va_utils.get_trusted_zone_name(ri)
-                z1 = va_utils.get_untrusted_zone_name(ri)
-                body = self._make_policy(policy + '_0', rule,
-                                         z0, z0, s_addr, d_addr, service)
-                self.rest.rest_api('POST', va_utils.REST_URL_CONF_POLICY, body)
-                body = self._make_policy(policy + '_1', rule,
-                                         z0, z1, s_addr, d_addr, service)
-                self.rest.rest_api('POST', va_utils.REST_URL_CONF_POLICY, body)
-                body = self._make_policy(policy + '_2', rule,
-                                         z1, z0, s_addr, d_addr, service)
-                self.rest.rest_api('POST', va_utils.REST_URL_CONF_POLICY, body)
-
-                self.rest.commit()
-            else:
-                LOG.warn(_LW("Unsupported IP version rule."))
-
-    def _clear_policy(self, ri, fw):
-        prefix = va_utils.get_firewall_object_prefix(ri, fw)
-        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_POLICY, prefix)
-        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_ADDR, prefix)
-        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_SERVICE, prefix)
-
-    def _make_service(self, ri, fw, rule, servs):
-        prefix = va_utils.get_firewall_object_prefix(ri, fw)
-
-        if rule.get('protocol'):
-            key = rule.get('protocol')
-            if rule.get('source_port'):
-                key += '-' + rule.get('source_port')
-            if rule.get('destination_port'):
-                key += '-' + rule.get('destination_port')
-        else:
-            return
-
-        if key in servs:
-            name = '%s_%d' % (prefix, servs[key])
-        else:
-            # create new service object with index
-            idx = len(servs)
-            servs[key] = idx
-            name = '%s_%d' % (prefix, idx)
-
-            body = {'name': name}
-            self.rest.rest_api('POST',
-                               va_utils.REST_URL_CONF_SERVICE,
-                               body)
-            body = self._make_service_rule(rule)
-            self.rest.rest_api('POST',
-                               va_utils.REST_URL_CONF +
-                               va_utils.REST_SERVICE_NAME % name,
-                               body)
-            self.rest.commit()
-
-        return name
-
-    def _make_service_rule(self, rule):
-        body = {
-            'name': '1',
-            'protocol': rule.get('protocol')
-        }
-        if 'source_port' in rule:
-            body['source-start'] = rule['source_port']
-            body['source-end'] = rule['source_port']
-        if 'destination_port' in rule:
-            body['dest-start'] = rule['destination_port']
-            body['dest-end'] = rule['destination_port']
-
-        return body
-
-    def _make_address(self, ri, fw, rule, addrs, is_src):
-        prefix = va_utils.get_firewall_object_prefix(ri, fw)
-
-        if is_src:
-            key = rule.get('source_ip_address')
-        else:
-            key = rule.get('destination_ip_address')
-
-        if not key:
-            return
-
-        if key in addrs:
-            name = '%s_%d' % (prefix, addrs[key])
-        else:
-            # create new address object with idx
-            idx = len(addrs)
-            addrs[key] = idx
-            name = '%s_%d' % (prefix, idx)
-
-            body = {
-                'name': name,
-                'type': 'ipv4',
-                'ipv4': key
-            }
-            self.rest.rest_api('POST', va_utils.REST_URL_CONF_ADDR, body)
-            self.rest.commit()
-
-        return name
-
-    def _make_policy(self, name, rule, zone0, zone1, s_addr, d_addr, service):
-        body = {
-            'name': name,
-            'action': 'permit' if rule.get('action') == 'allow' else 'deny',
-            'from': zone0,
-            'to': zone1,
-            'match-source-address': [s_addr or 'Any'],
-            'match-dest-address': [d_addr or 'Any'],
-            'match-service': [service or 'Any']
-        }
-
-        return body
diff --git a/neutron/services/firewall/fwaas_plugin.py b/neutron/services/firewall/fwaas_plugin.py
deleted file mode 100644 (file)
index fa03eb3..0000000
+++ /dev/null
@@ -1,280 +0,0 @@
-# Copyright 2013 Big Switch Networks, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo.config import cfg
-from oslo import messaging
-
-from neutron.common import exceptions as n_exception
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron import context as neutron_context
-from neutron.db.firewall import firewall_db
-from neutron.extensions import firewall as fw_ext
-from neutron.i18n import _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.common import constants as const
-
-
-LOG = logging.getLogger(__name__)
-
-
-class FirewallCallbacks(object):
-    target = messaging.Target(version='1.0')
-
-    def __init__(self, plugin):
-        super(FirewallCallbacks, self).__init__()
-        self.plugin = plugin
-
-    def set_firewall_status(self, context, firewall_id, status, **kwargs):
-        """Agent uses this to set a firewall's status."""
-        LOG.debug("set_firewall_status() called")
-        with context.session.begin(subtransactions=True):
-            fw_db = self.plugin._get_firewall(context, firewall_id)
-            # ignore the status change if the firewall is being deleted:
-            # while a pending operation was in progress on the backend, the
-            # neutron server received a delete request and changed the
-            # firewall status to const.PENDING_DELETE
-            if fw_db.status == const.PENDING_DELETE:
-                LOG.debug("Firewall %(fw_id)s in PENDING_DELETE state, "
-                          "not changing to %(status)s",
-                          {'fw_id': firewall_id, 'status': status})
-                return False
-            if status in (const.ACTIVE, const.DOWN):
-                fw_db.status = status
-                return True
-            else:
-                fw_db.status = const.ERROR
-                return False
-
-    def firewall_deleted(self, context, firewall_id, **kwargs):
-        """Agent uses this to indicate firewall is deleted."""
-        LOG.debug("firewall_deleted() called")
-        with context.session.begin(subtransactions=True):
-            fw_db = self.plugin._get_firewall(context, firewall_id)
-            # allow to delete firewalls in ERROR state
-            if fw_db.status in (const.PENDING_DELETE, const.ERROR):
-                self.plugin.delete_db_firewall_object(context, firewall_id)
-                return True
-            else:
-                LOG.warn(_LW('Firewall %(fw)s unexpectedly deleted by agent, '
-                             'status was %(status)s'),
-                         {'fw': firewall_id, 'status': fw_db.status})
-                fw_db.status = const.ERROR
-                return False
-
-    def get_firewalls_for_tenant(self, context, **kwargs):
-        """Agent uses this to get all firewalls and rules for a tenant."""
-        LOG.debug("get_firewalls_for_tenant() called")
-        fw_list = [
-            self.plugin._make_firewall_dict_with_rules(context, fw['id'])
-            for fw in self.plugin.get_firewalls(context)
-        ]
-        return fw_list
-
-    def get_firewalls_for_tenant_without_rules(self, context, **kwargs):
-        """Agent uses this to get all firewalls for a tenant."""
-        LOG.debug("get_firewalls_for_tenant_without_rules() called")
-        fw_list = [fw for fw in self.plugin.get_firewalls(context)]
-        return fw_list
-
-    def get_tenants_with_firewalls(self, context, **kwargs):
-        """Agent uses this to get all tenants that have firewalls."""
-        LOG.debug("get_tenants_with_firewalls() called")
-        ctx = neutron_context.get_admin_context()
-        fw_list = self.plugin.get_firewalls(ctx)
-        fw_tenant_list = list(set(fw['tenant_id'] for fw in fw_list))
-        return fw_tenant_list
-
-
-class FirewallAgentApi(object):
-    """Plugin side of plugin to agent RPC API."""
-
-    def __init__(self, topic, host):
-        self.host = host
-        target = messaging.Target(topic=topic, version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    def create_firewall(self, context, firewall):
-        cctxt = self.client.prepare(fanout=True)
-        cctxt.cast(context, 'create_firewall', firewall=firewall,
-                   host=self.host)
-
-    def update_firewall(self, context, firewall):
-        cctxt = self.client.prepare(fanout=True)
-        cctxt.cast(context, 'update_firewall', firewall=firewall,
-                   host=self.host)
-
-    def delete_firewall(self, context, firewall):
-        cctxt = self.client.prepare(fanout=True)
-        cctxt.cast(context, 'delete_firewall', firewall=firewall,
-                   host=self.host)
-
-
-class FirewallCountExceeded(n_exception.Conflict):
-
-    """Reference implementation specific exception for firewall count.
-
-    Only one firewall is supported per tenant. When an attempt is made to
-    create a second firewall, this exception will be raised.
-    """
-    message = _("Exceeded allowed count of firewalls for tenant "
-                "%(tenant_id)s. Only one firewall is supported per tenant.")
-
-
-class FirewallPlugin(firewall_db.Firewall_db_mixin):
-
-    """Implementation of the Neutron Firewall Service Plugin.
-
-    This class manages the workflow of FWaaS request/response.
-    Most DB related works are implemented in class
-    firewall_db.Firewall_db_mixin.
-    """
-    supported_extension_aliases = ["fwaas"]
-
-    def __init__(self):
-        """Do the initialization for the firewall service plugin here."""
-
-        self.endpoints = [FirewallCallbacks(self)]
-
-        self.conn = n_rpc.create_connection(new=True)
-        self.conn.create_consumer(
-            topics.FIREWALL_PLUGIN, self.endpoints, fanout=False)
-        self.conn.consume_in_threads()
-
-        self.agent_rpc = FirewallAgentApi(
-            topics.L3_AGENT,
-            cfg.CONF.host
-        )
-
-    def _make_firewall_dict_with_rules(self, context, firewall_id):
-        firewall = self.get_firewall(context, firewall_id)
-        fw_policy_id = firewall['firewall_policy_id']
-        if fw_policy_id:
-            fw_policy = self.get_firewall_policy(context, fw_policy_id)
-            fw_rules_list = [self.get_firewall_rule(
-                context, rule_id) for rule_id in fw_policy['firewall_rules']]
-            firewall['firewall_rule_list'] = fw_rules_list
-        else:
-            firewall['firewall_rule_list'] = []
-        # FIXME(Sumit): If the size of the firewall object we are creating
-        # here exceeds the largest message size supported by rabbit/qpid
-        # then we will have a problem.
-        return firewall
-
-    def _rpc_update_firewall(self, context, firewall_id):
-        status_update = {"firewall": {"status": const.PENDING_UPDATE}}
-        super(FirewallPlugin, self).update_firewall(context, firewall_id,
-                                                    status_update)
-        fw_with_rules = self._make_firewall_dict_with_rules(context,
-                                                            firewall_id)
-        self.agent_rpc.update_firewall(context, fw_with_rules)
-
-    def _rpc_update_firewall_policy(self, context, firewall_policy_id):
-        firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
-        if firewall_policy:
-            for firewall_id in firewall_policy['firewall_list']:
-                self._rpc_update_firewall(context, firewall_id)
-
-    def _ensure_update_firewall(self, context, firewall_id):
-        fwall = self.get_firewall(context, firewall_id)
-        if fwall['status'] in [const.PENDING_CREATE,
-                               const.PENDING_UPDATE,
-                               const.PENDING_DELETE]:
-            raise fw_ext.FirewallInPendingState(firewall_id=firewall_id,
-                                                pending_state=fwall['status'])
-
-    def _ensure_update_firewall_policy(self, context, firewall_policy_id):
-        firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
-        if firewall_policy and 'firewall_list' in firewall_policy:
-            for firewall_id in firewall_policy['firewall_list']:
-                self._ensure_update_firewall(context, firewall_id)
-
-    def _ensure_update_firewall_rule(self, context, firewall_rule_id):
-        fw_rule = self.get_firewall_rule(context, firewall_rule_id)
-        if 'firewall_policy_id' in fw_rule and fw_rule['firewall_policy_id']:
-            self._ensure_update_firewall_policy(context,
-                                                fw_rule['firewall_policy_id'])
-
-    def create_firewall(self, context, firewall):
-        LOG.debug("create_firewall() called")
-        tenant_id = self._get_tenant_id_for_create(context,
-                                                   firewall['firewall'])
-        fw_count = self.get_firewalls_count(context,
-                                            filters={'tenant_id': [tenant_id]})
-        if fw_count:
-            raise FirewallCountExceeded(tenant_id=tenant_id)
-        fw = super(FirewallPlugin, self).create_firewall(context, firewall)
-        fw_with_rules = (
-            self._make_firewall_dict_with_rules(context, fw['id']))
-        self.agent_rpc.create_firewall(context, fw_with_rules)
-        return fw
-
-    def update_firewall(self, context, id, firewall):
-        LOG.debug("update_firewall() called")
-        self._ensure_update_firewall(context, id)
-        firewall['firewall']['status'] = const.PENDING_UPDATE
-        fw = super(FirewallPlugin, self).update_firewall(context, id, firewall)
-        fw_with_rules = (
-            self._make_firewall_dict_with_rules(context, fw['id']))
-        self.agent_rpc.update_firewall(context, fw_with_rules)
-        return fw
-
-    def delete_db_firewall_object(self, context, id):
-        firewall = self.get_firewall(context, id)
-        if firewall['status'] == const.PENDING_DELETE:
-            super(FirewallPlugin, self).delete_firewall(context, id)
-
-    def delete_firewall(self, context, id):
-        LOG.debug("delete_firewall() called")
-        status_update = {"firewall": {"status": const.PENDING_DELETE}}
-        fw = super(FirewallPlugin, self).update_firewall(context, id,
-                                                         status_update)
-        fw_with_rules = (
-            self._make_firewall_dict_with_rules(context, fw['id']))
-        self.agent_rpc.delete_firewall(context, fw_with_rules)
-
-    def update_firewall_policy(self, context, id, firewall_policy):
-        LOG.debug("update_firewall_policy() called")
-        self._ensure_update_firewall_policy(context, id)
-        fwp = super(FirewallPlugin,
-                    self).update_firewall_policy(context, id, firewall_policy)
-        self._rpc_update_firewall_policy(context, id)
-        return fwp
-
-    def update_firewall_rule(self, context, id, firewall_rule):
-        LOG.debug("update_firewall_rule() called")
-        self._ensure_update_firewall_rule(context, id)
-        fwr = super(FirewallPlugin,
-                    self).update_firewall_rule(context, id, firewall_rule)
-        firewall_policy_id = fwr['firewall_policy_id']
-        if firewall_policy_id:
-            self._rpc_update_firewall_policy(context, firewall_policy_id)
-        return fwr
-
-    def insert_rule(self, context, id, rule_info):
-        LOG.debug("insert_rule() called")
-        self._ensure_update_firewall_policy(context, id)
-        fwp = super(FirewallPlugin,
-                    self).insert_rule(context, id, rule_info)
-        self._rpc_update_firewall_policy(context, id)
-        return fwp
-
-    def remove_rule(self, context, id, rule_info):
-        LOG.debug("remove_rule() called")
-        self._ensure_update_firewall_policy(context, id)
-        fwp = super(FirewallPlugin,
-                    self).remove_rule(context, id, rule_info)
-        self._rpc_update_firewall_policy(context, id)
-        return fwp
diff --git a/neutron/services/loadbalancer/agent/__init__.py b/neutron/services/loadbalancer/agent/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/loadbalancer/agent/agent.py b/neutron/services/loadbalancer/agent/agent.py
deleted file mode 100644 (file)
index bc7bfe2..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-import eventlet
-eventlet.monkey_patch()
-
-from oslo.config import cfg
-
-from neutron.agent.common import config
-from neutron.agent.linux import interface
-from neutron.common import config as common_config
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.openstack.common import service
-from neutron.services.loadbalancer.agent import agent_manager as manager
-
-OPTS = [
-    cfg.IntOpt(
-        'periodic_interval',
-        default=10,
-        help=_('Seconds between periodic task runs')
-    )
-]
-
-
-class LbaasAgentService(n_rpc.Service):
-    def start(self):
-        super(LbaasAgentService, self).start()
-        self.tg.add_timer(
-            cfg.CONF.periodic_interval,
-            self.manager.run_periodic_tasks,
-            None,
-            None
-        )
-
-
-def main():
-    cfg.CONF.register_opts(OPTS)
-    cfg.CONF.register_opts(manager.OPTS)
-    # import interface options just in case the driver uses namespaces
-    cfg.CONF.register_opts(interface.OPTS)
-    config.register_interface_driver_opts_helper(cfg.CONF)
-    config.register_agent_state_opts_helper(cfg.CONF)
-    config.register_root_helper(cfg.CONF)
-
-    common_config.init(sys.argv[1:])
-    config.setup_logging()
-
-    mgr = manager.LbaasAgentManager(cfg.CONF)
-    svc = LbaasAgentService(
-        host=cfg.CONF.host,
-        topic=topics.LOADBALANCER_AGENT,
-        manager=mgr
-    )
-    service.launch(svc).wait()
diff --git a/neutron/services/loadbalancer/agent/agent_api.py b/neutron/services/loadbalancer/agent/agent_api.py
deleted file mode 100644 (file)
index 83c5129..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo import messaging
-
-from neutron.common import rpc as n_rpc
-
-
-class LbaasAgentApi(object):
-    """Agent side of the Agent to Plugin RPC API."""
-
-    # history
-    #   1.0 Initial version
-    #   2.0 Generic API for agent based drivers
-    #       - get_logical_device() handling changed on plugin side;
-    #       - pool_deployed() and update_status() methods added;
-
-    def __init__(self, topic, context, host):
-        self.context = context
-        self.host = host
-        target = messaging.Target(topic=topic, version='2.0')
-        self.client = n_rpc.get_client(target)
-
-    def get_ready_devices(self):
-        cctxt = self.client.prepare()
-        return cctxt.call(self.context, 'get_ready_devices', host=self.host)
-
-    def pool_destroyed(self, pool_id):
-        cctxt = self.client.prepare()
-        return cctxt.call(self.context, 'pool_destroyed', pool_id=pool_id)
-
-    def pool_deployed(self, pool_id):
-        cctxt = self.client.prepare()
-        return cctxt.call(self.context, 'pool_deployed', pool_id=pool_id)
-
-    def get_logical_device(self, pool_id):
-        cctxt = self.client.prepare()
-        return cctxt.call(self.context, 'get_logical_device', pool_id=pool_id)
-
-    def update_status(self, obj_type, obj_id, status):
-        cctxt = self.client.prepare()
-        return cctxt.call(self.context, 'update_status', obj_type=obj_type,
-                          obj_id=obj_id, status=status)
-
-    def plug_vip_port(self, port_id):
-        cctxt = self.client.prepare()
-        return cctxt.call(self.context, 'plug_vip_port', port_id=port_id,
-                          host=self.host)
-
-    def unplug_vip_port(self, port_id):
-        cctxt = self.client.prepare()
-        return cctxt.call(self.context, 'unplug_vip_port', port_id=port_id,
-                          host=self.host)
-
-    def update_pool_stats(self, pool_id, stats):
-        cctxt = self.client.prepare()
-        return cctxt.call(self.context, 'update_pool_stats', pool_id=pool_id,
-                          stats=stats, host=self.host)
diff --git a/neutron/services/loadbalancer/agent/agent_device_driver.py b/neutron/services/loadbalancer/agent/agent_device_driver.py
deleted file mode 100644 (file)
index ad65bcf..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2013 OpenStack Foundation.  All rights reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class AgentDeviceDriver(object):
-    """Abstract device driver that defines the API required by LBaaS agent."""
-
-    @abc.abstractmethod
-    def get_name(cls):
-        """Returns unique name across all LBaaS device drivers."""
-        pass
-
-    @abc.abstractmethod
-    def deploy_instance(self, logical_config):
-        """Fully deploys a loadbalancer instance from a given config."""
-        pass
-
-    @abc.abstractmethod
-    def undeploy_instance(self, pool_id):
-        """Fully undeploys the loadbalancer instance."""
-        pass
-
-    @abc.abstractmethod
-    def get_stats(self, pool_id):
-        pass
-
-    def remove_orphans(self, known_pool_ids):
-        # Not all drivers will support this
-        raise NotImplementedError()
-
-    @abc.abstractmethod
-    def create_vip(self, vip):
-        pass
-
-    @abc.abstractmethod
-    def update_vip(self, old_vip, vip):
-        pass
-
-    @abc.abstractmethod
-    def delete_vip(self, vip):
-        pass
-
-    @abc.abstractmethod
-    def create_pool(self, pool):
-        pass
-
-    @abc.abstractmethod
-    def update_pool(self, old_pool, pool):
-        pass
-
-    @abc.abstractmethod
-    def delete_pool(self, pool):
-        pass
-
-    @abc.abstractmethod
-    def create_member(self, member):
-        pass
-
-    @abc.abstractmethod
-    def update_member(self, old_member, member):
-        pass
-
-    @abc.abstractmethod
-    def delete_member(self, member):
-        pass
-
-    @abc.abstractmethod
-    def create_pool_health_monitor(self, health_monitor, pool_id):
-        pass
-
-    @abc.abstractmethod
-    def update_pool_health_monitor(self,
-                                   old_health_monitor,
-                                   health_monitor,
-                                   pool_id):
-        pass
-
-    @abc.abstractmethod
-    def delete_pool_health_monitor(self, health_monitor, pool_id):
-        pass
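For illustration only, a minimal concrete driver satisfying this abstract API could look roughly like the no-op sketch below; the class name and stub bodies are hypothetical and are not taken from this diff.

class NoopDeviceDriver(AgentDeviceDriver):
    # assumes the AgentDeviceDriver base class above is importable

    def get_name(cls):
        return 'noop'

    def deploy_instance(self, logical_config):
        pass

    def undeploy_instance(self, pool_id):
        pass

    def get_stats(self, pool_id):
        return {}

    def create_vip(self, vip):
        pass

    def update_vip(self, old_vip, vip):
        pass

    def delete_vip(self, vip):
        pass

    def create_pool(self, pool):
        pass

    def update_pool(self, old_pool, pool):
        pass

    def delete_pool(self, pool):
        pass

    def create_member(self, member):
        pass

    def update_member(self, old_member, member):
        pass

    def delete_member(self, member):
        pass

    def create_pool_health_monitor(self, health_monitor, pool_id):
        pass

    def update_pool_health_monitor(self, old_health_monitor,
                                   health_monitor, pool_id):
        pass

    def delete_pool_health_monitor(self, health_monitor, pool_id):
        pass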
diff --git a/neutron/services/loadbalancer/agent/agent_manager.py b/neutron/services/loadbalancer/agent/agent_manager.py
deleted file mode 100644 (file)
index 1f39128..0000000
+++ /dev/null
@@ -1,336 +0,0 @@
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo.config import cfg
-from oslo import messaging
-from oslo.utils import importutils
-
-from neutron.agent import rpc as agent_rpc
-from neutron.common import constants as n_const
-from neutron.common import exceptions as n_exc
-from neutron.common import topics
-from neutron import context
-from neutron.i18n import _LE, _LI
-from neutron.openstack.common import log as logging
-from neutron.openstack.common import loopingcall
-from neutron.openstack.common import periodic_task
-from neutron.plugins.common import constants
-from neutron.services.loadbalancer.agent import agent_api
-
-LOG = logging.getLogger(__name__)
-
-OPTS = [
-    cfg.MultiStrOpt(
-        'device_driver',
-        default=['neutron.services.loadbalancer.drivers'
-                 '.haproxy.namespace_driver.HaproxyNSDriver'],
-        help=_('Drivers used to manage loadbalancing devices'),
-    ),
-]
-
-
-class DeviceNotFoundOnAgent(n_exc.NotFound):
-    msg = _('Unknown device with pool_id %(pool_id)s')
-
-
-class LbaasAgentManager(periodic_task.PeriodicTasks):
-
-    # history
-    #   1.0 Initial version
-    #   1.1 Support agent_updated call
-    #   2.0 Generic API for agent based drivers
-    #       - modify/reload/destroy_pool methods were removed;
-    #       - added methods to handle create/update/delete for every lbaas
-    #       object individually;
-    target = messaging.Target(version='2.0')
-
-    def __init__(self, conf):
-        super(LbaasAgentManager, self).__init__()
-        self.conf = conf
-        self.context = context.get_admin_context_without_session()
-        self.plugin_rpc = agent_api.LbaasAgentApi(
-            topics.LOADBALANCER_PLUGIN,
-            self.context,
-            self.conf.host
-        )
-        self._load_drivers()
-
-        self.agent_state = {
-            'binary': 'neutron-lbaas-agent',
-            'host': conf.host,
-            'topic': topics.LOADBALANCER_AGENT,
-            'configurations': {'device_drivers': self.device_drivers.keys()},
-            'agent_type': n_const.AGENT_TYPE_LOADBALANCER,
-            'start_flag': True}
-        self.admin_state_up = True
-
-        self._setup_state_rpc()
-        self.needs_resync = False
-        # pool_id->device_driver_name mapping used to store known instances
-        self.instance_mapping = {}
-
-    def _load_drivers(self):
-        self.device_drivers = {}
-        for driver in self.conf.device_driver:
-            try:
-                driver_inst = importutils.import_object(
-                    driver,
-                    self.conf,
-                    self.plugin_rpc
-                )
-            except ImportError:
-                msg = _('Error importing loadbalancer device driver: %s')
-                raise SystemExit(msg % driver)
-
-            driver_name = driver_inst.get_name()
-            if driver_name not in self.device_drivers:
-                self.device_drivers[driver_name] = driver_inst
-            else:
-                msg = _('Multiple device drivers with the same name found: %s')
-                raise SystemExit(msg % driver_name)
-
-    def _setup_state_rpc(self):
-        self.state_rpc = agent_rpc.PluginReportStateAPI(
-            topics.LOADBALANCER_PLUGIN)
-        report_interval = self.conf.AGENT.report_interval
-        if report_interval:
-            heartbeat = loopingcall.FixedIntervalLoopingCall(
-                self._report_state)
-            heartbeat.start(interval=report_interval)
-
-    def _report_state(self):
-        try:
-            instance_count = len(self.instance_mapping)
-            self.agent_state['configurations']['instances'] = instance_count
-            self.state_rpc.report_state(self.context,
-                                        self.agent_state)
-            self.agent_state.pop('start_flag', None)
-        except Exception:
-            LOG.exception(_LE("Failed reporting state!"))
-
-    def initialize_service_hook(self, started_by):
-        self.sync_state()
-
-    @periodic_task.periodic_task
-    def periodic_resync(self, context):
-        if self.needs_resync:
-            self.needs_resync = False
-            self.sync_state()
-
-    @periodic_task.periodic_task(spacing=6)
-    def collect_stats(self, context):
-        for pool_id, driver_name in self.instance_mapping.items():
-            driver = self.device_drivers[driver_name]
-            try:
-                stats = driver.get_stats(pool_id)
-                if stats:
-                    self.plugin_rpc.update_pool_stats(pool_id, stats)
-            except Exception:
-                LOG.exception(_LE('Error updating statistics on pool %s'),
-                              pool_id)
-                self.needs_resync = True
-
-    def sync_state(self):
-        known_instances = set(self.instance_mapping.keys())
-        try:
-            ready_instances = set(self.plugin_rpc.get_ready_devices())
-
-            for deleted_id in known_instances - ready_instances:
-                self._destroy_pool(deleted_id)
-
-            for pool_id in ready_instances:
-                self._reload_pool(pool_id)
-
-        except Exception:
-            LOG.exception(_LE('Unable to retrieve ready devices'))
-            self.needs_resync = True
-
-        self.remove_orphans()
-
-    def _get_driver(self, pool_id):
-        if pool_id not in self.instance_mapping:
-            raise DeviceNotFoundOnAgent(pool_id=pool_id)
-
-        driver_name = self.instance_mapping[pool_id]
-        return self.device_drivers[driver_name]
-
-    def _reload_pool(self, pool_id):
-        try:
-            logical_config = self.plugin_rpc.get_logical_device(pool_id)
-            driver_name = logical_config['driver']
-            if driver_name not in self.device_drivers:
-                LOG.error(_LE('No device driver on agent: %s.'), driver_name)
-                self.plugin_rpc.update_status(
-                    'pool', pool_id, constants.ERROR)
-                return
-
-            self.device_drivers[driver_name].deploy_instance(logical_config)
-            self.instance_mapping[pool_id] = driver_name
-            self.plugin_rpc.pool_deployed(pool_id)
-        except Exception:
-            LOG.exception(_LE('Unable to deploy instance for pool: %s'),
-                          pool_id)
-            self.needs_resync = True
-
-    def _destroy_pool(self, pool_id):
-        driver = self._get_driver(pool_id)
-        try:
-            driver.undeploy_instance(pool_id)
-            del self.instance_mapping[pool_id]
-            self.plugin_rpc.pool_destroyed(pool_id)
-        except Exception:
-            LOG.exception(_LE('Unable to destroy device for pool: %s'),
-                          pool_id)
-            self.needs_resync = True
-
-    def remove_orphans(self):
-        for driver_name in self.device_drivers:
-            pool_ids = [pool_id for pool_id in self.instance_mapping
-                        if self.instance_mapping[pool_id] == driver_name]
-            try:
-                self.device_drivers[driver_name].remove_orphans(pool_ids)
-            except NotImplementedError:
-                pass  # Not all drivers will support this
-
-    def _handle_failed_driver_call(self, operation, obj_type, obj_id, driver):
-        LOG.exception(_LE('%(operation)s %(obj)s %(id)s failed on device '
-                          'driver %(driver)s'),
-                      {'operation': operation.capitalize(), 'obj': obj_type,
-                       'id': obj_id, 'driver': driver})
-        self.plugin_rpc.update_status(obj_type, obj_id, constants.ERROR)
-
-    def create_vip(self, context, vip):
-        driver = self._get_driver(vip['pool_id'])
-        try:
-            driver.create_vip(vip)
-        except Exception:
-            self._handle_failed_driver_call('create', 'vip', vip['id'],
-                                            driver.get_name())
-        else:
-            self.plugin_rpc.update_status('vip', vip['id'], constants.ACTIVE)
-
-    def update_vip(self, context, old_vip, vip):
-        driver = self._get_driver(vip['pool_id'])
-        try:
-            driver.update_vip(old_vip, vip)
-        except Exception:
-            self._handle_failed_driver_call('update', 'vip', vip['id'],
-                                            driver.get_name())
-        else:
-            self.plugin_rpc.update_status('vip', vip['id'], constants.ACTIVE)
-
-    def delete_vip(self, context, vip):
-        driver = self._get_driver(vip['pool_id'])
-        driver.delete_vip(vip)
-
-    def create_pool(self, context, pool, driver_name):
-        if driver_name not in self.device_drivers:
-            LOG.error(_LE('No device driver on agent: %s.'), driver_name)
-            self.plugin_rpc.update_status('pool', pool['id'], constants.ERROR)
-            return
-
-        driver = self.device_drivers[driver_name]
-        try:
-            driver.create_pool(pool)
-        except Exception:
-            self._handle_failed_driver_call('create', 'pool', pool['id'],
-                                            driver.get_name())
-        else:
-            self.instance_mapping[pool['id']] = driver_name
-            self.plugin_rpc.update_status('pool', pool['id'], constants.ACTIVE)
-
-    def update_pool(self, context, old_pool, pool):
-        driver = self._get_driver(pool['id'])
-        try:
-            driver.update_pool(old_pool, pool)
-        except Exception:
-            self._handle_failed_driver_call('update', 'pool', pool['id'],
-                                            driver.get_name())
-        else:
-            self.plugin_rpc.update_status('pool', pool['id'], constants.ACTIVE)
-
-    def delete_pool(self, context, pool):
-        driver = self._get_driver(pool['id'])
-        driver.delete_pool(pool)
-        del self.instance_mapping[pool['id']]
-
-    def create_member(self, context, member):
-        driver = self._get_driver(member['pool_id'])
-        try:
-            driver.create_member(member)
-        except Exception:
-            self._handle_failed_driver_call('create', 'member', member['id'],
-                                            driver.get_name())
-        else:
-            self.plugin_rpc.update_status('member', member['id'],
-                                          constants.ACTIVE)
-
-    def update_member(self, context, old_member, member):
-        driver = self._get_driver(member['pool_id'])
-        try:
-            driver.update_member(old_member, member)
-        except Exception:
-            self._handle_failed_driver_call('update', 'member', member['id'],
-                                            driver.get_name())
-        else:
-            self.plugin_rpc.update_status('member', member['id'],
-                                          constants.ACTIVE)
-
-    def delete_member(self, context, member):
-        driver = self._get_driver(member['pool_id'])
-        driver.delete_member(member)
-
-    def create_pool_health_monitor(self, context, health_monitor, pool_id):
-        driver = self._get_driver(pool_id)
-        assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']}
-        try:
-            driver.create_pool_health_monitor(health_monitor, pool_id)
-        except Exception:
-            self._handle_failed_driver_call(
-                'create', 'health_monitor', assoc_id, driver.get_name())
-        else:
-            self.plugin_rpc.update_status(
-                'health_monitor', assoc_id, constants.ACTIVE)
-
-    def update_pool_health_monitor(self, context, old_health_monitor,
-                                   health_monitor, pool_id):
-        driver = self._get_driver(pool_id)
-        assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']}
-        try:
-            driver.update_pool_health_monitor(old_health_monitor,
-                                              health_monitor,
-                                              pool_id)
-        except Exception:
-            self._handle_failed_driver_call(
-                'update', 'health_monitor', assoc_id, driver.get_name())
-        else:
-            self.plugin_rpc.update_status(
-                'health_monitor', assoc_id, constants.ACTIVE)
-
-    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
-        driver = self._get_driver(pool_id)
-        driver.delete_pool_health_monitor(health_monitor, pool_id)
-
-    def agent_updated(self, context, payload):
-        """Handle the agent_updated notification event."""
-        if payload['admin_state_up'] != self.admin_state_up:
-            self.admin_state_up = payload['admin_state_up']
-            if self.admin_state_up:
-                self.needs_resync = True
-            else:
-                for pool_id in self.instance_mapping.keys():
-                    LOG.info(_LI("Destroying pool %s due to agent disabling"),
-                             pool_id)
-                    self._destroy_pool(pool_id)
-            LOG.info(_LI("Agent_updated by server side %s!"), payload)
diff --git a/neutron/services/loadbalancer/drivers/a10networks/README.txt b/neutron/services/loadbalancer/drivers/a10networks/README.txt
deleted file mode 100644 (file)
index 81c2854..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-A10 Networks LBaaS Driver
-
-Installation info:
-
-To use this driver, you must:
-- Install the a10-neutron-lbaas module. (E.g.: 'pip install a10-neutron-lbaas')
-- Create a driver config file, a sample of which is given below.
-- Enable it in neutron.conf
-- Restart neutron-server
-
-Third-party CI info:
-
-Contact info for any problems is: a10-openstack-ci at a10networks dot com
-Or contact Doug Wiegley directly (IRC: dougwig)
-
-Configuration file:
-
-Create a configuration file with a list of A10 appliances, similar to the
-file below, located at:
-/etc/neutron/services/loadbalancer/a10networks/config.py
-
-Or you can override that directory by setting the environment
-variable A10_CONFIG_DIR.
-
-Example config file:
-
-devices = {
-    "ax1": {
-        "name": "ax1",
-        "host": "10.10.100.20",
-        "port": 443,
-        "protocol": "https",
-        "username": "admin",
-        "password": "a10",
-        "status": True,
-        "autosnat": False,
-        "api_version": "2.1",
-        "v_method": "LSI",
-        "max_instance": 5000,
-        "use_float": False,
-        "method": "hash"
-    },
-    "ax4": {
-        "host": "10.10.100.23",
-        "username": "admin",
-        "password": "a10",
-    },
-}
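For reference, the "Enable it in neutron.conf" step above usually amounts to a service_provider entry pointing at the ThunderDriver class from driver_v1.py. A hedged sketch; the provider label "A10Networks" is illustrative and the exact entry may differ per deployment:

[service_providers]
service_provider = LOADBALANCER:A10Networks:neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default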
diff --git a/neutron/services/loadbalancer/drivers/a10networks/__init__.py b/neutron/services/loadbalancer/drivers/a10networks/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/loadbalancer/drivers/a10networks/driver_v1.py b/neutron/services/loadbalancer/drivers/a10networks/driver_v1.py
deleted file mode 100644 (file)
index 4168193..0000000
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright 2014, Doug Wiegley (dougwig), A10 Networks
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import a10_neutron_lbaas
-
-from neutron.db import l3_db
-from neutron.db.loadbalancer import loadbalancer_db as lb_db
-from neutron.openstack.common import log as logging
-from neutron.plugins.common import constants
-from neutron.services.loadbalancer.drivers import abstract_driver
-
-VERSION = "1.0.0"
-LOG = logging.getLogger(__name__)
-
-
-# Most driver calls below are straight passthroughs to the A10 package
-# 'a10_neutron_lbaas'.  Any function that has not been fully abstracted
-# into the openstack driver/plugin interface is NOT passed through, to
-# make it obvious which hidden interfaces/db calls we rely on.
-
-class ThunderDriver(abstract_driver.LoadBalancerAbstractDriver):
-
-    def __init__(self, plugin):
-        LOG.debug("A10Driver: init version=%s", VERSION)
-        self.plugin = plugin
-
-        # Map the string types to neutron classes/functions, in order to keep
-        # from reaching into the bowels of Neutron from anywhere but this file.
-        self.neutron_map = {
-            'member': {
-                'model': lb_db.Member,
-                'delete_func': self.plugin._delete_db_member,
-            },
-            'pool': {
-                'model': lb_db.Pool,
-                'delete_func': self.plugin._delete_db_pool,
-            },
-            'vip': {
-                'model': lb_db.Vip,
-                'delete_func': self.plugin._delete_db_vip,
-            },
-        }
-
-        LOG.debug("A10Driver: initializing, version=%s, lbaas_manager=%s",
-                  VERSION, a10_neutron_lbaas.VERSION)
-
-        self.a10 = a10_neutron_lbaas.A10OpenstackLBV1(self)
-
-    # The following private helper methods are used by a10_neutron_lbaas,
-    # and reflect the neutron interfaces required by that package.
-
-    def _hm_binding_count(self, context, hm_id):
-        return context.session.query(lb_db.PoolMonitorAssociation).filter_by(
-            monitor_id=hm_id).join(lb_db.Pool).count()
-
-    def _member_count(self, context, member):
-        return context.session.query(lb_db.Member).filter_by(
-            tenant_id=member['tenant_id'],
-            address=member['address']).count()
-
-    def _member_get(self, context, member_id):
-        return self.plugin.get_member(context, member_id)
-
-    def _member_get_ip(self, context, member, use_float=False):
-        ip_address = member['address']
-        if use_float:
-            fip_qry = context.session.query(l3_db.FloatingIP)
-            if (fip_qry.filter_by(fixed_ip_address=ip_address).count() > 0):
-                float_address = fip_qry.filter_by(
-                    fixed_ip_address=ip_address).first()
-                ip_address = str(float_address.floating_ip_address)
-        return ip_address
-
-    def _pool_get_hm(self, context, hm_id):
-        return self.plugin.get_health_monitor(context, hm_id)
-
-    def _pool_get_tenant_id(self, context, pool_id):
-        pool_qry = context.session.query(lb_db.Pool).filter_by(id=pool_id)
-        z = pool_qry.first()
-        if z:
-            return z.tenant_id
-        else:
-            return ''
-
-    def _pool_get_vip_id(self, context, pool_id):
-        pool_qry = context.session.query(lb_db.Pool).filter_by(id=pool_id)
-        z = pool_qry.first()
-        if z:
-            return z.vip_id
-        else:
-            return ''
-
-    def _pool_total(self, context, tenant_id):
-        return context.session.query(lb_db.Pool).filter_by(
-            tenant_id=tenant_id).count()
-
-    def _vip_get(self, context, vip_id):
-        return self.plugin.get_vip(context, vip_id)
-
-    def _active(self, context, model_type, model_id):
-        self.plugin.update_status(context,
-                                  self.neutron_map[model_type]['model'],
-                                  model_id,
-                                  constants.ACTIVE)
-
-    def _failed(self, context, model_type, model_id):
-        self.plugin.update_status(context,
-                                  self.neutron_map[model_type]['model'],
-                                  model_id,
-                                  constants.ERROR)
-
-    def _db_delete(self, context, model_type, model_id):
-        self.neutron_map[model_type]['delete_func'](context, model_id)
-
-    def _hm_active(self, context, hm_id, pool_id):
-        self.plugin.update_pool_health_monitor(context, hm_id, pool_id,
-                                               constants.ACTIVE)
-
-    def _hm_failed(self, context, hm_id, pool_id):
-        self.plugin.update_pool_health_monitor(context, hm_id, pool_id,
-                                               constants.ERROR)
-
-    def _hm_db_delete(self, context, hm_id, pool_id):
-        self.plugin._delete_db_pool_health_monitor(context, hm_id, pool_id)
-
-    # Pass-through driver
-
-    def create_vip(self, context, vip):
-        self.a10.vip.create(context, vip)
-
-    def update_vip(self, context, old_vip, vip):
-        self.a10.vip.update(context, old_vip, vip)
-
-    def delete_vip(self, context, vip):
-        self.a10.vip.delete(context, vip)
-
-    def create_pool(self, context, pool):
-        self.a10.pool.create(context, pool)
-
-    def update_pool(self, context, old_pool, pool):
-        self.a10.pool.update(context, old_pool, pool)
-
-    def delete_pool(self, context, pool):
-        self.a10.pool.delete(context, pool)
-
-    def stats(self, context, pool_id):
-        return self.a10.pool.stats(context, pool_id)
-
-    def create_member(self, context, member):
-        self.a10.member.create(context, member)
-
-    def update_member(self, context, old_member, member):
-        self.a10.member.update(context, old_member, member)
-
-    def delete_member(self, context, member):
-        self.a10.member.delete(context, member)
-
-    def update_pool_health_monitor(self, context, old_hm, hm, pool_id):
-        self.a10.hm.update(context, old_hm, hm, pool_id)
-
-    def create_pool_health_monitor(self, context, hm, pool_id):
-        self.a10.hm.create(context, hm, pool_id)
-
-    def delete_pool_health_monitor(self, context, hm, pool_id):
-        self.a10.hm.delete(context, hm, pool_id)
diff --git a/neutron/services/loadbalancer/drivers/common/__init__.py b/neutron/services/loadbalancer/drivers/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/loadbalancer/drivers/common/agent_driver_base.py b/neutron/services/loadbalancer/drivers/common/agent_driver_base.py
deleted file mode 100644 (file)
index 9d7f5f0..0000000
+++ /dev/null
@@ -1,440 +0,0 @@
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import uuid
-
-from oslo.config import cfg
-from oslo import messaging
-from oslo.utils import importutils
-
-from neutron.common import constants as q_const
-from neutron.common import exceptions as n_exc
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.db import agents_db
-from neutron.db.loadbalancer import loadbalancer_db
-from neutron.extensions import lbaas_agentscheduler
-from neutron.extensions import portbindings
-from neutron.i18n import _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.common import constants
-from neutron.services.loadbalancer.drivers import abstract_driver
-
-LOG = logging.getLogger(__name__)
-
-AGENT_SCHEDULER_OPTS = [
-    cfg.StrOpt('loadbalancer_pool_scheduler_driver',
-               default='neutron.services.loadbalancer.agent_scheduler'
-                       '.ChanceScheduler',
-               help=_('Driver to use for scheduling '
-                      'pool to a default loadbalancer agent')),
-]
-
-cfg.CONF.register_opts(AGENT_SCHEDULER_OPTS)
-
-
-class DriverNotSpecified(n_exc.NeutronException):
-    message = _("Device driver for agent should be specified "
-                "in plugin driver.")
-
-
-class LoadBalancerCallbacks(object):
-
-    # history
-    #   1.0 Initial version
-    #   2.0 Generic API for agent based drivers
-    #       - get_logical_device() handling changed;
-    #       - pool_deployed() and update_status() methods added;
-    target = messaging.Target(version='2.0')
-
-    def __init__(self, plugin):
-        super(LoadBalancerCallbacks, self).__init__()
-        self.plugin = plugin
-
-    def get_ready_devices(self, context, host=None):
-        with context.session.begin(subtransactions=True):
-            agents = self.plugin.get_lbaas_agents(context,
-                                                  filters={'host': [host]})
-            if not agents:
-                return []
-            elif len(agents) > 1:
-                LOG.warning(_LW('Multiple lbaas agents found on host %s'),
-                            host)
-            pools = self.plugin.list_pools_on_lbaas_agent(context,
-                                                          agents[0].id)
-            pool_ids = [pool['id'] for pool in pools['pools']]
-
-            qry = context.session.query(loadbalancer_db.Pool.id)
-            qry = qry.filter(loadbalancer_db.Pool.id.in_(pool_ids))
-            qry = qry.filter(
-                loadbalancer_db.Pool.status.in_(
-                    constants.ACTIVE_PENDING_STATUSES))
-            up = True  # makes pep8 and sqlalchemy happy
-            qry = qry.filter(loadbalancer_db.Pool.admin_state_up == up)
-            return [id for id, in qry]
-
-    def get_logical_device(self, context, pool_id=None):
-        with context.session.begin(subtransactions=True):
-            qry = context.session.query(loadbalancer_db.Pool)
-            qry = qry.filter_by(id=pool_id)
-            pool = qry.one()
-            retval = {}
-            retval['pool'] = self.plugin._make_pool_dict(pool)
-
-            if pool.vip:
-                retval['vip'] = self.plugin._make_vip_dict(pool.vip)
-                retval['vip']['port'] = (
-                    self.plugin._core_plugin._make_port_dict(pool.vip.port)
-                )
-                for fixed_ip in retval['vip']['port']['fixed_ips']:
-                    fixed_ip['subnet'] = (
-                        self.plugin._core_plugin.get_subnet(
-                            context,
-                            fixed_ip['subnet_id']
-                        )
-                    )
-            retval['members'] = [
-                self.plugin._make_member_dict(m)
-                for m in pool.members if (
-                    m.status in constants.ACTIVE_PENDING_STATUSES or
-                    m.status == constants.INACTIVE)
-            ]
-            retval['healthmonitors'] = [
-                self.plugin._make_health_monitor_dict(hm.healthmonitor)
-                for hm in pool.monitors
-                if hm.status in constants.ACTIVE_PENDING_STATUSES
-            ]
-            retval['driver'] = (
-                self.plugin.drivers[pool.provider.provider_name].device_driver)
-
-            return retval
-
-    def pool_deployed(self, context, pool_id):
-        with context.session.begin(subtransactions=True):
-            qry = context.session.query(loadbalancer_db.Pool)
-            qry = qry.filter_by(id=pool_id)
-            pool = qry.one()
-
-            # set all resources to active
-            if pool.status in constants.ACTIVE_PENDING_STATUSES:
-                pool.status = constants.ACTIVE
-
-            if (pool.vip and pool.vip.status in
-                    constants.ACTIVE_PENDING_STATUSES):
-                pool.vip.status = constants.ACTIVE
-
-            for m in pool.members:
-                if m.status in constants.ACTIVE_PENDING_STATUSES:
-                    m.status = constants.ACTIVE
-
-            for hm in pool.monitors:
-                if hm.status in constants.ACTIVE_PENDING_STATUSES:
-                    hm.status = constants.ACTIVE
-
-    def update_status(self, context, obj_type, obj_id, status):
-        model_mapping = {
-            'pool': loadbalancer_db.Pool,
-            'vip': loadbalancer_db.Vip,
-            'member': loadbalancer_db.Member,
-            'health_monitor': loadbalancer_db.PoolMonitorAssociation
-        }
-        if obj_type not in model_mapping:
-            raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
-        try:
-            if obj_type == 'health_monitor':
-                self.plugin.update_pool_health_monitor(
-                    context, obj_id['monitor_id'], obj_id['pool_id'], status)
-            else:
-                self.plugin.update_status(
-                    context, model_mapping[obj_type], obj_id, status)
-        except n_exc.NotFound:
-            # update_status may come from the agent for an object that was
-            # already deleted from the db by another request
-            LOG.warning(_LW('Cannot update status: %(obj_type)s %(obj_id)s '
-                            'not found in the DB, it was probably deleted '
-                            'concurrently'),
-                        {'obj_type': obj_type, 'obj_id': obj_id})
-
-    def pool_destroyed(self, context, pool_id=None):
-        """Agent confirmation hook that a pool has been destroyed.
-
-        This method exists for subclasses to change the deletion
-        behavior.
-        """
-        pass
-
-    def plug_vip_port(self, context, port_id=None, host=None):
-        if not port_id:
-            return
-
-        try:
-            port = self.plugin._core_plugin.get_port(
-                context,
-                port_id
-            )
-        except n_exc.PortNotFound:
-            LOG.debug('Unable to find port %s to plug.', port_id)
-            return
-
-        port['admin_state_up'] = True
-        port['device_owner'] = 'neutron:' + constants.LOADBALANCER
-        port['device_id'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, str(host)))
-        port[portbindings.HOST_ID] = host
-        self.plugin._core_plugin.update_port(
-            context,
-            port_id,
-            {'port': port}
-        )
-
-    def unplug_vip_port(self, context, port_id=None, host=None):
-        if not port_id:
-            return
-
-        try:
-            port = self.plugin._core_plugin.get_port(
-                context,
-                port_id
-            )
-        except n_exc.PortNotFound:
-            LOG.debug('Unable to find port %s to unplug. This can occur when '
-                      'the Vip has been deleted first.',
-                      port_id)
-            return
-
-        port['admin_state_up'] = False
-        port['device_owner'] = ''
-        port['device_id'] = ''
-
-        try:
-            self.plugin._core_plugin.update_port(
-                context,
-                port_id,
-                {'port': port}
-            )
-
-        except n_exc.PortNotFound:
-            LOG.debug('Unable to find port %s to unplug.  This can occur when '
-                      'the Vip has been deleted first.',
-                      port_id)
-
-    def update_pool_stats(self, context, pool_id=None, stats=None, host=None):
-        self.plugin.update_pool_stats(context, pool_id, data=stats)
-
-
-class LoadBalancerAgentApi(object):
-    """Plugin side of plugin to agent RPC API."""
-
-    # history
-    #   1.0 Initial version
-    #   1.1 Support agent_updated call
-    #   2.0 Generic API for agent based drivers
-    #       - modify/reload/destroy_pool methods were removed;
-    #       - added methods to handle create/update/delete for every lbaas
-    #       object individually;
-
-    def __init__(self, topic):
-        target = messaging.Target(topic=topic, version='2.0')
-        self.client = n_rpc.get_client(target)
-
-    def create_vip(self, context, vip, host):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'create_vip', vip=vip)
-
-    def update_vip(self, context, old_vip, vip, host):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'update_vip', old_vip=old_vip, vip=vip)
-
-    def delete_vip(self, context, vip, host):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'delete_vip', vip=vip)
-
-    def create_pool(self, context, pool, host, driver_name):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'create_pool', pool=pool, driver_name=driver_name)
-
-    def update_pool(self, context, old_pool, pool, host):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'update_pool', old_pool=old_pool, pool=pool)
-
-    def delete_pool(self, context, pool, host):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'delete_pool', pool=pool)
-
-    def create_member(self, context, member, host):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'create_member', member=member)
-
-    def update_member(self, context, old_member, member, host):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'update_member', old_member=old_member,
-                   member=member)
-
-    def delete_member(self, context, member, host):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'delete_member', member=member)
-
-    def create_pool_health_monitor(self, context, health_monitor, pool_id,
-                                   host):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'create_pool_health_monitor',
-                   health_monitor=health_monitor, pool_id=pool_id)
-
-    def update_pool_health_monitor(self, context, old_health_monitor,
-                                   health_monitor, pool_id, host):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'update_pool_health_monitor',
-                   old_health_monitor=old_health_monitor,
-                   health_monitor=health_monitor, pool_id=pool_id)
-
-    def delete_pool_health_monitor(self, context, health_monitor, pool_id,
-                                   host):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'delete_pool_health_monitor',
-                   health_monitor=health_monitor, pool_id=pool_id)
-
-    def agent_updated(self, context, admin_state_up, host):
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(context, 'agent_updated',
-                   payload={'admin_state_up': admin_state_up})
-
-
-class AgentDriverBase(abstract_driver.LoadBalancerAbstractDriver):
-
-    # name of the device driver that should be used by the agent;
-    # vendor-specific plugin drivers must override it;
-    device_driver = None
-
-    def __init__(self, plugin):
-        if not self.device_driver:
-            raise DriverNotSpecified()
-
-        self.agent_rpc = LoadBalancerAgentApi(topics.LOADBALANCER_AGENT)
-
-        self.plugin = plugin
-        self._set_callbacks_on_plugin()
-        self.plugin.agent_notifiers.update(
-            {q_const.AGENT_TYPE_LOADBALANCER: self.agent_rpc})
-
-        self.pool_scheduler = importutils.import_object(
-            cfg.CONF.loadbalancer_pool_scheduler_driver)
-
-    def _set_callbacks_on_plugin(self):
-        # another agent-based plugin driver might have already set callbacks
-        # on the plugin
-        if hasattr(self.plugin, 'agent_callbacks'):
-            return
-
-        self.plugin.agent_endpoints = [
-            LoadBalancerCallbacks(self.plugin),
-            agents_db.AgentExtRpcCallback(self.plugin)
-        ]
-        self.plugin.conn = n_rpc.create_connection(new=True)
-        self.plugin.conn.create_consumer(
-            topics.LOADBALANCER_PLUGIN,
-            self.plugin.agent_endpoints,
-            fanout=False)
-        self.plugin.conn.consume_in_threads()
-
-    def get_pool_agent(self, context, pool_id):
-        agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool_id)
-        if not agent:
-            raise lbaas_agentscheduler.NoActiveLbaasAgent(pool_id=pool_id)
-        return agent['agent']
-
-    def create_vip(self, context, vip):
-        agent = self.get_pool_agent(context, vip['pool_id'])
-        self.agent_rpc.create_vip(context, vip, agent['host'])
-
-    def update_vip(self, context, old_vip, vip):
-        agent = self.get_pool_agent(context, vip['pool_id'])
-        if vip['status'] in constants.ACTIVE_PENDING_STATUSES:
-            self.agent_rpc.update_vip(context, old_vip, vip, agent['host'])
-        else:
-            self.agent_rpc.delete_vip(context, vip, agent['host'])
-
-    def delete_vip(self, context, vip):
-        self.plugin._delete_db_vip(context, vip['id'])
-        agent = self.get_pool_agent(context, vip['pool_id'])
-        self.agent_rpc.delete_vip(context, vip, agent['host'])
-
-    def create_pool(self, context, pool):
-        agent = self.pool_scheduler.schedule(self.plugin, context, pool,
-                                             self.device_driver)
-        if not agent:
-            raise lbaas_agentscheduler.NoEligibleLbaasAgent(pool_id=pool['id'])
-        self.agent_rpc.create_pool(context, pool, agent['host'],
-                                   self.device_driver)
-
-    def update_pool(self, context, old_pool, pool):
-        agent = self.get_pool_agent(context, pool['id'])
-        if pool['status'] in constants.ACTIVE_PENDING_STATUSES:
-            self.agent_rpc.update_pool(context, old_pool, pool,
-                                       agent['host'])
-        else:
-            self.agent_rpc.delete_pool(context, pool, agent['host'])
-
-    def delete_pool(self, context, pool):
-        # get the agent first to learn the host, as the binding will be
-        # deleted after the pool is removed from the DB
-        agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool['id'])
-        self.plugin._delete_db_pool(context, pool['id'])
-        if agent:
-            self.agent_rpc.delete_pool(context, pool, agent['agent']['host'])
-
-    def create_member(self, context, member):
-        agent = self.get_pool_agent(context, member['pool_id'])
-        self.agent_rpc.create_member(context, member, agent['host'])
-
-    def update_member(self, context, old_member, member):
-        agent = self.get_pool_agent(context, member['pool_id'])
-        # member may change pool id
-        if member['pool_id'] != old_member['pool_id']:
-            old_pool_agent = self.plugin.get_lbaas_agent_hosting_pool(
-                context, old_member['pool_id'])
-            if old_pool_agent:
-                self.agent_rpc.delete_member(context, old_member,
-                                             old_pool_agent['agent']['host'])
-            self.agent_rpc.create_member(context, member, agent['host'])
-        else:
-            self.agent_rpc.update_member(context, old_member, member,
-                                         agent['host'])
-
-    def delete_member(self, context, member):
-        self.plugin._delete_db_member(context, member['id'])
-        agent = self.get_pool_agent(context, member['pool_id'])
-        self.agent_rpc.delete_member(context, member, agent['host'])
-
-    def create_pool_health_monitor(self, context, healthmon, pool_id):
-        # healthmon is not used here
-        agent = self.get_pool_agent(context, pool_id)
-        self.agent_rpc.create_pool_health_monitor(context, healthmon,
-                                                  pool_id, agent['host'])
-
-    def update_pool_health_monitor(self, context, old_health_monitor,
-                                   health_monitor, pool_id):
-        agent = self.get_pool_agent(context, pool_id)
-        self.agent_rpc.update_pool_health_monitor(context, old_health_monitor,
-                                                  health_monitor, pool_id,
-                                                  agent['host'])
-
-    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
-        self.plugin._delete_db_pool_health_monitor(
-            context, health_monitor['id'], pool_id
-        )
-
-        agent = self.get_pool_agent(context, pool_id)
-        self.agent_rpc.delete_pool_health_monitor(context, health_monitor,
-                                                  pool_id, agent['host'])
-
-    def stats(self, context, pool_id):
-        pass
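Note: for readers following the split, the agent-driver contract that leaves with this file is small. A vendor plugin driver only has to subclass AgentDriverBase and name its device driver; the base class wires up the RPC callbacks, the agent notifier and the pool scheduler. A minimal hypothetical sketch (the class and driver name are illustrative, not part of this change):

from neutron.services.loadbalancer.drivers.common import agent_driver_base


class ExampleVendorPluginDriver(agent_driver_base.AgentDriverBase):
    # The LBaaS agent picks its device driver by this name; every
    # vendor-specific plugin driver must override it.
    device_driver = 'example_vendor_device_driver'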
diff --git a/neutron/services/loadbalancer/drivers/driver_base.py b/neutron/services/loadbalancer/drivers/driver_base.py
deleted file mode 100644 (file)
index 8ed1f3d..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2014 A10 Networks
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.db.loadbalancer import loadbalancer_db as lb_db
-from neutron.services.loadbalancer.drivers import driver_mixins
-
-
-class NotImplementedManager(object):
-    """Helper class to make any subclass of LBAbstractDriver explode if it
-    is missing any of the required object managers.
-    """
-
-    def create(self, context, obj):
-        raise NotImplementedError()
-
-    def update(self, context, old_obj, obj):
-        raise NotImplementedError()
-
-    def delete(self, context, obj):
-        raise NotImplementedError()
-
-
-class LoadBalancerBaseDriver(object):
-    """LBaaSv2 object model drivers should subclass LBAbstractDriver, and
-    initialize the following manager classes to create, update, and delete
-    the various load balancer objects.
-    """
-
-    load_balancer = NotImplementedManager()
-    listener = NotImplementedManager()
-    pool = NotImplementedManager()
-    member = NotImplementedManager()
-    health_monitor = NotImplementedManager()
-
-    def __init__(self, plugin):
-        self.plugin = plugin
-
-
-class BaseLoadBalancerManager(driver_mixins.BaseRefreshMixin,
-                              driver_mixins.BaseStatsMixin,
-                              driver_mixins.BaseStatusUpdateMixin,
-                              driver_mixins.BaseManagerMixin):
-
-    def __init__(self, driver):
-        super(BaseLoadBalancerManager, self).__init__(driver)
-        # TODO(dougw): use lb_db.LoadBalancer once the v2 LBaaS model lands
-        # TODO(dougw): get rid of __init__() in StatusHelperManager and the
-        # 'if ... is not None' clauses; after fixing this next line,
-        # model_class can become a mandatory variable for that subclass.
-        self.model_class = None
-
-
-class BaseListenerManager(driver_mixins.BaseManagerMixin):
-    pass
-
-
-class BasePoolManager(driver_mixins.BaseStatusUpdateMixin,
-                      driver_mixins.BaseManagerMixin):
-
-    def __init__(self, driver):
-        super(BasePoolManager, self).__init__(driver)
-        self.model_class = lb_db.Pool
-
-
-class BaseMemberManager(driver_mixins.BaseStatusUpdateMixin,
-                        driver_mixins.BaseManagerMixin):
-
-    def __init__(self, driver):
-        super(BaseMemberManager, self).__init__(driver)
-        self.model_class = lb_db.Member
-
-
-class BaseHealthMonitorManager(
-                              driver_mixins.BaseHealthMonitorStatusUpdateMixin,
-                              driver_mixins.BaseManagerMixin):
-    pass
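Note: the base classes above are consumed roughly as in the hypothetical skeleton below (names are illustrative). Any manager left at its NotImplementedManager default raises NotImplementedError as soon as it is used, which is the intended failure mode for incomplete drivers.

from neutron.services.loadbalancer.drivers import driver_base


class ExamplePoolManager(driver_base.BasePoolManager):
    # BaseManagerMixin declares create/update/delete as abstract, so a
    # concrete manager must implement all three.
    def create(self, context, pool):
        pass  # talk to the backend here

    def update(self, context, old_pool, pool):
        pass

    def delete(self, context, pool):
        pass


class ExampleDriver(driver_base.LoadBalancerBaseDriver):
    def __init__(self, plugin):
        super(ExampleDriver, self).__init__(plugin)
        self.pool = ExamplePoolManager(self)
        # load_balancer, listener, member and health_monitor keep their
        # NotImplementedManager defaults and will raise if exercised.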
diff --git a/neutron/services/loadbalancer/drivers/driver_mixins.py b/neutron/services/loadbalancer/drivers/driver_mixins.py
deleted file mode 100644 (file)
index 3afc921..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright 2014 A10 Networks
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import six
-
-from neutron.plugins.common import constants
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseManagerMixin(object):
-
-    def __init__(self, driver):
-        self.driver = driver
-
-    @abc.abstractmethod
-    def create(self, context, obj):
-        pass
-
-    @abc.abstractmethod
-    def update(self, context, obj_old, obj):
-        pass
-
-    @abc.abstractmethod
-    def delete(self, context, obj):
-        pass
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseRefreshMixin(object):
-
-    @abc.abstractmethod
-    def refresh(self, context, obj):
-        pass
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseStatsMixin(object):
-
-    @abc.abstractmethod
-    def stats(self, context, obj):
-        pass
-
-
-class BaseStatusUpdateMixin(object):
-
-    # Status update helpers
-    # Note: You must set self.model_class to an appropriate neutron model
-    # in your base manager class.
-
-    def active(self, context, model_id):
-        if self.model_class is not None:
-            self.driver.plugin.update_status(context, self.model_class,
-                                             model_id, constants.ACTIVE)
-
-    def failed(self, context, model_id):
-        if self.model_class is not None:
-            self.driver.plugin.update_status(context, self.model_class,
-                                             model_id, constants.ERROR)
-
-
-class BaseHealthMonitorStatusUpdateMixin(object):
-
-    def active(self, context, health_monitor_id, pool_id):
-        self.driver.plugin.update_pool_health_monitor(context,
-                                                      health_monitor_id,
-                                                      pool_id,
-                                                      constants.ACTIVE)
-
-    def failed(self, context, health_monitor_id, pool_id):
-        self.driver.plugin.update_pool_health_monitor(context,
-                                                      health_monitor_id,
-                                                      pool_id,
-                                                      constants.ERROR)
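Note: the status helpers above assume the concrete manager sets self.model_class. A minimal hypothetical manager showing that contract and the active()/failed() flow (illustrative names, not part of this change):

from neutron.db.loadbalancer import loadbalancer_db as lb_db
from neutron.services.loadbalancer.drivers import driver_mixins


class ExampleVipManager(driver_mixins.BaseStatusUpdateMixin,
                        driver_mixins.BaseManagerMixin):

    def __init__(self, driver):
        super(ExampleVipManager, self).__init__(driver)
        self.model_class = lb_db.Vip  # required by the status helpers

    def create(self, context, vip):
        # configure the backend here, then flip the DB status
        self.active(context, vip.id)  # plugin.update_status(..., ACTIVE)

    def update(self, context, old_vip, vip):
        self.active(context, vip.id)

    def delete(self, context, vip):
        pass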
diff --git a/neutron/services/loadbalancer/drivers/haproxy/__init__.py b/neutron/services/loadbalancer/drivers/haproxy/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/loadbalancer/drivers/haproxy/cfg.py b/neutron/services/loadbalancer/drivers/haproxy/cfg.py
deleted file mode 100644 (file)
index 78e76eb..0000000
+++ /dev/null
@@ -1,235 +0,0 @@
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import itertools
-from six import moves
-
-from neutron.agent.linux import utils
-from neutron.plugins.common import constants as qconstants
-from neutron.services.loadbalancer import constants
-
-
-PROTOCOL_MAP = {
-    constants.PROTOCOL_TCP: 'tcp',
-    constants.PROTOCOL_HTTP: 'http',
-    constants.PROTOCOL_HTTPS: 'tcp',
-}
-
-BALANCE_MAP = {
-    constants.LB_METHOD_ROUND_ROBIN: 'roundrobin',
-    constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn',
-    constants.LB_METHOD_SOURCE_IP: 'source'
-}
-
-STATS_MAP = {
-    constants.STATS_ACTIVE_CONNECTIONS: 'scur',
-    constants.STATS_MAX_CONNECTIONS: 'smax',
-    constants.STATS_CURRENT_SESSIONS: 'scur',
-    constants.STATS_MAX_SESSIONS: 'smax',
-    constants.STATS_TOTAL_CONNECTIONS: 'stot',
-    constants.STATS_TOTAL_SESSIONS: 'stot',
-    constants.STATS_IN_BYTES: 'bin',
-    constants.STATS_OUT_BYTES: 'bout',
-    constants.STATS_CONNECTION_ERRORS: 'econ',
-    constants.STATS_RESPONSE_ERRORS: 'eresp'
-}
-
-ACTIVE_PENDING_STATUSES = qconstants.ACTIVE_PENDING_STATUSES
-INACTIVE = qconstants.INACTIVE
-
-
-def save_config(conf_path, logical_config, socket_path=None,
-                user_group='nogroup'):
-    """Convert a logical configuration to the HAProxy version."""
-    data = []
-    data.extend(_build_global(logical_config, socket_path=socket_path,
-                              user_group=user_group))
-    data.extend(_build_defaults(logical_config))
-    data.extend(_build_frontend(logical_config))
-    data.extend(_build_backend(logical_config))
-    utils.replace_file(conf_path, '\n'.join(data))
-
-
-def _build_global(config, socket_path=None, user_group='nogroup'):
-    opts = [
-        'daemon',
-        'user nobody',
-        'group %s' % user_group,
-        'log /dev/log local0',
-        'log /dev/log local1 notice'
-    ]
-
-    if socket_path:
-        opts.append('stats socket %s mode 0666 level user' % socket_path)
-
-    return itertools.chain(['global'], ('\t' + o for o in opts))
-
-
-def _build_defaults(config):
-    opts = [
-        'log global',
-        'retries 3',
-        'option redispatch',
-        'timeout connect 5000',
-        'timeout client 50000',
-        'timeout server 50000',
-    ]
-
-    return itertools.chain(['defaults'], ('\t' + o for o in opts))
-
-
-def _build_frontend(config):
-    protocol = config['vip']['protocol']
-
-    opts = [
-        'option tcplog',
-        'bind %s:%d' % (
-            _get_first_ip_from_port(config['vip']['port']),
-            config['vip']['protocol_port']
-        ),
-        'mode %s' % PROTOCOL_MAP[protocol],
-        'default_backend %s' % config['pool']['id'],
-    ]
-
-    if config['vip']['connection_limit'] >= 0:
-        opts.append('maxconn %s' % config['vip']['connection_limit'])
-
-    if protocol == constants.PROTOCOL_HTTP:
-        opts.append('option forwardfor')
-
-    return itertools.chain(
-        ['frontend %s' % config['vip']['id']],
-        ('\t' + o for o in opts)
-    )
-
-
-def _build_backend(config):
-    protocol = config['pool']['protocol']
-    lb_method = config['pool']['lb_method']
-
-    opts = [
-        'mode %s' % PROTOCOL_MAP[protocol],
-        'balance %s' % BALANCE_MAP.get(lb_method, 'roundrobin')
-    ]
-
-    if protocol == constants.PROTOCOL_HTTP:
-        opts.append('option forwardfor')
-
-    # add the first health_monitor (if available)
-    server_addon, health_opts = _get_server_health_option(config)
-    opts.extend(health_opts)
-
-    # add session persistence (if available)
-    persist_opts = _get_session_persistence(config)
-    opts.extend(persist_opts)
-
-    # add the members
-    for member in config['members']:
-        if ((member['status'] in ACTIVE_PENDING_STATUSES or
-             member['status'] == INACTIVE)
-            and member['admin_state_up']):
-            server = (('server %(id)s %(address)s:%(protocol_port)s '
-                       'weight %(weight)s') % member) + server_addon
-            if _has_http_cookie_persistence(config):
-                server += ' cookie %d' % config['members'].index(member)
-            opts.append(server)
-
-    return itertools.chain(
-        ['backend %s' % config['pool']['id']],
-        ('\t' + o for o in opts)
-    )
-
-
-def _get_first_ip_from_port(port):
-    for fixed_ip in port['fixed_ips']:
-        return fixed_ip['ip_address']
-
-
-def _get_server_health_option(config):
-    """return the first active health option."""
-    for m in config['healthmonitors']:
-        # not checking the status of healthmonitor for two reasons:
-        # 1) status field is absent in HealthMonitor model
-        # 2) only active HealthMonitors are fetched with
-        # LoadBalancerCallbacks.get_logical_device
-        if m['admin_state_up']:
-            monitor = m
-            break
-    else:
-        return '', []
-
-    server_addon = ' check inter %(delay)ds fall %(max_retries)d' % monitor
-    opts = [
-        'timeout check %ds' % monitor['timeout']
-    ]
-
-    if monitor['type'] in (constants.HEALTH_MONITOR_HTTP,
-                           constants.HEALTH_MONITOR_HTTPS):
-        opts.append('option httpchk %(http_method)s %(url_path)s' % monitor)
-        opts.append(
-            'http-check expect rstatus %s' %
-            '|'.join(_expand_expected_codes(monitor['expected_codes']))
-        )
-
-    if monitor['type'] == constants.HEALTH_MONITOR_HTTPS:
-        opts.append('option ssl-hello-chk')
-
-    return server_addon, opts
-
-
-def _get_session_persistence(config):
-    persistence = config['vip'].get('session_persistence')
-    if not persistence:
-        return []
-
-    opts = []
-    if persistence['type'] == constants.SESSION_PERSISTENCE_SOURCE_IP:
-        opts.append('stick-table type ip size 10k')
-        opts.append('stick on src')
-    elif (persistence['type'] == constants.SESSION_PERSISTENCE_HTTP_COOKIE and
-          config.get('members')):
-        opts.append('cookie SRV insert indirect nocache')
-    elif (persistence['type'] == constants.SESSION_PERSISTENCE_APP_COOKIE and
-          persistence.get('cookie_name')):
-        opts.append('appsession %s len 56 timeout 3h' %
-                    persistence['cookie_name'])
-
-    return opts
-
-
-def _has_http_cookie_persistence(config):
-    return (config['vip'].get('session_persistence') and
-            config['vip']['session_persistence']['type'] ==
-            constants.SESSION_PERSISTENCE_HTTP_COOKIE)
-
-
-def _expand_expected_codes(codes):
-    """Expand the expected code string in set of codes.
-
-    200-204 -> 200, 201, 202, 204
-    200, 203 -> 200, 203
-    """
-
-    retval = set()
-    for code in codes.replace(',', ' ').split(' '):
-        code = code.strip()
-
-        if not code:
-            continue
-        elif '-' in code:
-            low, hi = code.split('-')[:2]
-            retval.update(str(i) for i in moves.xrange(int(low), int(hi) + 1))
-        else:
-            retval.add(code)
-    return retval
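Note: save_config() renders the global, defaults, frontend and backend sections in that order. As a quick illustration of the range-expansion helper above (a sketch only, assuming the module is imported as hacfg, as the namespace driver below does):

from neutron.services.loadbalancer.drivers.haproxy import cfg as hacfg

# Ranges expand to every code they cover; single codes pass through.
expected = hacfg._expand_expected_codes('200-202, 204')
assert expected == set(['200', '201', '202', '204'])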
diff --git a/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py
deleted file mode 100644 (file)
index dcb7a96..0000000
+++ /dev/null
@@ -1,395 +0,0 @@
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-import shutil
-import socket
-
-import netaddr
-from oslo.config import cfg
-from oslo.utils import excutils
-from oslo.utils import importutils
-
-from neutron.agent.common import config
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import exceptions
-from neutron.common import utils as n_utils
-from neutron.i18n import _LE, _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.common import constants
-from neutron.services.loadbalancer.agent import agent_device_driver
-from neutron.services.loadbalancer import constants as lb_const
-from neutron.services.loadbalancer.drivers.haproxy import cfg as hacfg
-
-LOG = logging.getLogger(__name__)
-NS_PREFIX = 'qlbaas-'
-DRIVER_NAME = 'haproxy_ns'
-
-STATE_PATH_DEFAULT = '$state_path/lbaas'
-USER_GROUP_DEFAULT = 'nogroup'
-OPTS = [
-    cfg.StrOpt(
-        'loadbalancer_state_path',
-        default=STATE_PATH_DEFAULT,
-        help=_('Location to store config and state files'),
-        deprecated_opts=[cfg.DeprecatedOpt('loadbalancer_state_path',
-                                           group='DEFAULT')],
-    ),
-    cfg.StrOpt(
-        'user_group',
-        default=USER_GROUP_DEFAULT,
-        help=_('The user group'),
-        deprecated_opts=[cfg.DeprecatedOpt('user_group', group='DEFAULT')],
-    ),
-    cfg.IntOpt(
-        'send_gratuitous_arp',
-        default=3,
-        help=_('When deleting and re-adding the same VIP, send this many '
-               'gratuitous ARPs to flush the ARP cache in the router. '
-               'Set it to 0 or less to disable this feature.'),
-    )
-]
-cfg.CONF.register_opts(OPTS, 'haproxy')
-
-
-class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver):
-    def __init__(self, conf, plugin_rpc):
-        self.conf = conf
-        self.root_helper = config.get_root_helper(conf)
-        self.state_path = conf.haproxy.loadbalancer_state_path
-        try:
-            vif_driver = importutils.import_object(conf.interface_driver, conf)
-        except ImportError:
-            with excutils.save_and_reraise_exception():
-                msg = (_('Error importing interface driver: %s')
-                       % conf.interface_driver)
-                LOG.error(msg)
-
-        self.vif_driver = vif_driver
-        self.plugin_rpc = plugin_rpc
-        self.pool_to_port_id = {}
-
-    @classmethod
-    def get_name(cls):
-        return DRIVER_NAME
-
-    def create(self, logical_config):
-        pool_id = logical_config['pool']['id']
-        namespace = get_ns_name(pool_id)
-
-        self._plug(namespace, logical_config['vip']['port'])
-        self._spawn(logical_config)
-
-    def update(self, logical_config):
-        pool_id = logical_config['pool']['id']
-        pid_path = self._get_state_file_path(pool_id, 'pid')
-
-        extra_args = ['-sf']
-        extra_args.extend(p.strip() for p in open(pid_path, 'r'))
-        self._spawn(logical_config, extra_args)
-
-    def _spawn(self, logical_config, extra_cmd_args=()):
-        pool_id = logical_config['pool']['id']
-        namespace = get_ns_name(pool_id)
-        conf_path = self._get_state_file_path(pool_id, 'conf')
-        pid_path = self._get_state_file_path(pool_id, 'pid')
-        sock_path = self._get_state_file_path(pool_id, 'sock')
-        user_group = self.conf.haproxy.user_group
-
-        hacfg.save_config(conf_path, logical_config, sock_path, user_group)
-        cmd = ['haproxy', '-f', conf_path, '-p', pid_path]
-        cmd.extend(extra_cmd_args)
-
-        ns = ip_lib.IPWrapper(self.root_helper, namespace)
-        ns.netns.execute(cmd)
-
-        # remember the pool<>port mapping
-        self.pool_to_port_id[pool_id] = logical_config['vip']['port']['id']
-
-    @n_utils.synchronized('haproxy-driver')
-    def undeploy_instance(self, pool_id, cleanup_namespace=False):
-        namespace = get_ns_name(pool_id)
-        ns = ip_lib.IPWrapper(self.root_helper, namespace)
-        pid_path = self._get_state_file_path(pool_id, 'pid')
-
-        # kill the process
-        kill_pids_in_file(self.root_helper, pid_path)
-
-        # unplug the ports
-        if pool_id in self.pool_to_port_id:
-            self._unplug(namespace, self.pool_to_port_id[pool_id])
-
-        # delete all devices from namespace;
-        # used when deleting orphans and port_id is not known for pool_id
-        if cleanup_namespace:
-            for device in ns.get_devices(exclude_loopback=True):
-                self.vif_driver.unplug(device.name, namespace=namespace)
-
-        # remove the configuration directory
-        conf_dir = os.path.dirname(self._get_state_file_path(pool_id, ''))
-        if os.path.isdir(conf_dir):
-            shutil.rmtree(conf_dir)
-        ns.garbage_collect_namespace()
-
-    def exists(self, pool_id):
-        namespace = get_ns_name(pool_id)
-        root_ns = ip_lib.IPWrapper(self.root_helper)
-
-        socket_path = self._get_state_file_path(pool_id, 'sock', False)
-        if root_ns.netns.exists(namespace) and os.path.exists(socket_path):
-            try:
-                s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-                s.connect(socket_path)
-                return True
-            except socket.error:
-                pass
-        return False
-
-    def get_stats(self, pool_id):
-        socket_path = self._get_state_file_path(pool_id, 'sock', False)
-        TYPE_BACKEND_REQUEST = 2
-        TYPE_SERVER_REQUEST = 4
-        if os.path.exists(socket_path):
-            parsed_stats = self._get_stats_from_socket(
-                socket_path,
-                entity_type=TYPE_BACKEND_REQUEST | TYPE_SERVER_REQUEST)
-            pool_stats = self._get_backend_stats(parsed_stats)
-            pool_stats['members'] = self._get_servers_stats(parsed_stats)
-            return pool_stats
-        else:
-            LOG.warn(_LW('Stats socket not found for pool %s'), pool_id)
-            return {}
-
-    def _get_backend_stats(self, parsed_stats):
-        TYPE_BACKEND_RESPONSE = '1'
-        for stats in parsed_stats:
-            if stats.get('type') == TYPE_BACKEND_RESPONSE:
-                unified_stats = dict((k, stats.get(v, ''))
-                                     for k, v in hacfg.STATS_MAP.items())
-                return unified_stats
-
-        return {}
-
-    def _get_servers_stats(self, parsed_stats):
-        TYPE_SERVER_RESPONSE = '2'
-        res = {}
-        for stats in parsed_stats:
-            if stats.get('type') == TYPE_SERVER_RESPONSE:
-                res[stats['svname']] = {
-                    lb_const.STATS_STATUS: (constants.INACTIVE
-                                            if stats['status'] == 'DOWN'
-                                            else constants.ACTIVE),
-                    lb_const.STATS_HEALTH: stats['check_status'],
-                    lb_const.STATS_FAILED_CHECKS: stats['chkfail']
-                }
-        return res
-
-    def _get_stats_from_socket(self, socket_path, entity_type):
-        try:
-            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            s.connect(socket_path)
-            s.send('show stat -1 %s -1\n' % entity_type)
-            raw_stats = ''
-            chunk_size = 1024
-            while True:
-                chunk = s.recv(chunk_size)
-                raw_stats += chunk
-                if len(chunk) < chunk_size:
-                    break
-
-            return self._parse_stats(raw_stats)
-        except socket.error as e:
-            LOG.warn(_LW('Error while connecting to stats socket: %s'), e)
-            return {}
-
-    def _parse_stats(self, raw_stats):
-        stat_lines = raw_stats.splitlines()
-        if len(stat_lines) < 2:
-            return []
-        stat_names = [name.strip('# ') for name in stat_lines[0].split(',')]
-        res_stats = []
-        for raw_values in stat_lines[1:]:
-            if not raw_values:
-                continue
-            stat_values = [value.strip() for value in raw_values.split(',')]
-            res_stats.append(dict(zip(stat_names, stat_values)))
-
-        return res_stats
-
-    def _get_state_file_path(self, pool_id, kind, ensure_state_dir=True):
-        """Returns the file name for a given kind of config file."""
-        confs_dir = os.path.abspath(os.path.normpath(self.state_path))
-        conf_dir = os.path.join(confs_dir, pool_id)
-        if ensure_state_dir:
-            if not os.path.isdir(conf_dir):
-                os.makedirs(conf_dir, 0o755)
-        return os.path.join(conf_dir, kind)
-
-    def _plug(self, namespace, port, reuse_existing=True):
-        self.plugin_rpc.plug_vip_port(port['id'])
-        interface_name = self.vif_driver.get_device_name(Wrap(port))
-
-        if ip_lib.device_exists(interface_name, self.root_helper, namespace):
-            if not reuse_existing:
-                raise exceptions.PreexistingDeviceFailure(
-                    dev_name=interface_name
-                )
-        else:
-            self.vif_driver.plug(
-                port['network_id'],
-                port['id'],
-                interface_name,
-                port['mac_address'],
-                namespace=namespace
-            )
-
-        cidrs = [
-            '%s/%s' % (ip['ip_address'],
-                       netaddr.IPNetwork(ip['subnet']['cidr']).prefixlen)
-            for ip in port['fixed_ips']
-        ]
-        self.vif_driver.init_l3(interface_name, cidrs, namespace=namespace)
-
-        gw_ip = port['fixed_ips'][0]['subnet'].get('gateway_ip')
-
-        if not gw_ip:
-            host_routes = port['fixed_ips'][0]['subnet'].get('host_routes', [])
-            for host_route in host_routes:
-                if host_route['destination'] == "0.0.0.0/0":
-                    gw_ip = host_route['nexthop']
-                    break
-
-        if gw_ip:
-            cmd = ['route', 'add', 'default', 'gw', gw_ip]
-            ip_wrapper = ip_lib.IPWrapper(self.root_helper,
-                                          namespace=namespace)
-            ip_wrapper.netns.execute(cmd, check_exit_code=False)
-            # When the same VIP is deleted and re-added, we need to
-            # send gratuitous ARPs to flush the ARP cache in the router.
-            gratuitous_arp = self.conf.haproxy.send_gratuitous_arp
-            if gratuitous_arp > 0:
-                for ip in port['fixed_ips']:
-                    cmd_arping = ['arping', '-U',
-                                  '-I', interface_name,
-                                  '-c', gratuitous_arp,
-                                  ip['ip_address']]
-                    ip_wrapper.netns.execute(cmd_arping, check_exit_code=False)
-
-    def _unplug(self, namespace, port_id):
-        port_stub = {'id': port_id}
-        self.plugin_rpc.unplug_vip_port(port_id)
-        interface_name = self.vif_driver.get_device_name(Wrap(port_stub))
-        self.vif_driver.unplug(interface_name, namespace=namespace)
-
-    @n_utils.synchronized('haproxy-driver')
-    def deploy_instance(self, logical_config):
-        # do actual deploy only if vip and pool are configured and active
-        if (not logical_config or
-                'vip' not in logical_config or
-                (logical_config['vip']['status'] not in
-                 constants.ACTIVE_PENDING_STATUSES) or
-                not logical_config['vip']['admin_state_up'] or
-                (logical_config['pool']['status'] not in
-                 constants.ACTIVE_PENDING_STATUSES) or
-                not logical_config['pool']['admin_state_up']):
-            return
-
-        if self.exists(logical_config['pool']['id']):
-            self.update(logical_config)
-        else:
-            self.create(logical_config)
-
-    def _refresh_device(self, pool_id):
-        logical_config = self.plugin_rpc.get_logical_device(pool_id)
-        self.deploy_instance(logical_config)
-
-    def create_vip(self, vip):
-        self._refresh_device(vip['pool_id'])
-
-    def update_vip(self, old_vip, vip):
-        self._refresh_device(vip['pool_id'])
-
-    def delete_vip(self, vip):
-        self.undeploy_instance(vip['pool_id'])
-
-    def create_pool(self, pool):
-        # nothing to do here because a pool needs a vip to be useful
-        pass
-
-    def update_pool(self, old_pool, pool):
-        self._refresh_device(pool['id'])
-
-    def delete_pool(self, pool):
-        # delete_pool may be called before the vip is deleted, e.g. when the
-        # pool's admin state is set to down
-        if self.exists(pool['id']):
-            self.undeploy_instance(pool['id'])
-
-    def create_member(self, member):
-        self._refresh_device(member['pool_id'])
-
-    def update_member(self, old_member, member):
-        self._refresh_device(member['pool_id'])
-
-    def delete_member(self, member):
-        self._refresh_device(member['pool_id'])
-
-    def create_pool_health_monitor(self, health_monitor, pool_id):
-        self._refresh_device(pool_id)
-
-    def update_pool_health_monitor(self, old_health_monitor, health_monitor,
-                                   pool_id):
-        self._refresh_device(pool_id)
-
-    def delete_pool_health_monitor(self, health_monitor, pool_id):
-        self._refresh_device(pool_id)
-
-    def remove_orphans(self, known_pool_ids):
-        if not os.path.exists(self.state_path):
-            return
-
-        orphans = (pool_id for pool_id in os.listdir(self.state_path)
-                   if pool_id not in known_pool_ids)
-        for pool_id in orphans:
-            if self.exists(pool_id):
-                self.undeploy_instance(pool_id, cleanup_namespace=True)
-
-
-# NOTE (markmcclain) For compliance with interface.py which expects objects
-class Wrap(object):
-    """A light attribute wrapper for compatibility with the interface lib."""
-    def __init__(self, d):
-        self.__dict__.update(d)
-
-    def __getitem__(self, key):
-        return self.__dict__[key]
-
-
-def get_ns_name(namespace_id):
-    return NS_PREFIX + namespace_id
-
-
-def kill_pids_in_file(root_helper, pid_path):
-    if os.path.exists(pid_path):
-        with open(pid_path, 'r') as pids:
-            for pid in pids:
-                pid = pid.strip()
-                try:
-                    utils.execute(['kill', '-9', pid], root_helper)
-                except RuntimeError:
-                    LOG.exception(
-                        _LE('Unable to kill haproxy process: %s'),
-                        pid
-                    )
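Note: for anyone tracing agent state after the move, the helpers above produce the following layout (sketched with a hypothetical pool id and the default loadbalancer_state_path of $state_path/lbaas):

pool_id = 'example-pool-id'   # hypothetical
get_ns_name(pool_id)          # -> 'qlbaas-example-pool-id'
# HaproxyNSDriver._get_state_file_path(pool_id, kind) then yields:
#   $state_path/lbaas/example-pool-id/conf
#   $state_path/lbaas/example-pool-id/pid
#   $state_path/lbaas/example-pool-id/sock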
diff --git a/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py b/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py
deleted file mode 100644 (file)
index 6cdda7a..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.services.loadbalancer.drivers.common import agent_driver_base
-from neutron.services.loadbalancer.drivers.haproxy import namespace_driver
-
-
-class HaproxyOnHostPluginDriver(agent_driver_base.AgentDriverBase):
-    device_driver = namespace_driver.DRIVER_NAME
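Note: the pairing this two-line file expresses stays the same after the split: the plugin driver is advertised as a service provider and the agent loads the matching device driver by name. For the in-tree paths being removed it was typically configured along these lines (era-specific option names; treat as an illustration and verify against the target repo):

# neutron.conf
[service_providers]
service_provider = LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default

# lbaas_agent.ini
[DEFAULT]
device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver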
diff --git a/neutron/services/loadbalancer/drivers/logging_noop/__init__.py b/neutron/services/loadbalancer/drivers/logging_noop/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/loadbalancer/drivers/logging_noop/driver.py b/neutron/services/loadbalancer/drivers/logging_noop/driver.py
deleted file mode 100644 (file)
index e4bd651..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2014, Doug Wiegley (dougwig), A10 Networks
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.openstack.common import log as logging
-from neutron.services.loadbalancer.drivers import driver_base
-
-LOG = logging.getLogger(__name__)
-
-
-class LoggingNoopLoadBalancerDriver(driver_base.LoadBalancerBaseDriver):
-
-    def __init__(self, plugin):
-        self.plugin = plugin
-
-        # Each of the major LBaaS objects in the neutron database
-        # needs a corresponding manager/handler class.
-        #
-        # Put common things that are shared across the entire driver, like
-        # config or a rest client handle, here.
-        #
-        # This function is executed when neutron-server starts.
-
-        self.load_balancer = LoggingNoopLoadBalancerManager(self)
-        self.listener = LoggingNoopListenerManager(self)
-        self.pool = LoggingNoopPoolManager(self)
-        self.member = LoggingNoopMemberManager(self)
-        self.health_monitor = LoggingNoopHealthMonitorManager(self)
-
-
-class LoggingNoopCommonManager(object):
-
-    def create(self, context, obj):
-        LOG.debug("LB %s no-op, create %s", self.__class__.__name__, obj.id)
-        self.active(context, obj.id)
-
-    def update(self, context, old_obj, obj):
-        LOG.debug("LB %s no-op, update %s", self.__class__.__name__, obj.id)
-        self.active(context, obj.id)
-
-    def delete(self, context, obj):
-        LOG.debug("LB %s no-op, delete %s", self.__class__.__name__, obj.id)
-
-
-class LoggingNoopLoadBalancerManager(LoggingNoopCommonManager,
-                                     driver_base.BaseLoadBalancerManager):
-
-    def refresh(self, context, lb_obj, force=False):
-        # This is intended to trigger the backend to check and repair
-        # the state of this load balancer and all of its dependent objects
-        LOG.debug("LB pool refresh %s, force=%s", lb_obj.id, force)
-
-    def stats(self, context, lb_obj):
-        LOG.debug("LB stats %s", lb_obj.id)
-        return {
-            "bytes_in": 0,
-            "bytes_out": 0,
-            "active_connections": 0,
-            "total_connections": 0
-        }
-
-
-class LoggingNoopListenerManager(LoggingNoopCommonManager,
-                                 driver_base.BaseListenerManager):
-
-    def create(self, context, obj):
-        LOG.debug("LB listener no-op, create %s", self.__class__.__name__,
-                  obj.id)
-
-    def update(self, context, old_obj, obj):
-        LOG.debug("LB listener no-op, update %s", self.__class__.__name__,
-                  obj.id)
-
-
-class LoggingNoopPoolManager(LoggingNoopCommonManager,
-                             driver_base.BasePoolManager):
-    pass
-
-
-class LoggingNoopMemberManager(LoggingNoopCommonManager,
-                               driver_base.BaseMemberManager):
-    pass
-
-
-class LoggingNoopHealthMonitorManager(LoggingNoopCommonManager,
-                                      driver_base.BaseHealthMonitorManager):
-
-    def create(self, context, obj):
-        LOG.debug("LB health monitor no-op, create %s",
-                  self.__class__.__name__, obj.id)
-        self.active(context, obj.id, obj.id)
-
-    def update(self, context, old_obj, obj):
-        LOG.debug("LB health monitor no-op, update %s",
-                  self.__class__.__name__, obj.id)
-        self.active(context, obj.id, obj.id)
diff --git a/neutron/services/loadbalancer/drivers/netscaler/__init__.py b/neutron/services/loadbalancer/drivers/netscaler/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/loadbalancer/drivers/netscaler/ncc_client.py b/neutron/services/loadbalancer/drivers/netscaler/ncc_client.py
deleted file mode 100644 (file)
index 7e09249..0000000
+++ /dev/null
@@ -1,177 +0,0 @@
-# Copyright 2014 Citrix Systems
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import base64
-
-from oslo.serialization import jsonutils
-import requests
-
-from neutron.common import exceptions as n_exc
-from neutron.i18n import _LE
-from neutron.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-CONTENT_TYPE_HEADER = 'Content-type'
-ACCEPT_HEADER = 'Accept'
-AUTH_HEADER = 'Authorization'
-DRIVER_HEADER = 'X-OpenStack-LBaaS'
-TENANT_HEADER = 'X-Tenant-ID'
-JSON_CONTENT_TYPE = 'application/json'
-DRIVER_HEADER_VALUE = 'netscaler-openstack-lbaas'
-
-
-class NCCException(n_exc.NeutronException):
-
-    """Represents exceptions thrown by NSClient."""
-
-    CONNECTION_ERROR = 1
-    REQUEST_ERROR = 2
-    RESPONSE_ERROR = 3
-    UNKNOWN_ERROR = 4
-
-    def __init__(self, error):
-        self.message = _("NCC Error %d") % error
-        super(NCCException, self).__init__()
-        self.error = error
-
-
-class NSClient(object):
-
-    """Client to operate on REST resources of NetScaler Control Center."""
-
-    def __init__(self, service_uri, username, password):
-        if not service_uri:
-            LOG.exception(_LE("No NetScaler Control Center URI specified. "
-                              "Cannot connect."))
-            raise NCCException(NCCException.CONNECTION_ERROR)
-        self.service_uri = service_uri.strip('/')
-        self.auth = None
-        if username and password:
-            base64string = base64.encodestring("%s:%s" % (username, password))
-            base64string = base64string[:-1]
-            self.auth = 'Basic %s' % base64string
-
-    def create_resource(self, tenant_id, resource_path, object_name,
-                        object_data):
-        """Create a resource of NetScaler Control Center."""
-        return self._resource_operation('POST', tenant_id,
-                                        resource_path,
-                                        object_name=object_name,
-                                        object_data=object_data)
-
-    def retrieve_resource(self, tenant_id, resource_path, parse_response=True):
-        """Retrieve a resource of NetScaler Control Center."""
-        return self._resource_operation('GET', tenant_id, resource_path)
-
-    def update_resource(self, tenant_id, resource_path, object_name,
-                        object_data):
-        """Update a resource of the NetScaler Control Center."""
-        return self._resource_operation('PUT', tenant_id,
-                                        resource_path,
-                                        object_name=object_name,
-                                        object_data=object_data)
-
-    def remove_resource(self, tenant_id, resource_path, parse_response=True):
-        """Remove a resource of NetScaler Control Center."""
-        return self._resource_operation('DELETE', tenant_id, resource_path)
-
-    def _resource_operation(self, method, tenant_id, resource_path,
-                            object_name=None, object_data=None):
-        resource_uri = "%s/%s" % (self.service_uri, resource_path)
-        headers = self._setup_req_headers(tenant_id)
-        request_body = None
-        if object_data:
-            if isinstance(object_data, str):
-                request_body = object_data
-            else:
-                obj_dict = {object_name: object_data}
-                request_body = jsonutils.dumps(obj_dict)
-
-        response_status, resp_dict = self._execute_request(method,
-                                                           resource_uri,
-                                                           headers,
-                                                           body=request_body)
-        return response_status, resp_dict
-
-    def _is_valid_response(self, response_status):
-        # when status is less than 400, the response is fine
-        return response_status < requests.codes.bad_request
-
-    def _setup_req_headers(self, tenant_id):
-        headers = {ACCEPT_HEADER: JSON_CONTENT_TYPE,
-                   CONTENT_TYPE_HEADER: JSON_CONTENT_TYPE,
-                   DRIVER_HEADER: DRIVER_HEADER_VALUE,
-                   TENANT_HEADER: tenant_id,
-                   AUTH_HEADER: self.auth}
-        return headers
-
-    def _get_response_dict(self, response):
-        response_dict = {'status': response.status_code,
-                         'body': response.text,
-                         'headers': response.headers}
-        if self._is_valid_response(response.status_code):
-            if response.text:
-                response_dict['dict'] = response.json()
-        return response_dict
-
-    def _execute_request(self, method, resource_uri, headers, body=None):
-        try:
-            response = requests.request(method, url=resource_uri,
-                                        headers=headers, data=body)
-        except requests.exceptions.SSLError:
-            LOG.exception(_LE("SSL error occurred while connecting to %s"),
-                          self.service_uri)
-            raise NCCException(NCCException.CONNECTION_ERROR)
-        except requests.exceptions.ConnectionError:
-            LOG.exception(_LE("Connection error occurred while connecting "
-                              "to %s"),
-                          self.service_uri)
-            raise NCCException(NCCException.CONNECTION_ERROR)
-        except requests.exceptions.Timeout:
-            LOG.exception(_LE("Request to %s timed out"), self.service_uri)
-            raise NCCException(NCCException.CONNECTION_ERROR)
-        except (requests.exceptions.URLRequired,
-                requests.exceptions.InvalidURL,
-                requests.exceptions.MissingSchema,
-                requests.exceptions.InvalidSchema):
-            LOG.exception(_LE("Request did not specify a valid URL"))
-            raise NCCException(NCCException.REQUEST_ERROR)
-        except requests.exceptions.TooManyRedirects:
-            LOG.exception(_LE("Too many redirects occurred for request to %s"))
-            raise NCCException(NCCException.REQUEST_ERROR)
-        except requests.exceptions.RequestException:
-            LOG.exception(_LE("A request error while connecting to %s"),
-                          self.service_uri)
-            raise NCCException(NCCException.REQUEST_ERROR)
-        except Exception:
-            LOG.exception(_LE("A unknown error occurred during request to %s"),
-                          self.service_uri)
-            raise NCCException(NCCException.UNKNOWN_ERROR)
-        resp_dict = self._get_response_dict(response)
-        LOG.debug("Response: %s", resp_dict['body'])
-        response_status = resp_dict['status']
-        if response_status == requests.codes.unauthorized:
-            LOG.exception(_LE("Unable to login. Invalid credentials passed."
-                              "for: %s"),
-                          self.service_uri)
-            raise NCCException(NCCException.RESPONSE_ERROR)
-        if not self._is_valid_response(response_status):
-            LOG.exception(_LE("Failed %(method)s operation on %(url)s "
-                              "status code: %(response_status)s"),
-                          {"method": method,
-                           "url": resource_uri,
-                           "response_status": response_status})
-            raise NCCException(NCCException.RESPONSE_ERROR)
-        return response_status, resp_dict
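Note: a minimal hypothetical use of the client above, matching the calls the NetScaler plugin driver below makes (URI, credentials and payload are placeholders):

from neutron.services.loadbalancer.drivers.netscaler import ncc_client

client = ncc_client.NSClient('https://ncc.example.com/admin', 'user', 'pass')
# Issues POST <service_uri>/vips with body {"vip": {...}}; the tenant id is
# carried in the X-Tenant-ID header built by _setup_req_headers().
status, resp = client.create_resource('example-tenant-id', 'vips', 'vip',
                                      {'id': 'example-vip-id'})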
diff --git a/neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py b/neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py
deleted file mode 100644 (file)
index 71c1bd6..0000000
+++ /dev/null
@@ -1,469 +0,0 @@
-# Copyright 2014 Citrix Systems, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo.config import cfg
-
-from neutron.api.v2 import attributes
-from neutron.db.loadbalancer import loadbalancer_db
-from neutron.i18n import _LI
-from neutron.openstack.common import log as logging
-from neutron.plugins.common import constants
-from neutron.services.loadbalancer.drivers import abstract_driver
-from neutron.services.loadbalancer.drivers.netscaler import ncc_client
-
-LOG = logging.getLogger(__name__)
-
-NETSCALER_CC_OPTS = [
-    cfg.StrOpt(
-        'netscaler_ncc_uri',
-        help=_('The URL to reach the NetScaler Control Center Server.'),
-    ),
-    cfg.StrOpt(
-        'netscaler_ncc_username',
-        help=_('Username to login to the NetScaler Control Center Server.'),
-    ),
-    cfg.StrOpt(
-        'netscaler_ncc_password',
-        help=_('Password to login to the NetScaler Control Center Server.'),
-    )
-]
-
-cfg.CONF.register_opts(NETSCALER_CC_OPTS, 'netscaler_driver')
-
-VIPS_RESOURCE = 'vips'
-VIP_RESOURCE = 'vip'
-POOLS_RESOURCE = 'pools'
-POOL_RESOURCE = 'pool'
-POOLMEMBERS_RESOURCE = 'members'
-POOLMEMBER_RESOURCE = 'member'
-MONITORS_RESOURCE = 'healthmonitors'
-MONITOR_RESOURCE = 'healthmonitor'
-POOLSTATS_RESOURCE = 'statistics'
-PROV_SEGMT_ID = 'provider:segmentation_id'
-PROV_NET_TYPE = 'provider:network_type'
-DRIVER_NAME = 'netscaler_driver'
-
-
-class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
-
-    """NetScaler LBaaS Plugin driver class."""
-
-    def __init__(self, plugin):
-        self.plugin = plugin
-        ncc_uri = cfg.CONF.netscaler_driver.netscaler_ncc_uri
-        ncc_username = cfg.CONF.netscaler_driver.netscaler_ncc_username
-        ncc_password = cfg.CONF.netscaler_driver.netscaler_ncc_password
-        self.client = ncc_client.NSClient(ncc_uri,
-                                          ncc_username,
-                                          ncc_password)
-
-    def create_vip(self, context, vip):
-        """Create a vip on a NetScaler device."""
-        network_info = self._get_vip_network_info(context, vip)
-        ncc_vip = self._prepare_vip_for_creation(vip)
-        ncc_vip = dict(ncc_vip.items() + network_info.items())
-        LOG.debug("NetScaler driver vip creation: %r", ncc_vip)
-        status = constants.ACTIVE
-        try:
-            self.client.create_resource(context.tenant_id, VIPS_RESOURCE,
-                                        VIP_RESOURCE, ncc_vip)
-        except ncc_client.NCCException:
-            status = constants.ERROR
-        self.plugin.update_status(context, loadbalancer_db.Vip, vip["id"],
-                                  status)
-
-    def update_vip(self, context, old_vip, vip):
-        """Update a vip on a NetScaler device."""
-        update_vip = self._prepare_vip_for_update(vip)
-        resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
-        LOG.debug("NetScaler driver vip %(vip_id)s update: %(vip_obj)r",
-                  {"vip_id": vip["id"], "vip_obj": vip})
-        status = constants.ACTIVE
-        try:
-            self.client.update_resource(context.tenant_id, resource_path,
-                                        VIP_RESOURCE, update_vip)
-        except ncc_client.NCCException:
-            status = constants.ERROR
-        self.plugin.update_status(context, loadbalancer_db.Vip, old_vip["id"],
-                                  status)
-
-    def delete_vip(self, context, vip):
-        """Delete a vip on a NetScaler device."""
-        resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
-        LOG.debug("NetScaler driver vip removal: %s", vip["id"])
-        try:
-            self.client.remove_resource(context.tenant_id, resource_path)
-        except ncc_client.NCCException:
-            self.plugin.update_status(context, loadbalancer_db.Vip,
-                                      vip["id"],
-                                      constants.ERROR)
-        else:
-            self.plugin._delete_db_vip(context, vip['id'])
-
-    def create_pool(self, context, pool):
-        """Create a pool on a NetScaler device."""
-        network_info = self._get_pool_network_info(context, pool)
-        # allocate a SNAT port/IP address on the subnet if one doesn't exist
-        self._create_snatport_for_subnet_if_not_exists(context,
-                                                       pool['tenant_id'],
-                                                       pool['subnet_id'],
-                                                       network_info)
-        ncc_pool = self._prepare_pool_for_creation(pool)
-        ncc_pool = dict(ncc_pool.items() + network_info.items())
-        LOG.debug("NetScaler driver pool creation: %r", ncc_pool)
-        status = constants.ACTIVE
-        try:
-            self.client.create_resource(context.tenant_id, POOLS_RESOURCE,
-                                        POOL_RESOURCE, ncc_pool)
-        except ncc_client.NCCException:
-            status = constants.ERROR
-        self.plugin.update_status(context, loadbalancer_db.Pool,
-                                  ncc_pool["id"], status)
-
-    def update_pool(self, context, old_pool, pool):
-        """Update a pool on a NetScaler device."""
-        ncc_pool = self._prepare_pool_for_update(pool)
-        resource_path = "%s/%s" % (POOLS_RESOURCE, old_pool["id"])
-        LOG.debug("NetScaler driver pool %(pool_id)s update: %(pool_obj)r",
-                  {"pool_id": old_pool["id"], "pool_obj": ncc_pool})
-        status = constants.ACTIVE
-        try:
-            self.client.update_resource(context.tenant_id, resource_path,
-                                        POOL_RESOURCE, ncc_pool)
-        except ncc_client.NCCException:
-            status = constants.ERROR
-        self.plugin.update_status(context, loadbalancer_db.Pool,
-                                  old_pool["id"], status)
-
-    def delete_pool(self, context, pool):
-        """Delete a pool on a NetScaler device."""
-        resource_path = "%s/%s" % (POOLS_RESOURCE, pool['id'])
-        LOG.debug("NetScaler driver pool removal: %s", pool["id"])
-        try:
-            self.client.remove_resource(context.tenant_id, resource_path)
-        except ncc_client.NCCException:
-            self.plugin.update_status(context, loadbalancer_db.Pool,
-                                      pool["id"],
-                                      constants.ERROR)
-        else:
-            self.plugin._delete_db_pool(context, pool['id'])
-            self._remove_snatport_for_subnet_if_not_used(context,
-                                                         pool['tenant_id'],
-                                                         pool['subnet_id'])
-
-    def create_member(self, context, member):
-        """Create a pool member on a NetScaler device."""
-        ncc_member = self._prepare_member_for_creation(member)
-        LOG.info(_LI("NetScaler driver poolmember creation: %r"),
-                 ncc_member)
-        status = constants.ACTIVE
-        try:
-            self.client.create_resource(context.tenant_id,
-                                        POOLMEMBERS_RESOURCE,
-                                        POOLMEMBER_RESOURCE,
-                                        ncc_member)
-        except ncc_client.NCCException:
-            status = constants.ERROR
-        self.plugin.update_status(context, loadbalancer_db.Member,
-                                  member["id"], status)
-
-    def update_member(self, context, old_member, member):
-        """Update a pool member on a NetScaler device."""
-        ncc_member = self._prepare_member_for_update(member)
-        resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, old_member["id"])
-        LOG.debug("NetScaler driver poolmember %(member_id)s update: "
-                  "%(member_obj)r",
-                  {"member_id": old_member["id"],
-                   "member_obj": ncc_member})
-        status = constants.ACTIVE
-        try:
-            self.client.update_resource(context.tenant_id, resource_path,
-                                        POOLMEMBER_RESOURCE, ncc_member)
-        except ncc_client.NCCException:
-            status = constants.ERROR
-        self.plugin.update_status(context, loadbalancer_db.Member,
-                                  old_member["id"], status)
-
-    def delete_member(self, context, member):
-        """Delete a pool member on a NetScaler device."""
-        resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, member['id'])
-        LOG.debug("NetScaler driver poolmember removal: %s", member["id"])
-        try:
-            self.client.remove_resource(context.tenant_id, resource_path)
-        except ncc_client.NCCException:
-            self.plugin.update_status(context, loadbalancer_db.Member,
-                                      member["id"],
-                                      constants.ERROR)
-        else:
-            self.plugin._delete_db_member(context, member['id'])
-
-    def create_pool_health_monitor(self, context, health_monitor, pool_id):
-        """Create a pool health monitor on a NetScaler device."""
-        ncc_hm = self._prepare_healthmonitor_for_creation(health_monitor,
-                                                          pool_id)
-        resource_path = "%s/%s/%s" % (POOLS_RESOURCE, pool_id,
-                                      MONITORS_RESOURCE)
-        LOG.debug("NetScaler driver healthmonitor creation for pool "
-                  "%(pool_id)s: %(monitor_obj)r",
-                  {"pool_id": pool_id, "monitor_obj": ncc_hm})
-        status = constants.ACTIVE
-        try:
-            self.client.create_resource(context.tenant_id, resource_path,
-                                        MONITOR_RESOURCE,
-                                        ncc_hm)
-        except ncc_client.NCCException:
-            status = constants.ERROR
-        self.plugin.update_pool_health_monitor(context,
-                                               health_monitor['id'],
-                                               pool_id,
-                                               status, "")
-
-    def update_pool_health_monitor(self, context, old_health_monitor,
-                                   health_monitor, pool_id):
-        """Update a pool health monitor on a NetScaler device."""
-        ncc_hm = self._prepare_healthmonitor_for_update(health_monitor)
-        resource_path = "%s/%s" % (MONITORS_RESOURCE,
-                                   old_health_monitor["id"])
-        LOG.debug("NetScaler driver healthmonitor %(monitor_id)s update: "
-                  "%(monitor_obj)r",
-                  {"monitor_id": old_health_monitor["id"],
-                   "monitor_obj": ncc_hm})
-        status = constants.ACTIVE
-        try:
-            self.client.update_resource(context.tenant_id, resource_path,
-                                        MONITOR_RESOURCE, ncc_hm)
-        except ncc_client.NCCException:
-            status = constants.ERROR
-        self.plugin.update_pool_health_monitor(context,
-                                               old_health_monitor['id'],
-                                               pool_id,
-                                               status, "")
-
-    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
-        """Delete a pool health monitor on a NetScaler device."""
-        resource_path = "%s/%s/%s/%s" % (POOLS_RESOURCE, pool_id,
-                                         MONITORS_RESOURCE,
-                                         health_monitor["id"])
-        LOG.debug("NetScaler driver healthmonitor %(monitor_id)s"
-                  "removal for pool %(pool_id)s",
-                  {"monitor_id": health_monitor["id"],
-                   "pool_id": pool_id})
-        try:
-            self.client.remove_resource(context.tenant_id, resource_path)
-        except ncc_client.NCCException:
-            self.plugin.update_pool_health_monitor(context,
-                                                   health_monitor['id'],
-                                                   pool_id,
-                                                   constants.ERROR, "")
-        else:
-            self.plugin._delete_db_pool_health_monitor(context,
-                                                       health_monitor['id'],
-                                                       pool_id)
-
-    def stats(self, context, pool_id):
-        """Retrieve pool statistics from the NetScaler device."""
-        resource_path = "%s/%s" % (POOLSTATS_RESOURCE, pool_id)
-        LOG.debug("NetScaler driver pool stats retrieval: %s", pool_id)
-        try:
-            stats = self.client.retrieve_resource(context.tenant_id,
-                                                  resource_path)[1]
-        except ncc_client.NCCException:
-            self.plugin.update_status(context, loadbalancer_db.Pool,
-                                      pool_id, constants.ERROR)
-        else:
-            return stats
-
-    def _prepare_vip_for_creation(self, vip):
-        creation_attrs = {
-            'id': vip['id'],
-            'tenant_id': vip['tenant_id'],
-            'protocol': vip['protocol'],
-            'address': vip['address'],
-            'protocol_port': vip['protocol_port'],
-        }
-        if 'session_persistence' in vip:
-            creation_attrs['session_persistence'] = vip['session_persistence']
-        update_attrs = self._prepare_vip_for_update(vip)
-        creation_attrs.update(update_attrs)
-        return creation_attrs
-
-    def _prepare_vip_for_update(self, vip):
-        return {
-            'name': vip['name'],
-            'description': vip['description'],
-            'pool_id': vip['pool_id'],
-            'connection_limit': vip['connection_limit'],
-            'admin_state_up': vip['admin_state_up']
-        }
-
-    def _prepare_pool_for_creation(self, pool):
-        creation_attrs = {
-            'id': pool['id'],
-            'tenant_id': pool['tenant_id'],
-            'vip_id': pool['vip_id'],
-            'protocol': pool['protocol'],
-            'subnet_id': pool['subnet_id'],
-        }
-        update_attrs = self._prepare_pool_for_update(pool)
-        creation_attrs.update(update_attrs)
-        return creation_attrs
-
-    def _prepare_pool_for_update(self, pool):
-        return {
-            'name': pool['name'],
-            'description': pool['description'],
-            'lb_method': pool['lb_method'],
-            'admin_state_up': pool['admin_state_up']
-        }
-
-    def _prepare_member_for_creation(self, member):
-        creation_attrs = {
-            'id': member['id'],
-            'tenant_id': member['tenant_id'],
-            'address': member['address'],
-            'protocol_port': member['protocol_port'],
-        }
-        update_attrs = self._prepare_member_for_update(member)
-        creation_attrs.update(update_attrs)
-        return creation_attrs
-
-    def _prepare_member_for_update(self, member):
-        return {
-            'pool_id': member['pool_id'],
-            'weight': member['weight'],
-            'admin_state_up': member['admin_state_up']
-        }
-
-    def _prepare_healthmonitor_for_creation(self, health_monitor, pool_id):
-        creation_attrs = {
-            'id': health_monitor['id'],
-            'tenant_id': health_monitor['tenant_id'],
-            'type': health_monitor['type'],
-        }
-        update_attrs = self._prepare_healthmonitor_for_update(health_monitor)
-        creation_attrs.update(update_attrs)
-        return creation_attrs
-
-    def _prepare_healthmonitor_for_update(self, health_monitor):
-        ncc_hm = {
-            'delay': health_monitor['delay'],
-            'timeout': health_monitor['timeout'],
-            'max_retries': health_monitor['max_retries'],
-            'admin_state_up': health_monitor['admin_state_up']
-        }
-        if health_monitor['type'] in ['HTTP', 'HTTPS']:
-            ncc_hm['http_method'] = health_monitor['http_method']
-            ncc_hm['url_path'] = health_monitor['url_path']
-            ncc_hm['expected_codes'] = health_monitor['expected_codes']
-        return ncc_hm
-
-    def _get_network_info(self, context, entity):
-        network_info = {}
-        subnet_id = entity['subnet_id']
-        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
-        network_id = subnet['network_id']
-        network = self.plugin._core_plugin.get_network(context, network_id)
-        network_info['network_id'] = network_id
-        network_info['subnet_id'] = subnet_id
-        if PROV_NET_TYPE in network:
-            network_info['network_type'] = network[PROV_NET_TYPE]
-        if PROV_SEGMT_ID in network:
-            network_info['segmentation_id'] = network[PROV_SEGMT_ID]
-        return network_info
-
-    def _get_vip_network_info(self, context, vip):
-        network_info = self._get_network_info(context, vip)
-        network_info['port_id'] = vip['port_id']
-        return network_info
-
-    def _get_pool_network_info(self, context, pool):
-        return self._get_network_info(context, pool)
-
-    def _get_pools_on_subnet(self, context, tenant_id, subnet_id):
-        filter_dict = {'subnet_id': [subnet_id], 'tenant_id': [tenant_id]}
-        return self.plugin.get_pools(context, filters=filter_dict)
-
-    def _get_snatport_for_subnet(self, context, tenant_id, subnet_id):
-        device_id = '_lb-snatport-' + subnet_id
-        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
-        network_id = subnet['network_id']
-        LOG.debug("Filtering ports based on network_id=%(network_id)s, "
-                  "tenant_id=%(tenant_id)s, device_id=%(device_id)s",
-                  {'network_id': network_id,
-                   'tenant_id': tenant_id,
-                   'device_id': device_id})
-        filter_dict = {
-            'network_id': [network_id],
-            'tenant_id': [tenant_id],
-            'device_id': [device_id],
-            'device_owner': [DRIVER_NAME]
-        }
-        ports = self.plugin._core_plugin.get_ports(context,
-                                                   filters=filter_dict)
-        if ports:
-            LOG.info(_LI("Found an existing SNAT port for subnet %s"),
-                     subnet_id)
-            return ports[0]
-        LOG.info(_LI("Found no SNAT ports for subnet %s"), subnet_id)
-
-    def _create_snatport_for_subnet(self, context, tenant_id, subnet_id,
-                                    ip_address):
-        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
-        fixed_ip = {'subnet_id': subnet['id']}
-        if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED:
-            fixed_ip['ip_address'] = ip_address
-        port_data = {
-            'tenant_id': tenant_id,
-            'name': '_lb-snatport-' + subnet_id,
-            'network_id': subnet['network_id'],
-            'mac_address': attributes.ATTR_NOT_SPECIFIED,
-            'admin_state_up': False,
-            'device_id': '_lb-snatport-' + subnet_id,
-            'device_owner': DRIVER_NAME,
-            'fixed_ips': [fixed_ip],
-        }
-        port = self.plugin._core_plugin.create_port(context,
-                                                    {'port': port_data})
-        LOG.info(_LI("Created SNAT port: %r"), port)
-        return port
-
-    def _remove_snatport_for_subnet(self, context, tenant_id, subnet_id):
-        port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
-        if port:
-            self.plugin._core_plugin.delete_port(context, port['id'])
-            LOG.info(_LI("Removed SNAT port: %r"), port)
-
-    def _create_snatport_for_subnet_if_not_exists(self, context, tenant_id,
-                                                  subnet_id, network_info):
-        port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
-        if not port:
-            LOG.info(_LI("No SNAT port found for subnet %s. Creating one..."),
-                     subnet_id)
-            port = self._create_snatport_for_subnet(context, tenant_id,
-                                                    subnet_id,
-                                                    ip_address=None)
-        network_info['port_id'] = port['id']
-        network_info['snat_ip'] = port['fixed_ips'][0]['ip_address']
-        LOG.info(_LI("SNAT port: %r"), port)
-
-    def _remove_snatport_for_subnet_if_not_used(self, context, tenant_id,
-                                                subnet_id):
-        pools = self._get_pools_on_subnet(context, tenant_id, subnet_id)
-        if not pools:
-            # No pools left on the old subnet.
-            # We can remove the SNAT port/IP address.
-            self._remove_snatport_for_subnet(context, tenant_id, subnet_id)
-            LOG.info(_LI("Removing SNAT port for subnet %s "
-                         "as this is the last pool using it..."),
-                     subnet_id)
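Every create/update method in the NetScaler driver above repeats the same call-and-report pattern: issue the NCC REST request, fall back to an ERROR status on NCCException, then report the result through plugin.update_status. The minimal sketch below shows how that pattern could be factored into a shared helper; it assumes the imports and constants of the deleted module, and the helper name _call_and_report_status is hypothetical, not part of the original driver.

    # Hypothetical helper (illustration only): wraps the try/except
    # NCCException -> ACTIVE/ERROR pattern repeated by every CRUD method.
    def _call_and_report_status(self, context, db_model, obj_id, call, *args):
        status = constants.ACTIVE
        try:
            call(context.tenant_id, *args)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, db_model, obj_id, status)

    # Example: create_member expressed with the helper.
    def create_member(self, context, member):
        ncc_member = self._prepare_member_for_creation(member)
        self._call_and_report_status(context, loadbalancer_db.Member,
                                     member["id"],
                                     self.client.create_resource,
                                     POOLMEMBERS_RESOURCE,
                                     POOLMEMBER_RESOURCE, ncc_member)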
diff --git a/neutron/services/loadbalancer/drivers/radware/__init__.py b/neutron/services/loadbalancer/drivers/radware/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/loadbalancer/drivers/radware/driver.py b/neutron/services/loadbalancer/drivers/radware/driver.py
deleted file mode 100644 (file)
index 269b67a..0000000
+++ /dev/null
@@ -1,1110 +0,0 @@
-# Copyright 2013 Radware LTD.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import base64
-import copy
-import httplib
-import netaddr
-import threading
-import time
-
-
-import eventlet
-eventlet.monkey_patch(thread=True)
-
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from oslo.utils import excutils
-from six.moves import queue as Queue
-
-from neutron.api.v2 import attributes
-from neutron.common import log as call_log
-from neutron import context
-from neutron.db.loadbalancer import loadbalancer_db as lb_db
-from neutron.extensions import loadbalancer
-from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.common import constants
-from neutron.services.loadbalancer.drivers import abstract_driver
-from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc
-
-LOG = logging.getLogger(__name__)
-
-RESP_STATUS = 0
-RESP_REASON = 1
-RESP_STR = 2
-RESP_DATA = 3
-
-TEMPLATE_HEADER = {'Content-Type':
-                   'application/vnd.com.radware.vdirect.'
-                   'template-parameters+json'}
-PROVISION_HEADER = {'Content-Type':
-                    'application/vnd.com.radware.'
-                    'vdirect.status+json'}
-CREATE_SERVICE_HEADER = {'Content-Type':
-                         'application/vnd.com.radware.'
-                         'vdirect.adc-service-specification+json'}
-
-driver_opts = [
-    cfg.StrOpt('vdirect_address',
-               help=_('IP address of vDirect server.')),
-    cfg.StrOpt('ha_secondary_address',
-               help=_('IP address of secondary vDirect server.')),
-    cfg.StrOpt('vdirect_user',
-               default='vDirect',
-               help=_('vDirect user name.')),
-    cfg.StrOpt('vdirect_password',
-               default='radware',
-               help=_('vDirect user password.')),
-    cfg.StrOpt('service_adc_type',
-               default="VA",
-               help=_('Service ADC type. Default: VA.')),
-    cfg.StrOpt('service_adc_version',
-               default="",
-               help=_('Service ADC version.')),
-    cfg.BoolOpt('service_ha_pair',
-                default=False,
-                help=_('Enables or disables the Service HA pair. '
-                       'Default: False.')),
-    cfg.IntOpt('service_throughput',
-               default=1000,
-               help=_('Service throughput. Default: 1000.')),
-    cfg.IntOpt('service_ssl_throughput',
-               default=100,
-               help=_('Service SSL throughput. Default: 100.')),
-    cfg.IntOpt('service_compression_throughput',
-               default=100,
-               help=_('Service compression throughput. Default: 100.')),
-    cfg.IntOpt('service_cache',
-               default=20,
-               help=_('Size of service cache. Default: 20.')),
-    cfg.StrOpt('l2_l3_workflow_name',
-               default='openstack_l2_l3',
-               help=_('Name of l2_l3 workflow. Default: '
-                      'openstack_l2_l3.')),
-    cfg.StrOpt('l4_workflow_name',
-               default='openstack_l4',
-               help=_('Name of l4 workflow. Default: openstack_l4.')),
-    cfg.DictOpt('l2_l3_ctor_params',
-                default={"service": "_REPLACE_",
-                         "ha_network_name": "HA-Network",
-                         "ha_ip_pool_name": "default",
-                         "allocate_ha_vrrp": True,
-                         "allocate_ha_ips": True,
-                         "twoleg_enabled": "_REPLACE_"},
-                help=_('Parameter for l2_l3 workflow constructor.')),
-    cfg.DictOpt('l2_l3_setup_params',
-                default={"data_port": 1,
-                         "data_ip_address": "192.168.200.99",
-                         "data_ip_mask": "255.255.255.0",
-                         "gateway": "192.168.200.1",
-                         "ha_port": 2},
-                help=_('Parameter for l2_l3 workflow setup.')),
-    cfg.ListOpt('actions_to_skip',
-                default=['setup_l2_l3'],
-                help=_('List of actions that are not pushed to '
-                       'the completion queue.')),
-    cfg.StrOpt('l4_action_name',
-               default='BaseCreate',
-               help=_('Name of the l4 workflow action. '
-                      'Default: BaseCreate.')),
-    cfg.ListOpt('service_resource_pool_ids',
-                default=[],
-                help=_('Resource pool IDs.')),
-    cfg.IntOpt('service_isl_vlan',
-               default=-1,
-               help=_('A required VLAN for the interswitch link to use.')),
-    cfg.BoolOpt('service_session_mirroring_enabled',
-                default=False,
-                help=_('Enable or disable Alteon interswitch link for '
-                       'stateful session failover. Default: False.'))
-]
-
-cfg.CONF.register_opts(driver_opts, "radware")
-
-
-class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
-
-    """Radware lbaas driver."""
-
-    def __init__(self, plugin):
-        rad = cfg.CONF.radware
-        self.plugin = plugin
-        self.service = {
-            "haPair": rad.service_ha_pair,
-            "sessionMirroringEnabled": rad.service_session_mirroring_enabled,
-            "primary": {
-                "capacity": {
-                    "throughput": rad.service_throughput,
-                    "sslThroughput": rad.service_ssl_throughput,
-                    "compressionThroughput":
-                    rad.service_compression_throughput,
-                    "cache": rad.service_cache
-                },
-                "network": {
-                    "type": "portgroup",
-                    "portgroups": ['DATA_NETWORK']
-                },
-                "adcType": rad.service_adc_type,
-                "acceptableAdc": "Exact"
-            }
-        }
-        if rad.service_resource_pool_ids:
-            ids = rad.service_resource_pool_ids
-            self.service['resourcePoolIds'] = [
-                {'name': id} for id in ids
-            ]
-        if rad.service_isl_vlan:
-            self.service['islVlan'] = rad.service_isl_vlan
-        self.l2_l3_wf_name = rad.l2_l3_workflow_name
-        self.l4_wf_name = rad.l4_workflow_name
-        self.l2_l3_ctor_params = rad.l2_l3_ctor_params
-        self.l2_l3_setup_params = rad.l2_l3_setup_params
-        self.l4_action_name = rad.l4_action_name
-        self.actions_to_skip = rad.actions_to_skip
-        vdirect_address = rad.vdirect_address
-        sec_server = rad.ha_secondary_address
-        self.rest_client = vDirectRESTClient(server=vdirect_address,
-                                             secondary_server=sec_server,
-                                             user=rad.vdirect_user,
-                                             password=rad.vdirect_password)
-        self.queue = Queue.Queue()
-        self.completion_handler = OperationCompletionHandler(self.queue,
-                                                             self.rest_client,
-                                                             plugin)
-        self.workflow_templates_exists = False
-        self.completion_handler.setDaemon(True)
-        self.completion_handler_started = False
-
-    def _populate_vip_graph(self, context, vip):
-        ext_vip = self.plugin.populate_vip_graph(context, vip)
-        vip_network_id = self._get_vip_network_id(context, ext_vip)
-        pool_network_id = self._get_pool_network_id(context, ext_vip)
-
-        # if VIP and PIP are different, we need an IP address for the PIP
-        # so create port on PIP's network and use its IP address
-        if vip_network_id != pool_network_id:
-            pip_address = self._get_pip(
-                context,
-                vip['tenant_id'],
-                _make_pip_name_from_vip(vip),
-                pool_network_id,
-                ext_vip['pool']['subnet_id'])
-            ext_vip['pip_address'] = pip_address
-        else:
-            ext_vip['pip_address'] = vip['address']
-
-        ext_vip['vip_network_id'] = vip_network_id
-        ext_vip['pool_network_id'] = pool_network_id
-        return ext_vip
-
-    def create_vip(self, context, vip):
-        log_info = {'vip': vip,
-                    'extended_vip': 'NOT_ASSIGNED',
-                    'service_name': 'NOT_ASSIGNED'}
-        try:
-            ext_vip = self._populate_vip_graph(context, vip)
-
-            service_name = self._get_service(ext_vip)
-            log_info['extended_vip'] = ext_vip
-            log_info['service_name'] = service_name
-
-            self._create_workflow(
-                vip['pool_id'], self.l4_wf_name,
-                {"service": service_name})
-            self._update_workflow(
-                vip['pool_id'],
-                self.l4_action_name, ext_vip, context)
-
-        finally:
-            LOG.debug('vip: %(vip)s, extended_vip: %(extended_vip)s, '
-                      'service_name: %(service_name)s, ',
-                      log_info)
-
-    def update_vip(self, context, old_vip, vip):
-        ext_vip = self._populate_vip_graph(context, vip)
-        self._update_workflow(
-            vip['pool_id'], self.l4_action_name,
-            ext_vip, context, False, lb_db.Vip, vip['id'])
-
-    def delete_vip(self, context, vip):
-        """Delete a Vip
-
-        First delete it from the device. If deletion ended OK
-        - remove data from DB as well.
-        If the deletion failed - mark vip with error status in DB
-
-        """
-
-        ext_vip = self._populate_vip_graph(context, vip)
-        params = _translate_vip_object_graph(ext_vip,
-                                             self.plugin, context)
-        ids = params.pop('__ids__')
-
-        try:
-            # get neutron port id associated with the vip (present if vip and
-            # pip are different) and release it after the workflow is removed
-            port_filter = {
-                'name': [_make_pip_name_from_vip(vip)],
-            }
-            ports = self.plugin._core_plugin.get_ports(context,
-                                                       filters=port_filter)
-            if ports:
-                LOG.debug('Retrieved pip nport: %(port)r for vip: %(vip)s',
-                          {'port': ports[0], 'vip': vip['id']})
-
-                delete_pip_nport_function = self._get_delete_pip_nports(
-                    context, ports)
-            else:
-                delete_pip_nport_function = None
-                LOG.debug('Found no pip nports associated with vip: %s',
-                          vip['id'])
-
-            # removing the WF will cause deletion of the configuration from the
-            # device
-            self._remove_workflow(ids, context, delete_pip_nport_function)
-
-        except r_exc.RESTRequestFailure:
-            pool_id = ext_vip['pool_id']
-            LOG.exception(_LE('Failed to remove workflow %s. '
-                              'Going to set vip to ERROR status'),
-                          pool_id)
-
-            self.plugin.update_status(context, lb_db.Vip, ids['vip'],
-                                      constants.ERROR)
-
-    def _get_delete_pip_nports(self, context, ports):
-        def _delete_pip_nports(success):
-            if success:
-                for port in ports:
-                    try:
-                        self.plugin._core_plugin.delete_port(
-                            context, port['id'])
-                        LOG.debug('pip nport id: %s', port['id'])
-                    except Exception as exception:
-                        # stop exception propagation, nport may have
-                        # been deleted by other means
-                        LOG.warning(_LW('pip nport delete failed: %r'),
-                                    exception)
-        return _delete_pip_nports
-
-    def create_pool(self, context, pool):
-        # nothing to do
-        pass
-
-    def update_pool(self, context, old_pool, pool):
-        self._handle_pool(context, pool)
-
-    def delete_pool(self, context, pool):
-        self._handle_pool(context, pool, delete=True)
-
-    def _handle_pool(self, context, pool, delete=False):
-        vip_id = self.plugin.get_pool(context, pool['id']).get('vip_id', None)
-        if vip_id:
-            if delete:
-                raise loadbalancer.PoolInUse(pool_id=pool['id'])
-            else:
-                vip = self.plugin.get_vip(context, vip_id)
-                ext_vip = self._populate_vip_graph(context, vip)
-                self._update_workflow(
-                    pool['id'], self.l4_action_name,
-                    ext_vip, context, delete, lb_db.Pool, pool['id'])
-        else:
-            if delete:
-                self.plugin._delete_db_pool(context, pool['id'])
-            else:
-                # we keep the pool in PENDING_UPDATE;
-                # no point in modifying it while it is not connected to a vip
-                pass
-
-    def create_member(self, context, member):
-        self._handle_member(context, member)
-
-    def update_member(self, context, old_member, member):
-        self._handle_member(context, member)
-
-    def delete_member(self, context, member):
-        self._handle_member(context, member, delete=True)
-
-    def _handle_member(self, context, member, delete=False):
-        """Navigate the model. If a Vip is found - activate a bulk WF action.
-        """
-        vip_id = self.plugin.get_pool(
-            context, member['pool_id']).get('vip_id')
-        if vip_id:
-            vip = self.plugin.get_vip(context, vip_id)
-            ext_vip = self._populate_vip_graph(context, vip)
-            self._update_workflow(
-                member['pool_id'], self.l4_action_name,
-                ext_vip, context,
-                delete, lb_db.Member, member['id'])
-        # We have to delete this member but it is not connected to a vip yet
-        elif delete:
-            self.plugin._delete_db_member(context, member['id'])
-
-    def create_health_monitor(self, context, health_monitor):
-        # Anything to do here? the hm is not connected to the graph yet
-        pass
-
-    def update_pool_health_monitor(self, context, old_health_monitor,
-                                   health_monitor,
-                                   pool_id):
-        self._handle_pool_health_monitor(context, health_monitor, pool_id)
-
-    def create_pool_health_monitor(self, context,
-                                   health_monitor, pool_id):
-        self._handle_pool_health_monitor(context, health_monitor, pool_id)
-
-    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
-        self._handle_pool_health_monitor(context, health_monitor, pool_id,
-                                         True)
-
-    def _handle_pool_health_monitor(self, context, health_monitor,
-                                    pool_id, delete=False):
-        """Push a graph to vDirect
-
-        Navigate the model. Check if a pool is associated to the vip
-        and push the graph to vDirect
-
-        """
-
-        vip_id = self.plugin.get_pool(context, pool_id).get('vip_id', None)
-
-        debug_params = {"hm_id": health_monitor['id'], "pool_id": pool_id,
-                        "delete": delete, "vip_id": vip_id}
-        LOG.debug('_handle_pool_health_monitor. health_monitor = %(hm_id)s '
-                  'pool_id = %(pool_id)s delete = %(delete)s '
-                  'vip_id = %(vip_id)s',
-                  debug_params)
-
-        if vip_id:
-            vip = self.plugin.get_vip(context, vip_id)
-            ext_vip = self._populate_vip_graph(context, vip)
-            self._update_workflow(pool_id, self.l4_action_name,
-                                  ext_vip, context,
-                                  delete, lb_db.PoolMonitorAssociation,
-                                  health_monitor['id'])
-        elif delete:
-            self.plugin._delete_db_pool_health_monitor(context,
-                                                       health_monitor['id'],
-                                                       pool_id)
-
-    def stats(self, context, pool_id):
-        # TODO(avishayb) implement
-        return {"bytes_in": 0,
-                "bytes_out": 0,
-                "active_connections": 0,
-                "total_connections": 0}
-
-    def _get_vip_network_id(self, context, extended_vip):
-        subnet = self.plugin._core_plugin.get_subnet(
-            context, extended_vip['subnet_id'])
-        return subnet['network_id']
-
-    def _start_completion_handling_thread(self):
-        if not self.completion_handler_started:
-            LOG.info(_LI('Starting operation completion handling thread'))
-            self.completion_handler.start()
-            self.completion_handler_started = True
-
-    def _get_pool_network_id(self, context, extended_vip):
-        subnet = self.plugin._core_plugin.get_subnet(
-            context, extended_vip['pool']['subnet_id'])
-        return subnet['network_id']
-
-    @call_log.log
-    def _update_workflow(self, wf_name, action,
-                         wf_params, context,
-                         delete=False,
-                         lbaas_entity=None, entity_id=None):
-        """Update the WF state. Push the result to a queue for processing."""
-
-        if not self.workflow_templates_exists:
-            self._verify_workflow_templates()
-
-        if action not in self.actions_to_skip:
-            params = _translate_vip_object_graph(wf_params,
-                                                 self.plugin,
-                                                 context)
-        else:
-            params = wf_params
-
-        resource = '/api/workflow/%s/action/%s' % (wf_name, action)
-        response = _rest_wrapper(self.rest_client.call('POST', resource,
-                                 {'parameters': params},
-                                 TEMPLATE_HEADER))
-        LOG.debug('_update_workflow response: %s ', response)
-
-        if action not in self.actions_to_skip:
-            ids = params.pop('__ids__', None)
-            oper = OperationAttributes(response['uri'],
-                                       ids,
-                                       lbaas_entity,
-                                       entity_id,
-                                       delete=delete)
-            LOG.debug('Pushing operation %s to the queue', oper)
-
-            self._start_completion_handling_thread()
-            self.queue.put_nowait(oper)
-
-    def _remove_workflow(self, ids, context, post_remove_function):
-
-        wf_name = ids['pool']
-        LOG.debug('Removing the workflow %s', wf_name)
-        resource = '/api/workflow/%s' % (wf_name)
-        rest_return = self.rest_client.call('DELETE', resource, None, None)
-        response = _rest_wrapper(rest_return, [204, 202, 404])
-        if rest_return[RESP_STATUS] == 404:
-            if post_remove_function:
-                try:
-                    post_remove_function(True)
-                    LOG.debug('Post-remove workflow function %r completed',
-                              post_remove_function)
-                except Exception:
-                    with excutils.save_and_reraise_exception():
-                        LOG.exception(_LE('Post-remove workflow function '
-                                          '%r failed'), post_remove_function)
-            self.plugin._delete_db_vip(context, ids['vip'])
-        else:
-            oper = OperationAttributes(
-                response['uri'],
-                ids,
-                lb_db.Vip,
-                ids['vip'],
-                delete=True,
-                post_op_function=post_remove_function)
-            LOG.debug('Pushing operation %s to the queue', oper)
-
-            self._start_completion_handling_thread()
-            self.queue.put_nowait(oper)
-
-    def _remove_service(self, service_name):
-        resource = '/api/service/%s' % (service_name)
-        _rest_wrapper(self.rest_client.call('DELETE',
-                      resource, None, None),
-                      [202])
-
-    def _get_service(self, ext_vip):
-        """Get a service name.
-
-        If one cannot be found, create a service
-        and create the l2_l3 WF.
-
-        """
-        if not self.workflow_templates_exists:
-            self._verify_workflow_templates()
-        if ext_vip['vip_network_id'] != ext_vip['pool_network_id']:
-            networks_name = '%s_%s' % (ext_vip['vip_network_id'],
-                                       ext_vip['pool_network_id'])
-            self.l2_l3_ctor_params["twoleg_enabled"] = True
-        else:
-            networks_name = ext_vip['vip_network_id']
-            self.l2_l3_ctor_params["twoleg_enabled"] = False
-        incoming_service_name = 'srv_%s' % (networks_name,)
-        service_name = self._get_available_service(incoming_service_name)
-        if not service_name:
-            LOG.debug('Could not find a service named %s',
-                      incoming_service_name)
-            service_name = self._create_service(ext_vip['vip_network_id'],
-                                                ext_vip['pool_network_id'],
-                                                ext_vip['tenant_id'])
-            self.l2_l3_ctor_params["service"] = incoming_service_name
-            wf_name = 'l2_l3_' + networks_name
-            self._create_workflow(
-                wf_name, self.l2_l3_wf_name, self.l2_l3_ctor_params)
-            self._update_workflow(
-                wf_name, "setup_l2_l3", self.l2_l3_setup_params, None)
-        else:
-            LOG.debug('A service named %s was found.', service_name)
-        return service_name
-
-    def _create_service(self, vip_network_id, pool_network_id, tenant_id):
-        """create the service and provision it (async)."""
-        # 1) create the service
-        service = copy.deepcopy(self.service)
-        if vip_network_id != pool_network_id:
-            service_name = 'srv_%s_%s' % (vip_network_id, pool_network_id)
-            service['primary']['network']['portgroups'] = [vip_network_id,
-                                                           pool_network_id]
-        else:
-            service_name = 'srv_' + vip_network_id
-            service['primary']['network']['portgroups'] = [vip_network_id]
-        resource = '/api/service?name=%s&tenant=%s' % (service_name, tenant_id)
-
-        response = _rest_wrapper(self.rest_client.call('POST', resource,
-                                 service,
-                                 CREATE_SERVICE_HEADER), [201])
-
-        # 2) provision the service
-        provision_uri = response['links']['actions']['provision']
-        _rest_wrapper(self.rest_client.call('POST', provision_uri,
-                                            None, PROVISION_HEADER))
-        return service_name
-
-    def _get_available_service(self, service_name):
-        """Check if service exists and return its name if it does."""
-        resource = '/api/service/' + service_name
-        try:
-            _rest_wrapper(self.rest_client.call('GET',
-                                                resource,
-                                                None, None), [200])
-        except Exception:
-            return
-        return service_name
-
-    def _workflow_exists(self, pool_id):
-        """Check if a WF having the name of the pool_id exists."""
-        resource = '/api/workflow/' + pool_id
-        try:
-            _rest_wrapper(self.rest_client.call('GET',
-                                                resource,
-                                                None,
-                                                None), [200])
-        except Exception:
-            return False
-        return True
-
-    def _create_workflow(self, wf_name, wf_template_name,
-                         create_workflow_params=None):
-        """Create a WF if it doesn't exists yet."""
-        if not self.workflow_templates_exists:
-                self._verify_workflow_templates()
-        if not self._workflow_exists(wf_name):
-            if not create_workflow_params:
-                create_workflow_params = {}
-            resource = '/api/workflowTemplate/%s?name=%s' % (
-                wf_template_name, wf_name)
-            params = {'parameters': create_workflow_params}
-            response = _rest_wrapper(self.rest_client.call('POST',
-                                                           resource,
-                                                           params,
-                                                           TEMPLATE_HEADER))
-            LOG.debug('create_workflow response: %s', response)
-
-    def _verify_workflow_templates(self):
-        """Verify the existence of workflows on vDirect server."""
-        workflows = {self.l2_l3_wf_name:
-                     False, self.l4_wf_name: False}
-        resource = '/api/workflowTemplate'
-        response = _rest_wrapper(self.rest_client.call('GET',
-                                                       resource,
-                                                       None,
-                                                       None), [200])
-        for wf in workflows.keys():
-            for wf_template in response:
-                if wf == wf_template['name']:
-                    workflows[wf] = True
-                    break
-        for wf, found in workflows.items():
-            if not found:
-                raise r_exc.WorkflowMissing(workflow=wf)
-        self.workflow_templates_exists = True
-
-    def _get_pip(self, context, tenant_id, port_name,
-                 network_id, subnet_id):
-        """Get proxy IP
-
-        Creates or get port on network_id, returns that port's IP
-        on the subnet_id.
-        """
-
-        port_filter = {
-            'name': [port_name],
-        }
-        ports = self.plugin._core_plugin.get_ports(context,
-                                                   filters=port_filter)
-        if not ports:
-            # create port, we just want any IP allocated to the port
-            # based on the network id and subnet_id
-            port_data = {
-                'tenant_id': tenant_id,
-                'name': port_name,
-                'network_id': network_id,
-                'mac_address': attributes.ATTR_NOT_SPECIFIED,
-                'admin_state_up': False,
-                'device_id': '',
-                'device_owner': 'neutron:' + constants.LOADBALANCER,
-                'fixed_ips': [{'subnet_id': subnet_id}]
-            }
-            port = self.plugin._core_plugin.create_port(context,
-                                                        {'port': port_data})
-        else:
-            port = ports[0]
-        ips_on_subnet = [ip for ip in port['fixed_ips']
-                         if ip['subnet_id'] == subnet_id]
-        if not ips_on_subnet:
-            raise Exception(_('Could not find or allocate '
-                              'IP address for subnet id %s'),
-                            subnet_id)
-        else:
-            return ips_on_subnet[0]['ip_address']
-
-
-class vDirectRESTClient:
-    """REST server proxy to Radware vDirect."""
-
-    def __init__(self,
-                 server='localhost',
-                 secondary_server=None,
-                 user=None,
-                 password=None,
-                 port=2189,
-                 ssl=True,
-                 timeout=5000,
-                 base_uri=''):
-        self.server = server
-        self.secondary_server = secondary_server
-        self.port = port
-        self.ssl = ssl
-        self.base_uri = base_uri
-        self.timeout = timeout
-        if user and password:
-            self.auth = base64.encodestring('%s:%s' % (user, password))
-            self.auth = self.auth.replace('\n', '')
-        else:
-            raise r_exc.AuthenticationMissing()
-
-        debug_params = {'server': self.server,
-                        'sec_server': self.secondary_server,
-                        'port': self.port,
-                        'ssl': self.ssl}
-        LOG.debug('vDirectRESTClient:init server=%(server)s, '
-                  'secondary server=%(sec_server)s, '
-                  'port=%(port)d, ssl=%(ssl)r',
-                  debug_params)
-
-    def _flip_servers(self):
-        LOG.warning(_LW('Flipping servers. Current is: %(server)s, '
-                        'switching to %(secondary)s'),
-                    {'server': self.server,
-                     'secondary': self.secondary_server})
-        self.server, self.secondary_server = self.secondary_server, self.server
-
-    def _recover(self, action, resource, data, headers, binary=False):
-        if self.server and self.secondary_server:
-            self._flip_servers()
-            resp = self._call(action, resource, data,
-                              headers, binary)
-            return resp
-        else:
-            LOG.exception(_LE('REST client is not able to recover '
-                              'since only one vDirect server is '
-                              'configured.'))
-            return -1, None, None, None
-
-    def call(self, action, resource, data, headers, binary=False):
-        resp = self._call(action, resource, data, headers, binary)
-        if resp[RESP_STATUS] == -1:
-            LOG.warning(_LW('vDirect server is not responding (%s).'),
-                        self.server)
-            return self._recover(action, resource, data, headers, binary)
-        elif resp[RESP_STATUS] in (301, 307):
-            LOG.warning(_LW('vDirect server is not active (%s).'),
-                        self.server)
-            return self._recover(action, resource, data, headers, binary)
-        else:
-            return resp
-
-    @call_log.log
-    def _call(self, action, resource, data, headers, binary=False):
-        if resource.startswith('http'):
-            uri = resource
-        else:
-            uri = self.base_uri + resource
-        if binary:
-            body = data
-        else:
-            body = jsonutils.dumps(data)
-
-        debug_data = 'binary' if binary else body
-        debug_data = debug_data if debug_data else 'EMPTY'
-        if not headers:
-            headers = {'Authorization': 'Basic %s' % self.auth}
-        else:
-            headers['Authorization'] = 'Basic %s' % self.auth
-        conn = None
-        if self.ssl:
-            conn = httplib.HTTPSConnection(
-                self.server, self.port, timeout=self.timeout)
-            if conn is None:
-                LOG.error(_LE('vdirectRESTClient: Could not establish HTTPS '
-                          'connection'))
-                return 0, None, None, None
-        else:
-            conn = httplib.HTTPConnection(
-                self.server, self.port, timeout=self.timeout)
-            if conn is None:
-                LOG.error(_LE('vdirectRESTClient: Could not establish HTTP '
-                          'connection'))
-                return 0, None, None, None
-
-        try:
-            conn.request(action, uri, body, headers)
-            response = conn.getresponse()
-            respstr = response.read()
-            respdata = respstr
-            try:
-                respdata = jsonutils.loads(respstr)
-            except ValueError:
-                # response was not JSON, ignore the exception
-                pass
-            ret = (response.status, response.reason, respstr, respdata)
-        except Exception as e:
-            log_dict = {'action': action, 'e': e}
-            LOG.error(_LE('vdirectRESTClient: %(action)s failure, %(e)r'),
-                      log_dict)
-            ret = -1, None, None, None
-        conn.close()
-        return ret
-
-
-class OperationAttributes:
-
-    """Holds operation attributes.
-
-    The parameter 'post_op_function' (if supplied) is a function that takes
-    one boolean argument, specifying the success of the operation
-
-    """
-
-    def __init__(self,
-                 operation_url,
-                 object_graph,
-                 lbaas_entity=None,
-                 entity_id=None,
-                 delete=False,
-                 post_op_function=None):
-        self.operation_url = operation_url
-        self.object_graph = object_graph
-        self.delete = delete
-        self.lbaas_entity = lbaas_entity
-        self.entity_id = entity_id
-        self.creation_time = time.time()
-        self.post_op_function = post_op_function
-
-    def __repr__(self):
-        items = ("%s = %r" % (k, v) for k, v in self.__dict__.items())
-        return "<%s: {%s}>" % (self.__class__.__name__, ', '.join(items))
-
-
-class OperationCompletionHandler(threading.Thread):
-
-    """Update DB with operation status or delete the entity from DB."""
-
-    def __init__(self, queue, rest_client, plugin):
-        threading.Thread.__init__(self)
-        self.queue = queue
-        self.rest_client = rest_client
-        self.plugin = plugin
-        self.stoprequest = threading.Event()
-        self.opers_to_handle_before_rest = 0
-
-    def join(self, timeout=None):
-        self.stoprequest.set()
-        super(OperationCompletionHandler, self).join(timeout)
-
-    def handle_operation_completion(self, oper):
-        result = self.rest_client.call('GET',
-                                       oper.operation_url,
-                                       None,
-                                       None)
-        completed = result[RESP_DATA]['complete']
-        reason = result[RESP_REASON]
-        description = result[RESP_STR]
-        if completed:
-            # operation is done - update the DB with the status
-            # or delete the entire graph from DB
-            success = result[RESP_DATA]['success']
-            sec_to_completion = time.time() - oper.creation_time
-            debug_data = {'oper': oper,
-                          'sec_to_completion': sec_to_completion,
-                          'success': success}
-            LOG.debug('Operation %(oper)s is completed after '
-                      '%(sec_to_completion)d sec '
-                      'with success status: %(success)s :',
-                      debug_data)
-            db_status = None
-            if not success:
-                # failure - log it and set the return ERROR as DB state
-                if reason or description:
-                    msg = 'Reason:%s. Description:%s' % (reason, description)
-                else:
-                    msg = "unknown"
-                error_params = {"operation": oper, "msg": msg}
-                LOG.error(_LE('Operation %(operation)s failed. Reason: '
-                              '%(msg)s'),
-                          error_params)
-                db_status = constants.ERROR
-            else:
-                if oper.delete:
-                    _remove_object_from_db(self.plugin, oper)
-                else:
-                    db_status = constants.ACTIVE
-
-            if db_status:
-                _update_vip_graph_status(self.plugin, oper, db_status)
-
-            OperationCompletionHandler._run_post_op_function(success, oper)
-
-        return completed
-
-    def run(self):
-        while not self.stoprequest.isSet():
-            try:
-                oper = self.queue.get(timeout=1)
-
-                # Get the current queue size (N) and set the counter with it.
-                # Handle N operations with no intermission.
-                # Once those N are handled, get the size again and repeat.
-                if self.opers_to_handle_before_rest <= 0:
-                    self.opers_to_handle_before_rest = self.queue.qsize() + 1
-
-                LOG.debug('Operation consumed from the queue: %s', oper)
-                # check the status - if the oper is done, update the db;
-                # otherwise push the oper back onto the queue
-                if not self.handle_operation_completion(oper):
-                    LOG.debug('Operation %s is not completed yet.', oper)
-                    # Not completed - push to the queue again
-                    self.queue.put_nowait(oper)
-
-                self.queue.task_done()
-                self.opers_to_handle_before_rest -= 1
-
-                # Rest for one second before handling new operations
-                # or operations that were re-queued earlier.
-                if self.opers_to_handle_before_rest <= 0:
-                    time.sleep(1)
-
-            except Queue.Empty:
-                continue
-            except Exception:
-                m = _("Exception was thrown inside OperationCompletionHandler")
-                LOG.exception(m)
-
-    @staticmethod
-    def _run_post_op_function(success, oper):
-        if oper.post_op_function:
-            log_data = {'func': oper.post_op_function, 'oper': oper}
-            try:
-                oper.post_op_function(success)
-                LOG.debug('Post-operation function %(func)r completed '
-                          'after operation %(oper)r',
-                          log_data)
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE('Post-operation function %(func)r '
-                                      'failed after operation %(oper)r'),
-                                  log_data)
-
-
-def _rest_wrapper(response, success_codes=[202]):
-    """Wrap a REST call and make sure a valid status is returned."""
-    if not response:
-        raise r_exc.RESTRequestFailure(
-            status=-1,
-            reason="Unknown",
-            description="Unknown",
-            success_codes=success_codes
-        )
-    elif response[RESP_STATUS] not in success_codes:
-        raise r_exc.RESTRequestFailure(
-            status=response[RESP_STATUS],
-            reason=response[RESP_REASON],
-            description=response[RESP_STR],
-            success_codes=success_codes
-        )
-    else:
-        return response[RESP_DATA]
-
-
-def _make_pip_name_from_vip(vip):
-    """Standard way of making PIP name based on VIP ID."""
-    return 'pip_' + vip['id']
-
-
-def _update_vip_graph_status(plugin, oper, status):
-    """Update the status
-
-    Of all the Vip object graph
-    or a specific entity in the graph.
-
-    """
-
-    ctx = context.get_admin_context(load_admin_roles=False)
-
-    LOG.debug('_update: %s ', oper)
-    if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
-        plugin.update_pool_health_monitor(ctx,
-                                          oper.entity_id,
-                                          oper.object_graph['pool'],
-                                          status)
-    elif oper.entity_id:
-        plugin.update_status(ctx,
-                             oper.lbaas_entity,
-                             oper.entity_id,
-                             status)
-    else:
-        _update_vip_graph_status_cascade(plugin,
-                                         oper.object_graph,
-                                         ctx, status)
-
-
-def _update_vip_graph_status_cascade(plugin, ids, ctx, status):
-    plugin.update_status(ctx,
-                         lb_db.Vip,
-                         ids['vip'],
-                         status)
-    plugin.update_status(ctx,
-                         lb_db.Pool,
-                         ids['pool'],
-                         status)
-    for member_id in ids['members']:
-        plugin.update_status(ctx,
-                             lb_db.Member,
-                             member_id,
-                             status)
-    for hm_id in ids['health_monitors']:
-        plugin.update_pool_health_monitor(ctx,
-                                          hm_id,
-                                          ids['pool'],
-                                          status)
-
-
-def _remove_object_from_db(plugin, oper):
-    """Remove a specific entity from db."""
-    LOG.debug('_remove_object_from_db %s', oper)
-
-    ctx = context.get_admin_context(load_admin_roles=False)
-
-    if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
-        plugin._delete_db_pool_health_monitor(ctx,
-                                              oper.entity_id,
-                                              oper.object_graph['pool'])
-    elif oper.lbaas_entity == lb_db.Member:
-        plugin._delete_db_member(ctx, oper.entity_id)
-    elif oper.lbaas_entity == lb_db.Vip:
-        plugin._delete_db_vip(ctx, oper.entity_id)
-    elif oper.lbaas_entity == lb_db.Pool:
-        plugin._delete_db_pool(ctx, oper.entity_id)
-    else:
-        raise r_exc.UnsupportedEntityOperation(
-            operation='Remove from DB', entity=oper.lbaas_entity
-        )
-
-TRANSLATION_DEFAULTS = {'session_persistence_type': 'none',
-                        'session_persistence_cookie_name': 'none',
-                        'url_path': '/',
-                        'http_method': 'GET',
-                        'expected_codes': '200',
-                        'subnet': '255.255.255.255',
-                        'mask': '255.255.255.255',
-                        'gw': '255.255.255.255',
-                        }
-VIP_PROPERTIES = ['address', 'protocol_port', 'protocol', 'connection_limit',
-                  'admin_state_up', 'session_persistence_type',
-                  'session_persistence_cookie_name']
-POOL_PROPERTIES = ['protocol', 'lb_method', 'admin_state_up']
-MEMBER_PROPERTIES = ['address', 'protocol_port', 'weight', 'admin_state_up',
-                     'subnet', 'mask', 'gw']
-HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries',
-                             'admin_state_up', 'url_path', 'http_method',
-                             'expected_codes', 'id']
-
-
-def _translate_vip_object_graph(extended_vip, plugin, context):
-    """Translate the extended vip
-
-    translate to a structure that can be
-    understood by the workflow.
-
-    """
-    def _create_key(prefix, property_name):
-        return prefix + '_' + property_name + '_array'
-
-    def _trans_prop_name(prop_name):
-        if prop_name == 'id':
-            return 'uuid'
-        else:
-            return prop_name
-
-    def get_ids(extended_vip):
-        ids = {}
-        ids['vip'] = extended_vip['id']
-        ids['pool'] = extended_vip['pool']['id']
-        ids['members'] = [m['id'] for m in extended_vip['members']]
-        ids['health_monitors'] = [
-            hm['id'] for hm in extended_vip['health_monitors']
-        ]
-        return ids
-
-    trans_vip = {}
-    LOG.debug('Vip graph to be translated: %s', extended_vip)
-    for vip_property in VIP_PROPERTIES:
-        trans_vip['vip_' + vip_property] = extended_vip.get(
-            vip_property, TRANSLATION_DEFAULTS.get(vip_property))
-    for pool_property in POOL_PROPERTIES:
-        trans_vip['pool_' + pool_property] = extended_vip[
-            'pool'][pool_property]
-    for member_property in MEMBER_PROPERTIES:
-        trans_vip[_create_key('member', member_property)] = []
-
-    two_leg = (extended_vip['pip_address'] != extended_vip['address'])
-    if two_leg:
-        pool_subnet = plugin._core_plugin.get_subnet(
-            context, extended_vip['pool']['subnet_id'])
-
-    for member in extended_vip['members']:
-        if member['status'] != constants.PENDING_DELETE:
-            if (two_leg and netaddr.IPAddress(member['address'])
-                not in netaddr.IPNetwork(pool_subnet['cidr'])):
-                member_ports = plugin._core_plugin.get_ports(
-                    context,
-                    filters={'fixed_ips': {'ip_address': [member['address']]},
-                             'tenant_id': [extended_vip['tenant_id']]})
-                if len(member_ports) == 1:
-                    member_subnet = plugin._core_plugin.get_subnet(
-                        context,
-                        member_ports[0]['fixed_ips'][0]['subnet_id'])
-                    member_network = netaddr.IPNetwork(member_subnet['cidr'])
-                    member['subnet'] = str(member_network.network)
-                    member['mask'] = str(member_network.netmask)
-                else:
-                    member['subnet'] = member['address']
-
-                member['gw'] = pool_subnet['gateway_ip']
-
-            for member_property in MEMBER_PROPERTIES:
-                trans_vip[_create_key('member', member_property)].append(
-                    member.get(member_property,
-                               TRANSLATION_DEFAULTS.get(member_property)))
-
-    for hm_property in HEALTH_MONITOR_PROPERTIES:
-        trans_vip[
-            _create_key('hm', _trans_prop_name(hm_property))] = []
-    for hm in extended_vip['health_monitors']:
-        hm_pool = plugin.get_pool_health_monitor(context,
-                                                 hm['id'],
-                                                 extended_vip['pool']['id'])
-        if hm_pool['status'] != constants.PENDING_DELETE:
-            for hm_property in HEALTH_MONITOR_PROPERTIES:
-                value = hm.get(hm_property,
-                               TRANSLATION_DEFAULTS.get(hm_property))
-                trans_vip[_create_key('hm',
-                          _trans_prop_name(hm_property))].append(value)
-    ids = get_ids(extended_vip)
-    trans_vip['__ids__'] = ids
-    if 'pip_address' in extended_vip:
-        trans_vip['pip_address'] = extended_vip['pip_address']
-    LOG.debug('Translated Vip graph: %s', trans_vip)
-    return trans_vip
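For reference, the consume/poll/re-enqueue loop that OperationCompletionHandler implements above can be reduced to a small standalone sketch. The names here (CompletionPoller, poll_fn) are illustrative only, and the sketch uses the Python 3 queue module rather than the removed code's Python 2 Queue import; it is not the driver itself.

import queue
import threading
import time


class CompletionPoller(threading.Thread):
    """Illustrative sketch of the consume/poll/re-enqueue loop above."""

    def __init__(self, work_queue, poll_fn):
        super().__init__(daemon=True)
        self.queue = work_queue        # queue.Queue of pending operations
        self.poll_fn = poll_fn         # callable(oper) -> True once finished
        self._stopping = threading.Event()

    def join(self, timeout=None):
        self._stopping.set()
        super().join(timeout)

    def run(self):
        while not self._stopping.is_set():
            try:
                oper = self.queue.get(timeout=1)
            except queue.Empty:
                continue
            if not self.poll_fn(oper):
                # Not finished yet - push it back for another pass.
                self.queue.put_nowait(oper)
            self.queue.task_done()
            time.sleep(1)              # brief rest between polling rounds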
diff --git a/neutron/services/loadbalancer/drivers/radware/exceptions.py b/neutron/services/loadbalancer/drivers/radware/exceptions.py
deleted file mode 100644 (file)
index bb16cb7..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2013 Radware LTD.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-from neutron.common import exceptions
-
-
-class RadwareLBaasException(exceptions.NeutronException):
-    message = _('An unknown exception occurred in Radware LBaaS provider.')
-
-
-class AuthenticationMissing(RadwareLBaasException):
-    message = _('vDirect user/password missing. '
-                'Specify in configuration file, under [radware] section')
-
-
-class WorkflowMissing(RadwareLBaasException):
-    message = _('Workflow %(workflow)s is missing on vDirect server. '
-                'Upload missing workflow')
-
-
-class RESTRequestFailure(RadwareLBaasException):
-    message = _('REST request failed with status %(status)s. '
-                'Reason: %(reason)s, Description: %(description)s. '
-                'Success status codes are %(success_codes)s')
-
-
-class UnsupportedEntityOperation(RadwareLBaasException):
-    message = _('%(operation)s operation is not supported for %(entity)s.')
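The exception classes above follow the message-template convention of neutron.common.exceptions: each subclass declares a message containing %(...)s placeholders, and keyword arguments passed at raise time are interpolated into it. Below is a minimal stand-in for that convention, assuming nothing about the real NeutronException implementation.

class TemplateException(Exception):
    """Minimal stand-in for the NeutronException message-template pattern."""
    message = "An unknown exception occurred."

    def __init__(self, **kwargs):
        super().__init__(self.message % kwargs)


class RESTRequestFailure(TemplateException):
    message = ("REST request failed with status %(status)s. "
               "Reason: %(reason)s, Description: %(description)s. "
               "Success status codes are %(success_codes)s")


# Example usage:
# raise RESTRequestFailure(status=404, reason="Not Found",
#                          description="no such workflow",
#                          success_codes=[200, 202])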
diff --git a/neutron/services/loadbalancer/plugin.py b/neutron/services/loadbalancer/plugin.py
deleted file mode 100644 (file)
index 720e2a6..0000000
+++ /dev/null
@@ -1,325 +0,0 @@
-#
-# Copyright 2013 Radware LTD.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo.utils import excutils
-
-from neutron.api.v2 import attributes as attrs
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.db.loadbalancer import loadbalancer_db as ldb
-from neutron.db import servicetype_db as st_db
-from neutron.extensions import loadbalancer
-from neutron.i18n import _LE
-from neutron.openstack.common import log as logging
-from neutron.plugins.common import constants
-from neutron.services.loadbalancer import agent_scheduler
-from neutron.services import provider_configuration as pconf
-from neutron.services import service_base
-
-LOG = logging.getLogger(__name__)
-
-
-class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
-                         agent_scheduler.LbaasAgentSchedulerDbMixin):
-    """Implementation of the Neutron Loadbalancer Service Plugin.
-
-    This class manages the workflow of LBaaS request/response.
-    Most DB-related work is implemented in the class
-    loadbalancer_db.LoadBalancerPluginDb.
-    """
-    supported_extension_aliases = ["lbaas",
-                                   "lbaas_agent_scheduler",
-                                   "service-type"]
-
-    # lbaas agent notifiers to handle agent update operations;
-    # can be updated by plugin drivers while loading;
-    # will be extracted by neutron manager when loading service plugins;
-    agent_notifiers = {}
-
-    def __init__(self):
-        """Initialization for the loadbalancer service plugin."""
-
-        self.service_type_manager = st_db.ServiceTypeManager.get_instance()
-        self._load_drivers()
-
-    def _load_drivers(self):
-        """Loads plugin-drivers specified in configuration."""
-        self.drivers, self.default_provider = service_base.load_drivers(
-            constants.LOADBALANCER, self)
-
-        # we're at the point when extensions are not loaded yet
-        # so prevent policy from being loaded
-        ctx = context.get_admin_context(load_admin_roles=False)
-        # stop service in case provider was removed, but resources were not
-        self._check_orphan_pool_associations(ctx, self.drivers.keys())
-
-    def _check_orphan_pool_associations(self, context, provider_names):
-        """Checks remaining associations between pools and providers.
-
-        If admin has not undeployed resources with provider that was deleted
-        from configuration, neutron service is stopped. Admin must delete
-        resources prior to removing providers from configuration.
-        """
-        pools = self.get_pools(context)
-        lost_providers = set([pool['provider'] for pool in pools
-                              if pool['provider'] not in provider_names])
-        # resources are left without provider - stop the service
-        if lost_providers:
-            LOG.exception(_LE("Delete associated loadbalancer pools before "
-                              "removing providers %s"),
-                          list(lost_providers))
-            raise SystemExit(1)
-
-    def _get_driver_for_provider(self, provider):
-        if provider in self.drivers:
-            return self.drivers[provider]
-        # raise if not associated (should never be reached)
-        raise n_exc.Invalid(_("Error retrieving driver for provider %s") %
-                            provider)
-
-    def _get_driver_for_pool(self, context, pool_id):
-        pool = self.get_pool(context, pool_id)
-        try:
-            return self.drivers[pool['provider']]
-        except KeyError:
-            raise n_exc.Invalid(_("Error retrieving provider for pool %s") %
-                                pool_id)
-
-    def get_plugin_type(self):
-        return constants.LOADBALANCER
-
-    def get_plugin_description(self):
-        return "Neutron LoadBalancer Service Plugin"
-
-    def create_vip(self, context, vip):
-        v = super(LoadBalancerPlugin, self).create_vip(context, vip)
-        driver = self._get_driver_for_pool(context, v['pool_id'])
-        driver.create_vip(context, v)
-        return v
-
-    def update_vip(self, context, id, vip):
-        if 'status' not in vip['vip']:
-            vip['vip']['status'] = constants.PENDING_UPDATE
-        old_vip = self.get_vip(context, id)
-        v = super(LoadBalancerPlugin, self).update_vip(context, id, vip)
-        driver = self._get_driver_for_pool(context, v['pool_id'])
-        driver.update_vip(context, old_vip, v)
-        return v
-
-    def _delete_db_vip(self, context, id):
-        # proxy the call until plugin inherits from DBPlugin
-        super(LoadBalancerPlugin, self).delete_vip(context, id)
-
-    def delete_vip(self, context, id):
-        self.update_status(context, ldb.Vip,
-                           id, constants.PENDING_DELETE)
-        v = self.get_vip(context, id)
-        driver = self._get_driver_for_pool(context, v['pool_id'])
-        driver.delete_vip(context, v)
-
-    def _get_provider_name(self, context, pool):
-        if ('provider' in pool and
-            pool['provider'] != attrs.ATTR_NOT_SPECIFIED):
-            provider_name = pconf.normalize_provider_name(pool['provider'])
-            self.validate_provider(provider_name)
-            return provider_name
-        else:
-            if not self.default_provider:
-                raise pconf.DefaultServiceProviderNotFound(
-                    service_type=constants.LOADBALANCER)
-            return self.default_provider
-
-    def create_pool(self, context, pool):
-        provider_name = self._get_provider_name(context, pool['pool'])
-        p = super(LoadBalancerPlugin, self).create_pool(context, pool)
-
-        self.service_type_manager.add_resource_association(
-            context,
-            constants.LOADBALANCER,
-            provider_name, p['id'])
-        # Need to add the provider name to the pool dict,
-        # because the provider was not known to the db plugin at pool creation.
-        p['provider'] = provider_name
-        driver = self.drivers[provider_name]
-        try:
-            driver.create_pool(context, p)
-        except loadbalancer.NoEligibleBackend:
-            # This should catch cases when a backend of any kind
-            # is not available (agent, appliance, etc.).
-            self.update_status(context, ldb.Pool,
-                               p['id'], constants.ERROR,
-                               "No eligible backend")
-            raise loadbalancer.NoEligibleBackend(pool_id=p['id'])
-        return p
-
-    def update_pool(self, context, id, pool):
-        if 'status' not in pool['pool']:
-            pool['pool']['status'] = constants.PENDING_UPDATE
-        old_pool = self.get_pool(context, id)
-        p = super(LoadBalancerPlugin, self).update_pool(context, id, pool)
-        driver = self._get_driver_for_provider(p['provider'])
-        driver.update_pool(context, old_pool, p)
-        return p
-
-    def _delete_db_pool(self, context, id):
-        # proxy the call until plugin inherits from DBPlugin
-        # rely on uuid uniqueness:
-        try:
-            with context.session.begin(subtransactions=True):
-                self.service_type_manager.del_resource_associations(
-                    context, [id])
-                super(LoadBalancerPlugin, self).delete_pool(context, id)
-        except Exception:
-            # This should not happen; if it does, something has gone wrong.
-            # Log the error and mark the pool as ERROR.
-            LOG.error(_LE('Failed to delete pool %s, putting it in ERROR '
-                          'state'),
-                      id)
-            with excutils.save_and_reraise_exception():
-                self.update_status(context, ldb.Pool,
-                                   id, constants.ERROR)
-
-    def delete_pool(self, context, id):
-        # check for delete conditions and update the status
-        # within a transaction to avoid a race
-        with context.session.begin(subtransactions=True):
-            self.update_status(context, ldb.Pool,
-                               id, constants.PENDING_DELETE)
-            self._ensure_pool_delete_conditions(context, id)
-        p = self.get_pool(context, id)
-        driver = self._get_driver_for_provider(p['provider'])
-        driver.delete_pool(context, p)
-
-    def create_member(self, context, member):
-        m = super(LoadBalancerPlugin, self).create_member(context, member)
-        driver = self._get_driver_for_pool(context, m['pool_id'])
-        driver.create_member(context, m)
-        return m
-
-    def update_member(self, context, id, member):
-        if 'status' not in member['member']:
-            member['member']['status'] = constants.PENDING_UPDATE
-        old_member = self.get_member(context, id)
-        m = super(LoadBalancerPlugin, self).update_member(context, id, member)
-        driver = self._get_driver_for_pool(context, m['pool_id'])
-        driver.update_member(context, old_member, m)
-        return m
-
-    def _delete_db_member(self, context, id):
-        # proxy the call until plugin inherits from DBPlugin
-        super(LoadBalancerPlugin, self).delete_member(context, id)
-
-    def delete_member(self, context, id):
-        self.update_status(context, ldb.Member,
-                           id, constants.PENDING_DELETE)
-        m = self.get_member(context, id)
-        driver = self._get_driver_for_pool(context, m['pool_id'])
-        driver.delete_member(context, m)
-
-    def _validate_hm_parameters(self, delay, timeout):
-        if delay < timeout:
-            raise loadbalancer.DelayOrTimeoutInvalid()
-
-    def create_health_monitor(self, context, health_monitor):
-        new_hm = health_monitor['health_monitor']
-        self._validate_hm_parameters(new_hm['delay'], new_hm['timeout'])
-
-        hm = super(LoadBalancerPlugin, self).create_health_monitor(
-            context,
-            health_monitor
-        )
-        return hm
-
-    def update_health_monitor(self, context, id, health_monitor):
-        new_hm = health_monitor['health_monitor']
-        old_hm = self.get_health_monitor(context, id)
-        delay = new_hm.get('delay', old_hm.get('delay'))
-        timeout = new_hm.get('timeout', old_hm.get('timeout'))
-        self._validate_hm_parameters(delay, timeout)
-
-        hm = super(LoadBalancerPlugin, self).update_health_monitor(
-            context,
-            id,
-            health_monitor
-        )
-
-        with context.session.begin(subtransactions=True):
-            qry = context.session.query(
-                ldb.PoolMonitorAssociation
-            ).filter_by(monitor_id=hm['id']).join(ldb.Pool)
-            for assoc in qry:
-                driver = self._get_driver_for_pool(context, assoc['pool_id'])
-                driver.update_pool_health_monitor(context, old_hm,
-                                                  hm, assoc['pool_id'])
-        return hm
-
-    def _delete_db_pool_health_monitor(self, context, hm_id, pool_id):
-        super(LoadBalancerPlugin, self).delete_pool_health_monitor(context,
-                                                                   hm_id,
-                                                                   pool_id)
-
-    def _delete_db_health_monitor(self, context, id):
-        super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
-
-    def create_pool_health_monitor(self, context, health_monitor, pool_id):
-        retval = super(LoadBalancerPlugin, self).create_pool_health_monitor(
-            context,
-            health_monitor,
-            pool_id
-        )
-        monitor_id = health_monitor['health_monitor']['id']
-        hm = self.get_health_monitor(context, monitor_id)
-        driver = self._get_driver_for_pool(context, pool_id)
-        driver.create_pool_health_monitor(context, hm, pool_id)
-        return retval
-
-    def delete_pool_health_monitor(self, context, id, pool_id):
-        self.update_pool_health_monitor(context, id, pool_id,
-                                        constants.PENDING_DELETE)
-        hm = self.get_health_monitor(context, id)
-        driver = self._get_driver_for_pool(context, pool_id)
-        driver.delete_pool_health_monitor(context, hm, pool_id)
-
-    def stats(self, context, pool_id):
-        driver = self._get_driver_for_pool(context, pool_id)
-        stats_data = driver.stats(context, pool_id)
-        # If we get something from the driver, update the db and
-        # return the value from the db;
-        # else return what we already have in the db.
-        if stats_data:
-            super(LoadBalancerPlugin, self).update_pool_stats(
-                context,
-                pool_id,
-                stats_data
-            )
-        return super(LoadBalancerPlugin, self).stats(context,
-                                                     pool_id)
-
-    def populate_vip_graph(self, context, vip):
-        """Populate the vip with: pool, members, healthmonitors."""
-
-        pool = self.get_pool(context, vip['pool_id'])
-        vip['pool'] = pool
-        vip['members'] = [self.get_member(context, member_id)
-                          for member_id in pool['members']]
-        vip['health_monitors'] = [self.get_health_monitor(context, hm_id)
-                                  for hm_id in pool['health_monitors']]
-        return vip
-
-    def validate_provider(self, provider):
-        if provider not in self.drivers:
-            raise pconf.ServiceProviderNotFound(
-                provider=provider, service_type=constants.LOADBALANCER)
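The plugin above always performs the DB operation first and then hands the result to the driver registered for the resource's provider. A reduced sketch of that provider-to-driver dispatch follows; the registry dict, the ProviderDispatcher name and the ValueError stand in for service_base.load_drivers and n_exc.Invalid, so they are illustrative only.

class ProviderDispatcher(object):
    """Route calls to whichever driver is registered for a resource's provider."""

    def __init__(self, drivers, default_provider=None):
        self.drivers = drivers                  # e.g. {'radware': radware_driver}
        self.default_provider = default_provider

    def _driver_for_provider(self, provider):
        try:
            return self.drivers[provider]
        except KeyError:
            raise ValueError("no driver registered for provider %r" % provider)

    def create_pool(self, context, pool):
        provider = pool.get('provider') or self.default_provider
        if provider is None:
            raise ValueError("no provider given and no default configured")
        pool['provider'] = provider
        # The real plugin persists the pool and its provider association first,
        # then hands the result to the provider's driver.
        self._driver_for_provider(provider).create_pool(context, pool)
        return pool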
diff --git a/neutron/services/vpn/agent.py b/neutron/services/vpn/agent.py
deleted file mode 100644 (file)
index 62807bb..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT I3, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo.config import cfg
-from oslo.utils import importutils
-
-from neutron.agent import l3_agent
-from neutron.extensions import vpnaas
-
-vpn_agent_opts = [
-    cfg.MultiStrOpt(
-        'vpn_device_driver',
-        default=['neutron.services.vpn.device_drivers.'
-                 'ipsec.OpenSwanDriver'],
-        help=_("The vpn device drivers Neutron will use")),
-]
-cfg.CONF.register_opts(vpn_agent_opts, 'vpnagent')
-
-
-class VPNAgent(l3_agent.L3NATAgentWithStateReport):
-    """VPNAgent class which can handle vpn service drivers."""
-    def __init__(self, host, conf=None):
-        super(VPNAgent, self).__init__(host=host, conf=conf)
-        self.setup_device_drivers(host)
-
-    def setup_device_drivers(self, host):
-        """Setting up device drivers.
-
-        :param host: hostname. This is needed for rpc
-        Each devices will stays as processes.
-        They will communicate with
-        server side service plugin using rpc with
-        device specific rpc topic.
-        :returns: None
-        """
-        device_drivers = cfg.CONF.vpnagent.vpn_device_driver
-        self.devices = []
-        for device_driver in device_drivers:
-            try:
-                self.devices.append(
-                    importutils.import_object(device_driver, self, host))
-            except ImportError:
-                raise vpnaas.DeviceDriverImportError(
-                    device_driver=device_driver)
-
-    def get_namespace(self, router_id):
-        """Get namespace of router.
-
-        :param router_id: router_id
-        :returns: namespace string.
-            Note that if the router does not exist, this function
-            returns None
-        """
-        router_info = self.router_info.get(router_id)
-        if not router_info:
-            return
-        return router_info.ns_name
-
-    def add_nat_rule(self, router_id, chain, rule, top=False):
-        """Add nat rule in namespace.
-
-        :param router_id: router_id
-        :param chain: a string of chain name
-        :param rule: a string of rule
-        :param top: if top is true, the rule
-            will be placed at the top of the chain.
-            Note that if there is no router, this method does nothing
-        """
-        router_info = self.router_info.get(router_id)
-        if not router_info:
-            return
-        router_info.iptables_manager.ipv4['nat'].add_rule(
-            chain, rule, top=top)
-
-    def remove_nat_rule(self, router_id, chain, rule, top=False):
-        """Remove nat rule in namespace.
-
-        :param router_id: router_id
-        :param chain: a string of chain name
-        :param rule: a string of rule
-        :param top: unused;
-            needed to keep the same signature as add_nat_rule
-        """
-        router_info = self.router_info.get(router_id)
-        if not router_info:
-            return
-        router_info.iptables_manager.ipv4['nat'].remove_rule(
-            chain, rule, top=top)
-
-    def iptables_apply(self, router_id):
-        """Apply IPtables.
-
-        :param router_id: router_id
-        This method does nothing if there is no router
-        """
-        router_info = self.router_info.get(router_id)
-        if not router_info:
-            return
-        router_info.iptables_manager.apply()
-
-    def _router_added(self, router_id, router):
-        """Router added event.
-
-        This method overrides the parent class method.
-        :param router_id: id of added router
-        :param router: dict of router
-        """
-        super(VPNAgent, self)._router_added(router_id, router)
-        for device in self.devices:
-            device.create_router(router_id)
-
-    def _router_removed(self, router_id):
-        """Router removed event.
-
-        This method overrides the parent class method.
-        :param router_id: id of removed router
-        """
-        super(VPNAgent, self)._router_removed(router_id)
-        for device in self.devices:
-            device.destroy_router(router_id)
-
-    def _process_router_if_compatible(self, router):
-        """Router sync event.
-
-        This method overrides the parent class method.
-        :param router: a router
-        """
-        super(VPNAgent, self)._process_router_if_compatible(router)
-        for device in self.devices:
-            device.sync(self.context, [router])
-
-
-def main():
-    l3_agent.main(
-        manager='neutron.services.vpn.agent.VPNAgent')
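The agent above builds its device drivers from the dotted class paths listed in the vpn_device_driver option, using importutils.import_object. A standalone sketch of the same idea with only the standard library; load_device_drivers and its RuntimeError are hypothetical names, not part of the removed module.

import importlib


def import_object(dotted_path, *args, **kwargs):
    """Instantiate a class given its dotted path, e.g. 'pkg.mod.Class'."""
    module_name, _, class_name = dotted_path.rpartition('.')
    cls = getattr(importlib.import_module(module_name), class_name)
    return cls(*args, **kwargs)


def load_device_drivers(paths, agent, host):
    """Build one driver instance per configured dotted path."""
    drivers = []
    for path in paths:
        try:
            drivers.append(import_object(path, agent, host))
        except (ImportError, AttributeError) as exc:
            raise RuntimeError("cannot load VPN device driver %r: %s"
                               % (path, exc))
    return drivers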
diff --git a/neutron/services/vpn/common/__init__.py b/neutron/services/vpn/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/services/vpn/common/topics.py b/neutron/services/vpn/common/topics.py
deleted file mode 100644 (file)
index d17c829..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT I3, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-IPSEC_DRIVER_TOPIC = 'ipsec_driver'
-IPSEC_AGENT_TOPIC = 'ipsec_agent'
-CISCO_IPSEC_DRIVER_TOPIC = 'cisco_csr_ipsec_driver'
-CISCO_IPSEC_AGENT_TOPIC = 'cisco_csr_ipsec_agent'
diff --git a/neutron/services/vpn/device_drivers/__init__.py b/neutron/services/vpn/device_drivers/__init__.py
deleted file mode 100644 (file)
index 3f01f93..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT I3, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import abc
-
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class DeviceDriver(object):
-
-    def __init__(self, agent, host):
-        pass
-
-    @abc.abstractmethod
-    def sync(self, context, processes):
-        pass
-
-    @abc.abstractmethod
-    def create_router(self, process_id):
-        pass
-
-    @abc.abstractmethod
-    def destroy_router(self, process_id):
-        pass
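The abstract base above fixes the contract for VPN device drivers: construction takes the agent and host, and sync, create_router and destroy_router must be implemented. A toy subclass that merely records the calls it receives, restated with Python 3's abc.ABC for brevity; both classes below are illustrative only.

import abc


class DeviceDriver(abc.ABC):
    """Python 3 restatement of the abstract contract defined above."""

    def __init__(self, agent, host):
        self.agent = agent
        self.host = host

    @abc.abstractmethod
    def sync(self, context, processes):
        pass

    @abc.abstractmethod
    def create_router(self, process_id):
        pass

    @abc.abstractmethod
    def destroy_router(self, process_id):
        pass


class RecordingDeviceDriver(DeviceDriver):
    """Hypothetical driver that only records the calls it receives."""

    def __init__(self, agent, host):
        super().__init__(agent, host)
        self.calls = []

    def sync(self, context, processes):
        self.calls.append(('sync', processes))

    def create_router(self, process_id):
        self.calls.append(('create_router', process_id))

    def destroy_router(self, process_id):
        self.calls.append(('destroy_router', process_id))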
diff --git a/neutron/services/vpn/device_drivers/cisco_csr_rest_client.py b/neutron/services/vpn/device_drivers/cisco_csr_rest_client.py
deleted file mode 100644 (file)
index 04e6c09..0000000
+++ /dev/null
@@ -1,293 +0,0 @@
-# Copyright 2014 Cisco Systems, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import time
-
-import netaddr
-from oslo.serialization import jsonutils
-import requests
-from requests import exceptions as r_exc
-
-from neutron.i18n import _LE, _LW
-from neutron.openstack.common import log as logging
-
-
-TIMEOUT = 20.0
-
-LOG = logging.getLogger(__name__)
-HEADER_CONTENT_TYPE_JSON = {'content-type': 'application/json'}
-URL_BASE = 'https://%(host)s/api/v1/%(resource)s'
-
-# CSR RESTapi URIs
-URI_VPN_IPSEC_POLICIES = 'vpn-svc/ipsec/policies'
-URI_VPN_IPSEC_POLICIES_ID = URI_VPN_IPSEC_POLICIES + '/%s'
-URI_VPN_IKE_POLICIES = 'vpn-svc/ike/policies'
-URI_VPN_IKE_POLICIES_ID = URI_VPN_IKE_POLICIES + '/%s'
-URI_VPN_IKE_KEYRINGS = 'vpn-svc/ike/keyrings'
-URI_VPN_IKE_KEYRINGS_ID = URI_VPN_IKE_KEYRINGS + '/%s'
-URI_VPN_IKE_KEEPALIVE = 'vpn-svc/ike/keepalive'
-URI_VPN_SITE_TO_SITE = 'vpn-svc/site-to-site'
-URI_VPN_SITE_TO_SITE_ID = URI_VPN_SITE_TO_SITE + '/%s'
-URI_VPN_SITE_TO_SITE_STATE = URI_VPN_SITE_TO_SITE + '/%s/state'
-URI_VPN_SITE_ACTIVE_SESSIONS = URI_VPN_SITE_TO_SITE + '/active/sessions'
-URI_ROUTING_STATIC_ROUTES = 'routing-svc/static-routes'
-URI_ROUTING_STATIC_ROUTES_ID = URI_ROUTING_STATIC_ROUTES + '/%s'
-
-
-def make_route_id(cidr, interface):
-    """Build ID that will be used to identify route for later deletion."""
-    net = netaddr.IPNetwork(cidr)
-    return '%(network)s_%(prefix)s_%(interface)s' % {
-        'network': net.network,
-        'prefix': net.prefixlen,
-        'interface': interface}
-
-
-class CsrRestClient(object):
-
-    """REST CsrRestClient for accessing the Cisco Cloud Services Router."""
-
-    def __init__(self, settings):
-        self.port = str(settings.get('protocol_port', 55443))
-        self.host = ':'.join([settings.get('rest_mgmt_ip', ''), self.port])
-        self.auth = (settings['username'], settings['password'])
-        self.inner_if_name = settings.get('inner_if_name', '')
-        self.outer_if_name = settings.get('outer_if_name', '')
-        self.token = None
-        self.vrf = settings.get('vrf', '')
-        self.vrf_prefix = 'vrf/%s/' % self.vrf if self.vrf else ""
-        self.status = requests.codes.OK
-        self.timeout = settings.get('timeout')
-        self.max_tries = 5
-        self.session = requests.Session()
-
-    def _response_info_for(self, response, method):
-        """Return contents or location from response.
-
-        For a POST or GET with a 200 response, the response content
-        is returned.
-
-        For a POST with a 201 response, return the header's location,
-        which contains the identifier for the created resource.
-
-        If there is an error, return the response content, so that
-        it can be used in error processing ('error-code', 'error-message',
-        and 'detail' fields).
-        """
-        if method in ('POST', 'GET') and self.status == requests.codes.OK:
-            LOG.debug('RESPONSE: %s', response.json())
-            return response.json()
-        if method == 'POST' and self.status == requests.codes.CREATED:
-            return response.headers.get('location', '')
-        if self.status >= requests.codes.BAD_REQUEST and response.content:
-            if 'error-code' in response.content:
-                content = jsonutils.loads(response.content)
-                LOG.debug("Error response content %s", content)
-                return content
-
-    def _request(self, method, url, **kwargs):
-        """Perform REST request and save response info."""
-        try:
-            LOG.debug("%(method)s: Request for %(resource)s payload: "
-                      "%(payload)s",
-                      {'method': method.upper(), 'resource': url,
-                       'payload': kwargs.get('data')})
-            start_time = time.time()
-            response = self.session.request(method, url, verify=False,
-                                            timeout=self.timeout, **kwargs)
-            LOG.debug("%(method)s Took %(time).2f seconds to process",
-                      {'method': method.upper(),
-                       'time': time.time() - start_time})
-        except (r_exc.Timeout, r_exc.SSLError) as te:
-            # Should never see SSLError, unless requests package is old (<2.0)
-            timeout_val = 0.0 if self.timeout is None else self.timeout
-            LOG.warning(_LW("%(method)s: Request timeout%(ssl)s "
-                            "(%(timeout).3f sec) for CSR(%(host)s)"),
-                        {'method': method,
-                         'timeout': timeout_val,
-                         'ssl': '(SSLError)'
-                         if isinstance(te, r_exc.SSLError) else '',
-                         'host': self.host})
-            self.status = requests.codes.REQUEST_TIMEOUT
-        except r_exc.ConnectionError:
-            LOG.exception(_LE("%(method)s: Unable to connect to "
-                              "CSR(%(host)s)"),
-                          {'method': method, 'host': self.host})
-            self.status = requests.codes.NOT_FOUND
-        except Exception as e:
-            LOG.error(_LE("%(method)s: Unexpected error for CSR (%(host)s): "
-                          "%(error)s"),
-                      {'method': method, 'host': self.host, 'error': e})
-            self.status = requests.codes.INTERNAL_SERVER_ERROR
-        else:
-            self.status = response.status_code
-            LOG.debug("%(method)s: Completed [%(status)s]",
-                      {'method': method, 'status': self.status})
-            return self._response_info_for(response, method)
-
-    def authenticate(self):
-        """Obtain a token to use for subsequent CSR REST requests.
-
-        This is called when there is no token yet, or if the token has expired
-        and attempts to use it resulted in an UNAUTHORIZED REST response.
-        """
-
-        url = URL_BASE % {'host': self.host, 'resource': 'auth/token-services'}
-        headers = {'Content-Length': '0',
-                   'Accept': 'application/json'}
-        headers.update(HEADER_CONTENT_TYPE_JSON)
-        LOG.debug("%(auth)s with CSR %(host)s",
-                  {'auth': 'Authenticating' if self.token is None
-                   else 'Reauthenticating', 'host': self.host})
-        self.token = None
-        response = self._request("POST", url, headers=headers, auth=self.auth)
-        if response:
-            self.token = response['token-id']
-            LOG.debug("Successfully authenticated with CSR %s", self.host)
-            return True
-        LOG.error(_LE("Failed authentication with CSR %(host)s [%(status)s]"),
-                  {'host': self.host, 'status': self.status})
-
-    def _do_request(self, method, resource, payload=None, more_headers=None,
-                    full_url=False):
-        """Perform a REST request to a CSR resource.
-
-        If this is the first time interacting with the CSR, a token will
-        be obtained. If the request fails, due to an expired token, the
-        token will be obtained and the request will be retried once more.
-        """
-
-        if self.token is None:
-            if not self.authenticate():
-                return
-
-        if full_url:
-            url = resource
-        else:
-            url = ('https://%(host)s/api/v1/%(resource)s' %
-                   {'host': self.host, 'resource': resource})
-        headers = {'Accept': 'application/json', 'X-auth-token': self.token}
-        if more_headers:
-            headers.update(more_headers)
-        if payload:
-            payload = jsonutils.dumps(payload)
-        response = self._request(method, url, data=payload, headers=headers)
-        if self.status == requests.codes.UNAUTHORIZED:
-            if not self.authenticate():
-                return
-            headers['X-auth-token'] = self.token
-            response = self._request(method, url, data=payload,
-                                     headers=headers)
-        if self.status != requests.codes.REQUEST_TIMEOUT:
-            return response
-        LOG.error(_LE("%(method)s: Request timeout for CSR(%(host)s)"),
-                  {'method': method, 'host': self.host})
-
-    def get_request(self, resource, full_url=False):
-        """Perform a REST GET requests for a CSR resource."""
-        return self._do_request('GET', resource, full_url=full_url)
-
-    def post_request(self, resource, payload=None):
-        """Perform a POST request to a CSR resource."""
-        return self._do_request('POST', resource, payload=payload,
-                                more_headers=HEADER_CONTENT_TYPE_JSON)
-
-    def put_request(self, resource, payload=None):
-        """Perform a PUT request to a CSR resource."""
-        return self._do_request('PUT', resource, payload=payload,
-                                more_headers=HEADER_CONTENT_TYPE_JSON)
-
-    def delete_request(self, resource):
-        """Perform a DELETE request on a CSR resource."""
-        return self._do_request('DELETE', resource,
-                                more_headers=HEADER_CONTENT_TYPE_JSON)
-
-    # VPN Specific APIs
-
-    def create_ike_policy(self, policy_info):
-        base_ike_policy_info = {u'version': u'v1',
-                                u'local-auth-method': u'pre-share'}
-        base_ike_policy_info.update(policy_info)
-        return self.post_request(URI_VPN_IKE_POLICIES,
-                                 payload=base_ike_policy_info)
-
-    def create_ipsec_policy(self, policy_info):
-        base_ipsec_policy_info = {u'mode': u'tunnel'}
-        base_ipsec_policy_info.update(policy_info)
-        return self.post_request(URI_VPN_IPSEC_POLICIES,
-                                 payload=base_ipsec_policy_info)
-
-    def create_pre_shared_key(self, psk_info):
-        return self.post_request(self.vrf_prefix + URI_VPN_IKE_KEYRINGS,
-                                 payload=psk_info)
-
-    def create_ipsec_connection(self, connection_info):
-        base_conn_info = {
-            u'vpn-type': u'site-to-site',
-            u'ip-version': u'ipv4',
-            u'local-device': {
-                u'tunnel-ip-address': self.outer_if_name,
-                u'ip-address': self.inner_if_name
-            }
-        }
-        connection_info.update(base_conn_info)
-        if self.vrf:
-            connection_info[u'tunnel-vrf'] = self.vrf
-        return self.post_request(self.vrf_prefix + URI_VPN_SITE_TO_SITE,
-                                 payload=connection_info)
-
-    def configure_ike_keepalive(self, keepalive_info):
-        base_keepalive_info = {u'periodic': True}
-        keepalive_info.update(base_keepalive_info)
-        return self.put_request(URI_VPN_IKE_KEEPALIVE, keepalive_info)
-
-    def create_static_route(self, route_info):
-        return self.post_request(self.vrf_prefix + URI_ROUTING_STATIC_ROUTES,
-                                 payload=route_info)
-
-    def delete_static_route(self, route_id):
-        return self.delete_request(
-            self.vrf_prefix + URI_ROUTING_STATIC_ROUTES_ID % route_id)
-
-    def set_ipsec_connection_state(self, tunnel, admin_up=True):
-        """Set the IPSec site-to-site connection (tunnel) admin state.
-
-        Note: When a tunnel is created, it will be admin up.
-        """
-        info = {u'vpn-interface-name': tunnel, u'enabled': admin_up}
-        return self.put_request(
-            self.vrf_prefix + URI_VPN_SITE_TO_SITE_STATE % tunnel, info)
-
-    def delete_ipsec_connection(self, conn_id):
-        return self.delete_request(
-            self.vrf_prefix + URI_VPN_SITE_TO_SITE_ID % conn_id)
-
-    def delete_ipsec_policy(self, policy_id):
-        return self.delete_request(URI_VPN_IPSEC_POLICIES_ID % policy_id)
-
-    def delete_ike_policy(self, policy_id):
-        return self.delete_request(URI_VPN_IKE_POLICIES_ID % policy_id)
-
-    def delete_pre_shared_key(self, key_id):
-        return self.delete_request(
-            self.vrf_prefix + URI_VPN_IKE_KEYRINGS_ID % key_id)
-
-    def read_tunnel_statuses(self):
-        results = self.get_request(self.vrf_prefix +
-                                   URI_VPN_SITE_ACTIVE_SESSIONS)
-        if self.status != requests.codes.OK or not results:
-            return []
-        tunnels = [(t[u'vpn-interface-name'], t[u'status'])
-                   for t in results['items']]
-        return tunnels
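The client above authenticates lazily, caches the token-id, and retries a request exactly once after re-authenticating when the CSR answers 401. A compressed sketch of that authenticate-and-retry flow; TokenClient is an illustrative name, error handling and logging are trimmed, and the endpoint paths simply mirror the code above.

import requests


class TokenClient(object):
    """Cache an auth token and retry one time after a 401 response."""

    def __init__(self, host, auth, timeout=20.0):
        self.host = host
        self.auth = auth
        self.timeout = timeout
        self.token = None
        self.session = requests.Session()

    def _authenticate(self):
        url = 'https://%s/api/v1/auth/token-services' % self.host
        resp = self.session.post(url, auth=self.auth, verify=False,
                                 timeout=self.timeout)
        if resp.status_code == requests.codes.OK:
            self.token = resp.json()['token-id']
            return True
        return False

    def request(self, method, resource, **kwargs):
        if self.token is None and not self._authenticate():
            return None
        url = 'https://%s/api/v1/%s' % (self.host, resource)
        headers = {'Accept': 'application/json', 'X-auth-token': self.token}
        resp = self.session.request(method, url, headers=headers, verify=False,
                                    timeout=self.timeout, **kwargs)
        if resp.status_code == requests.codes.UNAUTHORIZED:
            # Token expired - re-authenticate and retry exactly once.
            if not self._authenticate():
                return None
            headers['X-auth-token'] = self.token
            resp = self.session.request(method, url, headers=headers,
                                        verify=False, timeout=self.timeout,
                                        **kwargs)
        return resp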
diff --git a/neutron/services/vpn/device_drivers/cisco_ipsec.py b/neutron/services/vpn/device_drivers/cisco_ipsec.py
deleted file mode 100644 (file)
index b55e5b5..0000000
+++ /dev/null
@@ -1,742 +0,0 @@
-# Copyright 2014 Cisco Systems, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-import collections
-import requests
-
-from oslo.config import cfg
-from oslo import messaging
-import six
-
-from neutron.common import exceptions
-from neutron.common import rpc as n_rpc
-from neutron import context as ctx
-from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import lockutils
-from neutron.openstack.common import log as logging
-from neutron.openstack.common import loopingcall
-from neutron.plugins.common import constants
-from neutron.plugins.common import utils as plugin_utils
-from neutron.services.vpn.common import topics
-from neutron.services.vpn import device_drivers
-from neutron.services.vpn.device_drivers import (
-    cisco_csr_rest_client as csr_client)
-
-
-ipsec_opts = [
-    cfg.IntOpt('status_check_interval',
-               default=60,
-               help=_("Status check interval for Cisco CSR IPSec connections"))
-]
-cfg.CONF.register_opts(ipsec_opts, 'cisco_csr_ipsec')
-
-LOG = logging.getLogger(__name__)
-
-RollbackStep = collections.namedtuple('RollbackStep',
-                                      ['action', 'resource_id', 'title'])
-
-
-class CsrResourceCreateFailure(exceptions.NeutronException):
-    message = _("Cisco CSR failed to create %(resource)s (%(which)s)")
-
-
-class CsrAdminStateChangeFailure(exceptions.NeutronException):
-    message = _("Cisco CSR failed to change %(tunnel)s admin state to "
-                "%(state)s")
-
-
-class CsrDriverMismatchError(exceptions.NeutronException):
-    message = _("Required %(resource)s attribute %(attr)s mapping for Cisco "
-                "CSR is missing in device driver")
-
-
-class CsrUnknownMappingError(exceptions.NeutronException):
-    message = _("Device driver does not have a mapping of '%(value)s for "
-                "attribute %(attr)s of %(resource)s")
-
-
-class CiscoCsrIPsecVpnDriverApi(object):
-    """RPC API for agent to plugin messaging."""
-
-    def __init__(self, topic):
-        target = messaging.Target(topic=topic, version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    def get_vpn_services_on_host(self, context, host):
-        """Get list of vpnservices on this host.
-
-        The vpnservices including related ipsec_site_connection,
-        ikepolicy, ipsecpolicy, and Cisco info on this host.
-        """
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'get_vpn_services_on_host', host=host)
-
-    def update_status(self, context, status):
-        """Update status for all VPN services and connections."""
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'update_status', status=status)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class CiscoCsrIPsecDriver(device_drivers.DeviceDriver):
-    """Cisco CSR VPN Device Driver for IPSec.
-
-    This class is designed for use with L3-agent now.
-    However, this driver will be used with another agent in the future,
-    so the use of "Router" is kept minimal for now.
-    Instead of router_id, we are using process_id in this code.
-    """
-
-    # history
-    #   1.0 Initial version
-    target = messaging.Target(version='1.0')
-
-    def __init__(self, agent, host):
-        self.host = host
-        self.conn = n_rpc.create_connection(new=True)
-        context = ctx.get_admin_context_without_session()
-        node_topic = '%s.%s' % (topics.CISCO_IPSEC_AGENT_TOPIC, self.host)
-
-        self.service_state = {}
-
-        self.endpoints = [self]
-        self.conn.create_consumer(node_topic, self.endpoints, fanout=False)
-        self.conn.consume_in_threads()
-        self.agent_rpc = (
-            CiscoCsrIPsecVpnDriverApi(topics.CISCO_IPSEC_DRIVER_TOPIC))
-        self.periodic_report = loopingcall.FixedIntervalLoopingCall(
-            self.report_status, context)
-        self.periodic_report.start(
-            interval=agent.conf.cisco_csr_ipsec.status_check_interval)
-        LOG.debug("Device driver initialized for %s", node_topic)
-
-    def vpnservice_updated(self, context, **kwargs):
-        """Handle VPNaaS service driver change notifications."""
-        LOG.debug("Handling VPN service update notification '%s'",
-                  kwargs.get('reason', ''))
-        self.sync(context, [])
-
-    def create_vpn_service(self, service_data):
-        """Create new entry to track VPN service and its connections."""
-        csr = csr_client.CsrRestClient(service_data['router_info'])
-        vpn_service_id = service_data['id']
-        self.service_state[vpn_service_id] = CiscoCsrVpnService(
-            service_data, csr)
-        return self.service_state[vpn_service_id]
-
-    def update_connection(self, context, vpn_service_id, conn_data):
-        """Handle notification for a single IPSec connection."""
-        vpn_service = self.service_state[vpn_service_id]
-        conn_id = conn_data['id']
-        conn_is_admin_up = conn_data[u'admin_state_up']
-
-        if conn_id in vpn_service.conn_state:  # Existing connection...
-            ipsec_conn = vpn_service.conn_state[conn_id]
-            config_changed = ipsec_conn.check_for_changes(conn_data)
-            if config_changed:
-                LOG.debug("Update: Existing connection %s changed", conn_id)
-                ipsec_conn.delete_ipsec_site_connection(context, conn_id)
-                ipsec_conn.create_ipsec_site_connection(context, conn_data)
-                ipsec_conn.conn_info = conn_data
-
-            if ipsec_conn.forced_down:
-                if vpn_service.is_admin_up and conn_is_admin_up:
-                    LOG.debug("Update: Connection %s no longer admin down",
-                              conn_id)
-                    ipsec_conn.set_admin_state(is_up=True)
-                    ipsec_conn.forced_down = False
-            else:
-                if not vpn_service.is_admin_up or not conn_is_admin_up:
-                    LOG.debug("Update: Connection %s forced to admin down",
-                              conn_id)
-                    ipsec_conn.set_admin_state(is_up=False)
-                    ipsec_conn.forced_down = True
-        else:  # New connection...
-            ipsec_conn = vpn_service.create_connection(conn_data)
-            ipsec_conn.create_ipsec_site_connection(context, conn_data)
-            if not vpn_service.is_admin_up or not conn_is_admin_up:
-                LOG.debug("Update: Created new connection %s in admin down "
-                          "state", conn_id)
-                ipsec_conn.set_admin_state(is_up=False)
-                ipsec_conn.forced_down = True
-            else:
-                LOG.debug("Update: Created new connection %s", conn_id)
-
-        ipsec_conn.is_dirty = False
-        ipsec_conn.last_status = conn_data['status']
-        ipsec_conn.is_admin_up = conn_is_admin_up
-        return ipsec_conn
-
-    def update_service(self, context, service_data):
-        """Handle notification for a single VPN Service and its connections."""
-        vpn_service_id = service_data['id']
-        if vpn_service_id in self.service_state:
-            LOG.debug("Update: Existing VPN service %s detected",
-                      vpn_service_id)
-            vpn_service = self.service_state[vpn_service_id]
-        else:
-            LOG.debug("Update: New VPN service %s detected", vpn_service_id)
-            vpn_service = self.create_vpn_service(service_data)
-            if not vpn_service:
-                return
-
-        vpn_service.is_dirty = False
-        vpn_service.connections_removed = False
-        vpn_service.last_status = service_data['status']
-        vpn_service.is_admin_up = service_data[u'admin_state_up']
-        for conn_data in service_data['ipsec_conns']:
-            self.update_connection(context, vpn_service_id, conn_data)
-        LOG.debug("Update: Completed update processing")
-        return vpn_service
-
-    def update_all_services_and_connections(self, context):
-        """Update services and connections based on plugin info.
-
-        Perform any create and update operations and then update status.
-        Mark every visited connection as no longer "dirty" so they will
-        not be deleted at the end of sync processing.
-        """
-        services_data = self.agent_rpc.get_vpn_services_on_host(context,
-                                                                self.host)
-        LOG.debug("Sync updating for %d VPN services", len(services_data))
-        vpn_services = []
-        for service_data in services_data:
-            vpn_service = self.update_service(context, service_data)
-            if vpn_service:
-                vpn_services.append(vpn_service)
-        return vpn_services
-
-    def mark_existing_connections_as_dirty(self):
-        """Mark all existing connections as "dirty" for sync."""
-        service_count = 0
-        connection_count = 0
-        for service_state in self.service_state.values():
-            service_state.is_dirty = True
-            service_count += 1
-            for conn_id in service_state.conn_state:
-                service_state.conn_state[conn_id].is_dirty = True
-                connection_count += 1
-        LOG.debug("Mark: %(service)d VPN services and %(conn)d IPSec "
-                  "connections marked dirty", {'service': service_count,
-                                               'conn': connection_count})
-
-    def remove_unknown_connections(self, context):
-        """Remove connections that are not known by service driver."""
-        service_count = 0
-        connection_count = 0
-        for vpn_service_id, vpn_service in self.service_state.items():
-            dirty = [c_id for c_id, c in vpn_service.conn_state.items()
-                     if c.is_dirty]
-            vpn_service.connections_removed = len(dirty) > 0
-            for conn_id in dirty:
-                conn_state = vpn_service.conn_state[conn_id]
-                conn_state.delete_ipsec_site_connection(context, conn_id)
-                connection_count += 1
-                del vpn_service.conn_state[conn_id]
-            if vpn_service.is_dirty:
-                service_count += 1
-                del self.service_state[vpn_service_id]
-            elif dirty:
-                self.connections_removed = True
-        LOG.debug("Sweep: Removed %(service)d dirty VPN service%(splural)s "
-                  "and %(conn)d dirty IPSec connection%(cplural)s",
-                  {'service': service_count, 'conn': connection_count,
-                   'splural': 's'[service_count == 1:],
-                   'cplural': 's'[connection_count == 1:]})
-
-    def build_report_for_connections_on(self, vpn_service):
-        """Create the report fragment for IPSec connections on a service.
-
-        Collect the current status from the Cisco CSR and use that to update
-        the status and generate report fragment for each connection on the
-        service. If there is no status information, or no change, then no
-        report info will be created for the connection. The combined report
-        data is returned.
-        """
-        LOG.debug("Report: Collecting status for IPSec connections on VPN "
-                  "service %s", vpn_service.service_id)
-        tunnels = vpn_service.get_ipsec_connections_status()
-        report = {}
-        for connection in vpn_service.conn_state.values():
-            if connection.forced_down:
-                LOG.debug("Connection %s forced down", connection.conn_id)
-                current_status = constants.DOWN
-            else:
-                current_status = connection.find_current_status_in(tunnels)
-                LOG.debug("Connection %(conn)s reported %(status)s",
-                          {'conn': connection.conn_id,
-                           'status': current_status})
-            frag = connection.update_status_and_build_report(current_status)
-            if frag:
-                LOG.debug("Report: Adding info for IPSec connection %s",
-                          connection.conn_id)
-                report.update(frag)
-        return report
-
-    def build_report_for_service(self, vpn_service):
-        """Create the report info for a VPN service and its IPSec connections.
-
-        Get the report info for the connections on the service, and include
-        it into the report info for the VPN service. If there is no report
-        info for the connection, then no change has occurred and no report
-        will be generated. If there is only one connection for the service,
-        we'll set the service state to match the connection (with ERROR seen
-        as DOWN).
-        """
-        conn_report = self.build_report_for_connections_on(vpn_service)
-        if conn_report or vpn_service.connections_removed:
-            pending_handled = plugin_utils.in_pending_status(
-                vpn_service.last_status)
-            vpn_service.update_last_status()
-            LOG.debug("Report: Adding info for VPN service %s",
-                      vpn_service.service_id)
-            return {u'id': vpn_service.service_id,
-                    u'status': vpn_service.last_status,
-                    u'updated_pending_status': pending_handled,
-                    u'ipsec_site_connections': conn_report}
-        else:
-            return {}
-
-    @lockutils.synchronized('vpn-agent', 'neutron-')
-    def report_status(self, context):
-        """Report status of all VPN services and IPSec connections to plugin.
-
-        This is called periodically by the agent, to push up changes in
-        status. Use a lock to serialize access to (and changing of)
-        running state.
-        """
-        return self.report_status_internal(context)
-
-    def report_status_internal(self, context):
-        """Generate report and send to plugin, if anything changed."""
-        service_report = []
-        LOG.debug("Report: Starting status report processing")
-        for vpn_service_id, vpn_service in self.service_state.items():
-            LOG.debug("Report: Collecting status for VPN service %s",
-                      vpn_service_id)
-            report = self.build_report_for_service(vpn_service)
-            if report:
-                service_report.append(report)
-        if service_report:
-            LOG.info(_LI("Sending status report update to plugin"))
-            self.agent_rpc.update_status(context, service_report)
-        LOG.debug("Report: Completed status report processing")
-        return service_report
-
-    @lockutils.synchronized('vpn-agent', 'neutron-')
-    def sync(self, context, routers):
-        """Synchronize with plugin and report current status.
-
-        Mark all "known" services/connections as dirty, update them based on
-        information from the plugin, remove (sweep) any connections that are
-        not updated (dirty), and report updates, if any, back to plugin.
-        Called when update/delete a service or create/update/delete a
-        connection (vpnservice_updated message), or router change
-        (_process_routers).
-
-        Use lock to serialize access (and changes) to running state for VPN
-        service and IPsec connections.
-        """
-        self.mark_existing_connections_as_dirty()
-        self.update_all_services_and_connections(context)
-        self.remove_unknown_connections(context)
-        self.report_status_internal(context)
-
-    def create_router(self, process_id):
-        """Actions taken when router created."""
-        # Note: Since Cisco CSR is running out-of-band, nothing to do here
-        pass
-
-    def destroy_router(self, process_id):
-        """Actions taken when router deleted."""
-        # Note: Since Cisco CSR is running out-of-band, nothing to do here
-        pass
-
-
-class CiscoCsrVpnService(object):
-
-    """Maintains state/status information for a service and its connections."""
-
-    def __init__(self, service_data, csr):
-        self.service_id = service_data['id']
-        self.conn_state = {}
-        self.csr = csr
-        self.is_admin_up = True
-        # TODO(pcm) FUTURE - handle sharing of policies
-
-    def create_connection(self, conn_data):
-        conn_id = conn_data['id']
-        self.conn_state[conn_id] = CiscoCsrIPSecConnection(conn_data, self.csr)
-        return self.conn_state[conn_id]
-
-    def get_connection(self, conn_id):
-        return self.conn_state.get(conn_id)
-
-    def conn_status(self, conn_id):
-        conn_state = self.get_connection(conn_id)
-        if conn_state:
-            return conn_state.last_status
-
-    def snapshot_conn_state(self, ipsec_conn):
-        """Create/obtain connection state and save current status."""
-        conn_state = self.conn_state.setdefault(
-            ipsec_conn['id'], CiscoCsrIPSecConnection(ipsec_conn, self.csr))
-        conn_state.last_status = ipsec_conn['status']
-        conn_state.is_dirty = False
-        return conn_state
-
-    STATUS_MAP = {'ERROR': constants.ERROR,
-                  'UP-ACTIVE': constants.ACTIVE,
-                  'UP-IDLE': constants.ACTIVE,
-                  'UP-NO-IKE': constants.ACTIVE,
-                  'DOWN': constants.DOWN,
-                  'DOWN-NEGOTIATING': constants.DOWN}
-
-    def get_ipsec_connections_status(self):
-        """Obtain current status of all tunnels on a Cisco CSR.
-
-        Convert them to OpenStack status values.
-        """
-        tunnels = self.csr.read_tunnel_statuses()
-        for tunnel in tunnels:
-            LOG.debug("CSR Reports %(tunnel)s status '%(status)s'",
-                      {'tunnel': tunnel[0], 'status': tunnel[1]})
-        return dict(map(lambda x: (x[0], self.STATUS_MAP[x[1]]), tunnels))
-
-    def find_matching_connection(self, tunnel_id):
-        """Find IPSec connection using Cisco CSR tunnel specified, if any."""
-        for connection in self.conn_state.values():
-            if connection.tunnel == tunnel_id:
-                return connection.conn_id
-
-    def no_connections_up(self):
-        return not any(c.last_status == 'ACTIVE'
-                       for c in self.conn_state.values())
-
-    def update_last_status(self):
-        if not self.is_admin_up or self.no_connections_up():
-            self.last_status = constants.DOWN
-        else:
-            self.last_status = constants.ACTIVE
-
-
-class CiscoCsrIPSecConnection(object):
-
-    """State and actions for IPSec site-to-site connections."""
-
-    def __init__(self, conn_info, csr):
-        self.conn_info = conn_info
-        self.csr = csr
-        self.steps = []
-        self.forced_down = False
-        self.changed = False
-
-    @property
-    def conn_id(self):
-        return self.conn_info['id']
-
-    @property
-    def is_admin_up(self):
-        return self.conn_info['admin_state_up']
-
-    @is_admin_up.setter
-    def is_admin_up(self, is_up):
-        self.conn_info['admin_state_up'] = is_up
-
-    @property
-    def tunnel(self):
-        return self.conn_info['cisco']['site_conn_id']
-
-    def check_for_changes(self, curr_conn):
-        return not all([self.conn_info[attr] == curr_conn[attr]
-                        for attr in ('mtu', 'psk', 'peer_address',
-                                     'peer_cidrs', 'ike_policy',
-                                     'ipsec_policy', 'cisco')])
-
-    def find_current_status_in(self, statuses):
-        if self.tunnel in statuses:
-            return statuses[self.tunnel]
-        else:
-            return constants.ERROR
-
-    def update_status_and_build_report(self, current_status):
-        if current_status != self.last_status:
-            pending_handled = plugin_utils.in_pending_status(self.last_status)
-            self.last_status = current_status
-            return {self.conn_id: {'status': current_status,
-                                   'updated_pending_status': pending_handled}}
-        else:
-            return {}
-
-    DIALECT_MAP = {'ike_policy': {'name': 'IKE Policy',
-                                  'v1': u'v1',
-                                  # auth_algorithm -> hash
-                                  'sha1': u'sha',
-                                  # encryption_algorithm -> encryption
-                                  '3des': u'3des',
-                                  'aes-128': u'aes',
-                                  'aes-192': u'aes192',
-                                  'aes-256': u'aes256',
-                                  # pfs -> dhGroup
-                                  'group2': 2,
-                                  'group5': 5,
-                                  'group14': 14},
-                   'ipsec_policy': {'name': 'IPSec Policy',
-                                    # auth_algorithm -> esp-authentication
-                                    'sha1': u'esp-sha-hmac',
-                                    # transform_protocol -> ah
-                                    'esp': None,
-                                    'ah': u'ah-sha-hmac',
-                                    'ah-esp': u'ah-sha-hmac',
-                                    # encryption_algorithm -> esp-encryption
-                                    '3des': u'esp-3des',
-                                    'aes-128': u'esp-aes',
-                                    'aes-192': u'esp-192-aes',
-                                    'aes-256': u'esp-256-aes',
-                                    # pfs -> pfs
-                                    'group2': u'group2',
-                                    'group5': u'group5',
-                                    'group14': u'group14'}}
-
-    def translate_dialect(self, resource, attribute, info):
-        """Map VPNaaS attributes values to CSR values for a resource."""
-        name = self.DIALECT_MAP[resource]['name']
-        if attribute not in info:
-            raise CsrDriverMismatchError(resource=name, attr=attribute)
-        value = info[attribute].lower()
-        if value in self.DIALECT_MAP[resource]:
-            return self.DIALECT_MAP[resource][value]
-        raise CsrUnknownMappingError(resource=name, attr=attribute,
-                                     value=value)
-
-    def create_psk_info(self, psk_id, conn_info):
-        """Collect/create attributes needed for pre-shared key."""
-        return {u'keyring-name': psk_id,
-                u'pre-shared-key-list': [
-                    {u'key': conn_info['psk'],
-                     u'encrypted': False,
-                     u'peer-address': conn_info['peer_address']}]}
-
-    def create_ike_policy_info(self, ike_policy_id, conn_info):
-        """Collect/create/map attributes needed for IKE policy."""
-        for_ike = 'ike_policy'
-        policy_info = conn_info[for_ike]
-        version = self.translate_dialect(for_ike,
-                                         'ike_version',
-                                         policy_info)
-        encrypt_algorithm = self.translate_dialect(for_ike,
-                                                   'encryption_algorithm',
-                                                   policy_info)
-        auth_algorithm = self.translate_dialect(for_ike,
-                                                'auth_algorithm',
-                                                policy_info)
-        group = self.translate_dialect(for_ike,
-                                       'pfs',
-                                       policy_info)
-        lifetime = policy_info['lifetime_value']
-        return {u'version': version,
-                u'priority-id': ike_policy_id,
-                u'encryption': encrypt_algorithm,
-                u'hash': auth_algorithm,
-                u'dhGroup': group,
-                u'lifetime': lifetime}
-
-    def create_ipsec_policy_info(self, ipsec_policy_id, info):
-        """Collect/create attributes needed for IPSec policy.
-
-        Note: OpenStack will provide a default encryption algorithm, if one is
-        not provided, so an authentication-only configuration of (ah, sha1),
-        which maps to ah-sha-hmac transform protocol, cannot be selected.
-        As a result, we'll always configure the encryption algorithm, and
-        will select ah-sha-hmac for transform protocol.
-        """
-
-        for_ipsec = 'ipsec_policy'
-        policy_info = info[for_ipsec]
-        transform_protocol = self.translate_dialect(for_ipsec,
-                                                    'transform_protocol',
-                                                    policy_info)
-        auth_algorithm = self.translate_dialect(for_ipsec,
-                                                'auth_algorithm',
-                                                policy_info)
-        encrypt_algorithm = self.translate_dialect(for_ipsec,
-                                                   'encryption_algorithm',
-                                                   policy_info)
-        group = self.translate_dialect(for_ipsec, 'pfs', policy_info)
-        lifetime = policy_info['lifetime_value']
-        settings = {u'policy-id': ipsec_policy_id,
-                    u'protection-suite': {
-                        u'esp-encryption': encrypt_algorithm,
-                        u'esp-authentication': auth_algorithm},
-                    u'lifetime-sec': lifetime,
-                    u'pfs': group,
-                    u'anti-replay-window-size': u'disable'}
-        if transform_protocol:
-            settings[u'protection-suite'][u'ah'] = transform_protocol
-        return settings
-
-    def create_site_connection_info(self, site_conn_id, ipsec_policy_id,
-                                    conn_info):
-        """Collect/create attributes needed for the IPSec connection."""
-        mtu = conn_info['mtu']
-        return {
-            u'vpn-interface-name': site_conn_id,
-            u'ipsec-policy-id': ipsec_policy_id,
-            u'remote-device': {
-                u'tunnel-ip-address': conn_info['peer_address']
-            },
-            u'mtu': mtu
-        }
-
-    def create_routes_info(self, site_conn_id, conn_info):
-        """Collect/create attributes for static routes."""
-        routes_info = []
-        for peer_cidr in conn_info.get('peer_cidrs', []):
-            route = {u'destination-network': peer_cidr,
-                     u'outgoing-interface': site_conn_id}
-            route_id = csr_client.make_route_id(peer_cidr, site_conn_id)
-            routes_info.append((route_id, route))
-        return routes_info
-
-    def _check_create(self, resource, which):
-        """Determine if REST create request was successful."""
-        if self.csr.status == requests.codes.CREATED:
-            LOG.debug("%(resource)s %(which)s is configured",
-                      {'resource': resource, 'which': which})
-            return
-        LOG.error(_LE("Unable to create %(resource)s %(which)s: "
-                      "%(status)d"),
-                  {'resource': resource, 'which': which,
-                   'status': self.csr.status})
-        # TODO(pcm): Set state to error
-        raise CsrResourceCreateFailure(resource=resource, which=which)
-
-    def do_create_action(self, action_suffix, info, resource_id, title):
-        """Perform a single REST step for IPSec site connection create."""
-        create_action = 'create_%s' % action_suffix
-        try:
-            getattr(self.csr, create_action)(info)
-        except AttributeError:
-            LOG.exception(_LE("Internal error - '%s' is not defined"),
-                          create_action)
-            raise CsrResourceCreateFailure(resource=title,
-                                           which=resource_id)
-        self._check_create(title, resource_id)
-        self.steps.append(RollbackStep(action_suffix, resource_id, title))
-
-    def _verify_deleted(self, status, resource, which):
-        """Determine if REST delete request was successful."""
-        if status in (requests.codes.NO_CONTENT, requests.codes.NOT_FOUND):
-            LOG.debug("%(resource)s configuration %(which)s was removed",
-                      {'resource': resource, 'which': which})
-        else:
-            LOG.warning(_LW("Unable to delete %(resource)s %(which)s: "
-                            "%(status)d"), {'resource': resource,
-                                            'which': which,
-                                            'status': status})
-
-    def do_rollback(self):
-        """Undo create steps that were completed successfully."""
-        for step in reversed(self.steps):
-            delete_action = 'delete_%s' % step.action
-            LOG.debug("Performing rollback action %(action)s for "
-                      "resource %(resource)s", {'action': delete_action,
-                                                'resource': step.title})
-            try:
-                getattr(self.csr, delete_action)(step.resource_id)
-            except AttributeError:
-                LOG.exception(_LE("Internal error - '%s' is not defined"),
-                              delete_action)
-                raise CsrResourceCreateFailure(resource=step.title,
-                                               which=step.resource_id)
-            self._verify_deleted(self.csr.status, step.title, step.resource_id)
-        self.steps = []
-
-    def create_ipsec_site_connection(self, context, conn_info):
-        """Creates an IPSec site-to-site connection on CSR.
-
-        Create the PSK, IKE policy, IPSec policy, connection, static route,
-        and (future) DPD.
-        """
-        # Get all the IDs
-        conn_id = conn_info['id']
-        psk_id = conn_id
-        site_conn_id = conn_info['cisco']['site_conn_id']
-        ike_policy_id = conn_info['cisco']['ike_policy_id']
-        ipsec_policy_id = conn_info['cisco']['ipsec_policy_id']
-
-        LOG.debug('Creating IPSec connection %s', conn_id)
-        # Get all the attributes needed to create
-        try:
-            psk_info = self.create_psk_info(psk_id, conn_info)
-            ike_policy_info = self.create_ike_policy_info(ike_policy_id,
-                                                          conn_info)
-            ipsec_policy_info = self.create_ipsec_policy_info(ipsec_policy_id,
-                                                              conn_info)
-            connection_info = self.create_site_connection_info(site_conn_id,
-                                                               ipsec_policy_id,
-                                                               conn_info)
-            routes_info = self.create_routes_info(site_conn_id, conn_info)
-        except (CsrUnknownMappingError, CsrDriverMismatchError) as e:
-            LOG.exception(e)
-            return
-
-        try:
-            self.do_create_action('pre_shared_key', psk_info,
-                                  conn_id, 'Pre-Shared Key')
-            self.do_create_action('ike_policy', ike_policy_info,
-                                  ike_policy_id, 'IKE Policy')
-            self.do_create_action('ipsec_policy', ipsec_policy_info,
-                                  ipsec_policy_id, 'IPSec Policy')
-            self.do_create_action('ipsec_connection', connection_info,
-                                  site_conn_id, 'IPSec Connection')
-
-            # TODO(pcm): FUTURE - Do DPD for v1 and handle if >1 connection
-            # and different DPD settings
-            for route_id, route_info in routes_info:
-                self.do_create_action('static_route', route_info,
-                                      route_id, 'Static Route')
-        except CsrResourceCreateFailure:
-            self.do_rollback()
-            LOG.info(_LI("FAILED: Create of IPSec site-to-site connection %s"),
-                     conn_id)
-        else:
-            LOG.info(_LI("SUCCESS: Created IPSec site-to-site connection %s"),
-                     conn_id)
-
-    def delete_ipsec_site_connection(self, context, conn_id):
-        """Delete the site-to-site IPSec connection.
-
-        This is best effort and will continue even if there are
-        failures.
-        """
-        LOG.debug('Deleting IPSec connection %s', conn_id)
-        if not self.steps:
-            LOG.warning(_LW('Unable to find connection %s'), conn_id)
-        else:
-            self.do_rollback()
-
-        LOG.info(_LI("SUCCESS: Deleted IPSec site-to-site connection %s"),
-                 conn_id)
-
-    def set_admin_state(self, is_up):
-        """Change the admin state for the IPSec connection."""
-        self.csr.set_ipsec_connection_state(self.tunnel, admin_up=is_up)
-        if self.csr.status != requests.codes.NO_CONTENT:
-            state = "UP" if is_up else "DOWN"
-            LOG.error(_LE("Unable to change %(tunnel)s admin state to "
-                          "%(state)s"), {'tunnel': self.tunnel,
-                                         'state': state})
-            raise CsrAdminStateChangeFailure(tunnel=self.tunnel, state=state)
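
The deleted CiscoCsrIPSecConnection code above creates a site-to-site connection as an
ordered series of REST creates (pre-shared key, IKE policy, IPSec policy, connection,
static routes), appending a RollbackStep for each successful call so that a failure
part-way through can be unwound in reverse order by do_rollback(). The following is a
minimal, self-contained sketch of that create-with-rollback pattern; FakeCsr and the
sample resources are hypothetical stand-ins, not the real CSR REST client.

    import collections

    RollbackStep = collections.namedtuple('RollbackStep',
                                          ['action', 'resource_id', 'title'])

    class FakeCsr(object):
        """Hypothetical stand-in for the CSR REST client."""
        def create_ike_policy(self, info):
            print('created IKE policy %s' % info)

        def create_ipsec_policy(self, info):
            raise RuntimeError('simulated REST failure')

        def delete_ike_policy(self, resource_id):
            print('rolled back IKE policy %s' % resource_id)

    def create_with_rollback(csr, steps):
        """Run create_<action> for each step; undo completed steps on failure."""
        done = []
        try:
            for action, info, resource_id, title in steps:
                getattr(csr, 'create_%s' % action)(info)
                done.append(RollbackStep(action, resource_id, title))
        except Exception:
            # Undo in reverse order, mirroring do_rollback() above.
            for step in reversed(done):
                getattr(csr, 'delete_%s' % step.action)(step.resource_id)
            raise

    try:
        create_with_rollback(FakeCsr(), [
            ('ike_policy', {'priority-id': 2}, 2, 'IKE Policy'),
            ('ipsec_policy', {'policy-id': 8}, 8, 'IPSec Policy'),
        ])
    except RuntimeError:
        print('create failed; completed steps were rolled back')
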
diff --git a/neutron/services/vpn/device_drivers/ipsec.py b/neutron/services/vpn/device_drivers/ipsec.py
deleted file mode 100644 (file)
index e68833b..0000000
+++ /dev/null
@@ -1,702 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT I3, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import abc
-import copy
-import os
-import re
-import shutil
-
-import jinja2
-import netaddr
-from oslo.config import cfg
-from oslo import messaging
-import six
-
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import rpc as n_rpc
-from neutron import context
-from neutron.i18n import _LE
-from neutron.openstack.common import lockutils
-from neutron.openstack.common import log as logging
-from neutron.openstack.common import loopingcall
-from neutron.plugins.common import constants
-from neutron.plugins.common import utils as plugin_utils
-from neutron.services.vpn.common import topics
-from neutron.services.vpn import device_drivers
-
-LOG = logging.getLogger(__name__)
-TEMPLATE_PATH = os.path.dirname(__file__)
-
-ipsec_opts = [
-    cfg.StrOpt(
-        'config_base_dir',
-        default='$state_path/ipsec',
-        help=_('Location to store ipsec server config files')),
-    cfg.IntOpt('ipsec_status_check_interval',
-               default=60,
-               help=_("Interval for checking ipsec status"))
-]
-cfg.CONF.register_opts(ipsec_opts, 'ipsec')
-
-openswan_opts = [
-    cfg.StrOpt(
-        'ipsec_config_template',
-        default=os.path.join(
-            TEMPLATE_PATH,
-            'template/openswan/ipsec.conf.template'),
-        help=_('Template file for ipsec configuration')),
-    cfg.StrOpt(
-        'ipsec_secret_template',
-        default=os.path.join(
-            TEMPLATE_PATH,
-            'template/openswan/ipsec.secret.template'),
-        help=_('Template file for ipsec secret configuration'))
-]
-
-cfg.CONF.register_opts(openswan_opts, 'openswan')
-
-JINJA_ENV = None
-
-STATUS_MAP = {
-    'erouted': constants.ACTIVE,
-    'unrouted': constants.DOWN
-}
-
-IPSEC_CONNS = 'ipsec_site_connections'
-
-
-def _get_template(template_file):
-    global JINJA_ENV
-    if not JINJA_ENV:
-        templateLoader = jinja2.FileSystemLoader(searchpath="/")
-        JINJA_ENV = jinja2.Environment(loader=templateLoader)
-    return JINJA_ENV.get_template(template_file)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseSwanProcess():
-    """Swan Family Process Manager
-
-    This class manages starting, restarting, and stopping the ipsec process,
-    and creates/deletes the config templates it needs.
-    """
-
-    binary = "ipsec"
-    CONFIG_DIRS = [
-        'var/run',
-        'log',
-        'etc',
-        'etc/ipsec.d/aacerts',
-        'etc/ipsec.d/acerts',
-        'etc/ipsec.d/cacerts',
-        'etc/ipsec.d/certs',
-        'etc/ipsec.d/crls',
-        'etc/ipsec.d/ocspcerts',
-        'etc/ipsec.d/policies',
-        'etc/ipsec.d/private',
-        'etc/ipsec.d/reqs',
-        'etc/pki/nssdb/'
-    ]
-
-    DIALECT_MAP = {
-        "3des": "3des",
-        "aes-128": "aes128",
-        "aes-256": "aes256",
-        "aes-192": "aes192",
-        "group2": "modp1024",
-        "group5": "modp1536",
-        "group14": "modp2048",
-        "group15": "modp3072",
-        "bi-directional": "start",
-        "response-only": "add",
-        "v2": "insist",
-        "v1": "never"
-    }
-
-    def __init__(self, conf, root_helper, process_id,
-                 vpnservice, namespace):
-        self.conf = conf
-        self.id = process_id
-        self.root_helper = root_helper
-        self.updated_pending_status = False
-        self.namespace = namespace
-        self.connection_status = {}
-        self.config_dir = os.path.join(
-            cfg.CONF.ipsec.config_base_dir, self.id)
-        self.etc_dir = os.path.join(self.config_dir, 'etc')
-        self.update_vpnservice(vpnservice)
-
-    def translate_dialect(self):
-        if not self.vpnservice:
-            return
-        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
-            self._dialect(ipsec_site_conn, 'initiator')
-            self._dialect(ipsec_site_conn['ikepolicy'], 'ike_version')
-            for key in ['encryption_algorithm',
-                        'auth_algorithm',
-                        'pfs']:
-                self._dialect(ipsec_site_conn['ikepolicy'], key)
-                self._dialect(ipsec_site_conn['ipsecpolicy'], key)
-
-    def update_vpnservice(self, vpnservice):
-        self.vpnservice = vpnservice
-        self.translate_dialect()
-
-    def _dialect(self, obj, key):
-        obj[key] = self.DIALECT_MAP.get(obj[key], obj[key])
-
-    @abc.abstractmethod
-    def ensure_configs(self):
-        pass
-
-    def ensure_config_file(self, kind, template, vpnservice):
-        """Update config file,  based on current settings for service."""
-        config_str = self._gen_config_content(template, vpnservice)
-        config_file_name = self._get_config_filename(kind)
-        utils.replace_file(config_file_name, config_str)
-
-    def remove_config(self):
-        """Remove whole config file."""
-        shutil.rmtree(self.config_dir, ignore_errors=True)
-
-    def _get_config_filename(self, kind):
-        config_dir = self.etc_dir
-        return os.path.join(config_dir, kind)
-
-    def _ensure_dir(self, dir_path):
-        if not os.path.isdir(dir_path):
-            os.makedirs(dir_path, 0o755)
-
-    def ensure_config_dir(self, vpnservice):
-        """Create config directory if it does not exist."""
-        self._ensure_dir(self.config_dir)
-        for subdir in self.CONFIG_DIRS:
-            dir_path = os.path.join(self.config_dir, subdir)
-            self._ensure_dir(dir_path)
-
-    def _gen_config_content(self, template_file, vpnservice):
-        template = _get_template(template_file)
-        return template.render(
-            {'vpnservice': vpnservice,
-             'state_path': cfg.CONF.state_path})
-
-    @abc.abstractmethod
-    def get_status(self):
-        pass
-
-    @property
-    def status(self):
-        if self.active:
-            return constants.ACTIVE
-        return constants.DOWN
-
-    @property
-    def active(self):
-        """Check if the process is active or not."""
-        if not self.namespace:
-            return False
-        try:
-            status = self.get_status()
-            self._update_connection_status(status)
-        except RuntimeError:
-            return False
-        return True
-
-    def update(self):
-        """Update Status based on vpnservice configuration."""
-        if self.vpnservice and not self.vpnservice['admin_state_up']:
-            self.disable()
-        else:
-            self.enable()
-
-        if plugin_utils.in_pending_status(self.vpnservice['status']):
-            self.updated_pending_status = True
-
-        self.vpnservice['status'] = self.status
-        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
-            if plugin_utils.in_pending_status(ipsec_site_conn['status']):
-                conn_id = ipsec_site_conn['id']
-                conn_status = self.connection_status.get(conn_id)
-                if not conn_status:
-                    continue
-                conn_status['updated_pending_status'] = True
-                ipsec_site_conn['status'] = conn_status['status']
-
-    def enable(self):
-        """Enabling the process."""
-        try:
-            self.ensure_configs()
-            if self.active:
-                self.restart()
-            else:
-                self.start()
-        except RuntimeError:
-            LOG.exception(
-                _LE("Failed to enable vpn process on router %s"),
-                self.id)
-
-    def disable(self):
-        """Disabling the process."""
-        try:
-            if self.active:
-                self.stop()
-            self.remove_config()
-        except RuntimeError:
-            LOG.exception(
-                _LE("Failed to disable vpn process on router %s"),
-                self.id)
-
-    @abc.abstractmethod
-    def restart(self):
-        """Restart process."""
-
-    @abc.abstractmethod
-    def start(self):
-        """Start process."""
-
-    @abc.abstractmethod
-    def stop(self):
-        """Stop process."""
-
-    def _update_connection_status(self, status_output):
-        for line in status_output.split('\n'):
-            m = re.search('\d\d\d "([a-f0-9\-]+).* (unrouted|erouted);', line)
-            if not m:
-                continue
-            connection_id = m.group(1)
-            status = m.group(2)
-            if not self.connection_status.get(connection_id):
-                self.connection_status[connection_id] = {
-                    'status': None,
-                    'updated_pending_status': False
-                }
-            self.connection_status[
-                connection_id]['status'] = STATUS_MAP[status]
-
-
-class OpenSwanProcess(BaseSwanProcess):
-    """OpenSwan Process manager class.
-
-    This process class uses three commands:
-    (1) ipsec pluto: the IPsec IKE keying daemon
-    (2) ipsec addconn: adds a new ipsec connection
-    (3) ipsec whack: the control interface for the IPsec keying daemon
-    """
-    def __init__(self, conf, root_helper, process_id,
-                 vpnservice, namespace):
-        super(OpenSwanProcess, self).__init__(
-            conf, root_helper, process_id,
-            vpnservice, namespace)
-        self.secrets_file = os.path.join(
-            self.etc_dir, 'ipsec.secrets')
-        self.config_file = os.path.join(
-            self.etc_dir, 'ipsec.conf')
-        self.pid_path = os.path.join(
-            self.config_dir, 'var', 'run', 'pluto')
-
-    def _execute(self, cmd, check_exit_code=True):
-        """Execute command on namespace."""
-        ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
-        return ip_wrapper.netns.execute(
-            cmd,
-            check_exit_code=check_exit_code)
-
-    def ensure_configs(self):
-        """Generate config files which are needed for OpenSwan.
-
-        If the config directories do not exist, this function
-        creates them first.
-        """
-        self.ensure_config_dir(self.vpnservice)
-        self.ensure_config_file(
-            'ipsec.conf',
-            self.conf.openswan.ipsec_config_template,
-            self.vpnservice)
-        self.ensure_config_file(
-            'ipsec.secrets',
-            self.conf.openswan.ipsec_secret_template,
-            self.vpnservice)
-
-    def get_status(self):
-        return self._execute([self.binary,
-                              'whack',
-                              '--ctlbase',
-                              self.pid_path,
-                              '--status'])
-
-    def restart(self):
-        """Restart the process."""
-        self.stop()
-        self.start()
-        return
-
-    def _get_nexthop(self, address):
-        routes = self._execute(
-            ['ip', 'route', 'get', address])
-        if routes.find('via') >= 0:
-            return routes.split(' ')[2]
-        return address
-
-    def _virtual_privates(self):
-        """Returns line of virtual_privates.
-
-        virtual_private contains the networks
-        that are allowed as subnet for the remote client.
-        """
-        virtual_privates = []
-        nets = [self.vpnservice['subnet']['cidr']]
-        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
-            nets += ipsec_site_conn['peer_cidrs']
-        for net in nets:
-            version = netaddr.IPNetwork(net).version
-            virtual_privates.append('%%v%s:%s' % (version, net))
-        return ','.join(virtual_privates)
-
-    def start(self):
-        """Start the process.
-
-        Note: if there is no namespace yet,
-        do nothing and wait for the next event.
-        """
-        if not self.namespace:
-            return
-        virtual_private = self._virtual_privates()
-        #start pluto IKE keying daemon
-        self._execute([self.binary,
-                       'pluto',
-                       '--ctlbase', self.pid_path,
-                       '--ipsecdir', self.etc_dir,
-                       '--use-netkey',
-                       '--uniqueids',
-                       '--nat_traversal',
-                       '--secretsfile', self.secrets_file,
-                       '--virtual_private', virtual_private
-                       ])
-        #add connections
-        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
-            nexthop = self._get_nexthop(ipsec_site_conn['peer_address'])
-            self._execute([self.binary,
-                           'addconn',
-                           '--ctlbase', '%s.ctl' % self.pid_path,
-                           '--defaultroutenexthop', nexthop,
-                           '--config', self.config_file,
-                           ipsec_site_conn['id']
-                           ])
-        #TODO(nati) fix this when openswan is fixed
-        #Due to openswan bug, this command always exit with 3
-        #start whack ipsec keying daemon
-        self._execute([self.binary,
-                       'whack',
-                       '--ctlbase', self.pid_path,
-                       '--listen',
-                       ], check_exit_code=False)
-
-        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
-            if not ipsec_site_conn['initiator'] == 'start':
-                continue
-            #initiate ipsec connection
-            self._execute([self.binary,
-                           'whack',
-                           '--ctlbase', self.pid_path,
-                           '--name', ipsec_site_conn['id'],
-                           '--asynchronous',
-                           '--initiate'
-                           ])
-
-    def disconnect(self):
-        if not self.namespace:
-            return
-        if not self.vpnservice:
-            return
-        for conn_id in self.connection_status:
-            self._execute([self.binary,
-                           'whack',
-                           '--ctlbase', self.pid_path,
-                           '--name', '%s/0x1' % conn_id,
-                           '--terminate'
-                           ])
-
-    def stop(self):
-        #Stop process using whack
-        #Note this will also stop pluto
-        self.disconnect()
-        self._execute([self.binary,
-                       'whack',
-                       '--ctlbase', self.pid_path,
-                       '--shutdown',
-                       ])
-        #clean connection_status info
-        self.connection_status = {}
-
-
-class IPsecVpnDriverApi(object):
-    """IPSecVpnDriver RPC api."""
-
-    def __init__(self, topic):
-        target = messaging.Target(topic=topic, version='1.0')
-        self.client = n_rpc.get_client(target)
-
-    def get_vpn_services_on_host(self, context, host):
-        """Get list of vpnservices.
-
-        The vpnservices include their related ipsec_site_connection,
-        ikepolicy, and ipsecpolicy entries for this host.
-        """
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'get_vpn_services_on_host', host=host)
-
-    def update_status(self, context, status):
-        """Update local status.
-
-        This method call updates the status attribute of
-        the VPN services.
-        """
-        cctxt = self.client.prepare()
-        return cctxt.call(context, 'update_status', status=status)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class IPsecDriver(device_drivers.DeviceDriver):
-    """VPN Device Driver for IPSec.
-
-    This class is designed for use with the L3 agent now, but the driver
-    may be used with another agent in the future, so the use of "Router"
-    is kept minimal. Instead of router_id, this code uses process_id.
-    """
-
-    # history
-    #   1.0 Initial version
-    target = messaging.Target(version='1.0')
-
-    def __init__(self, agent, host):
-        self.agent = agent
-        self.conf = self.agent.conf
-        self.root_helper = self.agent.root_helper
-        self.host = host
-        self.conn = n_rpc.create_connection(new=True)
-        self.context = context.get_admin_context_without_session()
-        self.topic = topics.IPSEC_AGENT_TOPIC
-        node_topic = '%s.%s' % (self.topic, self.host)
-
-        self.processes = {}
-        self.process_status_cache = {}
-
-        self.endpoints = [self]
-        self.conn.create_consumer(node_topic, self.endpoints, fanout=False)
-        self.conn.consume_in_threads()
-        self.agent_rpc = IPsecVpnDriverApi(topics.IPSEC_DRIVER_TOPIC)
-        self.process_status_cache_check = loopingcall.FixedIntervalLoopingCall(
-            self.report_status, self.context)
-        self.process_status_cache_check.start(
-            interval=self.conf.ipsec.ipsec_status_check_interval)
-
-    def _update_nat(self, vpnservice, func):
-        """Setting up nat rule in iptables.
-
-        We need to setup nat rule for ipsec packet.
-        :param vpnservice: vpnservices
-        :param func: self.add_nat_rule or self.remove_nat_rule
-        """
-        local_cidr = vpnservice['subnet']['cidr']
-        router_id = vpnservice['router_id']
-        for ipsec_site_connection in vpnservice['ipsec_site_connections']:
-            for peer_cidr in ipsec_site_connection['peer_cidrs']:
-                func(
-                    router_id,
-                    'POSTROUTING',
-                    '-s %s -d %s -m policy '
-                    '--dir out --pol ipsec '
-                    '-j ACCEPT ' % (local_cidr, peer_cidr),
-                    top=True)
-        self.agent.iptables_apply(router_id)
-
-    def vpnservice_updated(self, context, **kwargs):
-        """Vpnservice updated rpc handler
-
-        VPN Service Driver will call this method
-        when vpnservices updated.
-        Then this method start sync with server.
-        """
-        self.sync(context, [])
-
-    @abc.abstractmethod
-    def create_process(self, process_id, vpnservice, namespace):
-        pass
-
-    def ensure_process(self, process_id, vpnservice=None):
-        """Ensuring process.
-
-        If the process doesn't exist, it will create process
-        and store it in self.processs
-        """
-        process = self.processes.get(process_id)
-        if not process or not process.namespace:
-            namespace = self.agent.get_namespace(process_id)
-            process = self.create_process(
-                process_id,
-                vpnservice,
-                namespace)
-            self.processes[process_id] = process
-        elif vpnservice:
-            process.update_vpnservice(vpnservice)
-        return process
-
-    def create_router(self, process_id):
-        """Handling create router event.
-
-        Agent calls this method, when the process namespace
-        is ready.
-        """
-        if process_id in self.processes:
-            # In case the vpnservice was created
-            # before the router's namespace existed
-            process = self.processes[process_id]
-            self._update_nat(process.vpnservice, self.agent.add_nat_rule)
-            process.enable()
-
-    def destroy_router(self, process_id):
-        """Handling destroy_router event.
-
-        Agent calls this method, when the process namespace
-        is deleted.
-        """
-        if process_id in self.processes:
-            process = self.processes[process_id]
-            process.disable()
-            vpnservice = process.vpnservice
-            if vpnservice:
-                self._update_nat(vpnservice, self.agent.remove_nat_rule)
-            del self.processes[process_id]
-
-    def get_process_status_cache(self, process):
-        if not self.process_status_cache.get(process.id):
-            self.process_status_cache[process.id] = {
-                'status': None,
-                'id': process.vpnservice['id'],
-                'updated_pending_status': False,
-                'ipsec_site_connections': {}}
-        return self.process_status_cache[process.id]
-
-    def is_status_updated(self, process, previous_status):
-        if process.updated_pending_status:
-            return True
-        if process.status != previous_status['status']:
-            return True
-        if (process.connection_status !=
-            previous_status['ipsec_site_connections']):
-            return True
-
-    def unset_updated_pending_status(self, process):
-        process.updated_pending_status = False
-        for connection_status in process.connection_status.values():
-            connection_status['updated_pending_status'] = False
-
-    def copy_process_status(self, process):
-        return {
-            'id': process.vpnservice['id'],
-            'status': process.status,
-            'updated_pending_status': process.updated_pending_status,
-            'ipsec_site_connections': copy.deepcopy(process.connection_status)
-        }
-
-    def update_downed_connections(self, process_id, new_status):
-        """Update info to be reported, if connections just went down.
-
-        If there is no longer any information for a connection, because it
-        has been removed (e.g. due to an admin down of VPN service or IPSec
-        connection), but there was previous status information for the
-        connection, mark the connection as down for reporting purposes.
-        """
-        if process_id in self.process_status_cache:
-            for conn in self.process_status_cache[process_id][IPSEC_CONNS]:
-                if conn not in new_status[IPSEC_CONNS]:
-                    new_status[IPSEC_CONNS][conn] = {
-                        'status': constants.DOWN,
-                        'updated_pending_status': True
-                    }
-
-    def report_status(self, context):
-        status_changed_vpn_services = []
-        for process in self.processes.values():
-            previous_status = self.get_process_status_cache(process)
-            if self.is_status_updated(process, previous_status):
-                new_status = self.copy_process_status(process)
-                self.update_downed_connections(process.id, new_status)
-                status_changed_vpn_services.append(new_status)
-                self.process_status_cache[process.id] = (
-                    self.copy_process_status(process))
-                # We need unset updated_pending status after it
-                # is reported to the server side
-                self.unset_updated_pending_status(process)
-
-        if status_changed_vpn_services:
-            self.agent_rpc.update_status(
-                context,
-                status_changed_vpn_services)
-
-    @lockutils.synchronized('vpn-agent', 'neutron-')
-    def sync(self, context, routers):
-        """Sync status with server side.
-
-        :param context: context object for RPC call
-        :param routers: router objects created in this sync event
-
-        Several failure cases need to be considered, including:
-        1) Agent class restarted
-        2) Failure on process creation
-        3) VpnService deleted while the agent was down
-        4) RPC failure
-
-        To handle these failure cases, this driver takes a
-        simple full-sync strategy.
-        """
-        vpnservices = self.agent_rpc.get_vpn_services_on_host(
-            context, self.host)
-        router_ids = [vpnservice['router_id'] for vpnservice in vpnservices]
-        # Ensure the ipsec process is enabled
-        for vpnservice in vpnservices:
-            process = self.ensure_process(vpnservice['router_id'],
-                                          vpnservice=vpnservice)
-            self._update_nat(vpnservice, self.agent.add_nat_rule)
-            process.update()
-
-        # Delete any IPSec processes that are
-        # associated with routers, but are not running the VPN service.
-        for router in routers:
-            #We are using router id as process_id
-            process_id = router['id']
-            if process_id not in router_ids:
-                process = self.ensure_process(process_id)
-                self.destroy_router(process_id)
-
-        # Delete any IPSec processes running
-        # VPN that do not have an associated router.
-        process_ids = [pid for pid in self.processes if pid not in router_ids]
-        for process_id in process_ids:
-            self.destroy_router(process_id)
-        self.report_status(context)
-
-
-class OpenSwanDriver(IPsecDriver):
-    def create_process(self, process_id, vpnservice, namespace):
-        return OpenSwanProcess(
-            self.conf,
-            self.root_helper,
-            process_id,
-            vpnservice,
-            namespace)
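
The deleted OpenSwan process manager above derives per-connection status by scanning
the output of "ipsec whack --status" for lines that contain a connection UUID followed
by erouted (up) or unrouted (down), and maps those states to the OpenStack ACTIVE/DOWN
constants. A small standalone sketch of that parsing step, using fabricated status
lines that only illustrate the format:

    import re

    # String placeholders for the constants.ACTIVE / constants.DOWN values
    # used by the driver.
    STATUS_MAP = {'erouted': 'ACTIVE', 'unrouted': 'DOWN'}

    def parse_whack_status(status_output):
        """Return {connection_id: status} parsed from 'ipsec whack --status'."""
        connection_status = {}
        for line in status_output.split('\n'):
            m = re.search(r'\d\d\d "([a-f0-9\-]+).* (unrouted|erouted);', line)
            if not m:
                continue
            connection_status[m.group(1)] = STATUS_MAP[m.group(2)]
        return connection_status

    # Fabricated example lines (format only, not real device output).
    sample = ('000 "d4b2a76c-9f08-4a51-8b11-1c33d2fd5512/0x1": routed; erouted;\n'
              '000 "9a0c3bd4-5e21-4f8e-9d07-aa10f2b6c001/0x1": routed; unrouted;')
    print(parse_whack_status(sample))
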
diff --git a/neutron/services/vpn/device_drivers/template/openswan/ipsec.conf.template b/neutron/services/vpn/device_drivers/template/openswan/ipsec.conf.template
deleted file mode 100644 (file)
index 546e27e..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-# Configuration for {{vpnservice.name}}
-config setup
-    nat_traversal=yes
-    listen={{vpnservice.external_ip}}
-conn %default
-    ikelifetime=480m
-    keylife=60m
-    keyingtries=%forever
-{% for ipsec_site_connection in vpnservice.ipsec_site_connections if ipsec_site_connection.admin_state_up
-%}conn {{ipsec_site_connection.id}}
-    # NOTE: a default route is required for %defaultroute to work...
-    left={{vpnservice.external_ip}}
-    leftid={{vpnservice.external_ip}}
-    auto={{ipsec_site_connection.initiator}}
-    # NOTE:REQUIRED
-    # [subnet]
-    leftsubnet={{vpnservice.subnet.cidr}}
-    # leftsubnet=networkA/netmaskA, networkB/netmaskB (IKEv2 only)
-    leftnexthop=%defaultroute
-    ######################
-    # ipsec_site_connections
-    ######################
-    # [peer_address]
-    right={{ipsec_site_connection.peer_address}}
-    # [peer_id]
-    rightid={{ipsec_site_connection.peer_id}}
-    # [peer_cidrs]
-    rightsubnets={ {{ipsec_site_connection['peer_cidrs']|join(' ')}} }
-    # rightsubnet=networkA/netmaskA, networkB/netmaskB (IKEv2 only)
-    rightnexthop=%defaultroute
-    # [mtu]
-    # Note: it looks like this is not supported by the strongswan driver,
-    # so it is ignored for now
-    # [dpd_action]
-    dpdaction={{ipsec_site_connection.dpd_action}}
-    # [dpd_interval]
-    dpddelay={{ipsec_site_connection.dpd_interval}}
-    # [dpd_timeout]
-    dpdtimeout={{ipsec_site_connection.dpd_timeout}}
-    # [auth_mode]
-    authby=secret
-    ######################
-    # IKEPolicy params
-    ######################
-    #ike version
-    ikev2={{ipsec_site_connection.ikepolicy.ike_version}}
-    # [encryption_algorithm]-[auth_algorithm]-[pfs]
-    ike={{ipsec_site_connection.ikepolicy.encryption_algorithm}}-{{ipsec_site_connection.ikepolicy.auth_algorithm}};{{ipsec_site_connection.ikepolicy.pfs}}
-    # [lifetime_value]
-    ikelifetime={{ipsec_site_connection.ikepolicy.lifetime_value}}s
-    # NOTE: it looks like lifetime_units=kilobytes can't be enforced (could be seconds, hours, days...)
-    ##########################
-    # IPsecPolicys params
-    ##########################
-    # [transform_protocol]
-    auth={{ipsec_site_connection.ipsecpolicy.transform_protocol}}
-    # [encryption_algorithm]-[auth_algorithm]-[pfs]
-    phase2alg={{ipsec_site_connection.ipsecpolicy.encryption_algorithm}}-{{ipsec_site_connection.ipsecpolicy.auth_algorithm}};{{ipsec_site_connection.ipsecpolicy.pfs}}
-    # [encapsulation_mode]
-    type={{ipsec_site_connection.ipsecpolicy.encapsulation_mode}}
-    # [lifetime_value]
-    lifetime={{ipsec_site_connection.ipsecpolicy.lifetime_value}}s
-    # lifebytes=100000 if lifetime_units=kilobytes (IKEv2 only)
-{% endfor %}
diff --git a/neutron/services/vpn/device_drivers/template/openswan/ipsec.secret.template b/neutron/services/vpn/device_drivers/template/openswan/ipsec.secret.template
deleted file mode 100644 (file)
index 8302e85..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Configuration for {{vpnservice.name}} {% for ipsec_site_connection in vpnservice.ipsec_site_connections %}
-{{vpnservice.external_ip}} {{ipsec_site_connection.peer_id}} : PSK "{{ipsec_site_connection.psk}}"
-{% endfor %}
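
Both templates above are Jinja2 templates; the deleted device driver renders them in
_gen_config_content() by passing the vpnservice dict to template.render(). As a rough
illustration, the sketch below renders an inline copy of the short secrets template
with a made-up vpnservice mapping; the name, addresses, and PSK are placeholders.

    import jinja2

    # Inline copy of the deleted ipsec.secret.template.
    SECRET_TEMPLATE = (
        '# Configuration for {{vpnservice.name}} '
        '{% for ipsec_site_connection in vpnservice.ipsec_site_connections %}\n'
        '{{vpnservice.external_ip}} {{ipsec_site_connection.peer_id}} : '
        'PSK "{{ipsec_site_connection.psk}}"\n'
        '{% endfor %}')

    # Illustrative values only.
    vpnservice = {
        'name': 'myvpn',
        'external_ip': '192.0.2.10',
        'ipsec_site_connections': [
            {'peer_id': '198.51.100.5', 'psk': 'secret'},
        ],
    }
    print(jinja2.Template(SECRET_TEMPLATE).render(vpnservice=vpnservice))
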
diff --git a/neutron/services/vpn/plugin.py b/neutron/services/vpn/plugin.py
deleted file mode 100644 (file)
index dd8a20b..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-
-#    (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
-#    All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.db.vpn import vpn_db
-from neutron.i18n import _LI
-from neutron.openstack.common import log as logging
-from neutron.plugins.common import constants
-from neutron.services import service_base
-
-LOG = logging.getLogger(__name__)
-
-
-class VPNPlugin(vpn_db.VPNPluginDb):
-
-    """Implementation of the VPN Service Plugin.
-
-    This class manages the workflow of VPNaaS request/response.
-    Most DB related works are implemented in class
-    vpn_db.VPNPluginDb.
-    """
-    supported_extension_aliases = ["vpnaas", "service-type"]
-
-
-class VPNDriverPlugin(VPNPlugin, vpn_db.VPNPluginRpcDbMixin):
-    """VpnPlugin which supports VPN Service Drivers."""
-    #TODO(nati) handle ikepolicy and ipsecpolicy update usecase
-    def __init__(self):
-        super(VPNDriverPlugin, self).__init__()
-        # Load the service driver from neutron.conf.
-        drivers, default_provider = service_base.load_drivers(
-            constants.VPN, self)
-        LOG.info(_LI("VPN plugin using service driver: %s"), default_provider)
-        self.ipsec_driver = drivers[default_provider]
-
-    def _get_driver_for_vpnservice(self, vpnservice):
-        return self.ipsec_driver
-
-    def _get_driver_for_ipsec_site_connection(self, context,
-                                              ipsec_site_connection):
-        #TODO(nati) get vpnservice when we support service type framework
-        vpnservice = None
-        return self._get_driver_for_vpnservice(vpnservice)
-
-    def _get_validator(self):
-        return self.ipsec_driver.validator
-
-    def create_ipsec_site_connection(self, context, ipsec_site_connection):
-        ipsec_site_connection = super(
-            VPNDriverPlugin, self).create_ipsec_site_connection(
-                context, ipsec_site_connection)
-        driver = self._get_driver_for_ipsec_site_connection(
-            context, ipsec_site_connection)
-        driver.create_ipsec_site_connection(context, ipsec_site_connection)
-        return ipsec_site_connection
-
-    def delete_ipsec_site_connection(self, context, ipsec_conn_id):
-        ipsec_site_connection = self.get_ipsec_site_connection(
-            context, ipsec_conn_id)
-        super(VPNDriverPlugin, self).delete_ipsec_site_connection(
-            context, ipsec_conn_id)
-        driver = self._get_driver_for_ipsec_site_connection(
-            context, ipsec_site_connection)
-        driver.delete_ipsec_site_connection(context, ipsec_site_connection)
-
-    def update_ipsec_site_connection(
-            self, context,
-            ipsec_conn_id, ipsec_site_connection):
-        old_ipsec_site_connection = self.get_ipsec_site_connection(
-            context, ipsec_conn_id)
-        ipsec_site_connection = super(
-            VPNDriverPlugin, self).update_ipsec_site_connection(
-                context,
-                ipsec_conn_id,
-                ipsec_site_connection)
-        driver = self._get_driver_for_ipsec_site_connection(
-            context, ipsec_site_connection)
-        driver.update_ipsec_site_connection(
-            context, old_ipsec_site_connection, ipsec_site_connection)
-        return ipsec_site_connection
-
-    def update_vpnservice(self, context, vpnservice_id, vpnservice):
-        old_vpn_service = self.get_vpnservice(context, vpnservice_id)
-        new_vpn_service = super(
-            VPNDriverPlugin, self).update_vpnservice(context, vpnservice_id,
-                                                     vpnservice)
-        driver = self._get_driver_for_vpnservice(old_vpn_service)
-        driver.update_vpnservice(context, old_vpn_service, new_vpn_service)
-        return new_vpn_service
-
-    def delete_vpnservice(self, context, vpnservice_id):
-        vpnservice = self._get_vpnservice(context, vpnservice_id)
-        super(VPNDriverPlugin, self).delete_vpnservice(context, vpnservice_id)
-        driver = self._get_driver_for_vpnservice(vpnservice)
-        driver.delete_vpnservice(context, vpnservice)
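For context on the plugin removed above: every write call persists the change through the DB mixin (via super()) and only then hands the result to the loaded service driver, so the database is already up to date when the driver is notified. Below is a minimal, self-contained sketch of that dispatch pattern; FakeDb, FakeDriver and Plugin are illustrative stand-ins, not Neutron classes.

    class FakeDb(object):
        def create_ipsec_site_connection(self, context, conn):
            conn['id'] = 'conn-1'  # pretend the row was written
            return conn

    class FakeDriver(object):
        def create_ipsec_site_connection(self, context, conn):
            print('driver notified for %s' % conn['id'])

    class Plugin(FakeDb):
        def __init__(self):
            self.ipsec_driver = FakeDriver()

        def create_ipsec_site_connection(self, context, conn):
            # DB first (super), then the device-facing driver.
            conn = super(Plugin, self).create_ipsec_site_connection(
                context, conn)
            self.ipsec_driver.create_ipsec_site_connection(context, conn)
            return conn

    Plugin().create_ipsec_site_connection(None, {'peer_id': '192.0.2.1'})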
index 0f6624193755bec158eb58ffadbe819eaa61c730..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644 (file)
@@ -1,111 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT I3, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-from oslo import messaging
-import six
-
-from neutron.common import rpc as n_rpc
-from neutron.db.vpn import vpn_validator
-from neutron import manager
-from neutron.openstack.common import log as logging
-from neutron.plugins.common import constants
-
-LOG = logging.getLogger(__name__)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class VpnDriver(object):
-
-    def __init__(self, service_plugin, validator=None):
-        self.service_plugin = service_plugin
-        if validator is None:
-            validator = vpn_validator.VpnReferenceValidator()
-        self.validator = validator
-
-    @property
-    def l3_plugin(self):
-        return manager.NeutronManager.get_service_plugins().get(
-            constants.L3_ROUTER_NAT)
-
-    @property
-    def service_type(self):
-        pass
-
-    @abc.abstractmethod
-    def create_vpnservice(self, context, vpnservice):
-        pass
-
-    @abc.abstractmethod
-    def update_vpnservice(
-        self, context, old_vpnservice, vpnservice):
-        pass
-
-    @abc.abstractmethod
-    def delete_vpnservice(self, context, vpnservice):
-        pass
-
-    @abc.abstractmethod
-    def create_ipsec_site_connection(self, context, ipsec_site_connection):
-        pass
-
-    @abc.abstractmethod
-    def update_ipsec_site_connection(self, context, old_ipsec_site_connection,
-                                     ipsec_site_connection):
-        pass
-
-    @abc.abstractmethod
-    def delete_ipsec_site_connection(self, context, ipsec_site_connection):
-        pass
-
-
-class BaseIPsecVpnAgentApi(object):
-    """Base class for IPSec API to agent."""
-
-    def __init__(self, topic, default_version, driver):
-        self.topic = topic
-        self.driver = driver
-        target = messaging.Target(topic=topic, version=default_version)
-        self.client = n_rpc.get_client(target)
-
-    def _agent_notification(self, context, method, router_id,
-                            version=None, **kwargs):
-        """Notify update for the agent.
-
-        This method finds the L3 agents hosting the router and
-        dispatches the notification to each of them.
-        """
-        admin_context = context if context.is_admin else context.elevated()
-        if not version:
-            version = self.target.version
-        l3_agents = self.driver.l3_plugin.get_l3_agents_hosting_routers(
-            admin_context, [router_id],
-            admin_state_up=True,
-            active=True)
-        for l3_agent in l3_agents:
-            LOG.debug('Notify agent at %(topic)s.%(host)s the message '
-                      '%(method)s %(args)s',
-                      {'topic': self.topic,
-                       'host': l3_agent.host,
-                       'method': method,
-                       'args': kwargs})
-            cctxt = self.client.prepare(server=l3_agent.host, version=version)
-            cctxt.cast(context, method, **kwargs)
-
-    def vpnservice_updated(self, context, router_id, **kwargs):
-        """Send update event of vpnservices."""
-        self._agent_notification(context, 'vpnservice_updated', router_id,
-                                 **kwargs)
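The notification helper above elevates the request context, looks up every active L3 agent hosting the router, and casts one RPC per host. A small stand-alone sketch of that fan-out follows; FakeClient and the agent list are hypothetical stand-ins for the oslo.messaging client and the l3_plugin lookup.

    class FakeClient(object):
        def prepare(self, server=None, version=None):
            self.server = server  # remember which host the cast targets
            return self

        def cast(self, context, method, **kwargs):
            print('cast %s to %s: %s' % (method, self.server, kwargs))

    def notify(client, l3_agents, method, **kwargs):
        # One prepared context and one cast per hosting agent.
        for agent in l3_agents:
            cctxt = client.prepare(server=agent['host'], version='1.0')
            cctxt.cast(None, method, **kwargs)

    notify(FakeClient(), [{'host': 'net-node-1'}, {'host': 'net-node-2'}],
           'vpnservice_updated', reason='ipsec-conn-create')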
diff --git a/neutron/services/vpn/service_drivers/cisco_ipsec.py b/neutron/services/vpn/service_drivers/cisco_ipsec.py
deleted file mode 100644 (file)
index bab3793..0000000
+++ /dev/null
@@ -1,231 +0,0 @@
-# Copyright 2014 Cisco Systems, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo import messaging
-
-from neutron.common import rpc as n_rpc
-from neutron.db.vpn import vpn_db
-from neutron.openstack.common import log as logging
-from neutron.plugins.cisco.l3.plugging_drivers import (
-    n1kv_plugging_constants as n1kv_constants)
-from neutron.services.vpn.common import topics
-from neutron.services.vpn import service_drivers
-from neutron.services.vpn.service_drivers import cisco_csr_db as csr_id_map
-from neutron.services.vpn.service_drivers import cisco_validator
-
-LOG = logging.getLogger(__name__)
-
-IPSEC = 'ipsec'
-BASE_IPSEC_VERSION = '1.0'
-LIFETIME_LIMITS = {'IKE Policy': {'min': 60, 'max': 86400},
-                   'IPSec Policy': {'min': 120, 'max': 2592000}}
-MIN_CSR_MTU = 1500
-MAX_CSR_MTU = 9192
-VRF_SUFFIX_LEN = 6
-
-
-class CiscoCsrIPsecVpnDriverCallBack(object):
-
-    """Handler for agent to plugin RPC messaging."""
-
-    # history
-    #   1.0 Initial version
-
-    target = messaging.Target(version=BASE_IPSEC_VERSION)
-
-    def __init__(self, driver):
-        super(CiscoCsrIPsecVpnDriverCallBack, self).__init__()
-        self.driver = driver
-
-    def create_rpc_dispatcher(self):
-        return n_rpc.PluginRpcDispatcher([self])
-
-    def get_vpn_services_using(self, context, router_id):
-        query = context.session.query(vpn_db.VPNService)
-        query = query.join(vpn_db.IPsecSiteConnection)
-        query = query.join(vpn_db.IKEPolicy)
-        query = query.join(vpn_db.IPsecPolicy)
-        query = query.join(vpn_db.IPsecPeerCidr)
-        query = query.filter(vpn_db.VPNService.router_id == router_id)
-        return query.all()
-
-    def get_vpn_services_on_host(self, context, host=None):
-        """Returns info on the VPN services on the host."""
-        routers = self.driver.l3_plugin.get_active_routers_for_host(context,
-                                                                    host)
-        host_vpn_services = []
-        for router in routers:
-            vpn_services = self.get_vpn_services_using(context, router['id'])
-            for vpn_service in vpn_services:
-                host_vpn_services.append(
-                    self.driver._make_vpnservice_dict(context, vpn_service,
-                                                      router))
-        return host_vpn_services
-
-    def update_status(self, context, status):
-        """Update status of all vpnservices."""
-        plugin = self.driver.service_plugin
-        plugin.update_status_by_agent(context, status)
-
-
-class CiscoCsrIPsecVpnAgentApi(service_drivers.BaseIPsecVpnAgentApi):
-
-    """API and handler for Cisco IPSec plugin to agent RPC messaging."""
-
-    target = messaging.Target(version=BASE_IPSEC_VERSION)
-
-    def __init__(self, topic, default_version, driver):
-        super(CiscoCsrIPsecVpnAgentApi, self).__init__(
-            topic, default_version, driver)
-
-    def _agent_notification(self, context, method, router_id,
-                            version=None, **kwargs):
-        """Notify update for the agent.
-
-        Finds the host for the router being notified and then
-        dispatches a notification to the VPN device driver.
-        """
-        admin_context = context if context.is_admin else context.elevated()
-        if not version:
-            version = self.target.version
-        host = self.driver.l3_plugin.get_host_for_router(admin_context,
-                                                         router_id)
-        LOG.debug('Notify agent at %(topic)s.%(host)s the message '
-                  '%(method)s %(args)s for router %(router)s',
-                  {'topic': self.topic,
-                   'host': host,
-                   'method': method,
-                   'args': kwargs,
-                   'router': router_id})
-        cctxt = self.client.prepare(server=host, version=version)
-        cctxt.cast(context, method, **kwargs)
-
-
-class CiscoCsrIPsecVPNDriver(service_drivers.VpnDriver):
-
-    """Cisco CSR VPN Service Driver class for IPsec."""
-
-    def __init__(self, service_plugin):
-        super(CiscoCsrIPsecVPNDriver, self).__init__(
-            service_plugin,
-            cisco_validator.CiscoCsrVpnValidator(service_plugin))
-        self.endpoints = [CiscoCsrIPsecVpnDriverCallBack(self)]
-        self.conn = n_rpc.create_connection(new=True)
-        self.conn.create_consumer(
-            topics.CISCO_IPSEC_DRIVER_TOPIC, self.endpoints, fanout=False)
-        self.conn.consume_in_threads()
-        self.agent_rpc = CiscoCsrIPsecVpnAgentApi(
-            topics.CISCO_IPSEC_AGENT_TOPIC, BASE_IPSEC_VERSION, self)
-
-    @property
-    def service_type(self):
-        return IPSEC
-
-    def create_ipsec_site_connection(self, context, ipsec_site_connection):
-        vpnservice = self.service_plugin._get_vpnservice(
-            context, ipsec_site_connection['vpnservice_id'])
-        csr_id_map.create_tunnel_mapping(context, ipsec_site_connection)
-        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
-                                          reason='ipsec-conn-create')
-
-    def update_ipsec_site_connection(
-        self, context, old_ipsec_site_connection, ipsec_site_connection):
-        vpnservice = self.service_plugin._get_vpnservice(
-            context, ipsec_site_connection['vpnservice_id'])
-        self.agent_rpc.vpnservice_updated(
-            context, vpnservice['router_id'],
-            reason='ipsec-conn-update')
-
-    def delete_ipsec_site_connection(self, context, ipsec_site_connection):
-        vpnservice = self.service_plugin._get_vpnservice(
-            context, ipsec_site_connection['vpnservice_id'])
-        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
-                                          reason='ipsec-conn-delete')
-
-    def create_ikepolicy(self, context, ikepolicy):
-        pass
-
-    def delete_ikepolicy(self, context, ikepolicy):
-        pass
-
-    def update_ikepolicy(self, context, old_ikepolicy, ikepolicy):
-        pass
-
-    def create_ipsecpolicy(self, context, ipsecpolicy):
-        pass
-
-    def delete_ipsecpolicy(self, context, ipsecpolicy):
-        pass
-
-    def update_ipsecpolicy(self, context, old_ipsec_policy, ipsecpolicy):
-        pass
-
-    def create_vpnservice(self, context, vpnservice):
-        pass
-
-    def update_vpnservice(self, context, old_vpnservice, vpnservice):
-        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
-                                          reason='vpn-service-update')
-
-    def delete_vpnservice(self, context, vpnservice):
-        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
-                                          reason='vpn-service-delete')
-
-    def get_cisco_connection_mappings(self, conn_id, context):
-        """Obtain persisted mappings for IDs related to connection."""
-        tunnel_id, ike_id, ipsec_id = csr_id_map.get_tunnel_mapping_for(
-            conn_id, context.session)
-        return {'site_conn_id': u'Tunnel%d' % tunnel_id,
-                'ike_policy_id': u'%d' % ike_id,
-                'ipsec_policy_id': u'%s' % ipsec_id}
-
-    def _create_interface(self, interface_info):
-        hosting_info = interface_info['hosting_info']
-        vlan = hosting_info['segmentation_id']
-        # Port name is currently t{1,2}_p:1, as there is only one router
-        # per CSR, but keep the algorithm semi-generic
-        port_name = hosting_info['hosting_port_name']
-        name, sep, num = port_name.partition(':')
-        offset = 1 if name in n1kv_constants.T2_PORT_NAME else 0
-        if_num = int(num) * 2 + offset
-        return 'GigabitEthernet%d.%d' % (if_num, vlan)
-
-    def _get_router_info(self, router_info):
-        hosting_device = router_info['hosting_device']
-        return {'rest_mgmt_ip': hosting_device['management_ip_address'],
-                'username': hosting_device['credentials']['username'],
-                'password': hosting_device['credentials']['password'],
-                'inner_if_name': self._create_interface(
-                    router_info['_interfaces'][0]),
-                'outer_if_name': self._create_interface(
-                    router_info['gw_port']),
-                'vrf': 'nrouter-' + router_info['id'][:VRF_SUFFIX_LEN],
-                'timeout': 30}  # Hard-coded for now
-
-    def _make_vpnservice_dict(self, context, vpnservice, router_info):
-        """Collect all service info, including Cisco info for IPSec conn."""
-        vpnservice_dict = dict(vpnservice)
-        vpnservice_dict['ipsec_conns'] = []
-        vpnservice_dict['subnet'] = dict(vpnservice.subnet)
-        vpnservice_dict['router_info'] = self._get_router_info(router_info)
-        for ipsec_conn in vpnservice.ipsec_site_connections:
-            ipsec_conn_dict = dict(ipsec_conn)
-            ipsec_conn_dict['ike_policy'] = dict(ipsec_conn.ikepolicy)
-            ipsec_conn_dict['ipsec_policy'] = dict(ipsec_conn.ipsecpolicy)
-            ipsec_conn_dict['peer_cidrs'] = [
-                peer_cidr.cidr for peer_cidr in ipsec_conn.peer_cidrs]
-            ipsec_conn_dict['cisco'] = self.get_cisco_connection_mappings(
-                ipsec_conn['id'], context)
-            vpnservice_dict['ipsec_conns'].append(ipsec_conn_dict)
-        return vpnservice_dict
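One detail of the Cisco driver removed above that benefits from a worked example is _create_interface, which derives the CSR sub-interface name from the hosting port name and the VLAN. The sketch below is self-contained; the T2_PORT_NAME value is an assumption made purely for illustration and is not shown in this diff.

    T2_PORT_NAME = 't2_p:'  # assumed value, for illustration only

    def create_interface(port_name, vlan):
        name, _sep, num = port_name.partition(':')
        # Substring membership test, mirroring the deleted code.
        offset = 1 if name in T2_PORT_NAME else 0
        if_num = int(num) * 2 + offset
        return 'GigabitEthernet%d.%d' % (if_num, vlan)

    print(create_interface('t1_p:1', 100))  # GigabitEthernet2.100
    print(create_interface('t2_p:1', 100))  # GigabitEthernet3.100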
diff --git a/neutron/services/vpn/service_drivers/cisco_validator.py b/neutron/services/vpn/service_drivers/cisco_validator.py
deleted file mode 100644 (file)
index e5e2198..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2014 Cisco Systems, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import netaddr
-from netaddr import core as net_exc
-
-from neutron.common import exceptions
-from neutron.db.vpn import vpn_validator
-from neutron.openstack.common import log as logging
-
-
-LIFETIME_LIMITS = {'IKE Policy': {'min': 60, 'max': 86400},
-                   'IPSec Policy': {'min': 120, 'max': 2592000}}
-MIN_CSR_MTU = 1500
-MAX_CSR_MTU = 9192
-
-LOG = logging.getLogger(__name__)
-
-
-class CsrValidationFailure(exceptions.BadRequest):
-    message = _("Cisco CSR does not support %(resource)s attribute %(key)s "
-                "with value '%(value)s'")
-
-
-class CiscoCsrVpnValidator(vpn_validator.VpnReferenceValidator):
-
-    """Validator methods for the Cisco CSR."""
-
-    def __init__(self, service_plugin):
-        self.service_plugin = service_plugin
-        super(CiscoCsrVpnValidator, self).__init__()
-
-    def validate_lifetime(self, for_policy, policy_info):
-        """Ensure lifetime in secs and value is supported, based on policy."""
-        units = policy_info['lifetime']['units']
-        if units != 'seconds':
-            raise CsrValidationFailure(resource=for_policy,
-                                       key='lifetime:units',
-                                       value=units)
-        value = policy_info['lifetime']['value']
-        if (value < LIFETIME_LIMITS[for_policy]['min'] or
-            value > LIFETIME_LIMITS[for_policy]['max']):
-            raise CsrValidationFailure(resource=for_policy,
-                                       key='lifetime:value',
-                                       value=value)
-
-    def validate_ike_version(self, policy_info):
-        """Ensure IKE policy is v1 for current REST API."""
-        version = policy_info['ike_version']
-        if version != 'v1':
-            raise CsrValidationFailure(resource='IKE Policy',
-                                       key='ike_version',
-                                       value=version)
-
-    def validate_mtu(self, conn_info):
-        """Ensure the MTU value is supported."""
-        mtu = conn_info['mtu']
-        if mtu < MIN_CSR_MTU or mtu > MAX_CSR_MTU:
-            raise CsrValidationFailure(resource='IPSec Connection',
-                                       key='mtu',
-                                       value=mtu)
-
-    def validate_public_ip_present(self, router):
-        """Ensure there is one gateway IP specified for the router used."""
-        gw_port = router.gw_port
-        if not gw_port or len(gw_port.fixed_ips) != 1:
-            raise CsrValidationFailure(resource='IPSec Connection',
-                                       key='router:gw_port:ip_address',
-                                       value='missing')
-
-    def validate_peer_id(self, ipsec_conn):
-        """Ensure that an IP address is specified for peer ID."""
-        # TODO(pcm) Should we check peer_address too?
-        peer_id = ipsec_conn['peer_id']
-        try:
-            netaddr.IPAddress(peer_id)
-        except net_exc.AddrFormatError:
-            raise CsrValidationFailure(resource='IPSec Connection',
-                                       key='peer_id', value=peer_id)
-
-    def validate_ipsec_site_connection(self, context, ipsec_sitecon,
-                                       ip_version):
-        """Validate IPSec site connection for Cisco CSR.
-
-        After doing reference validation, do additional checks that relate
-        to the Cisco CSR.
-        """
-        super(CiscoCsrVpnValidator, self)._check_dpd(ipsec_sitecon)
-
-        ike_policy = self.service_plugin.get_ikepolicy(
-            context, ipsec_sitecon['ikepolicy_id'])
-        ipsec_policy = self.service_plugin.get_ipsecpolicy(
-            context, ipsec_sitecon['ipsecpolicy_id'])
-        vpn_service = self.service_plugin.get_vpnservice(
-            context, ipsec_sitecon['vpnservice_id'])
-        router = self.l3_plugin._get_router(context, vpn_service['router_id'])
-        self.validate_lifetime('IKE Policy', ike_policy)
-        self.validate_lifetime('IPSec Policy', ipsec_policy)
-        self.validate_ike_version(ike_policy)
-        self.validate_mtu(ipsec_sitecon)
-        self.validate_public_ip_present(router)
-        self.validate_peer_id(ipsec_sitecon)
-        LOG.debug("IPSec connection validated for Cisco CSR")
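The validator above rejects anything the CSR REST API cannot program; the lifetime check is the simplest case, requiring seconds as the unit and a value inside the per-policy limits. A minimal illustration follows, using a plain ValueError in place of CsrValidationFailure.

    LIFETIME_LIMITS = {'IKE Policy': {'min': 60, 'max': 86400},
                       'IPSec Policy': {'min': 120, 'max': 2592000}}

    def validate_lifetime(for_policy, policy_info):
        units = policy_info['lifetime']['units']
        if units != 'seconds':
            raise ValueError('unsupported lifetime units: %s' % units)
        value = policy_info['lifetime']['value']
        limits = LIFETIME_LIMITS[for_policy]
        if not limits['min'] <= value <= limits['max']:
            raise ValueError('lifetime %s out of range' % value)

    validate_lifetime('IKE Policy',
                      {'lifetime': {'units': 'seconds', 'value': 3600}})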
diff --git a/neutron/services/vpn/service_drivers/ipsec.py b/neutron/services/vpn/service_drivers/ipsec.py
deleted file mode 100644 (file)
index ebda1c9..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT I3, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import netaddr
-
-from oslo import messaging
-
-from neutron.common import rpc as n_rpc
-from neutron.openstack.common import log as logging
-from neutron.services.vpn.common import topics
-from neutron.services.vpn import service_drivers
-
-
-LOG = logging.getLogger(__name__)
-
-IPSEC = 'ipsec'
-BASE_IPSEC_VERSION = '1.0'
-
-
-class IPsecVpnDriverCallBack(object):
-    """Callback for IPSecVpnDriver rpc."""
-
-    # history
-    #   1.0 Initial version
-
-    target = messaging.Target(version=BASE_IPSEC_VERSION)
-
-    def __init__(self, driver):
-        super(IPsecVpnDriverCallBack, self).__init__()
-        self.driver = driver
-
-    def get_vpn_services_on_host(self, context, host=None):
-        """Returns the vpnservices on the host."""
-        plugin = self.driver.service_plugin
-        vpnservices = plugin._get_agent_hosting_vpn_services(
-            context, host)
-        return [self.driver._make_vpnservice_dict(vpnservice)
-                for vpnservice in vpnservices]
-
-    def update_status(self, context, status):
-        """Update status of vpnservices."""
-        plugin = self.driver.service_plugin
-        plugin.update_status_by_agent(context, status)
-
-
-class IPsecVpnAgentApi(service_drivers.BaseIPsecVpnAgentApi):
-    """Agent RPC API for IPsecVPNAgent."""
-
-    target = messaging.Target(version=BASE_IPSEC_VERSION)
-
-    def __init__(self, topic, default_version, driver):
-        super(IPsecVpnAgentApi, self).__init__(
-            topic, default_version, driver)
-
-
-class IPsecVPNDriver(service_drivers.VpnDriver):
-    """VPN Service Driver class for IPsec."""
-
-    def __init__(self, service_plugin):
-        super(IPsecVPNDriver, self).__init__(service_plugin)
-        self.endpoints = [IPsecVpnDriverCallBack(self)]
-        self.conn = n_rpc.create_connection(new=True)
-        self.conn.create_consumer(
-            topics.IPSEC_DRIVER_TOPIC, self.endpoints, fanout=False)
-        self.conn.consume_in_threads()
-        self.agent_rpc = IPsecVpnAgentApi(
-            topics.IPSEC_AGENT_TOPIC, BASE_IPSEC_VERSION, self)
-
-    @property
-    def service_type(self):
-        return IPSEC
-
-    def create_ipsec_site_connection(self, context, ipsec_site_connection):
-        vpnservice = self.service_plugin._get_vpnservice(
-            context, ipsec_site_connection['vpnservice_id'])
-        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
-
-    def update_ipsec_site_connection(
-        self, context, old_ipsec_site_connection, ipsec_site_connection):
-        vpnservice = self.service_plugin._get_vpnservice(
-            context, ipsec_site_connection['vpnservice_id'])
-        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
-
-    def delete_ipsec_site_connection(self, context, ipsec_site_connection):
-        vpnservice = self.service_plugin._get_vpnservice(
-            context, ipsec_site_connection['vpnservice_id'])
-        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
-
-    def create_ikepolicy(self, context, ikepolicy):
-        pass
-
-    def delete_ikepolicy(self, context, ikepolicy):
-        pass
-
-    def update_ikepolicy(self, context, old_ikepolicy, ikepolicy):
-        pass
-
-    def create_ipsecpolicy(self, context, ipsecpolicy):
-        pass
-
-    def delete_ipsecpolicy(self, context, ipsecpolicy):
-        pass
-
-    def update_ipsecpolicy(self, context, old_ipsec_policy, ipsecpolicy):
-        pass
-
-    def create_vpnservice(self, context, vpnservice):
-        pass
-
-    def update_vpnservice(self, context, old_vpnservice, vpnservice):
-        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
-
-    def delete_vpnservice(self, context, vpnservice):
-        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
-
-    def _make_vpnservice_dict(self, vpnservice):
-        """Convert vpnservice information for vpn agent.
-
-        Also converts parameter names for the vpn agent driver.
-        """
-        vpnservice_dict = dict(vpnservice)
-        vpnservice_dict['ipsec_site_connections'] = []
-        vpnservice_dict['subnet'] = dict(
-            vpnservice.subnet)
-        vpnservice_dict['external_ip'] = vpnservice.router.gw_port[
-            'fixed_ips'][0]['ip_address']
-        for ipsec_site_connection in vpnservice.ipsec_site_connections:
-            ipsec_site_connection_dict = dict(ipsec_site_connection)
-            try:
-                netaddr.IPAddress(ipsec_site_connection['peer_id'])
-            except netaddr.core.AddrFormatError:
-                ipsec_site_connection['peer_id'] = (
-                    '@' + ipsec_site_connection['peer_id'])
-            ipsec_site_connection_dict['ikepolicy'] = dict(
-                ipsec_site_connection.ikepolicy)
-            ipsec_site_connection_dict['ipsecpolicy'] = dict(
-                ipsec_site_connection.ipsecpolicy)
-            vpnservice_dict['ipsec_site_connections'].append(
-                ipsec_site_connection_dict)
-            peer_cidrs = [
-                peer_cidr.cidr
-                for peer_cidr in ipsec_site_connection.peer_cidrs]
-            ipsec_site_connection_dict['peer_cidrs'] = peer_cidrs
-        return vpnservice_dict
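_make_vpnservice_dict above also normalises peer IDs for the agent driver: any peer_id that does not parse as an IP address is prefixed with '@' so the agent treats it as a name rather than an address. A small self-contained illustration, using the same netaddr calls as the deleted module:

    import netaddr

    def normalise_peer_id(peer_id):
        try:
            netaddr.IPAddress(peer_id)
            return peer_id
        except netaddr.core.AddrFormatError:
            return '@' + peer_id

    print(normalise_peer_id('192.0.2.10'))       # 192.0.2.10
    print(normalise_peer_id('vpn.example.org'))  # @vpn.example.org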
diff --git a/neutron/tests/unit/db/firewall/__init__.py b/neutron/tests/unit/db/firewall/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/db/firewall/test_db_firewall.py b/neutron/tests/unit/db/firewall/test_db_firewall.py
deleted file mode 100644 (file)
index fbb37ad..0000000
+++ /dev/null
@@ -1,1239 +0,0 @@
-# Copyright 2013 Big Switch Networks, Inc.
-# All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License"); you may
-#  not use this file except in compliance with the License. You may obtain
-#  a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#  License for the specific language governing permissions and limitations
-#  under the License.
-
-from oslo.config import cfg
-
-import contextlib
-
-import mock
-from oslo.utils import importutils
-import webob.exc
-
-from neutron.api import extensions as api_ext
-from neutron.common import config
-from neutron import context
-from neutron.db.firewall import firewall_db as fdb
-import neutron.extensions
-from neutron.extensions import firewall
-from neutron import manager
-from neutron.openstack.common import uuidutils
-from neutron.plugins.common import constants
-from neutron.services.firewall import fwaas_plugin
-from neutron.tests.unit import test_db_plugin
-
-
-DB_FW_PLUGIN_KLASS = (
-    "neutron.db.firewall.firewall_db.Firewall_db_mixin"
-)
-FWAAS_PLUGIN = 'neutron.services.firewall.fwaas_plugin'
-DELETEFW_PATH = FWAAS_PLUGIN + '.FirewallAgentApi.delete_firewall'
-extensions_path = ':'.join(neutron.extensions.__path__)
-DESCRIPTION = 'default description'
-SHARED = True
-PROTOCOL = 'tcp'
-IP_VERSION = 4
-SOURCE_IP_ADDRESS_RAW = '1.1.1.1'
-DESTINATION_IP_ADDRESS_RAW = '2.2.2.2'
-SOURCE_PORT = '55000:56000'
-DESTINATION_PORT = '56000:57000'
-ACTION = 'allow'
-AUDITED = True
-ENABLED = True
-ADMIN_STATE_UP = True
-
-
-class FakeAgentApi(fwaas_plugin.FirewallCallbacks):
-    """
-    This class, used to mock the AgentAPI delete method, inherits from
-    FirewallCallbacks because it needs access to the firewall_deleted method.
-    The delete_firewall method belongs to the FirewallAgentApi, which has
-    no access to the firewall_deleted method normally because it's not
-    responsible for deleting the firewall from the DB. However, it needs
-    to in the unit tests since there is no agent to call back.
-    """
-    def __init__(self):
-        pass
-
-    def delete_firewall(self, context, firewall, **kwargs):
-        self.plugin = manager.NeutronManager.get_service_plugins()['FIREWALL']
-        self.firewall_deleted(context, firewall['id'], **kwargs)
-
-
-class FirewallPluginDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
-    resource_prefix_map = dict(
-        (k, constants.COMMON_PREFIXES[constants.FIREWALL])
-        for k in firewall.RESOURCE_ATTRIBUTE_MAP.keys()
-    )
-
-    def setUp(self, core_plugin=None, fw_plugin=None, ext_mgr=None):
-        self.agentapi_delf_p = mock.patch(DELETEFW_PATH, create=True,
-                                          new=FakeAgentApi().delete_firewall)
-        self.agentapi_delf_p.start()
-        if not fw_plugin:
-            fw_plugin = DB_FW_PLUGIN_KLASS
-        service_plugins = {'fw_plugin_name': fw_plugin}
-
-        fdb.Firewall_db_mixin.supported_extension_aliases = ["fwaas"]
-        super(FirewallPluginDbTestCase, self).setUp(
-            ext_mgr=ext_mgr,
-            service_plugins=service_plugins
-        )
-
-        if not ext_mgr:
-            self.plugin = importutils.import_object(fw_plugin)
-            ext_mgr = api_ext.PluginAwareExtensionManager(
-                extensions_path,
-                {constants.FIREWALL: self.plugin}
-            )
-            app = config.load_paste_app('extensions_test_app')
-            self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr)
-
-    def _test_list_resources(self, resource, items,
-                             neutron_context=None,
-                             query_params=None):
-        if resource.endswith('y'):
-            resource_plural = resource.replace('y', 'ies')
-        else:
-            resource_plural = resource + 's'
-
-        res = self._list(resource_plural,
-                         neutron_context=neutron_context,
-                         query_params=query_params)
-        resource = resource.replace('-', '_')
-        self.assertEqual(sorted([i['id'] for i in res[resource_plural]]),
-                         sorted([i[resource]['id'] for i in items]))
-
-    def _get_test_firewall_rule_attrs(self, name='firewall_rule1'):
-        attrs = {'name': name,
-                 'tenant_id': self._tenant_id,
-                 'shared': SHARED,
-                 'protocol': PROTOCOL,
-                 'ip_version': IP_VERSION,
-                 'source_ip_address': SOURCE_IP_ADDRESS_RAW,
-                 'destination_ip_address': DESTINATION_IP_ADDRESS_RAW,
-                 'source_port': SOURCE_PORT,
-                 'destination_port': DESTINATION_PORT,
-                 'action': ACTION,
-                 'enabled': ENABLED}
-        return attrs
-
-    def _get_test_firewall_policy_attrs(self, name='firewall_policy1',
-                                        audited=AUDITED):
-        attrs = {'name': name,
-                 'description': DESCRIPTION,
-                 'tenant_id': self._tenant_id,
-                 'shared': SHARED,
-                 'firewall_rules': [],
-                 'audited': audited}
-        return attrs
-
-    def _get_test_firewall_attrs(
-        self, name='firewall_1', status='PENDING_CREATE'):
-        attrs = {'name': name,
-                 'tenant_id': self._tenant_id,
-                 'admin_state_up': ADMIN_STATE_UP,
-                 'status': status}
-
-        return attrs
-
-    def _create_firewall_policy(self, fmt, name, description, shared,
-                                firewall_rules, audited,
-                                expected_res_status=None, **kwargs):
-        tenant_id = kwargs.get('tenant_id', self._tenant_id)
-        data = {'firewall_policy': {'name': name,
-                                    'description': description,
-                                    'tenant_id': tenant_id,
-                                    'shared': shared,
-                                    'firewall_rules': firewall_rules,
-                                    'audited': audited}}
-
-        fw_policy_req = self.new_create_request('firewall_policies', data, fmt)
-        fw_policy_res = fw_policy_req.get_response(self.ext_api)
-        if expected_res_status:
-            self.assertEqual(fw_policy_res.status_int, expected_res_status)
-
-        return fw_policy_res
-
-    def _replace_firewall_status(self, attrs, old_status, new_status):
-        if attrs['status'] is old_status:
-            attrs['status'] = new_status
-        return attrs
-
-    @contextlib.contextmanager
-    def firewall_policy(self, fmt=None, name='firewall_policy1',
-                        description=DESCRIPTION, shared=True,
-                        firewall_rules=None, audited=True,
-                        do_delete=True, **kwargs):
-        if firewall_rules is None:
-            firewall_rules = []
-        if not fmt:
-            fmt = self.fmt
-        res = self._create_firewall_policy(fmt, name, description, shared,
-                                           firewall_rules, audited,
-                                           **kwargs)
-        if res.status_int >= 400:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        firewall_policy = self.deserialize(fmt or self.fmt, res)
-        yield firewall_policy
-        if do_delete:
-            self._delete('firewall_policies',
-                         firewall_policy['firewall_policy']['id'])
-
-    def _create_firewall_rule(self, fmt, name, shared, protocol,
-                              ip_version, source_ip_address,
-                              destination_ip_address, source_port,
-                              destination_port, action, enabled,
-                              expected_res_status=None, **kwargs):
-        tenant_id = kwargs.get('tenant_id', self._tenant_id)
-        data = {'firewall_rule': {'name': name,
-                                  'tenant_id': tenant_id,
-                                  'shared': shared,
-                                  'protocol': protocol,
-                                  'ip_version': ip_version,
-                                  'source_ip_address': source_ip_address,
-                                  'destination_ip_address':
-                                  destination_ip_address,
-                                  'source_port': source_port,
-                                  'destination_port': destination_port,
-                                  'action': action,
-                                  'enabled': enabled}}
-
-        fw_rule_req = self.new_create_request('firewall_rules', data, fmt)
-        fw_rule_res = fw_rule_req.get_response(self.ext_api)
-        if expected_res_status:
-            self.assertEqual(fw_rule_res.status_int, expected_res_status)
-
-        return fw_rule_res
-
-    @contextlib.contextmanager
-    def firewall_rule(self, fmt=None, name='firewall_rule1',
-                      shared=SHARED, protocol=PROTOCOL, ip_version=IP_VERSION,
-                      source_ip_address=SOURCE_IP_ADDRESS_RAW,
-                      destination_ip_address=DESTINATION_IP_ADDRESS_RAW,
-                      source_port=SOURCE_PORT,
-                      destination_port=DESTINATION_PORT,
-                      action=ACTION, enabled=ENABLED,
-                      do_delete=True, **kwargs):
-        if not fmt:
-            fmt = self.fmt
-        res = self._create_firewall_rule(fmt, name, shared, protocol,
-                                         ip_version, source_ip_address,
-                                         destination_ip_address,
-                                         source_port, destination_port,
-                                         action, enabled, **kwargs)
-        if res.status_int >= 400:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        firewall_rule = self.deserialize(fmt or self.fmt, res)
-        yield firewall_rule
-        if do_delete:
-            self._delete('firewall_rules',
-                         firewall_rule['firewall_rule']['id'])
-
-    def _create_firewall(self, fmt, name, description, firewall_policy_id,
-                         admin_state_up=True, expected_res_status=None,
-                         **kwargs):
-        tenant_id = kwargs.get('tenant_id', self._tenant_id)
-        data = {'firewall': {'name': name,
-                             'description': description,
-                             'firewall_policy_id': firewall_policy_id,
-                             'admin_state_up': admin_state_up,
-                             'tenant_id': tenant_id}}
-
-        firewall_req = self.new_create_request('firewalls', data, fmt)
-        firewall_res = firewall_req.get_response(self.ext_api)
-        if expected_res_status:
-            self.assertEqual(firewall_res.status_int, expected_res_status)
-
-        return firewall_res
-
-    @contextlib.contextmanager
-    def firewall(self, fmt=None, name='firewall_1', description=DESCRIPTION,
-                 firewall_policy_id=None, admin_state_up=True,
-                 do_delete=True, **kwargs):
-        if not fmt:
-            fmt = self.fmt
-        res = self._create_firewall(fmt, name, description, firewall_policy_id,
-                                    admin_state_up, **kwargs)
-        if res.status_int >= 400:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        firewall = self.deserialize(fmt or self.fmt, res)
-        yield firewall
-        if do_delete:
-            self._delete('firewalls', firewall['firewall']['id'])
-
-    def _rule_action(self, action, id, firewall_rule_id, insert_before=None,
-                     insert_after=None, expected_code=webob.exc.HTTPOk.code,
-                     expected_body=None, body_data=None):
-        # We intentionally do this check for None since we want to distinguish
-        # from empty dictionary
-        if body_data is None:
-            if action == 'insert':
-                body_data = {'firewall_rule_id': firewall_rule_id,
-                             'insert_before': insert_before,
-                             'insert_after': insert_after}
-            else:
-                body_data = {'firewall_rule_id': firewall_rule_id}
-
-        req = self.new_action_request('firewall_policies',
-                                      body_data, id,
-                                      "%s_rule" % action)
-        res = req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, expected_code)
-        response = self.deserialize(self.fmt, res)
-        if expected_body:
-            self.assertEqual(response, expected_body)
-        return response
-
-    def _compare_firewall_rule_lists(self, firewall_policy_id,
-                                     list1, list2):
-        position = 0
-        for r1, r2 in zip(list1, list2):
-            rule = r1['firewall_rule']
-            rule['firewall_policy_id'] = firewall_policy_id
-            position += 1
-            rule['position'] = position
-            for k in rule:
-                self.assertEqual(rule[k], r2[k])
-
-
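The helpers above (the firewall_rule, firewall_policy and firewall context managers) are what the test class below composes. A hypothetical test method showing the typical pattern, create rules, reference them from a policy, then assert on the API response:

    def test_policy_with_one_rule(self):
        with self.firewall_rule(name='fwr1') as fwr:
            rule_ids = [fwr['firewall_rule']['id']]
            with self.firewall_policy(firewall_rules=rule_ids) as fwp:
                self.assertEqual(rule_ids,
                                 fwp['firewall_policy']['firewall_rules'])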
-class TestFirewallDBPlugin(FirewallPluginDbTestCase):
-
-    def test_create_firewall_policy(self):
-        name = "firewall_policy1"
-        attrs = self._get_test_firewall_policy_attrs(name)
-
-        with self.firewall_policy(name=name, shared=SHARED,
-                                  firewall_rules=None,
-                                  audited=AUDITED) as firewall_policy:
-            for k, v in attrs.iteritems():
-                self.assertEqual(firewall_policy['firewall_policy'][k], v)
-
-    def test_create_firewall_policy_with_rules(self):
-        name = "firewall_policy1"
-        attrs = self._get_test_firewall_policy_attrs(name)
-
-        with contextlib.nested(self.firewall_rule(name='fwr1'),
-                               self.firewall_rule(name='fwr2'),
-                               self.firewall_rule(name='fwr3')) as fr:
-            fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
-            attrs['firewall_rules'] = fw_rule_ids
-            with self.firewall_policy(name=name, shared=SHARED,
-                                      firewall_rules=fw_rule_ids,
-                                      audited=AUDITED) as fwp:
-                for k, v in attrs.iteritems():
-                    self.assertEqual(fwp['firewall_policy'][k], v)
-
-    def test_create_admin_firewall_policy_with_other_tenant_rules(self):
-        with self.firewall_rule(shared=False) as fr:
-            fw_rule_ids = [fr['firewall_rule']['id']]
-            res = self._create_firewall_policy(None, 'firewall_policy1',
-                                               description=DESCRIPTION,
-                                               shared=SHARED,
-                                               firewall_rules=fw_rule_ids,
-                                               audited=AUDITED,
-                                               tenant_id='admin-tenant')
-            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_create_firewall_policy_with_previously_associated_rule(self):
-        with self.firewall_rule() as fwr:
-            fw_rule_ids = [fwr['firewall_rule']['id']]
-            with self.firewall_policy(firewall_rules=fw_rule_ids):
-                res = self._create_firewall_policy(
-                    None, 'firewall_policy2', description=DESCRIPTION,
-                    shared=SHARED, firewall_rules=fw_rule_ids,
-                    audited=AUDITED)
-                self.assertEqual(res.status_int, 409)
-
-    def test_create_shared_firewall_policy_with_unshared_rule(self):
-        with self.firewall_rule(shared=False) as fwr:
-            fw_rule_ids = [fwr['firewall_rule']['id']]
-            res = self._create_firewall_policy(
-                None, 'firewall_policy1', description=DESCRIPTION, shared=True,
-                firewall_rules=fw_rule_ids, audited=AUDITED)
-            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_show_firewall_policy(self):
-        name = "firewall_policy1"
-        attrs = self._get_test_firewall_policy_attrs(name)
-
-        with self.firewall_policy(name=name, shared=SHARED,
-                                  firewall_rules=None,
-                                  audited=AUDITED) as fwp:
-            req = self.new_show_request('firewall_policies',
-                                        fwp['firewall_policy']['id'],
-                                        fmt=self.fmt)
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            for k, v in attrs.iteritems():
-                self.assertEqual(res['firewall_policy'][k], v)
-
-    def test_list_firewall_policies(self):
-        with contextlib.nested(self.firewall_policy(name='fwp1',
-                                                    description='fwp'),
-                               self.firewall_policy(name='fwp2',
-                                                    description='fwp'),
-                               self.firewall_policy(name='fwp3',
-                                                    description='fwp')
-                               ) as fw_policies:
-            self._test_list_resources('firewall_policy',
-                                      fw_policies,
-                                      query_params='description=fwp')
-
-    def test_update_firewall_policy(self):
-        name = "new_firewall_policy1"
-        attrs = self._get_test_firewall_policy_attrs(name, audited=False)
-
-        with self.firewall_policy(shared=SHARED,
-                                  firewall_rules=None,
-                                  audited=AUDITED) as fwp:
-            data = {'firewall_policy': {'name': name}}
-            req = self.new_update_request('firewall_policies', data,
-                                          fwp['firewall_policy']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            for k, v in attrs.iteritems():
-                self.assertEqual(res['firewall_policy'][k], v)
-
-    def test_update_firewall_policy_set_audited_false(self):
-        attrs = self._get_test_firewall_policy_attrs(audited=False)
-
-        with self.firewall_policy(name='firewall_policy1',
-                                  description='fwp',
-                                  audited=AUDITED) as fwp:
-            data = {'firewall_policy':
-                    {'description': 'fw_p1'}}
-            req = self.new_update_request('firewall_policies', data,
-                                          fwp['firewall_policy']['id'])
-            res = self.deserialize(self.fmt,
-                                   req.get_response(self.ext_api))
-            attrs['description'] = 'fw_p1'
-            for k, v in attrs.iteritems():
-                self.assertEqual(res['firewall_policy'][k], v)
-
-    def test_update_firewall_policy_with_rules(self):
-        attrs = self._get_test_firewall_policy_attrs()
-
-        with contextlib.nested(self.firewall_rule(name='fwr1'),
-                               self.firewall_rule(name='fwr2'),
-                               self.firewall_rule(name='fwr3')) as fr:
-            with self.firewall_policy() as fwp:
-                fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
-                attrs['firewall_rules'] = fw_rule_ids
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                attrs['audited'] = False
-                for k, v in attrs.iteritems():
-                    self.assertEqual(res['firewall_policy'][k], v)
-
-    def test_update_firewall_policy_replace_rules(self):
-        attrs = self._get_test_firewall_policy_attrs()
-
-        with contextlib.nested(self.firewall_rule(name='fwr1'),
-                               self.firewall_rule(name='fwr2'),
-                               self.firewall_rule(name='fwr3'),
-                               self.firewall_rule(name='fwr4')) as frs:
-            fr1 = frs[0:2]
-            fr2 = frs[2:4]
-            with self.firewall_policy() as fwp:
-                fw_rule_ids = [r['firewall_rule']['id'] for r in fr1]
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                req.get_response(self.ext_api)
-
-                fw_rule_ids = [r['firewall_rule']['id'] for r in fr2]
-                attrs['firewall_rules'] = fw_rule_ids
-                new_data = {'firewall_policy':
-                            {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', new_data,
-                                              fwp['firewall_policy']['id'])
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                attrs['audited'] = False
-                for k, v in attrs.iteritems():
-                    self.assertEqual(res['firewall_policy'][k], v)
-
-    def test_update_firewall_policy_reorder_rules(self):
-        attrs = self._get_test_firewall_policy_attrs()
-
-        with contextlib.nested(self.firewall_rule(name='fwr1'),
-                               self.firewall_rule(name='fwr2'),
-                               self.firewall_rule(name='fwr3'),
-                               self.firewall_rule(name='fwr4')) as fr:
-            with self.firewall_policy() as fwp:
-                fw_rule_ids = [fr[2]['firewall_rule']['id'],
-                               fr[3]['firewall_rule']['id']]
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                req.get_response(self.ext_api)
-                # shuffle the rules, add more rules
-                fw_rule_ids = [fr[1]['firewall_rule']['id'],
-                               fr[3]['firewall_rule']['id'],
-                               fr[2]['firewall_rule']['id'],
-                               fr[0]['firewall_rule']['id']]
-                attrs['firewall_rules'] = fw_rule_ids
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                rules = []
-                for rule_id in fw_rule_ids:
-                    req = self.new_show_request('firewall_rules',
-                                                rule_id,
-                                                fmt=self.fmt)
-                    res = self.deserialize(self.fmt,
-                                           req.get_response(self.ext_api))
-                    rules.append(res['firewall_rule'])
-                self.assertEqual(rules[0]['position'], 1)
-                self.assertEqual(rules[0]['id'], fr[1]['firewall_rule']['id'])
-                self.assertEqual(rules[1]['position'], 2)
-                self.assertEqual(rules[1]['id'], fr[3]['firewall_rule']['id'])
-                self.assertEqual(rules[2]['position'], 3)
-                self.assertEqual(rules[2]['id'], fr[2]['firewall_rule']['id'])
-                self.assertEqual(rules[3]['position'], 4)
-                self.assertEqual(rules[3]['id'], fr[0]['firewall_rule']['id'])
-
-    def test_update_firewall_policy_with_non_existing_rule(self):
-        attrs = self._get_test_firewall_policy_attrs()
-
-        with contextlib.nested(self.firewall_rule(name='fwr1'),
-                               self.firewall_rule(name='fwr2')) as fr:
-            with self.firewall_policy() as fwp:
-                fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
-                # appending non-existent rule
-                fw_rule_ids.append(uuidutils.generate_uuid())
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                res = req.get_response(self.ext_api)
-                #check that the firewall_rule was not found
-                self.assertEqual(res.status_int, 404)
-                #check if none of the rules got added to the policy
-                req = self.new_show_request('firewall_policies',
-                                            fwp['firewall_policy']['id'],
-                                            fmt=self.fmt)
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                for k, v in attrs.iteritems():
-                    self.assertEqual(res['firewall_policy'][k], v)
-
-    def test_update_shared_firewall_policy_with_unshared_rule(self):
-        with self.firewall_rule(name='fwr1', shared=False) as fr:
-            with self.firewall_policy() as fwp:
-                fw_rule_ids = [fr['firewall_rule']['id']]
-                # update shared policy with unshared rule
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_update_firewall_policy_with_shared_attr_unshared_rule(self):
-        with self.firewall_rule(name='fwr1', shared=False) as fr:
-            with self.firewall_policy(shared=False) as fwp:
-                fw_rule_ids = [fr['firewall_rule']['id']]
-                # update shared policy with shared attr and unshared rule
-                data = {'firewall_policy': {'shared': True,
-                                            'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_update_firewall_policy_with_shared_attr_exist_unshare_rule(self):
-        with self.firewall_rule(name='fwr1', shared=False) as fr:
-            fw_rule_ids = [fr['firewall_rule']['id']]
-            with self.firewall_policy(shared=False,
-                                      firewall_rules=fw_rule_ids) as fwp:
-                # update policy with shared attr
-                data = {'firewall_policy': {'shared': True}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_update_firewall_policy_assoc_with_other_tenant_firewall(self):
-        with self.firewall_policy(shared=True, tenant_id='tenant1') as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            with self.firewall(firewall_policy_id=fwp_id):
-                data = {'firewall_policy': {'shared': False}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_delete_firewall_policy(self):
-        ctx = context.get_admin_context()
-        with self.firewall_policy(do_delete=False) as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            req = self.new_delete_request('firewall_policies', fwp_id)
-            res = req.get_response(self.ext_api)
-            self.assertEqual(res.status_int, 204)
-            self.assertRaises(firewall.FirewallPolicyNotFound,
-                              self.plugin.get_firewall_policy,
-                              ctx, fwp_id)
-
-    def test_delete_firewall_policy_with_rule(self):
-        ctx = context.get_admin_context()
-        attrs = self._get_test_firewall_policy_attrs()
-        with self.firewall_policy(do_delete=False) as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            with self.firewall_rule(name='fwr1') as fr:
-                fr_id = fr['firewall_rule']['id']
-                fw_rule_ids = [fr_id]
-                attrs['firewall_rules'] = fw_rule_ids
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                req.get_response(self.ext_api)
-                fw_rule = self.plugin.get_firewall_rule(ctx, fr_id)
-                self.assertEqual(fw_rule['firewall_policy_id'], fwp_id)
-                req = self.new_delete_request('firewall_policies', fwp_id)
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, 204)
-                self.assertRaises(firewall.FirewallPolicyNotFound,
-                                  self.plugin.get_firewall_policy,
-                                  ctx, fwp_id)
-                fw_rule = self.plugin.get_firewall_rule(ctx, fr_id)
-                self.assertIsNone(fw_rule['firewall_policy_id'])
-
-    def test_delete_firewall_policy_with_firewall_association(self):
-        attrs = self._get_test_firewall_attrs()
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            attrs['firewall_policy_id'] = fwp_id
-            with self.firewall(
-                    firewall_policy_id=fwp_id,
-                    admin_state_up=ADMIN_STATE_UP):
-                req = self.new_delete_request('firewall_policies', fwp_id)
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, 409)
-
-    def test_create_firewall_rule(self):
-        attrs = self._get_test_firewall_rule_attrs()
-
-        with self.firewall_rule() as firewall_rule:
-            for k, v in attrs.iteritems():
-                self.assertEqual(firewall_rule['firewall_rule'][k], v)
-
-        attrs['source_port'] = None
-        attrs['destination_port'] = None
-        with self.firewall_rule(source_port=None,
-                                destination_port=None) as firewall_rule:
-            for k, v in attrs.iteritems():
-                self.assertEqual(firewall_rule['firewall_rule'][k], v)
-
-        attrs['source_port'] = '10000'
-        attrs['destination_port'] = '80'
-        with self.firewall_rule(source_port=10000,
-                                destination_port=80) as firewall_rule:
-            for k, v in attrs.iteritems():
-                self.assertEqual(firewall_rule['firewall_rule'][k], v)
-
-        attrs['source_port'] = '10000'
-        attrs['destination_port'] = '80'
-        with self.firewall_rule(source_port='10000',
-                                destination_port='80') as firewall_rule:
-            for k, v in attrs.iteritems():
-                self.assertEqual(firewall_rule['firewall_rule'][k], v)
-
-    def test_create_firewall_rule_icmp_with_port(self):
-        attrs = self._get_test_firewall_rule_attrs()
-        attrs['protocol'] = 'icmp'
-        res = self._create_firewall_rule(self.fmt, **attrs)
-        self.assertEqual(400, res.status_int)
-
-    def test_create_firewall_rule_icmp_without_port(self):
-        attrs = self._get_test_firewall_rule_attrs()
-
-        attrs['protocol'] = 'icmp'
-        attrs['source_port'] = None
-        attrs['destination_port'] = None
-        with self.firewall_rule(source_port=None,
-                                destination_port=None,
-                                protocol='icmp') as firewall_rule:
-            for k, v in attrs.iteritems():
-                self.assertEqual(firewall_rule['firewall_rule'][k], v)
-
-    def test_create_firewall_rule_without_protocol_with_dport(self):
-        attrs = self._get_test_firewall_rule_attrs()
-        attrs['protocol'] = None
-        attrs['source_port'] = None
-        res = self._create_firewall_rule(self.fmt, **attrs)
-        self.assertEqual(400, res.status_int)
-
-    def test_create_firewall_rule_without_protocol_with_sport(self):
-        attrs = self._get_test_firewall_rule_attrs()
-        attrs['protocol'] = None
-        attrs['destination_port'] = None
-        res = self._create_firewall_rule(self.fmt, **attrs)
-        self.assertEqual(400, res.status_int)
-
-    def test_show_firewall_rule_with_fw_policy_not_associated(self):
-        attrs = self._get_test_firewall_rule_attrs()
-        with self.firewall_rule() as fw_rule:
-            req = self.new_show_request('firewall_rules',
-                                        fw_rule['firewall_rule']['id'],
-                                        fmt=self.fmt)
-            res = self.deserialize(self.fmt,
-                                   req.get_response(self.ext_api))
-            for k, v in attrs.iteritems():
-                self.assertEqual(res['firewall_rule'][k], v)
-
-    def test_show_firewall_rule_with_fw_policy_associated(self):
-        attrs = self._get_test_firewall_rule_attrs()
-        with self.firewall_rule() as fw_rule:
-            with self.firewall_policy() as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                attrs['firewall_policy_id'] = fwp_id
-                data = {'firewall_policy':
-                        {'firewall_rules':
-                         [fw_rule['firewall_rule']['id']]}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                req.get_response(self.ext_api)
-                req = self.new_show_request('firewall_rules',
-                                            fw_rule['firewall_rule']['id'],
-                                            fmt=self.fmt)
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                for k, v in attrs.iteritems():
-                    self.assertEqual(res['firewall_rule'][k], v)
-
-    def test_list_firewall_rules(self):
-        with contextlib.nested(self.firewall_rule(name='fwr1'),
-                               self.firewall_rule(name='fwr2'),
-                               self.firewall_rule(name='fwr3')) as fr:
-            query_params = 'protocol=tcp'
-            self._test_list_resources('firewall_rule', fr,
-                                      query_params=query_params)
-
-    def test_update_firewall_rule(self):
-        name = "new_firewall_rule1"
-        attrs = self._get_test_firewall_rule_attrs(name)
-
-        attrs['source_port'] = '10:20'
-        attrs['destination_port'] = '30:40'
-        with self.firewall_rule() as fwr:
-            data = {'firewall_rule': {'name': name,
-                                      'source_port': '10:20',
-                                      'destination_port': '30:40'}}
-            req = self.new_update_request('firewall_rules', data,
-                                          fwr['firewall_rule']['id'])
-            res = self.deserialize(self.fmt,
-                                   req.get_response(self.ext_api))
-            for k, v in attrs.iteritems():
-                self.assertEqual(res['firewall_rule'][k], v)
-
-        attrs['source_port'] = '10000'
-        attrs['destination_port'] = '80'
-        with self.firewall_rule() as fwr:
-            data = {'firewall_rule': {'name': name,
-                                      'source_port': 10000,
-                                      'destination_port': 80}}
-            req = self.new_update_request('firewall_rules', data,
-                                          fwr['firewall_rule']['id'])
-            res = self.deserialize(self.fmt,
-                                   req.get_response(self.ext_api))
-            for k, v in attrs.iteritems():
-                self.assertEqual(res['firewall_rule'][k], v)
-
-        attrs['source_port'] = '10000'
-        attrs['destination_port'] = '80'
-        with self.firewall_rule() as fwr:
-            data = {'firewall_rule': {'name': name,
-                                      'source_port': '10000',
-                                      'destination_port': '80'}}
-            req = self.new_update_request('firewall_rules', data,
-                                          fwr['firewall_rule']['id'])
-            res = self.deserialize(self.fmt,
-                                   req.get_response(self.ext_api))
-            for k, v in attrs.iteritems():
-                self.assertEqual(res['firewall_rule'][k], v)
-
-        attrs['source_port'] = None
-        attrs['destination_port'] = None
-        with self.firewall_rule() as fwr:
-            data = {'firewall_rule': {'name': name,
-                                      'source_port': None,
-                                      'destination_port': None}}
-            req = self.new_update_request('firewall_rules', data,
-                                          fwr['firewall_rule']['id'])
-            res = self.deserialize(self.fmt,
-                                   req.get_response(self.ext_api))
-            for k, v in attrs.iteritems():
-                self.assertEqual(res['firewall_rule'][k], v)
-
-    def test_update_firewall_rule_with_port_and_no_proto(self):
-        with self.firewall_rule() as fwr:
-            data = {'firewall_rule': {'protocol': None,
-                                      'destination_port': 80}}
-            req = self.new_update_request('firewall_rules', data,
-                                          fwr['firewall_rule']['id'])
-            res = req.get_response(self.ext_api)
-            self.assertEqual(400, res.status_int)
-
-    def test_update_firewall_rule_without_ports_and_no_proto(self):
-        with self.firewall_rule() as fwr:
-            data = {'firewall_rule': {'protocol': None,
-                                      'destination_port': None,
-                                      'source_port': None}}
-            req = self.new_update_request('firewall_rules', data,
-                                          fwr['firewall_rule']['id'])
-            res = req.get_response(self.ext_api)
-            self.assertEqual(200, res.status_int)
-
-    def test_update_firewall_rule_with_port(self):
-        with self.firewall_rule(source_port=None,
-                                destination_port=None,
-                                protocol=None) as fwr:
-            data = {'firewall_rule': {'destination_port': 80}}
-            req = self.new_update_request('firewall_rules', data,
-                                          fwr['firewall_rule']['id'])
-            res = req.get_response(self.ext_api)
-            self.assertEqual(400, res.status_int)
-
-    def test_update_firewall_rule_with_port_and_protocol(self):
-        with self.firewall_rule(source_port=None,
-                                destination_port=None,
-                                protocol=None) as fwr:
-            data = {'firewall_rule': {'destination_port': 80,
-                                      'protocol': 'tcp'}}
-            req = self.new_update_request('firewall_rules', data,
-                                          fwr['firewall_rule']['id'])
-            res = req.get_response(self.ext_api)
-            self.assertEqual(200, res.status_int)
-
-    def test_update_firewall_rule_with_policy_associated(self):
-        name = "new_firewall_rule1"
-        attrs = self._get_test_firewall_rule_attrs(name)
-        with self.firewall_rule() as fwr:
-            with self.firewall_policy() as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                attrs['firewall_policy_id'] = fwp_id
-                fwr_id = fwr['firewall_rule']['id']
-                data = {'firewall_policy': {'firewall_rules': [fwr_id]}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                req.get_response(self.ext_api)
-                data = {'firewall_rule': {'name': name}}
-                req = self.new_update_request('firewall_rules', data,
-                                              fwr['firewall_rule']['id'])
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                attrs['firewall_policy_id'] = fwp_id
-                for k, v in attrs.iteritems():
-                    self.assertEqual(res['firewall_rule'][k], v)
-                req = self.new_show_request('firewall_policies',
-                                            fwp['firewall_policy']['id'],
-                                            fmt=self.fmt)
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                self.assertEqual(res['firewall_policy']['firewall_rules'],
-                                 [fwr_id])
-                self.assertEqual(res['firewall_policy']['audited'], False)
-
-    def test_update_firewall_rule_associated_with_other_tenant_policy(self):
-        with self.firewall_rule(shared=True, tenant_id='tenant1') as fwr:
-            fwr_id = [fwr['firewall_rule']['id']]
-            with self.firewall_policy(shared=False,
-                                      firewall_rules=fwr_id):
-                data = {'firewall_rule': {'shared': False}}
-                req = self.new_update_request('firewall_rules', data,
-                                              fwr['firewall_rule']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
-
-    def test_delete_firewall_rule(self):
-        ctx = context.get_admin_context()
-        with self.firewall_rule(do_delete=False) as fwr:
-            fwr_id = fwr['firewall_rule']['id']
-            req = self.new_delete_request('firewall_rules', fwr_id)
-            res = req.get_response(self.ext_api)
-            self.assertEqual(res.status_int, 204)
-            self.assertRaises(firewall.FirewallRuleNotFound,
-                              self.plugin.get_firewall_rule,
-                              ctx, fwr_id)
-
-    def test_delete_firewall_rule_with_policy_associated(self):
-        attrs = self._get_test_firewall_rule_attrs()
-        with self.firewall_rule() as fwr:
-            with self.firewall_policy() as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                attrs['firewall_policy_id'] = fwp_id
-                fwr_id = fwr['firewall_rule']['id']
-                data = {'firewall_policy': {'firewall_rules': [fwr_id]}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp['firewall_policy']['id'])
-                req.get_response(self.ext_api)
-                req = self.new_delete_request('firewall_rules', fwr_id)
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, 409)
-
-    def _test_create_firewall(self, attrs):
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            attrs['firewall_policy_id'] = fwp_id
-            with self.firewall(
-                name=attrs['name'],
-                firewall_policy_id=fwp_id,
-                admin_state_up=ADMIN_STATE_UP
-            ) as firewall:
-                for k, v in attrs.iteritems():
-                    self.assertEqual(firewall['firewall'][k], v)
-
-    def test_create_firewall(self):
-        attrs = self._get_test_firewall_attrs("firewall1")
-        self._test_create_firewall(attrs)
-
-    def test_create_firewall_with_dvr(self):
-        cfg.CONF.set_override('router_distributed', True)
-        attrs = self._get_test_firewall_attrs("firewall1", "CREATED")
-        self._test_create_firewall(attrs)
-
-    def test_show_firewall(self):
-        name = "firewall1"
-        attrs = self._get_test_firewall_attrs(name)
-
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            attrs['firewall_policy_id'] = fwp_id
-            with self.firewall(
-                    name=name,
-                    firewall_policy_id=fwp_id,
-                    admin_state_up=ADMIN_STATE_UP) as firewall:
-                req = self.new_show_request('firewalls',
-                                            firewall['firewall']['id'],
-                                            fmt=self.fmt)
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                for k, v in attrs.iteritems():
-                    self.assertEqual(res['firewall'][k], v)
-
-    def test_list_firewalls(self):
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            with contextlib.nested(self.firewall(name='fw1',
-                                                 firewall_policy_id=fwp_id,
-                                                 description='fw'),
-                                   self.firewall(name='fw2',
-                                                 firewall_policy_id=fwp_id,
-                                                 description='fw'),
-                                   self.firewall(name='fw3',
-                                                 firewall_policy_id=fwp_id,
-                                                 description='fw')) as fwalls:
-                self._test_list_resources('firewall', fwalls,
-                                          query_params='description=fw')
-
-    def test_update_firewall(self):
-        name = "new_firewall1"
-        attrs = self._get_test_firewall_attrs(name)
-
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            attrs['firewall_policy_id'] = fwp_id
-            with self.firewall(
-                    firewall_policy_id=fwp_id,
-                    admin_state_up=ADMIN_STATE_UP) as firewall:
-                data = {'firewall': {'name': name}}
-                req = self.new_update_request('firewalls', data,
-                                              firewall['firewall']['id'])
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                for k, v in attrs.iteritems():
-                    self.assertEqual(res['firewall'][k], v)
-
-    def test_delete_firewall(self):
-        ctx = context.get_admin_context()
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            with self.firewall(firewall_policy_id=fwp_id,
-                               do_delete=False) as fw:
-                fw_id = fw['firewall']['id']
-                req = self.new_delete_request('firewalls', fw_id)
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, 204)
-                self.assertRaises(firewall.FirewallNotFound,
-                                  self.plugin.get_firewall,
-                                  ctx, fw_id)
-
-    def test_insert_rule_in_policy_with_prior_rules_added_via_update(self):
-        attrs = self._get_test_firewall_policy_attrs()
-        attrs['audited'] = False
-        attrs['firewall_list'] = []
-        with contextlib.nested(self.firewall_rule(name='fwr1'),
-                               self.firewall_rule(name='fwr2'),
-                               self.firewall_rule(name='fwr3')) as frs:
-            fr1 = frs[0:2]
-            fwr3 = frs[2]
-            with self.firewall_policy() as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                attrs['id'] = fwp_id
-                fw_rule_ids = [r['firewall_rule']['id'] for r in fr1]
-                attrs['firewall_rules'] = fw_rule_ids[:]
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp_id)
-                req.get_response(self.ext_api)
-                self._rule_action('insert', fwp_id, fw_rule_ids[0],
-                                  insert_before=fw_rule_ids[0],
-                                  insert_after=None,
-                                  expected_code=webob.exc.HTTPConflict.code,
-                                  expected_body=None)
-                fwr3_id = fwr3['firewall_rule']['id']
-                attrs['firewall_rules'].insert(0, fwr3_id)
-                self._rule_action('insert', fwp_id, fwr3_id,
-                                  insert_before=fw_rule_ids[0],
-                                  insert_after=None,
-                                  expected_code=webob.exc.HTTPOk.code,
-                                  expected_body=attrs)
-
-    def test_insert_rule_in_policy_failures(self):
-        with self.firewall_rule(name='fwr1') as fr1:
-            with self.firewall_policy() as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                fr1_id = fr1['firewall_rule']['id']
-                fw_rule_ids = [fr1_id]
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp_id)
-                req.get_response(self.ext_api)
-                # test inserting with empty request body
-                self._rule_action('insert', fwp_id, '123',
-                                  expected_code=webob.exc.HTTPBadRequest.code,
-                                  expected_body=None, body_data={})
-                # test inserting when firewall_rule_id is missing in
-                # request body
-                insert_data = {'insert_before': '123',
-                               'insert_after': '456'}
-                self._rule_action('insert', fwp_id, '123',
-                                  expected_code=webob.exc.HTTPBadRequest.code,
-                                  expected_body=None,
-                                  body_data=insert_data)
-                # test inserting when firewall_rule_id is None
-                insert_data = {'firewall_rule_id': None,
-                               'insert_before': '123',
-                               'insert_after': '456'}
-                self._rule_action('insert', fwp_id, '123',
-                                  expected_code=webob.exc.HTTPNotFound.code,
-                                  expected_body=None,
-                                  body_data=insert_data)
-                # test inserting when firewall_policy_id is incorrect
-                self._rule_action('insert', '123', fr1_id,
-                                  expected_code=webob.exc.HTTPNotFound.code,
-                                  expected_body=None)
-                # test inserting when firewall_policy_id is None
-                self._rule_action('insert', None, fr1_id,
-                                  expected_code=webob.exc.HTTPBadRequest.code,
-                                  expected_body=None)
-
-    def test_insert_rule_for_previously_associated_rule(self):
-        with self.firewall_rule() as fwr:
-            fwr_id = fwr['firewall_rule']['id']
-            fw_rule_ids = [fwr_id]
-            with self.firewall_policy(firewall_rules=fw_rule_ids):
-                with self.firewall_policy(name='firewall_policy2') as fwp:
-                    fwp_id = fwp['firewall_policy']['id']
-                    insert_data = {'firewall_rule_id': fwr_id}
-                    self._rule_action(
-                        'insert', fwp_id, fwr_id, insert_before=None,
-                        insert_after=None,
-                        expected_code=webob.exc.HTTPConflict.code,
-                        expected_body=None, body_data=insert_data)
-
-    def test_insert_rule_for_prev_associated_ref_rule(self):
-        with contextlib.nested(self.firewall_rule(name='fwr0'),
-                               self.firewall_rule(name='fwr1')) as fwr:
-            fwr0_id = fwr[0]['firewall_rule']['id']
-            fwr1_id = fwr[1]['firewall_rule']['id']
-            with contextlib.nested(
-                self.firewall_policy(name='fwp0'),
-                    self.firewall_policy(name='fwp1',
-                                         firewall_rules=[fwr1_id])) as fwp:
-                fwp0_id = fwp[0]['firewall_policy']['id']
-                # test inserting before a rule which is associated
-                # with a different policy
-                self._rule_action(
-                    'insert', fwp0_id, fwr0_id,
-                    insert_before=fwr1_id,
-                    expected_code=webob.exc.HTTPBadRequest.code,
-                    expected_body=None)
-                # test inserting after a rule which is associated
-                # with a different policy
-                self._rule_action(
-                    'insert', fwp0_id, fwr0_id,
-                    insert_after=fwr1_id,
-                    expected_code=webob.exc.HTTPBadRequest.code,
-                    expected_body=None)
-
-    def test_insert_rule_for_policy_of_other_tenant(self):
-        with self.firewall_rule(tenant_id='tenant-2', shared=False) as fwr:
-            fwr_id = fwr['firewall_rule']['id']
-            with self.firewall_policy(name='firewall_policy') as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                insert_data = {'firewall_rule_id': fwr_id}
-                self._rule_action(
-                    'insert', fwp_id, fwr_id, insert_before=None,
-                    insert_after=None,
-                    expected_code=webob.exc.HTTPConflict.code,
-                    expected_body=None, body_data=insert_data)
-
-    def test_insert_rule_in_policy(self):
-        attrs = self._get_test_firewall_policy_attrs()
-        attrs['audited'] = False
-        attrs['firewall_list'] = []
-        with contextlib.nested(self.firewall_rule(name='fwr0'),
-                               self.firewall_rule(name='fwr1'),
-                               self.firewall_rule(name='fwr2'),
-                               self.firewall_rule(name='fwr3'),
-                               self.firewall_rule(name='fwr4'),
-                               self.firewall_rule(name='fwr5'),
-                               self.firewall_rule(name='fwr6')) as fwr:
-            with self.firewall_policy() as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                attrs['id'] = fwp_id
-                # test insert when rule list is empty
-                fwr0_id = fwr[0]['firewall_rule']['id']
-                attrs['firewall_rules'].insert(0, fwr0_id)
-                self._rule_action('insert', fwp_id, fwr0_id,
-                                  insert_before=None,
-                                  insert_after=None,
-                                  expected_code=webob.exc.HTTPOk.code,
-                                  expected_body=attrs)
-                # test insert at top of rule list, insert_before and
-                # insert_after not provided
-                fwr1_id = fwr[1]['firewall_rule']['id']
-                attrs['firewall_rules'].insert(0, fwr1_id)
-                insert_data = {'firewall_rule_id': fwr1_id}
-                self._rule_action('insert', fwp_id, fwr0_id,
-                                  expected_code=webob.exc.HTTPOk.code,
-                                  expected_body=attrs, body_data=insert_data)
-                # test insert at top of list above existing rule
-                fwr2_id = fwr[2]['firewall_rule']['id']
-                attrs['firewall_rules'].insert(0, fwr2_id)
-                self._rule_action('insert', fwp_id, fwr2_id,
-                                  insert_before=fwr1_id,
-                                  insert_after=None,
-                                  expected_code=webob.exc.HTTPOk.code,
-                                  expected_body=attrs)
-                # test insert at bottom of list
-                fwr3_id = fwr[3]['firewall_rule']['id']
-                attrs['firewall_rules'].append(fwr3_id)
-                self._rule_action('insert', fwp_id, fwr3_id,
-                                  insert_before=None,
-                                  insert_after=fwr0_id,
-                                  expected_code=webob.exc.HTTPOk.code,
-                                  expected_body=attrs)
-                # test insert in the middle of the list using
-                # insert_before
-                fwr4_id = fwr[4]['firewall_rule']['id']
-                attrs['firewall_rules'].insert(1, fwr4_id)
-                self._rule_action('insert', fwp_id, fwr4_id,
-                                  insert_before=fwr1_id,
-                                  insert_after=None,
-                                  expected_code=webob.exc.HTTPOk.code,
-                                  expected_body=attrs)
-                # test insert in the middle of the list using
-                # insert_after
-                fwr5_id = fwr[5]['firewall_rule']['id']
-                attrs['firewall_rules'].insert(1, fwr5_id)
-                self._rule_action('insert', fwp_id, fwr5_id,
-                                  insert_before=None,
-                                  insert_after=fwr2_id,
-                                  expected_code=webob.exc.HTTPOk.code,
-                                  expected_body=attrs)
-                # test insert when both insert_before and
-                # insert_after are set
-                fwr6_id = fwr[6]['firewall_rule']['id']
-                attrs['firewall_rules'].insert(1, fwr6_id)
-                self._rule_action('insert', fwp_id, fwr6_id,
-                                  insert_before=fwr5_id,
-                                  insert_after=fwr5_id,
-                                  expected_code=webob.exc.HTTPOk.code,
-                                  expected_body=attrs)
-
-    def test_remove_rule_from_policy(self):
-        attrs = self._get_test_firewall_policy_attrs()
-        attrs['audited'] = False
-        attrs['firewall_list'] = []
-        with contextlib.nested(self.firewall_rule(name='fwr1'),
-                               self.firewall_rule(name='fwr2'),
-                               self.firewall_rule(name='fwr3')) as fr1:
-            with self.firewall_policy() as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                attrs['id'] = fwp_id
-                fw_rule_ids = [r['firewall_rule']['id'] for r in fr1]
-                attrs['firewall_rules'] = fw_rule_ids[:]
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp_id)
-                req.get_response(self.ext_api)
-                # test removing a rule from a policy that does not exist
-                self._rule_action('remove', '123', fw_rule_ids[1],
-                                  expected_code=webob.exc.HTTPNotFound.code,
-                                  expected_body=None)
-                # test removing a rule in the middle of the list
-                attrs['firewall_rules'].remove(fw_rule_ids[1])
-                self._rule_action('remove', fwp_id, fw_rule_ids[1],
-                                  expected_body=attrs)
-                # test removing a rule at the top of the list
-                attrs['firewall_rules'].remove(fw_rule_ids[0])
-                self._rule_action('remove', fwp_id, fw_rule_ids[0],
-                                  expected_body=attrs)
-                # test removing remaining rule in the list
-                attrs['firewall_rules'].remove(fw_rule_ids[2])
-                self._rule_action('remove', fwp_id, fw_rule_ids[2],
-                                  expected_body=attrs)
-                # test removing rule that is not associated with the policy
-                self._rule_action('remove', fwp_id, fw_rule_ids[2],
-                                  expected_code=webob.exc.HTTPBadRequest.code,
-                                  expected_body=None)
-
-    def test_remove_rule_from_policy_failures(self):
-        with self.firewall_rule(name='fwr1') as fr1:
-            with self.firewall_policy() as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                fw_rule_ids = [fr1['firewall_rule']['id']]
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp_id)
-                req.get_response(self.ext_api)
-                # test removing rule that does not exist
-                self._rule_action('remove', fwp_id, '123',
-                                  expected_code=webob.exc.HTTPNotFound.code,
-                                  expected_body=None)
-                # test removing rule with bad request
-                self._rule_action('remove', fwp_id, '123',
-                                  expected_code=webob.exc.HTTPBadRequest.code,
-                                  expected_body=None, body_data={})
-                # test removing rule with firewall_rule_id set to None
-                self._rule_action('remove', fwp_id, '123',
-                                  expected_code=webob.exc.HTTPNotFound.code,
-                                  expected_body=None,
-                                  body_data={'firewall_rule_id': None})
diff --git a/neutron/tests/unit/db/loadbalancer/__init__.py b/neutron/tests/unit/db/loadbalancer/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py b/neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py
deleted file mode 100644 (file)
index 69a15a4..0000000
+++ /dev/null
@@ -1,1594 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import contextlib
-
-import mock
-from oslo.config import cfg
-import testtools
-import webob.exc
-
-from neutron.api import extensions
-from neutron.common import config
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.db.loadbalancer import loadbalancer_db as ldb
-from neutron.db import servicetype_db as sdb
-import neutron.extensions
-from neutron.extensions import loadbalancer
-from neutron import manager
-from neutron.plugins.common import constants
-from neutron.services.loadbalancer import (
-    plugin as loadbalancer_plugin
-)
-from neutron.services.loadbalancer.drivers import abstract_driver
-from neutron.services import provider_configuration as pconf
-from neutron.tests.unit import test_db_plugin
-
-
-DB_CORE_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
-DB_LB_PLUGIN_KLASS = (
-    "neutron.services.loadbalancer."
-    "plugin.LoadBalancerPlugin"
-)
-NOOP_DRIVER_KLASS = ('neutron.tests.unit.db.loadbalancer.test_db_loadbalancer.'
-                     'NoopLbaaSDriver')
-
-extensions_path = ':'.join(neutron.extensions.__path__)
-
-_subnet_id = "0c798ed8-33ba-11e2-8b28-000c291c4d14"
-
-
-class NoopLbaaSDriver(abstract_driver.LoadBalancerAbstractDriver):
-    """A dummy lbass driver that that only performs object deletion."""
-
-    def __init__(self, plugin):
-        self.plugin = plugin
-
-    def create_vip(self, context, vip):
-        pass
-
-    def update_vip(self, context, old_vip, vip):
-        pass
-
-    def delete_vip(self, context, vip):
-        self.plugin._delete_db_vip(context, vip["id"])
-
-    def create_pool(self, context, pool):
-        pass
-
-    def update_pool(self, context, old_pool, pool):
-        pass
-
-    def delete_pool(self, context, pool):
-        self.plugin._delete_db_pool(context, pool["id"])
-
-    def stats(self, context, pool_id):
-        return {"bytes_in": 0,
-                "bytes_out": 0,
-                "active_connections": 0,
-                "total_connections": 0}
-
-    def create_member(self, context, member):
-        pass
-
-    def update_member(self, context, old_member, member):
-        pass
-
-    def delete_member(self, context, member):
-        self.plugin._delete_db_member(context, member["id"])
-
-    def update_pool_health_monitor(self, context, old_health_monitor,
-                                   health_monitor,
-                                   pool_association):
-        pass
-
-    def create_pool_health_monitor(self, context,
-                                   health_monitor, pool_id):
-        pass
-
-    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
-        self.plugin._delete_db_pool_health_monitor(
-            context, health_monitor["id"],
-            pool_id
-        )
-
-
-class LoadBalancerTestMixin(object):
-    resource_prefix_map = dict(
-        (k, constants.COMMON_PREFIXES[constants.LOADBALANCER])
-        for k in loadbalancer.RESOURCE_ATTRIBUTE_MAP.keys()
-    )
-
-    def _get_vip_optional_args(self):
-        return ('description', 'subnet_id', 'address',
-                'session_persistence', 'connection_limit')
-
-    def _create_vip(self, fmt, name, pool_id, protocol, protocol_port,
-                    admin_state_up, expected_res_status=None, **kwargs):
-        data = {'vip': {'name': name,
-                        'pool_id': pool_id,
-                        'protocol': protocol,
-                        'protocol_port': protocol_port,
-                        'admin_state_up': admin_state_up,
-                        'tenant_id': self._tenant_id}}
-        args = self._get_vip_optional_args()
-        for arg in args:
-            if arg in kwargs and kwargs[arg] is not None:
-                data['vip'][arg] = kwargs[arg]
-
-        vip_req = self.new_create_request('vips', data, fmt)
-        vip_res = vip_req.get_response(self.ext_api)
-        if expected_res_status:
-            self.assertEqual(vip_res.status_int, expected_res_status)
-
-        return vip_res
-
-    def _create_pool(self, fmt, name, lb_method, protocol, admin_state_up,
-                     expected_res_status=None, **kwargs):
-        data = {'pool': {'name': name,
-                         'subnet_id': _subnet_id,
-                         'lb_method': lb_method,
-                         'protocol': protocol,
-                         'admin_state_up': admin_state_up,
-                         'tenant_id': self._tenant_id}}
-        for arg in ('description', 'provider', 'subnet_id'):
-            if arg in kwargs and kwargs[arg] is not None:
-                data['pool'][arg] = kwargs[arg]
-        pool_req = self.new_create_request('pools', data, fmt)
-        pool_res = pool_req.get_response(self.ext_api)
-        if expected_res_status:
-            self.assertEqual(pool_res.status_int, expected_res_status)
-
-        return pool_res
-
-    def _create_member(self, fmt, address, protocol_port, admin_state_up,
-                       expected_res_status=None, **kwargs):
-        data = {'member': {'address': address,
-                           'protocol_port': protocol_port,
-                           'admin_state_up': admin_state_up,
-                           'tenant_id': self._tenant_id}}
-        for arg in ('weight', 'pool_id'):
-            if arg in kwargs and kwargs[arg] is not None:
-                data['member'][arg] = kwargs[arg]
-
-        member_req = self.new_create_request('members', data, fmt)
-        member_res = member_req.get_response(self.ext_api)
-        if expected_res_status:
-            self.assertEqual(member_res.status_int, expected_res_status)
-
-        return member_res
-
-    def _create_health_monitor(self, fmt, type, delay, timeout, max_retries,
-                               admin_state_up, expected_res_status=None,
-                               **kwargs):
-        data = {'health_monitor': {'type': type,
-                                   'delay': delay,
-                                   'timeout': timeout,
-                                   'max_retries': max_retries,
-                                   'admin_state_up': admin_state_up,
-                                   'tenant_id': self._tenant_id}}
-        for arg in ('http_method', 'path', 'expected_code'):
-            if arg in kwargs and kwargs[arg] is not None:
-                data['health_monitor'][arg] = kwargs[arg]
-
-        req = self.new_create_request('health_monitors', data, fmt)
-
-        res = req.get_response(self.ext_api)
-        if expected_res_status:
-            self.assertEqual(res.status_int, expected_res_status)
-
-        return res
-
-    @contextlib.contextmanager
-    def vip(self, fmt=None, name='vip1', pool=None, subnet=None,
-            protocol='HTTP', protocol_port=80, admin_state_up=True,
-            do_delete=True, **kwargs):
-        if not fmt:
-            fmt = self.fmt
-
-        with test_db_plugin.optional_ctx(subnet, self.subnet) as tmp_subnet:
-            with test_db_plugin.optional_ctx(pool, self.pool) as tmp_pool:
-                pool_id = tmp_pool['pool']['id']
-                res = self._create_vip(fmt,
-                                       name,
-                                       pool_id,
-                                       protocol,
-                                       protocol_port,
-                                       admin_state_up,
-                                       subnet_id=tmp_subnet['subnet']['id'],
-                                       **kwargs)
-                if res.status_int >= webob.exc.HTTPClientError.code:
-                    raise webob.exc.HTTPClientError(
-                        explanation=_("Unexpected error code: %s") %
-                        res.status_int
-                    )
-                vip = self.deserialize(fmt or self.fmt, res)
-                yield vip
-                if do_delete:
-                    self._delete('vips', vip['vip']['id'])
-
-    @contextlib.contextmanager
-    def pool(self, fmt=None, name='pool1', lb_method='ROUND_ROBIN',
-             protocol='HTTP', admin_state_up=True, do_delete=True,
-             **kwargs):
-        if not fmt:
-            fmt = self.fmt
-        res = self._create_pool(fmt,
-                                name,
-                                lb_method,
-                                protocol,
-                                admin_state_up,
-                                **kwargs)
-        if res.status_int >= webob.exc.HTTPClientError.code:
-            raise webob.exc.HTTPClientError(
-                explanation=_("Unexpected error code: %s") % res.status_int
-            )
-        pool = self.deserialize(fmt or self.fmt, res)
-        yield pool
-        if do_delete:
-            self._delete('pools', pool['pool']['id'])
-
-    @contextlib.contextmanager
-    def member(self, fmt=None, address='192.168.1.100', protocol_port=80,
-               admin_state_up=True, do_delete=True, **kwargs):
-        if not fmt:
-            fmt = self.fmt
-        res = self._create_member(fmt,
-                                  address,
-                                  protocol_port,
-                                  admin_state_up,
-                                  **kwargs)
-        if res.status_int >= webob.exc.HTTPClientError.code:
-            raise webob.exc.HTTPClientError(
-                explanation=_("Unexpected error code: %s") % res.status_int
-            )
-        member = self.deserialize(fmt or self.fmt, res)
-        yield member
-        if do_delete:
-            self._delete('members', member['member']['id'])
-
-    @contextlib.contextmanager
-    def health_monitor(self, fmt=None, type='TCP',
-                       delay=30, timeout=10, max_retries=3,
-                       admin_state_up=True,
-                       do_delete=True, **kwargs):
-        if not fmt:
-            fmt = self.fmt
-        res = self._create_health_monitor(fmt,
-                                          type,
-                                          delay,
-                                          timeout,
-                                          max_retries,
-                                          admin_state_up,
-                                          **kwargs)
-        if res.status_int >= webob.exc.HTTPClientError.code:
-            raise webob.exc.HTTPClientError(
-                explanation=_("Unexpected error code: %s") % res.status_int
-            )
-        health_monitor = self.deserialize(fmt or self.fmt, res)
-        the_health_monitor = health_monitor['health_monitor']
-        # make sure:
-        # 1. When the type is HTTP/S we have HTTP related attributes in
-        #    the result
-        # 2. When the type is not HTTP/S we do not have HTTP related
-        #    attributes in the result
-        http_related_attributes = ('http_method', 'url_path', 'expected_codes')
-        if type in ['HTTP', 'HTTPS']:
-            for arg in http_related_attributes:
-                self.assertIsNotNone(the_health_monitor.get(arg))
-        else:
-            for arg in http_related_attributes:
-                self.assertIsNone(the_health_monitor.get(arg))
-        yield health_monitor
-        if do_delete:
-            self._delete('health_monitors', the_health_monitor['id'])
-
-
-class LoadBalancerPluginDbTestCase(LoadBalancerTestMixin,
-                                   test_db_plugin.NeutronDbPluginV2TestCase):
-    def setUp(self, core_plugin=None, lb_plugin=None, lbaas_provider=None,
-              ext_mgr=None):
-        service_plugins = {'lb_plugin_name': DB_LB_PLUGIN_KLASS}
-        if not lbaas_provider:
-            lbaas_provider = (
-                constants.LOADBALANCER +
-                ':lbaas:' + NOOP_DRIVER_KLASS + ':default')
-        cfg.CONF.set_override('service_provider',
-                              [lbaas_provider],
-                              'service_providers')
-        # force service type manager to reload configuration
-        sdb.ServiceTypeManager._instance = None
-
-        super(LoadBalancerPluginDbTestCase, self).setUp(
-            ext_mgr=ext_mgr,
-            service_plugins=service_plugins
-        )
-
-        if not ext_mgr:
-            self.plugin = loadbalancer_plugin.LoadBalancerPlugin()
-            ext_mgr = extensions.PluginAwareExtensionManager(
-                extensions_path,
-                {constants.LOADBALANCER: self.plugin}
-            )
-            app = config.load_paste_app('extensions_test_app')
-            self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
-
-        get_lbaas_agent_patcher = mock.patch(
-            'neutron.services.loadbalancer.agent_scheduler'
-            '.LbaasAgentSchedulerDbMixin.get_lbaas_agent_hosting_pool')
-        mock_lbaas_agent = mock.MagicMock()
-        get_lbaas_agent_patcher.start().return_value = mock_lbaas_agent
-        mock_lbaas_agent.__getitem__.return_value = {'host': 'host'}
-
-        self._subnet_id = _subnet_id
-
-
-class TestLoadBalancer(LoadBalancerPluginDbTestCase):
-
-    def test_create_vip(self, **extras):
-        expected = {
-            'name': 'vip1',
-            'description': '',
-            'protocol_port': 80,
-            'protocol': 'HTTP',
-            'connection_limit': -1,
-            'admin_state_up': True,
-            'status': 'PENDING_CREATE',
-            'tenant_id': self._tenant_id,
-        }
-
-        expected.update(extras)
-
-        with self.subnet() as subnet:
-            expected['subnet_id'] = subnet['subnet']['id']
-            name = expected['name']
-
-            with self.vip(name=name, subnet=subnet, **extras) as vip:
-                for k in ('id', 'address', 'port_id', 'pool_id'):
-                    self.assertTrue(vip['vip'].get(k, None))
-
-                self.assertEqual(
-                    dict((k, v)
-                         for k, v in vip['vip'].items() if k in expected),
-                    expected
-                )
-            return vip
-
-    def test_create_vip_create_port_fails(self):
-        with self.subnet() as subnet:
-            with self.pool() as pool:
-                lb_plugin = (manager.NeutronManager.
-                             get_instance().
-                             get_service_plugins()[constants.LOADBALANCER])
-                with mock.patch.object(
-                    lb_plugin, '_create_port_for_vip') as cp:
-                    # some exception that can show up in port creation
-                    cp.side_effect = n_exc.IpAddressGenerationFailure(
-                        net_id=subnet['subnet']['network_id'])
-                    self._create_vip(self.fmt, "vip",
-                                     pool['pool']['id'], "HTTP", "80", True,
-                                     subnet_id=subnet['subnet']['id'],
-                                     expected_res_status=409)
-                req = self.new_list_request('vips')
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                self.assertFalse(res['vips'])
-
-    def test_create_vip_twice_for_same_pool(self):
-        """Test loadbalancer db plugin via extension and directly."""
-        with self.subnet() as subnet:
-            with self.pool(name="pool1") as pool:
-                with self.vip(name='vip1', subnet=subnet, pool=pool):
-                    vip_data = {
-                        'name': 'vip1',
-                        'pool_id': pool['pool']['id'],
-                        'description': '',
-                        'protocol_port': 80,
-                        'protocol': 'HTTP',
-                        'connection_limit': -1,
-                        'admin_state_up': True,
-                        'status': 'PENDING_CREATE',
-                        'tenant_id': self._tenant_id,
-                        'session_persistence': ''
-                    }
-                    self.assertRaises(loadbalancer.VipExists,
-                                      self.plugin.create_vip,
-                                      context.get_admin_context(),
-                                      {'vip': vip_data})
-
-    def test_update_vip_raises_vip_exists(self):
-        with self.subnet() as subnet:
-            with contextlib.nested(
-                self.pool(name="pool1"),
-                self.pool(name="pool2")
-            ) as (pool1, pool2):
-                with contextlib.nested(
-                    self.vip(name='vip1', subnet=subnet, pool=pool1),
-                    self.vip(name='vip2', subnet=subnet, pool=pool2)
-                ) as (vip1, vip2):
-                    vip_data = {
-                        'id': vip2['vip']['id'],
-                        'name': 'vip1',
-                        'pool_id': pool1['pool']['id'],
-                    }
-                    self.assertRaises(loadbalancer.VipExists,
-                                      self.plugin.update_vip,
-                                      context.get_admin_context(),
-                                      vip2['vip']['id'],
-                                      {'vip': vip_data})
-
-    def test_update_vip_change_pool(self):
-        with self.subnet() as subnet:
-            with contextlib.nested(
-                self.pool(name="pool1"),
-                self.pool(name="pool2")
-            ) as (pool1, pool2):
-                with self.vip(name='vip1', subnet=subnet, pool=pool1) as vip:
-                    # change vip from pool1 to pool2
-                    vip_data = {
-                        'id': vip['vip']['id'],
-                        'name': 'vip1',
-                        'pool_id': pool2['pool']['id'],
-                    }
-                    ctx = context.get_admin_context()
-                    self.plugin.update_vip(ctx,
-                                           vip['vip']['id'],
-                                           {'vip': vip_data})
-                    db_pool2 = (ctx.session.query(ldb.Pool).
-                                filter_by(id=pool2['pool']['id']).one())
-                    db_pool1 = (ctx.session.query(ldb.Pool).
-                                filter_by(id=pool1['pool']['id']).one())
-                    # check that pool1.vip became None
-                    self.assertIsNone(db_pool1.vip)
-                    # and pool2 got vip
-                    self.assertEqual(db_pool2.vip.id, vip['vip']['id'])
-
-    def test_create_vip_with_invalid_values(self):
-        # a dict would silently drop the duplicate 'protocol_port' key,
-        # so use (param, value) pairs to exercise every invalid value
-        invalid = [
-            ('protocol', 'UNSUPPORTED'),
-            ('protocol_port', 'NOT_AN_INT'),
-            ('protocol_port', 1000500),
-            ('subnet', {'subnet': {'id': 'invalid-subnet'}}),
-        ]
-
-        for param, value in invalid:
-            kwargs = {'name': 'the-vip', param: value}
-            with testtools.ExpectedException(webob.exc.HTTPClientError):
-                with self.vip(**kwargs):
-                    pass
-
-    def test_create_vip_with_address(self):
-        self.test_create_vip(address='10.0.0.7')
-
-    def test_create_vip_with_address_outside_subnet(self):
-        with testtools.ExpectedException(webob.exc.HTTPClientError):
-            self.test_create_vip(address='9.9.9.9')
-
-    def test_create_vip_with_session_persistence(self):
-        self.test_create_vip(session_persistence={'type': 'HTTP_COOKIE'})
-
-    def test_create_vip_with_session_persistence_with_app_cookie(self):
-        sp = {'type': 'APP_COOKIE', 'cookie_name': 'sessionId'}
-        self.test_create_vip(session_persistence=sp)
-
-    def test_create_vip_with_session_persistence_unsupported_type(self):
-        with testtools.ExpectedException(webob.exc.HTTPClientError):
-            self.test_create_vip(session_persistence={'type': 'UNSUPPORTED'})
-
-    def test_create_vip_with_unnecessary_cookie_name(self):
-        sp = {'type': "SOURCE_IP", 'cookie_name': 'sessionId'}
-        with testtools.ExpectedException(webob.exc.HTTPClientError):
-            self.test_create_vip(session_persistence=sp)
-
-    def test_create_vip_with_session_persistence_without_cookie_name(self):
-        sp = {'type': "APP_COOKIE"}
-        with testtools.ExpectedException(webob.exc.HTTPClientError):
-            self.test_create_vip(session_persistence=sp)
-
-    def test_create_vip_with_protocol_mismatch(self):
-        with self.pool(protocol='TCP') as pool:
-            with testtools.ExpectedException(webob.exc.HTTPClientError):
-                self.test_create_vip(pool=pool, protocol='HTTP')
-
-    def test_update_vip_with_protocol_mismatch(self):
-        with self.pool(protocol='TCP') as pool:
-            with self.vip(protocol='HTTP') as vip:
-                data = {'vip': {'pool_id': pool['pool']['id']}}
-                req = self.new_update_request('vips', data, vip['vip']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int,
-                                 webob.exc.HTTPClientError.code)
-
-    def test_reset_session_persistence(self):
-        name = 'vip4'
-        session_persistence = {'type': "HTTP_COOKIE"}
-
-        update_info = {'vip': {'session_persistence': None}}
-
-        with self.vip(name=name, session_persistence=session_persistence) as v:
-            # Ensure that vip has been created properly
-            self.assertEqual(v['vip']['session_persistence'],
-                             session_persistence)
-
-            # Try resetting session_persistence
-            req = self.new_update_request('vips', update_info, v['vip']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-
-            self.assertIsNone(res['vip']['session_persistence'])
-
-    def test_update_vip(self):
-        name = 'new_vip'
-        keys = [('name', name),
-                ('address', "10.0.0.2"),
-                ('protocol_port', 80),
-                ('connection_limit', 100),
-                ('admin_state_up', False),
-                ('status', 'PENDING_UPDATE')]
-
-        with self.vip(name=name) as vip:
-            keys.append(('subnet_id', vip['vip']['subnet_id']))
-            data = {'vip': {'name': name,
-                            'connection_limit': 100,
-                            'session_persistence':
-                            {'type': "APP_COOKIE",
-                             'cookie_name': "jesssionId"},
-                            'admin_state_up': False}}
-            req = self.new_update_request('vips', data, vip['vip']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            for k, v in keys:
-                self.assertEqual(res['vip'][k], v)
-
-    def test_delete_vip(self):
-        with self.pool():
-            with self.vip(do_delete=False) as vip:
-                req = self.new_delete_request('vips',
-                                              vip['vip']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-
-    def test_show_vip(self):
-        name = "vip_show"
-        keys = [('name', name),
-                ('address', "10.0.0.10"),
-                ('protocol_port', 80),
-                ('protocol', 'HTTP'),
-                ('connection_limit', -1),
-                ('admin_state_up', True),
-                ('status', 'PENDING_CREATE')]
-        with self.vip(name=name, address='10.0.0.10') as vip:
-            req = self.new_show_request('vips',
-                                        vip['vip']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            for k, v in keys:
-                self.assertEqual(res['vip'][k], v)
-
-    def test_list_vips(self):
-        name = "vips_list"
-        keys = [('name', name),
-                ('address', "10.0.0.2"),
-                ('protocol_port', 80),
-                ('protocol', 'HTTP'),
-                ('connection_limit', -1),
-                ('admin_state_up', True),
-                ('status', 'PENDING_CREATE')]
-        with self.vip(name=name) as vip:
-            keys.append(('subnet_id', vip['vip']['subnet_id']))
-            req = self.new_list_request('vips')
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            self.assertEqual(len(res['vips']), 1)
-            for k, v in keys:
-                self.assertEqual(res['vips'][0][k], v)
-
-    def test_list_vips_with_sort_emulated(self):
-        with self.subnet() as subnet:
-            with contextlib.nested(
-                self.vip(name='vip1', subnet=subnet, protocol_port=81),
-                self.vip(name='vip2', subnet=subnet, protocol_port=82),
-                self.vip(name='vip3', subnet=subnet, protocol_port=82)
-            ) as (vip1, vip2, vip3):
-                self._test_list_with_sort(
-                    'vip',
-                    (vip1, vip3, vip2),
-                    [('protocol_port', 'asc'), ('name', 'desc')]
-                )
-
-    def test_list_vips_with_pagination_emulated(self):
-        with self.subnet() as subnet:
-            with contextlib.nested(self.vip(name='vip1', subnet=subnet),
-                                   self.vip(name='vip2', subnet=subnet),
-                                   self.vip(name='vip3', subnet=subnet)
-                                   ) as (vip1, vip2, vip3):
-                self._test_list_with_pagination('vip',
-                                                (vip1, vip2, vip3),
-                                                ('name', 'asc'), 2, 2)
-
-    def test_list_vips_with_pagination_reverse_emulated(self):
-        with self.subnet() as subnet:
-            with contextlib.nested(self.vip(name='vip1', subnet=subnet),
-                                   self.vip(name='vip2', subnet=subnet),
-                                   self.vip(name='vip3', subnet=subnet)
-                                   ) as (vip1, vip2, vip3):
-                self._test_list_with_pagination_reverse('vip',
-                                                        (vip1, vip2, vip3),
-                                                        ('name', 'asc'), 2, 2)
-
-    def test_create_pool_with_invalid_values(self):
-        name = 'pool3'
-
-        pool = self.pool(name=name, protocol='UNSUPPORTED')
-        self.assertRaises(webob.exc.HTTPClientError, pool.__enter__)
-
-        pool = self.pool(name=name, lb_method='UNSUPPORTED')
-        self.assertRaises(webob.exc.HTTPClientError, pool.__enter__)
-
-    def _create_pool_directly_via_plugin(self, provider_name):
-        #default provider will be haproxy
-        prov1 = (constants.LOADBALANCER +
-                 ':lbaas:' + NOOP_DRIVER_KLASS)
-        prov2 = (constants.LOADBALANCER +
-                 ':haproxy:neutron.services.loadbalancer.'
-                 'drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver'
-                 ':default')
-        cfg.CONF.set_override('service_provider',
-                              [prov1, prov2],
-                              'service_providers')
-        sdb.ServiceTypeManager._instance = None
-        self.plugin = loadbalancer_plugin.LoadBalancerPlugin()
-        with self.subnet() as subnet:
-            ctx = context.get_admin_context()
-            #create pool with another provider - lbaas,
-            #which is the noop driver
-            pool = {'name': 'pool1',
-                    'subnet_id': subnet['subnet']['id'],
-                    'lb_method': 'ROUND_ROBIN',
-                    'protocol': 'HTTP',
-                    'admin_state_up': True,
-                    'tenant_id': self._tenant_id,
-                    'provider': provider_name,
-                    'description': ''}
-            self.plugin.create_pool(ctx, {'pool': pool})
-            assoc = ctx.session.query(sdb.ProviderResourceAssociation).one()
-            self.assertEqual(assoc.provider_name,
-                             pconf.normalize_provider_name(provider_name))
-
-    def test_create_pool_another_provider(self):
-        self._create_pool_directly_via_plugin('lbaas')
-
-    def test_create_pool_unnormalized_provider_name(self):
-        self._create_pool_directly_via_plugin('LBAAS')
-
-    def test_create_pool_unexisting_provider(self):
-        self.assertRaises(
-            pconf.ServiceProviderNotFound,
-            self._create_pool_directly_via_plugin, 'unexisting')
-
-    def test_create_pool(self):
-        name = "pool1"
-        keys = [('name', name),
-                ('subnet_id', self._subnet_id),
-                ('tenant_id', self._tenant_id),
-                ('protocol', 'HTTP'),
-                ('lb_method', 'ROUND_ROBIN'),
-                ('admin_state_up', True),
-                ('status', 'PENDING_CREATE')]
-
-        with self.pool(name=name) as pool:
-            for k, v in keys:
-                self.assertEqual(pool['pool'][k], v)
-
-    def test_create_pool_with_members(self):
-        name = "pool2"
-        with self.pool(name=name) as pool:
-            pool_id = pool['pool']['id']
-            res1 = self._create_member(self.fmt,
-                                       '192.168.1.100',
-                                       '80',
-                                       True,
-                                       pool_id=pool_id,
-                                       weight=1)
-            req = self.new_show_request('pools',
-                                        pool_id,
-                                        fmt=self.fmt)
-            pool_updated = self.deserialize(
-                self.fmt,
-                req.get_response(self.ext_api)
-            )
-
-            member1 = self.deserialize(self.fmt, res1)
-            self.assertEqual(member1['member']['id'],
-                             pool_updated['pool']['members'][0])
-            self.assertEqual(len(pool_updated['pool']['members']), 1)
-
-            keys = [('address', '192.168.1.100'),
-                    ('protocol_port', 80),
-                    ('weight', 1),
-                    ('pool_id', pool_id),
-                    ('admin_state_up', True),
-                    ('status', 'PENDING_CREATE')]
-            for k, v in keys:
-                self.assertEqual(member1['member'][k], v)
-            self._delete('members', member1['member']['id'])
-
-    def test_delete_pool(self):
-        with self.pool(do_delete=False) as pool:
-            with self.member(do_delete=False,
-                             pool_id=pool['pool']['id']):
-                req = self.new_delete_request('pools',
-                                              pool['pool']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-
-    def test_delete_pool_preserve_state(self):
-        with self.pool(do_delete=False) as pool:
-            with self.vip(pool=pool):
-                req = self.new_delete_request('pools',
-                                              pool['pool']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
-                req = self.new_show_request('pools',
-                                            pool['pool']['id'],
-                                            fmt=self.fmt)
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                self.assertEqual(res['pool']['status'],
-                                 constants.PENDING_CREATE)
-            req = self.new_delete_request('pools',
-                                          pool['pool']['id'])
-
-    def test_show_pool(self):
-        name = "pool1"
-        keys = [('name', name),
-                ('subnet_id', self._subnet_id),
-                ('tenant_id', self._tenant_id),
-                ('protocol', 'HTTP'),
-                ('lb_method', 'ROUND_ROBIN'),
-                ('admin_state_up', True),
-                ('status', 'PENDING_CREATE')]
-        with self.pool(name=name) as pool:
-            req = self.new_show_request('pools',
-                                        pool['pool']['id'],
-                                        fmt=self.fmt)
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            for k, v in keys:
-                self.assertEqual(res['pool'][k], v)
-
-    def test_list_pools_with_sort_emulated(self):
-        with contextlib.nested(self.pool(name='p1'),
-                               self.pool(name='p2'),
-                               self.pool(name='p3')
-                               ) as (p1, p2, p3):
-            self._test_list_with_sort('pool', (p3, p2, p1),
-                                      [('name', 'desc')])
-
-    def test_list_pools_with_pagination_emulated(self):
-        with contextlib.nested(self.pool(name='p1'),
-                               self.pool(name='p2'),
-                               self.pool(name='p3')
-                               ) as (p1, p2, p3):
-            self._test_list_with_pagination('pool',
-                                            (p1, p2, p3),
-                                            ('name', 'asc'), 2, 2)
-
-    def test_list_pools_with_pagination_reverse_emulated(self):
-        with contextlib.nested(self.pool(name='p1'),
-                               self.pool(name='p2'),
-                               self.pool(name='p3')
-                               ) as (p1, p2, p3):
-            self._test_list_with_pagination_reverse('pool',
-                                                    (p1, p2, p3),
-                                                    ('name', 'asc'), 2, 2)
-
-    def test_create_member(self):
-        with self.pool() as pool:
-            pool_id = pool['pool']['id']
-            with self.member(address='192.168.1.100',
-                             protocol_port=80,
-                             pool_id=pool_id) as member1:
-                with self.member(address='192.168.1.101',
-                                 protocol_port=80,
-                                 pool_id=pool_id) as member2:
-                    req = self.new_show_request('pools',
-                                                pool_id,
-                                                fmt=self.fmt)
-                    pool_update = self.deserialize(
-                        self.fmt,
-                        req.get_response(self.ext_api)
-                    )
-                    self.assertIn(member1['member']['id'],
-                                  pool_update['pool']['members'])
-                    self.assertIn(member2['member']['id'],
-                                  pool_update['pool']['members'])
-
-    def test_create_same_member_in_same_pool_raises_member_exists(self):
-        with self.subnet():
-            with self.pool(name="pool1") as pool:
-                pool_id = pool['pool']['id']
-                with self.member(address='192.168.1.100',
-                                 protocol_port=80,
-                                 pool_id=pool_id):
-                    member_data = {
-                        'address': '192.168.1.100',
-                        'protocol_port': 80,
-                        'weight': 1,
-                        'admin_state_up': True,
-                        'pool_id': pool_id
-                    }
-                    self.assertRaises(loadbalancer.MemberExists,
-                                      self.plugin.create_member,
-                                      context.get_admin_context(),
-                                      {'member': member_data})
-
-    def test_update_member(self):
-        with self.pool(name="pool1") as pool1:
-            with self.pool(name="pool2") as pool2:
-                keys = [('address', "192.168.1.100"),
-                        ('tenant_id', self._tenant_id),
-                        ('protocol_port', 80),
-                        ('weight', 10),
-                        ('pool_id', pool2['pool']['id']),
-                        ('admin_state_up', False),
-                        ('status', 'PENDING_UPDATE')]
-                with self.member(pool_id=pool1['pool']['id']) as member:
-                    req = self.new_show_request('pools',
-                                                pool1['pool']['id'],
-                                                fmt=self.fmt)
-                    pool1_update = self.deserialize(
-                        self.fmt,
-                        req.get_response(self.ext_api)
-                    )
-                    self.assertEqual(len(pool1_update['pool']['members']), 1)
-
-                    req = self.new_show_request('pools',
-                                                pool2['pool']['id'],
-                                                fmt=self.fmt)
-                    pool2_update = self.deserialize(
-                        self.fmt,
-                        req.get_response(self.ext_api)
-                    )
-                    self.assertEqual(len(pool1_update['pool']['members']), 1)
-                    self.assertEqual(len(pool2_update['pool']['members']), 0)
-
-                    data = {'member': {'pool_id': pool2['pool']['id'],
-                                       'weight': 10,
-                                       'admin_state_up': False}}
-                    req = self.new_update_request('members',
-                                                  data,
-                                                  member['member']['id'])
-                    res = self.deserialize(
-                        self.fmt,
-                        req.get_response(self.ext_api)
-                    )
-                    for k, v in keys:
-                        self.assertEqual(res['member'][k], v)
-
-                    req = self.new_show_request('pools',
-                                                pool1['pool']['id'],
-                                                fmt=self.fmt)
-                    pool1_update = self.deserialize(
-                        self.fmt,
-                        req.get_response(self.ext_api)
-                    )
-
-                    req = self.new_show_request('pools',
-                                                pool2['pool']['id'],
-                                                fmt=self.fmt)
-                    pool2_update = self.deserialize(
-                        self.fmt,
-                        req.get_response(self.ext_api)
-                    )
-
-                    self.assertEqual(len(pool2_update['pool']['members']), 1)
-                    self.assertEqual(len(pool1_update['pool']['members']), 0)
-
-    def test_delete_member(self):
-        with self.pool() as pool:
-            pool_id = pool['pool']['id']
-            with self.member(pool_id=pool_id,
-                             do_delete=False) as member:
-                req = self.new_delete_request('members',
-                                              member['member']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-
-                req = self.new_show_request('pools',
-                                            pool_id,
-                                            fmt=self.fmt)
-                pool_update = self.deserialize(
-                    self.fmt,
-                    req.get_response(self.ext_api)
-                )
-                self.assertEqual(len(pool_update['pool']['members']), 0)
-
-    def test_show_member(self):
-        with self.pool() as pool:
-            keys = [('address', "192.168.1.100"),
-                    ('tenant_id', self._tenant_id),
-                    ('protocol_port', 80),
-                    ('weight', 1),
-                    ('pool_id', pool['pool']['id']),
-                    ('admin_state_up', True),
-                    ('status', 'PENDING_CREATE')]
-            with self.member(pool_id=pool['pool']['id']) as member:
-                req = self.new_show_request('members',
-                                            member['member']['id'],
-                                            fmt=self.fmt)
-                res = self.deserialize(
-                    self.fmt,
-                    req.get_response(self.ext_api)
-                )
-                for k, v in keys:
-                    self.assertEqual(res['member'][k], v)
-
-    def test_list_members_with_sort_emulated(self):
-        with self.pool() as pool:
-            with contextlib.nested(self.member(pool_id=pool['pool']['id'],
-                                               protocol_port=81),
-                                   self.member(pool_id=pool['pool']['id'],
-                                               protocol_port=82),
-                                   self.member(pool_id=pool['pool']['id'],
-                                               protocol_port=83)
-                                   ) as (m1, m2, m3):
-                self._test_list_with_sort('member', (m3, m2, m1),
-                                          [('protocol_port', 'desc')])
-
-    def test_list_members_with_pagination_emulated(self):
-        with self.pool() as pool:
-            with contextlib.nested(self.member(pool_id=pool['pool']['id'],
-                                               protocol_port=81),
-                                   self.member(pool_id=pool['pool']['id'],
-                                               protocol_port=82),
-                                   self.member(pool_id=pool['pool']['id'],
-                                               protocol_port=83)
-                                   ) as (m1, m2, m3):
-                self._test_list_with_pagination(
-                    'member', (m1, m2, m3), ('protocol_port', 'asc'), 2, 2
-                )
-
-    def test_list_members_with_pagination_reverse_emulated(self):
-        with self.pool() as pool:
-            with contextlib.nested(self.member(pool_id=pool['pool']['id'],
-                                               protocol_port=81),
-                                   self.member(pool_id=pool['pool']['id'],
-                                               protocol_port=82),
-                                   self.member(pool_id=pool['pool']['id'],
-                                               protocol_port=83)
-                                   ) as (m1, m2, m3):
-                self._test_list_with_pagination_reverse(
-                    'member', (m1, m2, m3), ('protocol_port', 'asc'), 2, 2
-                )
-
-    def test_create_healthmonitor(self):
-        keys = [('type', "TCP"),
-                ('tenant_id', self._tenant_id),
-                ('delay', 30),
-                ('timeout', 10),
-                ('max_retries', 3),
-                ('admin_state_up', True)]
-        with self.health_monitor() as monitor:
-            for k, v in keys:
-                self.assertEqual(monitor['health_monitor'][k], v)
-
-    def test_create_health_monitor_with_timeout_delay_invalid(self):
-        data = {'health_monitor': {'type': type,
-                                   'delay': 3,
-                                   'timeout': 6,
-                                   'max_retries': 2,
-                                   'admin_state_up': True,
-                                   'tenant_id': self._tenant_id}}
-        req = self.new_create_request('health_monitors', data, self.fmt)
-        res = req.get_response(self.ext_api)
-        self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_update_health_monitor_with_timeout_delay_invalid(self):
-        with self.health_monitor() as monitor:
-            data = {'health_monitor': {'delay': 10,
-                                       'timeout': 20,
-                                       'max_retries': 2,
-                                       'admin_state_up': False}}
-            req = self.new_update_request("health_monitors",
-                                          data,
-                                          monitor['health_monitor']['id'])
-            res = req.get_response(self.ext_api)
-            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_update_healthmonitor(self):
-        keys = [('type', "TCP"),
-                ('tenant_id', self._tenant_id),
-                ('delay', 20),
-                ('timeout', 20),
-                ('max_retries', 2),
-                ('admin_state_up', False)]
-        with self.health_monitor() as monitor:
-            data = {'health_monitor': {'delay': 20,
-                                       'timeout': 20,
-                                       'max_retries': 2,
-                                       'admin_state_up': False}}
-            req = self.new_update_request("health_monitors",
-                                          data,
-                                          monitor['health_monitor']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            for k, v in keys:
-                self.assertEqual(res['health_monitor'][k], v)
-
-    def test_delete_healthmonitor(self):
-        with self.health_monitor(do_delete=False) as monitor:
-            ctx = context.get_admin_context()
-            qry = ctx.session.query(ldb.HealthMonitor)
-            qry = qry.filter_by(id=monitor['health_monitor']['id'])
-            self.assertIsNotNone(qry.first())
-
-            req = self.new_delete_request('health_monitors',
-                                          monitor['health_monitor']['id'])
-            res = req.get_response(self.ext_api)
-            self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-            qry = ctx.session.query(ldb.HealthMonitor)
-            qry = qry.filter_by(id=monitor['health_monitor']['id'])
-            self.assertIsNone(qry.first())
-
-    def test_delete_healthmonitor_with_associations_raises(self):
-        with self.health_monitor(type='HTTP') as monitor:
-            with self.pool() as pool:
-                data = {
-                    'health_monitor': {
-                        'id': monitor['health_monitor']['id'],
-                        'tenant_id': self._tenant_id
-                    }
-                }
-                req = self.new_create_request(
-                    'pools',
-                    data,
-                    fmt=self.fmt,
-                    id=pool['pool']['id'],
-                    subresource='health_monitors')
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
-
-                ctx = context.get_admin_context()
-
-                # check if we actually have corresponding Pool associations
-                qry = ctx.session.query(ldb.PoolMonitorAssociation)
-                qry = qry.filter_by(monitor_id=monitor['health_monitor']['id'])
-                self.assertTrue(qry.all())
-                # try to delete the HealthMonitor instance
-                req = self.new_delete_request(
-                    'health_monitors',
-                    monitor['health_monitor']['id']
-                )
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
-
-                qry = ctx.session.query(ldb.HealthMonitor)
-                qry = qry.filter_by(id=monitor['health_monitor']['id'])
-                self.assertIsNotNone(qry.first())
-                # check that the corresponding Pool associations still exist
-                qry = ctx.session.query(ldb.PoolMonitorAssociation)
-                qry = qry.filter_by(monitor_id=monitor['health_monitor']['id'])
-                self.assertTrue(qry.all())
-
-    def test_show_healthmonitor(self):
-        with self.health_monitor() as monitor:
-            keys = [('type', "TCP"),
-                    ('tenant_id', self._tenant_id),
-                    ('delay', 30),
-                    ('timeout', 10),
-                    ('max_retries', 3),
-                    ('admin_state_up', True)]
-            req = self.new_show_request('health_monitors',
-                                        monitor['health_monitor']['id'],
-                                        fmt=self.fmt)
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            for k, v in keys:
-                self.assertEqual(res['health_monitor'][k], v)
-
-    def test_list_healthmonitors_with_sort_emulated(self):
-        with contextlib.nested(self.health_monitor(delay=30),
-                               self.health_monitor(delay=31),
-                               self.health_monitor(delay=32)
-                               ) as (m1, m2, m3):
-            self._test_list_with_sort('health_monitor', (m3, m2, m1),
-                                      [('delay', 'desc')])
-
-    def test_list_healthmonitors_with_pagination_emulated(self):
-        with contextlib.nested(self.health_monitor(delay=30),
-                               self.health_monitor(delay=31),
-                               self.health_monitor(delay=32)
-                               ) as (m1, m2, m3):
-            self._test_list_with_pagination('health_monitor',
-                                            (m1, m2, m3),
-                                            ('delay', 'asc'), 2, 2)
-
-    def test_list_healthmonitors_with_pagination_reverse_emulated(self):
-        with contextlib.nested(self.health_monitor(delay=30),
-                               self.health_monitor(delay=31),
-                               self.health_monitor(delay=32)
-                               ) as (m1, m2, m3):
-            self._test_list_with_pagination_reverse('health_monitor',
-                                                    (m1, m2, m3),
-                                                    ('delay', 'asc'), 2, 2)
-
-    def test_update_pool_invalid_lb_method(self):
-        with self.pool() as pool:
-            update_data = {'pool': {'lb_method': 'dummy'}}
-            req = self.new_update_request('pools', update_data,
-                                          pool['pool']['id'], fmt=self.fmt)
-            res = req.get_response(self.ext_api)
-            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_update_pool_stats_with_no_stats(self):
-        keys = ["bytes_in", "bytes_out",
-                "active_connections",
-                "total_connections"]
-        with self.pool() as pool:
-            pool_id = pool['pool']['id']
-            ctx = context.get_admin_context()
-            self.plugin.update_pool_stats(ctx, pool_id)
-            pool_obj = ctx.session.query(ldb.Pool).filter_by(id=pool_id).one()
-            for key in keys:
-                self.assertEqual(pool_obj.stats.__dict__[key], 0)
-
-    def test_update_pool_stats_with_negative_values(self):
-        stats_data = {"bytes_in": -1,
-                      "bytes_out": -2,
-                      "active_connections": -3,
-                      "total_connections": -4}
-        for k, v in stats_data.items():
-            self._test_update_pool_stats_with_negative_value(k, v)
-
-    def _test_update_pool_stats_with_negative_value(self, k, v):
-        with self.pool() as pool:
-            pool_id = pool['pool']['id']
-            ctx = context.get_admin_context()
-            self.assertRaises(ValueError, self.plugin.update_pool_stats,
-                              ctx, pool_id, {k: v})
-
-    def test_update_pool_stats(self):
-        stats_data = {"bytes_in": 1,
-                      "bytes_out": 2,
-                      "active_connections": 3,
-                      "total_connections": 4}
-        with self.pool() as pool:
-            pool_id = pool['pool']['id']
-            ctx = context.get_admin_context()
-            self.plugin.update_pool_stats(ctx, pool_id, stats_data)
-            pool_obj = ctx.session.query(ldb.Pool).filter_by(id=pool_id).one()
-            for k, v in stats_data.items():
-                self.assertEqual(pool_obj.stats.__dict__[k], v)
-
-    def test_update_pool_stats_members_statuses(self):
-        with self.pool() as pool:
-            pool_id = pool['pool']['id']
-            with self.member(pool_id=pool_id) as member:
-                member_id = member['member']['id']
-                stats_data = {'members': {
-                    member_id: {
-                        'status': 'INACTIVE'
-                    }
-                }}
-                ctx = context.get_admin_context()
-                member = self.plugin.get_member(ctx, member_id)
-                self.assertEqual('PENDING_CREATE', member['status'])
-                self.plugin.update_pool_stats(ctx, pool_id, stats_data)
-                member = self.plugin.get_member(ctx, member_id)
-                self.assertEqual('INACTIVE', member['status'])
-
-    def test_get_pool_stats(self):
-        keys = [("bytes_in", 0),
-                ("bytes_out", 0),
-                ("active_connections", 0),
-                ("total_connections", 0)]
-        with self.pool() as pool:
-            req = self.new_show_request("pools",
-                                        pool['pool']['id'],
-                                        subresource="stats",
-                                        fmt=self.fmt)
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            for k, v in keys:
-                self.assertEqual(res['stats'][k], v)
-
-    def test_create_healthmonitor_of_pool(self):
-        with self.health_monitor(type="TCP") as monitor1:
-            with self.health_monitor(type="HTTP") as monitor2:
-                with self.pool() as pool:
-                    data = {"health_monitor": {
-                            "id": monitor1['health_monitor']['id'],
-                            'tenant_id': self._tenant_id}}
-                    req = self.new_create_request(
-                        "pools",
-                        data,
-                        fmt=self.fmt,
-                        id=pool['pool']['id'],
-                        subresource="health_monitors")
-                    res = req.get_response(self.ext_api)
-                    self.assertEqual(res.status_int,
-                                     webob.exc.HTTPCreated.code)
-
-                    data = {"health_monitor": {
-                            "id": monitor2['health_monitor']['id'],
-                            'tenant_id': self._tenant_id}}
-                    req = self.new_create_request(
-                        "pools",
-                        data,
-                        fmt=self.fmt,
-                        id=pool['pool']['id'],
-                        subresource="health_monitors")
-                    res = req.get_response(self.ext_api)
-                    self.assertEqual(res.status_int,
-                                     webob.exc.HTTPCreated.code)
-
-                    req = self.new_show_request(
-                        'pools',
-                        pool['pool']['id'],
-                        fmt=self.fmt)
-                    res = self.deserialize(
-                        self.fmt,
-                        req.get_response(self.ext_api)
-                    )
-                    self.assertIn(monitor1['health_monitor']['id'],
-                                  res['pool']['health_monitors'])
-                    self.assertIn(monitor2['health_monitor']['id'],
-                                  res['pool']['health_monitors'])
-                    expected = [
-                        {'monitor_id': monitor1['health_monitor']['id'],
-                         'status': 'PENDING_CREATE',
-                         'status_description': None},
-                        {'monitor_id': monitor2['health_monitor']['id'],
-                         'status': 'PENDING_CREATE',
-                         'status_description': None}]
-                    self.assertEqual(
-                        sorted(expected),
-                        sorted(res['pool']['health_monitors_status']))
-
-    def test_delete_healthmonitor_of_pool(self):
-        with self.health_monitor(type="TCP") as monitor1:
-            with self.health_monitor(type="HTTP") as monitor2:
-                with self.pool() as pool:
-                    # add the monitors to the pool
-                    data = {"health_monitor": {
-                            "id": monitor1['health_monitor']['id'],
-                            'tenant_id': self._tenant_id}}
-                    req = self.new_create_request(
-                        "pools",
-                        data,
-                        fmt=self.fmt,
-                        id=pool['pool']['id'],
-                        subresource="health_monitors")
-                    res = req.get_response(self.ext_api)
-                    self.assertEqual(res.status_int,
-                                     webob.exc.HTTPCreated.code)
-
-                    data = {"health_monitor": {
-                            "id": monitor2['health_monitor']['id'],
-                            'tenant_id': self._tenant_id}}
-                    req = self.new_create_request(
-                        "pools",
-                        data,
-                        fmt=self.fmt,
-                        id=pool['pool']['id'],
-                        subresource="health_monitors")
-                    res = req.get_response(self.ext_api)
-                    self.assertEqual(res.status_int,
-                                     webob.exc.HTTPCreated.code)
-
-                    # remove one of the health monitors from the pool
-                    req = self.new_delete_request(
-                        "pools",
-                        fmt=self.fmt,
-                        id=pool['pool']['id'],
-                        sub_id=monitor1['health_monitor']['id'],
-                        subresource="health_monitors")
-                    res = req.get_response(self.ext_api)
-                    self.assertEqual(res.status_int,
-                                     webob.exc.HTTPNoContent.code)
-
-                    req = self.new_show_request(
-                        'pools',
-                        pool['pool']['id'],
-                        fmt=self.fmt)
-                    res = self.deserialize(
-                        self.fmt,
-                        req.get_response(self.ext_api)
-                    )
-                    self.assertNotIn(monitor1['health_monitor']['id'],
-                                     res['pool']['health_monitors'])
-                    self.assertIn(monitor2['health_monitor']['id'],
-                                  res['pool']['health_monitors'])
-                    expected = [
-                        {'monitor_id': monitor2['health_monitor']['id'],
-                         'status': 'PENDING_CREATE',
-                         'status_description': None}
-                    ]
-                    self.assertEqual(expected,
-                                     res['pool']['health_monitors_status'])
-
-    def test_create_loadbalancer(self):
-        vip_name = "vip3"
-        pool_name = "pool3"
-
-        with self.pool(name=pool_name) as pool:
-            with self.vip(name=vip_name, pool=pool) as vip:
-                pool_id = pool['pool']['id']
-                vip_id = vip['vip']['id']
-                # Add two members
-                res1 = self._create_member(self.fmt,
-                                           '192.168.1.100',
-                                           '80',
-                                           True,
-                                           pool_id=pool_id,
-                                           weight=1)
-                res2 = self._create_member(self.fmt,
-                                           '192.168.1.101',
-                                           '80',
-                                           True,
-                                           pool_id=pool_id,
-                                           weight=2)
-                # Add a health_monitor
-                req = self._create_health_monitor(self.fmt,
-                                                  'HTTP',
-                                                  '10',
-                                                  '10',
-                                                  '3',
-                                                  True)
-                health_monitor = self.deserialize(self.fmt, req)
-                self.assertEqual(req.status_int, webob.exc.HTTPCreated.code)
-
-                # Associate the health_monitor to the pool
-                data = {"health_monitor": {
-                        "id": health_monitor['health_monitor']['id'],
-                        'tenant_id': self._tenant_id}}
-                req = self.new_create_request("pools",
-                                              data,
-                                              fmt=self.fmt,
-                                              id=pool['pool']['id'],
-                                              subresource="health_monitors")
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
-
-                # Get pool and vip
-                req = self.new_show_request('pools',
-                                            pool_id,
-                                            fmt=self.fmt)
-                pool_updated = self.deserialize(
-                    self.fmt,
-                    req.get_response(self.ext_api)
-                )
-                member1 = self.deserialize(self.fmt, res1)
-                member2 = self.deserialize(self.fmt, res2)
-                self.assertIn(member1['member']['id'],
-                              pool_updated['pool']['members'])
-                self.assertIn(member2['member']['id'],
-                              pool_updated['pool']['members'])
-                self.assertIn(health_monitor['health_monitor']['id'],
-                              pool_updated['pool']['health_monitors'])
-                expected = [
-                    {'monitor_id': health_monitor['health_monitor']['id'],
-                     'status': 'PENDING_CREATE',
-                     'status_description': None}
-                ]
-                self.assertEqual(
-                    expected, pool_updated['pool']['health_monitors_status'])
-
-                req = self.new_show_request('vips',
-                                            vip_id,
-                                            fmt=self.fmt)
-                vip_updated = self.deserialize(
-                    self.fmt,
-                    req.get_response(self.ext_api)
-                )
-                self.assertEqual(vip_updated['vip']['pool_id'],
-                                 pool_updated['pool']['id'])
-
-                # clean up
-                # disassociate the health_monitor from the pool first
-                req = self.new_delete_request(
-                    "pools",
-                    fmt=self.fmt,
-                    id=pool['pool']['id'],
-                    subresource="health_monitors",
-                    sub_id=health_monitor['health_monitor']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-                self._delete('health_monitors',
-                             health_monitor['health_monitor']['id'])
-                self._delete('members', member1['member']['id'])
-                self._delete('members', member2['member']['id'])
-
-    def test_create_pool_health_monitor(self):
-        with contextlib.nested(
-            self.health_monitor(),
-            self.health_monitor(),
-            self.pool(name="pool")
-        ) as (health_mon1, health_mon2, pool):
-                res = self.plugin.create_pool_health_monitor(
-                    context.get_admin_context(),
-                    health_mon1, pool['pool']['id']
-                )
-                self.assertEqual({'health_monitor':
-                                  [health_mon1['health_monitor']['id']]},
-                                 res)
-
-                res = self.plugin.create_pool_health_monitor(
-                    context.get_admin_context(),
-                    health_mon2, pool['pool']['id']
-                )
-                self.assertEqual({'health_monitor':
-                                  [health_mon1['health_monitor']['id'],
-                                   health_mon2['health_monitor']['id']]},
-                                 res)
-
-                res = self.plugin.get_pool_health_monitor(
-                    context.get_admin_context(),
-                    health_mon2['health_monitor']['id'], pool['pool']['id'])
-                self.assertEqual(res['tenant_id'],
-                                 health_mon1['health_monitor']['tenant_id'])
-
-    def test_driver_call_create_pool_health_monitor(self):
-        with mock.patch.object(self.plugin.drivers['lbaas'],
-                               'create_pool_health_monitor') as driver_call:
-            with contextlib.nested(
-                self.health_monitor(),
-                self.pool()
-            ) as (hm, pool):
-                data = {'health_monitor': {
-                        'id': hm['health_monitor']['id'],
-                        'tenant_id': self._tenant_id}}
-                self.plugin.create_pool_health_monitor(
-                    context.get_admin_context(),
-                    data, pool['pool']['id']
-                )
-                hm['health_monitor']['pools'] = [
-                    {'pool_id': pool['pool']['id'],
-                     'status': 'PENDING_CREATE',
-                     'status_description': None}]
-                driver_call.assert_called_once_with(
-                    mock.ANY, hm['health_monitor'], pool['pool']['id'])
-
-    def test_pool_monitor_list_of_pools(self):
-        with contextlib.nested(
-            self.health_monitor(),
-            self.pool(),
-            self.pool()
-        ) as (hm, p1, p2):
-            ctx = context.get_admin_context()
-            data = {'health_monitor': {
-                    'id': hm['health_monitor']['id'],
-                    'tenant_id': self._tenant_id}}
-            self.plugin.create_pool_health_monitor(
-                ctx, data, p1['pool']['id'])
-            self.plugin.create_pool_health_monitor(
-                ctx, data, p2['pool']['id'])
-            healthmon = self.plugin.get_health_monitor(
-                ctx, hm['health_monitor']['id'])
-            pool_data = [{'pool_id': p1['pool']['id'],
-                          'status': 'PENDING_CREATE',
-                          'status_description': None},
-                         {'pool_id': p2['pool']['id'],
-                          'status': 'PENDING_CREATE',
-                          'status_description': None}]
-            self.assertEqual(sorted(healthmon['pools']),
-                             sorted(pool_data))
-            req = self.new_show_request(
-                'health_monitors',
-                hm['health_monitor']['id'],
-                fmt=self.fmt)
-            hm = self.deserialize(
-                self.fmt,
-                req.get_response(self.ext_api)
-            )
-            self.assertEqual(sorted(hm['health_monitor']['pools']),
-                             sorted(pool_data))
-
-    def test_create_pool_health_monitor_already_associated(self):
-        with contextlib.nested(
-            self.health_monitor(),
-            self.pool(name="pool")
-        ) as (hm, pool):
-            res = self.plugin.create_pool_health_monitor(
-                context.get_admin_context(),
-                hm, pool['pool']['id']
-            )
-            self.assertEqual({'health_monitor':
-                              [hm['health_monitor']['id']]},
-                             res)
-            self.assertRaises(loadbalancer.PoolMonitorAssociationExists,
-                              self.plugin.create_pool_health_monitor,
-                              context.get_admin_context(),
-                              hm,
-                              pool['pool']['id'])
-
-    def test_create_pool_healthmon_invalid_pool_id(self):
-        with self.health_monitor() as healthmon:
-            self.assertRaises(loadbalancer.PoolNotFound,
-                              self.plugin.create_pool_health_monitor,
-                              context.get_admin_context(),
-                              healthmon,
-                              "123-456-789"
-                              )
-
-    def test_update_status(self):
-        with self.pool() as pool:
-            self.assertEqual(pool['pool']['status'], 'PENDING_CREATE')
-            self.assertFalse(pool['pool']['status_description'])
-
-            self.plugin.update_status(context.get_admin_context(), ldb.Pool,
-                                      pool['pool']['id'], 'ERROR', 'unknown')
-            updated_pool = self.plugin.get_pool(context.get_admin_context(),
-                                                pool['pool']['id'])
-            self.assertEqual(updated_pool['status'], 'ERROR')
-            self.assertEqual(updated_pool['status_description'], 'unknown')
-
-            # update status to ACTIVE, status_description should be cleared
-            self.plugin.update_status(context.get_admin_context(), ldb.Pool,
-                                      pool['pool']['id'], 'ACTIVE')
-            updated_pool = self.plugin.get_pool(context.get_admin_context(),
-                                                pool['pool']['id'])
-            self.assertEqual(updated_pool['status'], 'ACTIVE')
-            self.assertFalse(updated_pool['status_description'])
-
-    def test_update_pool_health_monitor(self):
-        with contextlib.nested(
-            self.health_monitor(),
-            self.pool(name="pool")
-        ) as (hm, pool):
-            res = self.plugin.create_pool_health_monitor(
-                context.get_admin_context(),
-                hm, pool['pool']['id'])
-            self.assertEqual({'health_monitor':
-                              [hm['health_monitor']['id']]},
-                             res)
-
-            assoc = self.plugin.get_pool_health_monitor(
-                context.get_admin_context(),
-                hm['health_monitor']['id'],
-                pool['pool']['id'])
-            self.assertEqual(assoc['status'], 'PENDING_CREATE')
-            self.assertIsNone(assoc['status_description'])
-
-            self.plugin.update_pool_health_monitor(
-                context.get_admin_context(),
-                hm['health_monitor']['id'],
-                pool['pool']['id'],
-                'ACTIVE', 'ok')
-            assoc = self.plugin.get_pool_health_monitor(
-                context.get_admin_context(),
-                hm['health_monitor']['id'],
-                pool['pool']['id'])
-            self.assertEqual(assoc['status'], 'ACTIVE')
-            self.assertEqual(assoc['status_description'], 'ok')
-
-    def test_check_orphan_pool_associations(self):
-        with contextlib.nested(
-            #creating pools with default noop driver
-            self.pool(),
-            self.pool()
-        ) as (p1, p2):
-            #checking that 2 associations exist
-            ctx = context.get_admin_context()
-            qry = ctx.session.query(sdb.ProviderResourceAssociation)
-            self.assertEqual(qry.count(), 2)
-            #removing driver
-            cfg.CONF.set_override('service_provider',
-                                  [constants.LOADBALANCER +
-                                   ':lbaas1:' + NOOP_DRIVER_KLASS +
-                                   ':default'],
-                                  'service_providers')
-            sdb.ServiceTypeManager._instance = None
-            # calling _remove_orphan... in constructor
-            self.assertRaises(
-                SystemExit,
-                loadbalancer_plugin.LoadBalancerPlugin
-            )
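
The removed LBaaS tests above drive the extension API through the generic request helpers of the unit-test plugin framework (new_create_request, new_show_request, new_update_request, new_delete_request) and rely on resource context managers such as self.pool(), self.vip(), self.member() and self.health_monitor(), which create a resource on entry and delete it on exit unless do_delete=False; contextlib.nested, used throughout, has been deprecated since Python 2.7 and is unavailable in Python 3. A minimal stand-alone sketch of that create-on-enter/delete-on-exit fixture pattern follows; FakeLBaaSAPI and the pool() helper are hypothetical names used only for illustration and are not part of this patch or of Neutron.

    import contextlib
    import uuid


    class FakeLBaaSAPI(object):
        """Hypothetical in-memory stand-in for the plugin API used in tests."""

        def __init__(self):
            self.pools = {}

        def create_pool(self, name, protocol='HTTP', lb_method='ROUND_ROBIN'):
            # New resources start in PENDING_CREATE, as the assertions above expect.
            pool = {'id': str(uuid.uuid4()), 'name': name, 'protocol': protocol,
                    'lb_method': lb_method, 'status': 'PENDING_CREATE'}
            self.pools[pool['id']] = pool
            return pool

        def delete_pool(self, pool_id):
            self.pools.pop(pool_id, None)


    @contextlib.contextmanager
    def pool(api, name='pool1', do_delete=True, **kwargs):
        # Create the resource on entry, hand it to the test body, and clean it
        # up on exit unless the test wants to exercise deletion itself.
        p = api.create_pool(name, **kwargs)
        try:
            yield p
        finally:
            if do_delete:
                api.delete_pool(p['id'])


    if __name__ == '__main__':
        api = FakeLBaaSAPI()
        with pool(api, name='p1') as p:
            assert p['status'] == 'PENDING_CREATE'
        assert not api.pools  # resource cleaned up on exit
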
diff --git a/neutron/tests/unit/db/vpn/__init__.py b/neutron/tests/unit/db/vpn/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/db/vpn/test_db_vpnaas.py b/neutron/tests/unit/db/vpn/test_db_vpnaas.py
deleted file mode 100644 (file)
index 4859b39..0000000
+++ /dev/null
@@ -1,1598 +0,0 @@
-#    (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
-#    All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import os
-
-from oslo.config import cfg
-import webob.exc
-
-from neutron.api import extensions as api_extensions
-from neutron.common import config
-from neutron import context
-from neutron.db import agentschedulers_db
-from neutron.db import l3_agentschedulers_db
-from neutron.db import servicetype_db as sdb
-from neutron.db.vpn import vpn_db
-from neutron import extensions
-from neutron.extensions import vpnaas
-from neutron import manager
-from neutron.openstack.common import uuidutils
-from neutron.plugins.common import constants
-from neutron.scheduler import l3_agent_scheduler
-from neutron.services.vpn import plugin as vpn_plugin
-from neutron.tests.unit import test_db_plugin
-from neutron.tests.unit import test_l3_plugin
-
-DB_CORE_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
-DB_VPN_PLUGIN_KLASS = "neutron.services.vpn.plugin.VPNPlugin"
-ROOTDIR = os.path.normpath(os.path.join(
-    os.path.dirname(__file__),
-    '..', '..', '..', '..'))
-
-extensions_path = ':'.join(extensions.__path__)
-
-
-class TestVpnCorePlugin(test_l3_plugin.TestL3NatIntPlugin,
-                        l3_agentschedulers_db.L3AgentSchedulerDbMixin,
-                        agentschedulers_db.DhcpAgentSchedulerDbMixin):
-    def __init__(self, configfile=None):
-        super(TestVpnCorePlugin, self).__init__()
-        self.router_scheduler = l3_agent_scheduler.ChanceScheduler()
-
-
-class VPNTestMixin(object):
-    resource_prefix_map = dict(
-        (k.replace('_', '-'),
-         constants.COMMON_PREFIXES[constants.VPN])
-        for k in vpnaas.RESOURCE_ATTRIBUTE_MAP
-    )
-
-    def _create_ikepolicy(self, fmt,
-                          name='ikepolicy1',
-                          auth_algorithm='sha1',
-                          encryption_algorithm='aes-128',
-                          phase1_negotiation_mode='main',
-                          lifetime_units='seconds',
-                          lifetime_value=3600,
-                          ike_version='v1',
-                          pfs='group5',
-                          expected_res_status=None, **kwargs):
-
-        data = {'ikepolicy': {
-                'name': name,
-                'auth_algorithm': auth_algorithm,
-                'encryption_algorithm': encryption_algorithm,
-                'phase1_negotiation_mode': phase1_negotiation_mode,
-                'lifetime': {
-                    'units': lifetime_units,
-                    'value': lifetime_value},
-                'ike_version': ike_version,
-                'pfs': pfs,
-                'tenant_id': self._tenant_id
-                }}
-        if kwargs.get('description') is not None:
-            data['ikepolicy']['description'] = kwargs['description']
-
-        ikepolicy_req = self.new_create_request('ikepolicies', data, fmt)
-        ikepolicy_res = ikepolicy_req.get_response(self.ext_api)
-        if expected_res_status:
-            self.assertEqual(ikepolicy_res.status_int, expected_res_status)
-
-        return ikepolicy_res
-
-    @contextlib.contextmanager
-    def ikepolicy(self, fmt=None,
-                  name='ikepolicy1',
-                  auth_algorithm='sha1',
-                  encryption_algorithm='aes-128',
-                  phase1_negotiation_mode='main',
-                  lifetime_units='seconds',
-                  lifetime_value=3600,
-                  ike_version='v1',
-                  pfs='group5',
-                  do_delete=True,
-                  **kwargs):
-        if not fmt:
-            fmt = self.fmt
-        res = self._create_ikepolicy(fmt,
-                                     name,
-                                     auth_algorithm,
-                                     encryption_algorithm,
-                                     phase1_negotiation_mode,
-                                     lifetime_units,
-                                     lifetime_value,
-                                     ike_version,
-                                     pfs,
-                                     **kwargs)
-        if res.status_int >= 400:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        ikepolicy = self.deserialize(fmt or self.fmt, res)
-        yield ikepolicy
-        if do_delete:
-            self._delete('ikepolicies', ikepolicy['ikepolicy']['id'])
-
-    def _create_ipsecpolicy(self, fmt,
-                            name='ipsecpolicy1',
-                            auth_algorithm='sha1',
-                            encryption_algorithm='aes-128',
-                            encapsulation_mode='tunnel',
-                            transform_protocol='esp',
-                            lifetime_units='seconds',
-                            lifetime_value=3600,
-                            pfs='group5',
-                            expected_res_status=None,
-                            **kwargs):
-
-        data = {'ipsecpolicy': {'name': name,
-                                'auth_algorithm': auth_algorithm,
-                                'encryption_algorithm': encryption_algorithm,
-                                'encapsulation_mode': encapsulation_mode,
-                                'transform_protocol': transform_protocol,
-                                'lifetime': {'units': lifetime_units,
-                                             'value': lifetime_value},
-                                'pfs': pfs,
-                                'tenant_id': self._tenant_id}}
-        if kwargs.get('description') is not None:
-            data['ipsecpolicy']['description'] = kwargs['description']
-        ipsecpolicy_req = self.new_create_request('ipsecpolicies', data, fmt)
-        ipsecpolicy_res = ipsecpolicy_req.get_response(self.ext_api)
-        if expected_res_status:
-            self.assertEqual(ipsecpolicy_res.status_int, expected_res_status)
-
-        return ipsecpolicy_res
-
-    @contextlib.contextmanager
-    def ipsecpolicy(self, fmt=None,
-                    name='ipsecpolicy1',
-                    auth_algorithm='sha1',
-                    encryption_algorithm='aes-128',
-                    encapsulation_mode='tunnel',
-                    transform_protocol='esp',
-                    lifetime_units='seconds',
-                    lifetime_value=3600,
-                    pfs='group5',
-                    do_delete=True, **kwargs):
-        if not fmt:
-            fmt = self.fmt
-        res = self._create_ipsecpolicy(fmt,
-                                       name,
-                                       auth_algorithm,
-                                       encryption_algorithm,
-                                       encapsulation_mode,
-                                       transform_protocol,
-                                       lifetime_units,
-                                       lifetime_value,
-                                       pfs,
-                                       **kwargs)
-        if res.status_int >= 400:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-        ipsecpolicy = self.deserialize(fmt or self.fmt, res)
-        yield ipsecpolicy
-        if do_delete:
-            self._delete('ipsecpolicies', ipsecpolicy['ipsecpolicy']['id'])
-
-    def _create_vpnservice(self, fmt, name,
-                           admin_state_up,
-                           router_id, subnet_id,
-                           expected_res_status=None, **kwargs):
-        tenant_id = kwargs.get('tenant_id', self._tenant_id)
-        data = {'vpnservice': {'name': name,
-                               'subnet_id': subnet_id,
-                               'router_id': router_id,
-                               'admin_state_up': admin_state_up,
-                               'tenant_id': tenant_id}}
-        if kwargs.get('description') is not None:
-            data['vpnservice']['description'] = kwargs['description']
-        vpnservice_req = self.new_create_request('vpnservices', data, fmt)
-        if (kwargs.get('set_context') and
-                'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            vpnservice_req.environ['neutron.context'] = context.Context(
-                '', kwargs['tenant_id'])
-        vpnservice_res = vpnservice_req.get_response(self.ext_api)
-        if expected_res_status:
-            self.assertEqual(vpnservice_res.status_int, expected_res_status)
-        return vpnservice_res
-
-    @contextlib.contextmanager
-    def vpnservice(self, fmt=None, name='vpnservice1',
-                   subnet=None,
-                   router=None,
-                   admin_state_up=True,
-                   do_delete=True,
-                   plug_subnet=True,
-                   external_subnet_cidr='192.168.100.0/24',
-                   external_router=True,
-                   **kwargs):
-        if not fmt:
-            fmt = self.fmt
-        with contextlib.nested(
-            test_db_plugin.optional_ctx(subnet, self.subnet),
-            test_db_plugin.optional_ctx(router, self.router),
-            self.subnet(cidr=external_subnet_cidr)) as (tmp_subnet,
-                                                        tmp_router,
-                                                        public_sub):
-            if external_router:
-                self._set_net_external(
-                    public_sub['subnet']['network_id'])
-                self._add_external_gateway_to_router(
-                    tmp_router['router']['id'],
-                    public_sub['subnet']['network_id'])
-                tmp_router['router']['external_gateway_info'] = {
-                    'network_id': public_sub['subnet']['network_id']}
-            if plug_subnet:
-                self._router_interface_action(
-                    'add',
-                    tmp_router['router']['id'],
-                    tmp_subnet['subnet']['id'], None)
-
-            res = self._create_vpnservice(fmt,
-                                          name,
-                                          admin_state_up,
-                                          router_id=(tmp_router['router']
-                                                     ['id']),
-                                          subnet_id=(tmp_subnet['subnet']
-                                                     ['id']),
-                                          **kwargs)
-            vpnservice = self.deserialize(fmt or self.fmt, res)
-            if res.status_int < 400:
-                yield vpnservice
-
-            if do_delete and vpnservice.get('vpnservice'):
-                self._delete('vpnservices',
-                             vpnservice['vpnservice']['id'])
-            if plug_subnet:
-                self._router_interface_action(
-                    'remove',
-                    tmp_router['router']['id'],
-                    tmp_subnet['subnet']['id'], None)
-            if external_router:
-                external_gateway = tmp_router['router'].get(
-                    'external_gateway_info')
-                if external_gateway:
-                    network_id = external_gateway['network_id']
-                    self._remove_external_gateway_from_router(
-                        tmp_router['router']['id'], network_id)
-            if res.status_int >= 400:
-                raise webob.exc.HTTPClientError(
-                    code=res.status_int, detail=vpnservice)
-            self._delete('subnets', public_sub['subnet']['id'])
-        if not subnet:
-            self._delete('subnets', tmp_subnet['subnet']['id'])
-
-    def _create_ipsec_site_connection(self, fmt, name='test',
-                                      peer_address='192.168.1.10',
-                                      peer_id='192.168.1.10',
-                                      peer_cidrs=None,
-                                      mtu=1500,
-                                      psk='abcdefg',
-                                      initiator='bi-directional',
-                                      dpd_action='hold',
-                                      dpd_interval=30,
-                                      dpd_timeout=120,
-                                      vpnservice_id='fake_id',
-                                      ikepolicy_id='fake_id',
-                                      ipsecpolicy_id='fake_id',
-                                      admin_state_up=True,
-                                      expected_res_status=None, **kwargs):
-        data = {
-            'ipsec_site_connection': {'name': name,
-                                      'peer_address': peer_address,
-                                      'peer_id': peer_id,
-                                      'peer_cidrs': peer_cidrs,
-                                      'mtu': mtu,
-                                      'psk': psk,
-                                      'initiator': initiator,
-                                      'dpd': {
-                                          'action': dpd_action,
-                                          'interval': dpd_interval,
-                                          'timeout': dpd_timeout,
-                                      },
-                                      'vpnservice_id': vpnservice_id,
-                                      'ikepolicy_id': ikepolicy_id,
-                                      'ipsecpolicy_id': ipsecpolicy_id,
-                                      'admin_state_up': admin_state_up,
-                                      'tenant_id': self._tenant_id}
-        }
-        if kwargs.get('description') is not None:
-            data['ipsec_site_connection'][
-                'description'] = kwargs['description']
-
-        ipsec_site_connection_req = self.new_create_request(
-            'ipsec-site-connections', data, fmt
-        )
-        ipsec_site_connection_res = ipsec_site_connection_req.get_response(
-            self.ext_api
-        )
-        if expected_res_status:
-            self.assertEqual(
-                ipsec_site_connection_res.status_int, expected_res_status
-            )
-
-        return ipsec_site_connection_res
-
-    @contextlib.contextmanager
-    def ipsec_site_connection(self, fmt=None, name='ipsec_site_connection1',
-                              peer_address='192.168.1.10',
-                              peer_id='192.168.1.10',
-                              peer_cidrs=None,
-                              mtu=1500,
-                              psk='abcdefg',
-                              initiator='bi-directional',
-                              dpd_action='hold',
-                              dpd_interval=30,
-                              dpd_timeout=120,
-                              vpnservice=None,
-                              ikepolicy=None,
-                              ipsecpolicy=None,
-                              admin_state_up=True, do_delete=True,
-                              **kwargs):
-        if not fmt:
-            fmt = self.fmt
-        with contextlib.nested(
-            test_db_plugin.optional_ctx(vpnservice,
-                                        self.vpnservice),
-            test_db_plugin.optional_ctx(ikepolicy,
-                                        self.ikepolicy),
-            test_db_plugin.optional_ctx(ipsecpolicy,
-                                        self.ipsecpolicy)
-        ) as (tmp_vpnservice, tmp_ikepolicy, tmp_ipsecpolicy):
-            vpnservice_id = tmp_vpnservice['vpnservice']['id']
-            ikepolicy_id = tmp_ikepolicy['ikepolicy']['id']
-            ipsecpolicy_id = tmp_ipsecpolicy['ipsecpolicy']['id']
-            res = self._create_ipsec_site_connection(fmt,
-                                                     name,
-                                                     peer_address,
-                                                     peer_id,
-                                                     peer_cidrs,
-                                                     mtu,
-                                                     psk,
-                                                     initiator,
-                                                     dpd_action,
-                                                     dpd_interval,
-                                                     dpd_timeout,
-                                                     vpnservice_id,
-                                                     ikepolicy_id,
-                                                     ipsecpolicy_id,
-                                                     admin_state_up,
-                                                     **kwargs)
-            if res.status_int >= 400:
-                raise webob.exc.HTTPClientError(code=res.status_int)
-
-            ipsec_site_connection = self.deserialize(
-                fmt or self.fmt, res
-            )
-            yield ipsec_site_connection
-
-            if do_delete:
-                self._delete(
-                    'ipsec-site-connections',
-                    ipsec_site_connection[
-                        'ipsec_site_connection']['id']
-                )
-
-    def _check_ipsec_site_connection(self, ipsec_site_connection, keys, dpd):
-        self.assertEqual(
-            keys,
-            dict((k, v) for k, v
-                 in ipsec_site_connection.items()
-                 if k in keys))
-        self.assertEqual(
-            dpd,
-            dict((k, v) for k, v
-                 in ipsec_site_connection['dpd'].items()
-                 if k in dpd))
-
-    def _set_active(self, model, resource_id):
-        service_plugin = manager.NeutronManager.get_service_plugins()[
-            constants.VPN]
-        adminContext = context.get_admin_context()
-        with adminContext.session.begin(subtransactions=True):
-            resource_db = service_plugin._get_resource(
-                adminContext,
-                model,
-                resource_id)
-            resource_db.status = constants.ACTIVE
-
-
-class VPNPluginDbTestCase(VPNTestMixin,
-                          test_l3_plugin.L3NatTestCaseMixin,
-                          test_db_plugin.NeutronDbPluginV2TestCase):
-    def setUp(self, core_plugin=None, vpnaas_plugin=DB_VPN_PLUGIN_KLASS,
-              vpnaas_provider=None):
-        if not vpnaas_provider:
-            vpnaas_provider = (
-                constants.VPN +
-                ':vpnaas:neutron.services.vpn.'
-                'service_drivers.ipsec.IPsecVPNDriver:default')
-
-        cfg.CONF.set_override('service_provider',
-                              [vpnaas_provider],
-                              'service_providers')
-        # force service type manager to reload configuration:
-        sdb.ServiceTypeManager._instance = None
-
-        service_plugins = {'vpnaas_plugin': vpnaas_plugin}
-        plugin_str = ('neutron.tests.unit.db.vpn.'
-                      'test_db_vpnaas.TestVpnCorePlugin')
-
-        super(VPNPluginDbTestCase, self).setUp(
-            plugin_str,
-            service_plugins=service_plugins
-        )
-        self._subnet_id = uuidutils.generate_uuid()
-        self.core_plugin = TestVpnCorePlugin()
-        self.plugin = vpn_plugin.VPNPlugin()
-        ext_mgr = api_extensions.PluginAwareExtensionManager(
-            extensions_path,
-            {constants.CORE: self.core_plugin,
-             constants.VPN: self.plugin}
-        )
-        app = config.load_paste_app('extensions_test_app')
-        self.ext_api = api_extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
-
-
-class TestVpnaas(VPNPluginDbTestCase):
-
-    def _check_policy(self, policy, keys, lifetime):
-        for k, v in keys:
-            self.assertEqual(policy[k], v)
-        for k, v in lifetime.iteritems():
-            self.assertEqual(policy['lifetime'][k], v)
-
-    def test_create_ikepolicy(self):
-        """Test case to create an ikepolicy."""
-        name = "ikepolicy1"
-        description = 'ipsec-ikepolicy'
-        keys = [('name', name),
-                ('description', 'ipsec-ikepolicy'),
-                ('auth_algorithm', 'sha1'),
-                ('encryption_algorithm', 'aes-128'),
-                ('phase1_negotiation_mode', 'main'),
-                ('ike_version', 'v1'),
-                ('pfs', 'group5'),
-                ('tenant_id', self._tenant_id)]
-        lifetime = {
-            'units': 'seconds',
-            'value': 3600}
-        with self.ikepolicy(name=name, description=description) as ikepolicy:
-            self._check_policy(ikepolicy['ikepolicy'], keys, lifetime)
-
-    def test_delete_ikepolicy(self):
-        """Test case to delete an ikepolicy."""
-        with self.ikepolicy(do_delete=False) as ikepolicy:
-            req = self.new_delete_request('ikepolicies',
-                                          ikepolicy['ikepolicy']['id'])
-            res = req.get_response(self.ext_api)
-            self.assertEqual(res.status_int, 204)
-
-    def test_show_ikepolicy(self):
-        """Test case to show or get an ikepolicy."""
-        name = "ikepolicy1"
-        description = 'ipsec-ikepolicy'
-        keys = [('name', name),
-                ('auth_algorithm', 'sha1'),
-                ('encryption_algorithm', 'aes-128'),
-                ('phase1_negotiation_mode', 'main'),
-                ('ike_version', 'v1'),
-                ('pfs', 'group5'),
-                ('tenant_id', self._tenant_id)]
-        lifetime = {
-            'units': 'seconds',
-            'value': 3600}
-        with self.ikepolicy(name=name, description=description) as ikepolicy:
-            req = self.new_show_request('ikepolicies',
-                                        ikepolicy['ikepolicy']['id'],
-                                        fmt=self.fmt)
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            self._check_policy(res['ikepolicy'], keys, lifetime)
-
-    def test_list_ikepolicies(self):
-        """Test case to list all ikepolicies."""
-        name = "ikepolicy_list"
-        keys = [('name', name),
-                ('auth_algorithm', 'sha1'),
-                ('encryption_algorithm', 'aes-128'),
-                ('phase1_negotiation_mode', 'main'),
-                ('ike_version', 'v1'),
-                ('pfs', 'group5'),
-                ('tenant_id', self._tenant_id)]
-        lifetime = {
-            'units': 'seconds',
-            'value': 3600}
-        with self.ikepolicy(name=name) as ikepolicy:
-            keys.append(('id', ikepolicy['ikepolicy']['id']))
-            req = self.new_list_request('ikepolicies')
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            self.assertEqual(len(res), 1)
-            for k, v in keys:
-                self.assertEqual(res['ikepolicies'][0][k], v)
-            for k, v in lifetime.iteritems():
-                self.assertEqual(res['ikepolicies'][0]['lifetime'][k], v)
-
-    def test_list_ikepolicies_with_sort_emulated(self):
-        """Test case to list all ikepolicies."""
-        with contextlib.nested(self.ikepolicy(name='ikepolicy1'),
-                               self.ikepolicy(name='ikepolicy2'),
-                               self.ikepolicy(name='ikepolicy3')
-                               ) as (ikepolicy1, ikepolicy2, ikepolicy3):
-            self._test_list_with_sort('ikepolicy', (ikepolicy3,
-                                                    ikepolicy2,
-                                                    ikepolicy1),
-                                      [('name', 'desc')],
-                                      'ikepolicies')
-
-    def test_list_ikepolicies_with_pagination_emulated(self):
-        """Test case to list all ikepolicies with pagination."""
-        with contextlib.nested(self.ikepolicy(name='ikepolicy1'),
-                               self.ikepolicy(name='ikepolicy2'),
-                               self.ikepolicy(name='ikepolicy3')
-                               ) as (ikepolicy1, ikepolicy2, ikepolicy3):
-            self._test_list_with_pagination('ikepolicy',
-                                            (ikepolicy1,
-                                             ikepolicy2,
-                                             ikepolicy3),
-                                            ('name', 'asc'), 2, 2,
-                                            'ikepolicies')
-
-    def test_list_ikepolicies_with_pagination_reverse_emulated(self):
-        """Test case to list all ikepolicies with reverse pagination."""
-        with contextlib.nested(self.ikepolicy(name='ikepolicy1'),
-                               self.ikepolicy(name='ikepolicy2'),
-                               self.ikepolicy(name='ikepolicy3')
-                               ) as (ikepolicy1, ikepolicy2, ikepolicy3):
-            self._test_list_with_pagination_reverse('ikepolicy',
-                                                    (ikepolicy1,
-                                                     ikepolicy2,
-                                                     ikepolicy3),
-                                                    ('name', 'asc'), 2, 2,
-                                                    'ikepolicies')
-
-    def test_update_ikepolicy(self):
-        """Test case to update an ikepolicy."""
-        name = "new_ikepolicy1"
-        keys = [('name', name),
-                ('auth_algorithm', 'sha1'),
-                ('encryption_algorithm', 'aes-128'),
-                ('phase1_negotiation_mode', 'main'),
-                ('ike_version', 'v1'),
-                ('pfs', 'group5'),
-                ('tenant_id', self._tenant_id),
-                ('lifetime', {'units': 'seconds',
-                              'value': 60})]
-        with self.ikepolicy(name=name) as ikepolicy:
-            data = {'ikepolicy': {'name': name,
-                                  'lifetime': {'units': 'seconds',
-                                               'value': 60}}}
-            req = self.new_update_request("ikepolicies",
-                                          data,
-                                          ikepolicy['ikepolicy']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            for k, v in keys:
-                self.assertEqual(res['ikepolicy'][k], v)
-
-    def test_create_ikepolicy_with_invalid_values(self):
-        """Test case to test invalid values."""
-        name = 'ikepolicy1'
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               auth_algorithm='md5',
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               auth_algorithm=200,
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               encryption_algorithm='des',
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               encryption_algorithm=100,
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               phase1_negotiation_mode='aggressive',
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               phase1_negotiation_mode=-100,
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               ike_version='v6',
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               ike_version=500,
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               pfs='group1',
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               pfs=120,
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               lifetime_units='Megabytes',
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               lifetime_units=20000,
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               lifetime_value=-20,
-                               expected_res_status=400)
-        self._create_ikepolicy(name=name,
-                               fmt=self.fmt,
-                               lifetime_value='Megabytes',
-                               expected_res_status=400)
-
-    def test_create_ipsecpolicy(self):
-        """Test case to create an ipsecpolicy."""
-        name = "ipsecpolicy1"
-        description = 'my-ipsecpolicy'
-        keys = [('name', name),
-                ('description', 'my-ipsecpolicy'),
-                ('auth_algorithm', 'sha1'),
-                ('encryption_algorithm', 'aes-128'),
-                ('encapsulation_mode', 'tunnel'),
-                ('transform_protocol', 'esp'),
-                ('pfs', 'group5'),
-                ('tenant_id', self._tenant_id)]
-        lifetime = {
-            'units': 'seconds',
-            'value': 3600}
-        with self.ipsecpolicy(name=name,
-                              description=description) as ipsecpolicy:
-            self._check_policy(ipsecpolicy['ipsecpolicy'], keys, lifetime)
-
-    def test_delete_ipsecpolicy(self):
-        """Test case to delete an ipsecpolicy."""
-        with self.ipsecpolicy(do_delete=False) as ipsecpolicy:
-            req = self.new_delete_request('ipsecpolicies',
-                                          ipsecpolicy['ipsecpolicy']['id'])
-            res = req.get_response(self.ext_api)
-            self.assertEqual(res.status_int, 204)
-
-    def test_show_ipsecpolicy(self):
-        """Test case to show or get an ipsecpolicy."""
-        name = "ipsecpolicy1"
-        keys = [('name', name),
-                ('auth_algorithm', 'sha1'),
-                ('encryption_algorithm', 'aes-128'),
-                ('encapsulation_mode', 'tunnel'),
-                ('transform_protocol', 'esp'),
-                ('pfs', 'group5'),
-                ('tenant_id', self._tenant_id)]
-        lifetime = {
-            'units': 'seconds',
-            'value': 3600}
-        with self.ipsecpolicy(name=name) as ipsecpolicy:
-            req = self.new_show_request('ipsecpolicies',
-                                        ipsecpolicy['ipsecpolicy']['id'],
-                                        fmt=self.fmt)
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            self._check_policy(res['ipsecpolicy'], keys, lifetime)
-
-    def test_list_ipsecpolicies(self):
-        """Test case to list all ipsecpolicies."""
-        name = "ipsecpolicy_list"
-        keys = [('name', name),
-                ('auth_algorithm', 'sha1'),
-                ('encryption_algorithm', 'aes-128'),
-                ('encapsulation_mode', 'tunnel'),
-                ('transform_protocol', 'esp'),
-                ('pfs', 'group5'),
-                ('tenant_id', self._tenant_id)]
-        lifetime = {
-            'units': 'seconds',
-            'value': 3600}
-        with self.ipsecpolicy(name=name) as ipsecpolicy:
-            keys.append(('id', ipsecpolicy['ipsecpolicy']['id']))
-            req = self.new_list_request('ipsecpolicies')
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            self.assertEqual(len(res), 1)
-            self._check_policy(res['ipsecpolicies'][0], keys, lifetime)
-
-    def test_list_ipsecpolicies_with_sort_emulated(self):
-        """Test case to list all ipsecpolicies."""
-        with contextlib.nested(self.ipsecpolicy(name='ipsecpolicy1'),
-                               self.ipsecpolicy(name='ipsecpolicy2'),
-                               self.ipsecpolicy(name='ipsecpolicy3')
-                               ) as (ipsecpolicy1, ipsecpolicy2, ipsecpolicy3):
-            self._test_list_with_sort('ipsecpolicy', (ipsecpolicy3,
-                                                      ipsecpolicy2,
-                                                      ipsecpolicy1),
-                                      [('name', 'desc')],
-                                      'ipsecpolicies')
-
-    def test_list_ipsecpolicies_with_pagination_emulated(self):
-        """Test case to list all ipsecpolicies with pagination."""
-        with contextlib.nested(self.ipsecpolicy(name='ipsecpolicy1'),
-                               self.ipsecpolicy(name='ipsecpolicy2'),
-                               self.ipsecpolicy(name='ipsecpolicy3')
-                               ) as (ipsecpolicy1, ipsecpolicy2, ipsecpolicy3):
-            self._test_list_with_pagination('ipsecpolicy',
-                                            (ipsecpolicy1,
-                                             ipsecpolicy2,
-                                             ipsecpolicy3),
-                                            ('name', 'asc'), 2, 2,
-                                            'ipsecpolicies')
-
-    def test_list_ipsecpolicies_with_pagination_reverse_emulated(self):
-        """Test case to list all ipsecpolicies with reverse pagination."""
-        with contextlib.nested(self.ipsecpolicy(name='ipsecpolicy1'),
-                               self.ipsecpolicy(name='ipsecpolicy2'),
-                               self.ipsecpolicy(name='ipsecpolicy3')
-                               ) as (ipsecpolicy1, ipsecpolicy2, ipsecpolicy3):
-            self._test_list_with_pagination_reverse('ipsecpolicy',
-                                                    (ipsecpolicy1,
-                                                     ipsecpolicy2,
-                                                     ipsecpolicy3),
-                                                    ('name', 'asc'), 2, 2,
-                                                    'ipsecpolicies')
-
-    def test_update_ipsecpolicy(self):
-        """Test case to update an ipsecpolicy."""
-        name = "new_ipsecpolicy1"
-        keys = [('name', name),
-                ('auth_algorithm', 'sha1'),
-                ('encryption_algorithm', 'aes-128'),
-                ('encapsulation_mode', 'tunnel'),
-                ('transform_protocol', 'esp'),
-                ('pfs', 'group5'),
-                ('tenant_id', self._tenant_id),
-                ('lifetime', {'units': 'seconds',
-                              'value': 60})]
-        with self.ipsecpolicy(name=name) as ipsecpolicy:
-            data = {'ipsecpolicy': {'name': name,
-                                    'lifetime': {'units': 'seconds',
-                                                 'value': 60}}}
-            req = self.new_update_request("ipsecpolicies",
-                                          data,
-                                          ipsecpolicy['ipsecpolicy']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            for k, v in keys:
-                self.assertEqual(res['ipsecpolicy'][k], v)
-
-    def test_update_ipsecpolicy_lifetime(self):
-        with self.ipsecpolicy() as ipsecpolicy:
-            data = {'ipsecpolicy': {'lifetime': {'units': 'seconds'}}}
-            req = self.new_update_request("ipsecpolicies",
-                                          data,
-                                          ipsecpolicy['ipsecpolicy']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            self.assertEqual(res['ipsecpolicy']['lifetime']['units'],
-                             'seconds')
-
-            data = {'ipsecpolicy': {'lifetime': {'value': 60}}}
-            req = self.new_update_request("ipsecpolicies",
-                                          data,
-                                          ipsecpolicy['ipsecpolicy']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            self.assertEqual(res['ipsecpolicy']['lifetime']['value'], 60)
-
-    def test_create_ipsecpolicy_with_invalid_values(self):
-        """Test case to test invalid values."""
-        name = 'ipsecpolicy1'
-
-        self._create_ipsecpolicy(
-            fmt=self.fmt,
-            name=name, auth_algorithm='md5', expected_res_status=400)
-        self._create_ipsecpolicy(
-            fmt=self.fmt,
-            name=name, auth_algorithm=100, expected_res_status=400)
-
-        self._create_ipsecpolicy(
-            fmt=self.fmt,
-            name=name, encryption_algorithm='des', expected_res_status=400)
-        self._create_ipsecpolicy(
-            fmt=self.fmt,
-            name=name, encryption_algorithm=200, expected_res_status=400)
-
-        self._create_ipsecpolicy(
-            fmt=self.fmt,
-            name=name, transform_protocol='abcd', expected_res_status=400)
-        self._create_ipsecpolicy(
-            fmt=self.fmt,
-            name=name, transform_protocol=500, expected_res_status=400)
-
-        self._create_ipsecpolicy(
-            fmt=self.fmt,
-            name=name,
-            encapsulation_mode='unsupported', expected_res_status=400)
-        self._create_ipsecpolicy(name=name,
-                                 fmt=self.fmt,
-                                 encapsulation_mode=100,
-                                 expected_res_status=400)
-
-        self._create_ipsecpolicy(name=name,
-                                 fmt=self.fmt,
-                                 pfs='group9', expected_res_status=400)
-        self._create_ipsecpolicy(
-            fmt=self.fmt, name=name, pfs=-1, expected_res_status=400)
-
-        self._create_ipsecpolicy(
-            fmt=self.fmt, name=name, lifetime_units='minutes',
-            expected_res_status=400)
-
-        self._create_ipsecpolicy(fmt=self.fmt, name=name, lifetime_units=100,
-                                 expected_res_status=400)
-
-        self._create_ipsecpolicy(fmt=self.fmt, name=name,
-                                 lifetime_value=-800, expected_res_status=400)
-        self._create_ipsecpolicy(fmt=self.fmt, name=name,
-                                 lifetime_value='Megabytes',
-                                 expected_res_status=400)
-
-    def test_create_vpnservice(self, **extras):
-        """Test case to create a vpnservice."""
-        description = 'my-vpn-service'
-        expected = {'name': 'vpnservice1',
-                    'description': 'my-vpn-service',
-                    'admin_state_up': True,
-                    'status': 'PENDING_CREATE',
-                    'tenant_id': self._tenant_id, }
-
-        expected.update(extras)
-        with self.subnet(cidr='10.2.0.0/24') as subnet:
-            with self.router() as router:
-                expected['router_id'] = router['router']['id']
-                expected['subnet_id'] = subnet['subnet']['id']
-                name = expected['name']
-                with self.vpnservice(name=name,
-                                     subnet=subnet,
-                                     router=router,
-                                     description=description,
-                                     **extras) as vpnservice:
-                    self.assertEqual(dict((k, v) for k, v in
-                                          vpnservice['vpnservice'].items()
-                                          if k in expected),
-                                     expected)
-
-    def test_delete_router_interface_in_use_by_vpnservice(self):
-        """Test delete router interface in use by vpn service."""
-        with self.subnet(cidr='10.2.0.0/24') as subnet:
-            with self.router() as router:
-                with self.vpnservice(subnet=subnet,
-                                     router=router):
-                    self._router_interface_action('remove',
-                                                  router['router']['id'],
-                                                  subnet['subnet']['id'],
-                                                  None,
-                                                  expected_code=webob.exc.
-                                                  HTTPConflict.code)
-
-    def test_delete_external_gateway_interface_in_use_by_vpnservice(self):
-        """Test delete external gateway interface in use by vpn service."""
-        with self.subnet(cidr='10.2.0.0/24') as subnet:
-            with self.router() as router:
-                with self.subnet(cidr='11.0.0.0/24') as public_sub:
-                    self._set_net_external(
-                        public_sub['subnet']['network_id'])
-                    self._add_external_gateway_to_router(
-                        router['router']['id'],
-                        public_sub['subnet']['network_id'])
-                    with self.vpnservice(subnet=subnet,
-                                         router=router):
-                        self._remove_external_gateway_from_router(
-                            router['router']['id'],
-                            public_sub['subnet']['network_id'],
-                            expected_code=webob.exc.HTTPConflict.code)
-
-    def test_router_update_after_ipsec_site_connection(self):
-        """Test case to update router after vpn connection."""
-        rname1 = "router_one"
-        rname2 = "router_two"
-        with self.subnet(cidr='10.2.0.0/24') as subnet:
-            with self.router(name=rname1) as r:
-                with self.vpnservice(subnet=subnet,
-                                     router=r
-                                     ) as vpnservice:
-                    self.ipsec_site_connection(
-                        name='connection1', vpnservice=vpnservice
-                    )
-                    body = self._show('routers', r['router']['id'])
-                    self.assertEqual(body['router']['name'], rname1)
-                    body = self._update('routers', r['router']['id'],
-                                        {'router': {'name': rname2}})
-                    body = self._show('routers', r['router']['id'])
-                    self.assertEqual(body['router']['name'], rname2)
-
-    def test_update_vpnservice(self):
-        """Test case to update a vpnservice."""
-        name = 'new_vpnservice1'
-        keys = [('name', name)]
-        with contextlib.nested(
-            self.subnet(cidr='10.2.0.0/24'),
-            self.router()) as (subnet, router):
-            with self.vpnservice(name=name,
-                                 subnet=subnet,
-                                 router=router) as vpnservice:
-                keys.append(('subnet_id',
-                             vpnservice['vpnservice']['subnet_id']))
-                keys.append(('router_id',
-                             vpnservice['vpnservice']['router_id']))
-                data = {'vpnservice': {'name': name}}
-                self._set_active(vpn_db.VPNService,
-                                 vpnservice['vpnservice']['id'])
-                req = self.new_update_request(
-                    'vpnservices',
-                    data,
-                    vpnservice['vpnservice']['id'])
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                for k, v in keys:
-                    self.assertEqual(res['vpnservice'][k], v)
-
-    def test_update_vpnservice_with_invalid_state(self):
-        """Test case to update a vpnservice in invalid state ."""
-        name = 'new_vpnservice1'
-        keys = [('name', name)]
-        with contextlib.nested(
-            self.subnet(cidr='10.2.0.0/24'),
-            self.router()) as (subnet, router):
-            with self.vpnservice(name=name,
-                                 subnet=subnet,
-                                 router=router) as vpnservice:
-                keys.append(('subnet_id',
-                             vpnservice['vpnservice']['subnet_id']))
-                keys.append(('router_id',
-                             vpnservice['vpnservice']['router_id']))
-                data = {'vpnservice': {'name': name}}
-                req = self.new_update_request(
-                    'vpnservices',
-                    data,
-                    vpnservice['vpnservice']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(400, res.status_int)
-                res = self.deserialize(self.fmt, res)
-                self.assertIn(vpnservice['vpnservice']['id'],
-                              res['NeutronError']['message'])
-
-    def test_delete_vpnservice(self):
-        """Test case to delete a vpnservice."""
-        with self.vpnservice(name='vpnserver',
-                             do_delete=False) as vpnservice:
-            req = self.new_delete_request('vpnservices',
-                                          vpnservice['vpnservice']['id'])
-            res = req.get_response(self.ext_api)
-            self.assertEqual(res.status_int, 204)
-
-    def test_show_vpnservice(self):
-        """Test case to show or get a vpnservice."""
-        name = "vpnservice1"
-        keys = [('name', name),
-                ('description', ''),
-                ('admin_state_up', True),
-                ('status', 'PENDING_CREATE')]
-        with self.vpnservice(name=name) as vpnservice:
-            req = self.new_show_request('vpnservices',
-                                        vpnservice['vpnservice']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            for k, v in keys:
-                self.assertEqual(res['vpnservice'][k], v)
-
-    def test_list_vpnservices(self):
-        """Test case to list all vpnservices."""
-        name = "vpnservice_list"
-        keys = [('name', name),
-                ('description', ''),
-                ('admin_state_up', True),
-                ('status', 'PENDING_CREATE')]
-        with self.vpnservice(name=name) as vpnservice:
-            keys.append(('subnet_id', vpnservice['vpnservice']['subnet_id']))
-            keys.append(('router_id', vpnservice['vpnservice']['router_id']))
-            req = self.new_list_request('vpnservices')
-            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
-            self.assertEqual(len(res), 1)
-            for k, v in keys:
-                self.assertEqual(res['vpnservices'][0][k], v)
-
-    def test_list_vpnservices_with_sort_emulated(self):
-        """Test case to list all vpnservices with sorting."""
-        with self.subnet() as subnet:
-            with self.router() as router:
-                with contextlib.nested(
-                    self.vpnservice(name='vpnservice1',
-                                    subnet=subnet,
-                                    router=router,
-                                    external_subnet_cidr='192.168.10.0/24',),
-                    self.vpnservice(name='vpnservice2',
-                                    subnet=subnet,
-                                    router=router,
-                                    plug_subnet=False,
-                                    external_router=False,
-                                    external_subnet_cidr='192.168.11.0/24',),
-                    self.vpnservice(name='vpnservice3',
-                                    subnet=subnet,
-                                    router=router,
-                                    plug_subnet=False,
-                                    external_router=False,
-                                    external_subnet_cidr='192.168.13.0/24',)
-                ) as (vpnservice1, vpnservice2, vpnservice3):
-                    self._test_list_with_sort('vpnservice', (vpnservice3,
-                                                             vpnservice2,
-                                                             vpnservice1),
-                                              [('name', 'desc')])
-
-    def test_list_vpnservice_with_pagination_emulated(self):
-        """Test case to list all vpnservices with pagination."""
-        with self.subnet() as subnet:
-            with self.router() as router:
-                with contextlib.nested(
-                    self.vpnservice(name='vpnservice1',
-                                    subnet=subnet,
-                                    router=router,
-                                    external_subnet_cidr='192.168.10.0/24'),
-                    self.vpnservice(name='vpnservice2',
-                                    subnet=subnet,
-                                    router=router,
-                                    plug_subnet=False,
-                                    external_subnet_cidr='192.168.20.0/24',
-                                    external_router=False),
-                    self.vpnservice(name='vpnservice3',
-                                    subnet=subnet,
-                                    router=router,
-                                    plug_subnet=False,
-                                    external_subnet_cidr='192.168.30.0/24',
-                                    external_router=False)
-                ) as (vpnservice1, vpnservice2, vpnservice3):
-                    self._test_list_with_pagination('vpnservice',
-                                                    (vpnservice1,
-                                                     vpnservice2,
-                                                     vpnservice3),
-                                                    ('name', 'asc'), 2, 2)
-
-    def test_list_vpnservice_with_pagination_reverse_emulated(self):
-        """Test case to list all vpnservices with reverse pagination."""
-        with self.subnet() as subnet:
-            with self.router() as router:
-                with contextlib.nested(
-                    self.vpnservice(name='vpnservice1',
-                                    subnet=subnet,
-                                    router=router,
-                                    external_subnet_cidr='192.168.10.0/24'),
-                    self.vpnservice(name='vpnservice2',
-                                    subnet=subnet,
-                                    router=router,
-                                    plug_subnet=False,
-                                    external_subnet_cidr='192.168.11.0/24',
-                                    external_router=False),
-                    self.vpnservice(name='vpnservice3',
-                                    subnet=subnet,
-                                    router=router,
-                                    plug_subnet=False,
-                                    external_subnet_cidr='192.168.12.0/24',
-                                    external_router=False)
-                ) as (vpnservice1, vpnservice2, vpnservice3):
-                    self._test_list_with_pagination_reverse('vpnservice',
-                                                            (vpnservice1,
-                                                             vpnservice2,
-                                                             vpnservice3),
-                                                            ('name', 'asc'),
-                                                            2, 2)
-
-    def test_create_ipsec_site_connection_with_invalid_values(self):
-        """Test case to create an ipsec_site_connection with invalid values."""
-        name = 'connection1'
-        self._create_ipsec_site_connection(
-            fmt=self.fmt,
-            name=name, peer_cidrs='myname', expected_res_status=400)
-        self._create_ipsec_site_connection(
-            fmt=self.fmt,
-            name=name, mtu=-100, expected_res_status=400)
-        self._create_ipsec_site_connection(
-            fmt=self.fmt,
-            name=name, dpd_action='unsupported', expected_res_status=400)
-        self._create_ipsec_site_connection(
-            fmt=self.fmt,
-            name=name, dpd_interval=-1, expected_res_status=400)
-        self._create_ipsec_site_connection(
-            fmt=self.fmt,
-            name=name, dpd_timeout=-200, expected_res_status=400)
-        self._create_ipsec_site_connection(
-            fmt=self.fmt,
-            name=name, initiator='unsupported', expected_res_status=400)
-
-    def _test_create_ipsec_site_connection(self, key_overrides=None,
-                                           setup_overrides=None,
-                                           expected_status_int=200):
-        """Create ipsec_site_connection and check results."""
-        params = {'ikename': 'ikepolicy1',
-                  'ipsecname': 'ipsecpolicy1',
-                  'vpnsname': 'vpnservice1',
-                  'subnet_cidr': '10.2.0.0/24',
-                  'subnet_version': 4}
-        if setup_overrides is not None:
-            params.update(setup_overrides)
-        keys = {'name': 'connection1',
-                'description': 'my-ipsec-connection',
-                'peer_address': '192.168.1.10',
-                'peer_id': '192.168.1.10',
-                'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
-                'initiator': 'bi-directional',
-                'mtu': 1500,
-                'tenant_id': self._tenant_id,
-                'psk': 'abcd',
-                'status': 'PENDING_CREATE',
-                'admin_state_up': True}
-        if key_overrides is not None:
-            keys.update(key_overrides)
-        dpd = {'action': 'hold',
-               'interval': 40,
-               'timeout': 120}
-        with contextlib.nested(
-            self.ikepolicy(name=params['ikename']),
-            self.ipsecpolicy(name=params['ipsecname']),
-            self.subnet(cidr=params['subnet_cidr'],
-                        ip_version=params['subnet_version']),
-            self.router()) as (
-                ikepolicy, ipsecpolicy, subnet, router):
-                with self.vpnservice(name=params['vpnsname'], subnet=subnet,
-                                     router=router) as vpnservice1:
-                    keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
-                    keys['ipsecpolicy_id'] = (
-                        ipsecpolicy['ipsecpolicy']['id']
-                    )
-                    keys['vpnservice_id'] = (
-                        vpnservice1['vpnservice']['id']
-                    )
-                    try:
-                        with self.ipsec_site_connection(
-                                self.fmt,
-                                keys['name'],
-                                keys['peer_address'],
-                                keys['peer_id'],
-                                keys['peer_cidrs'],
-                                keys['mtu'],
-                                keys['psk'],
-                                keys['initiator'],
-                                dpd['action'],
-                                dpd['interval'],
-                                dpd['timeout'],
-                                vpnservice1,
-                                ikepolicy,
-                                ipsecpolicy,
-                                keys['admin_state_up'],
-                                description=keys['description']
-                        ) as ipsec_site_connection:
-                            if expected_status_int != 200:
-                                self.fail("Expected failure on create")
-                            self._check_ipsec_site_connection(
-                                ipsec_site_connection['ipsec_site_connection'],
-                                keys,
-                                dpd)
-                    except webob.exc.HTTPClientError as ce:
-                        self.assertEqual(ce.code, expected_status_int)
-        self._delete('subnets', subnet['subnet']['id'])
-
-    def test_create_ipsec_site_connection(self, **extras):
-        """Test case to create an ipsec_site_connection."""
-        self._test_create_ipsec_site_connection(key_overrides=extras)
-
-    def test_delete_ipsec_site_connection(self):
-        """Test case to delete a ipsec_site_connection."""
-        with self.ipsec_site_connection(
-                do_delete=False) as ipsec_site_connection:
-            req = self.new_delete_request(
-                'ipsec-site-connections',
-                ipsec_site_connection['ipsec_site_connection']['id']
-            )
-            res = req.get_response(self.ext_api)
-            self.assertEqual(res.status_int, 204)
-
-    def test_update_ipsec_site_connection(self):
-        """Test case for valid updates to IPSec site connection."""
-        dpd = {'action': 'hold',
-               'interval': 40,
-               'timeout': 120}
-        self._test_update_ipsec_site_connection(update={'dpd': dpd})
-        self._test_update_ipsec_site_connection(update={'mtu': 2000})
-        ipv6_settings = {
-            'peer_address': 'fe80::c0a8:10a',
-            'peer_id': 'fe80::c0a8:10a',
-            'peer_cidrs': ['fe80::c0a8:200/120', 'fe80::c0a8:300/120'],
-            'subnet_cidr': 'fe80::a02:0/120',
-            'subnet_version': 6}
-        self._test_update_ipsec_site_connection(update={'mtu': 2000},
-                                                overrides=ipv6_settings)
-
-    def test_update_ipsec_site_connection_with_invalid_state(self):
-        """Test updating an ipsec_site_connection in invalid state."""
-        self._test_update_ipsec_site_connection(
-            overrides={'make_active': False},
-            expected_status_int=400)
-
-    def test_update_ipsec_site_connection_peer_cidrs(self):
-        """Test updating an ipsec_site_connection for peer_cidrs."""
-        new_peers = {'peer_cidrs': ['192.168.4.0/24',
-                                    '192.168.5.0/24']}
-        self._test_update_ipsec_site_connection(
-            update=new_peers)
-
-    def _test_update_ipsec_site_connection(self,
-                                           update={'name': 'new name'},
-                                           overrides=None,
-                                           expected_status_int=200):
-        """Creates and then updates ipsec_site_connection."""
-        keys = {'name': 'new_ipsec_site_connection',
-                'ikename': 'ikepolicy1',
-                'ipsecname': 'ipsecpolicy1',
-                'vpnsname': 'vpnservice1',
-                'description': 'my-ipsec-connection',
-                'peer_address': '192.168.1.10',
-                'peer_id': '192.168.1.10',
-                'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
-                'initiator': 'bi-directional',
-                'mtu': 1500,
-                'tenant_id': self._tenant_id,
-                'psk': 'abcd',
-                'status': 'ACTIVE',
-                'admin_state_up': True,
-                'action': 'hold',
-                'interval': 40,
-                'timeout': 120,
-                'subnet_cidr': '10.2.0.0/24',
-                'subnet_version': 4,
-                'make_active': True}
-        if overrides is not None:
-            keys.update(overrides)
-
-        with contextlib.nested(
-                self.ikepolicy(name=keys['ikename']),
-                self.ipsecpolicy(name=keys['ipsecname']),
-                self.subnet(cidr=keys['subnet_cidr'],
-                            ip_version=keys['subnet_version']),
-                self.router()) as (
-                    ikepolicy, ipsecpolicy, subnet, router):
-            with self.vpnservice(name=keys['vpnsname'], subnet=subnet,
-                                 router=router) as vpnservice1:
-                keys['vpnservice_id'] = vpnservice1['vpnservice']['id']
-                keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
-                keys['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id']
-                with self.ipsec_site_connection(
-                    self.fmt,
-                    keys['name'],
-                    keys['peer_address'],
-                    keys['peer_id'],
-                    keys['peer_cidrs'],
-                    keys['mtu'],
-                    keys['psk'],
-                    keys['initiator'],
-                    keys['action'],
-                    keys['interval'],
-                    keys['timeout'],
-                    vpnservice1,
-                    ikepolicy,
-                    ipsecpolicy,
-                    keys['admin_state_up'],
-                    description=keys['description']
-                ) as ipsec_site_connection:
-                    data = {'ipsec_site_connection': update}
-                    if keys.get('make_active', None):
-                        self._set_active(
-                            vpn_db.IPsecSiteConnection,
-                            (ipsec_site_connection['ipsec_site_connection']
-                             ['id']))
-                    req = self.new_update_request(
-                        'ipsec-site-connections',
-                        data,
-                        ipsec_site_connection['ipsec_site_connection']['id'])
-                    res = req.get_response(self.ext_api)
-                    self.assertEqual(expected_status_int, res.status_int)
-                    if expected_status_int == 200:
-                        res_dict = self.deserialize(self.fmt, res)
-                        actual = res_dict['ipsec_site_connection']
-                        for k, v in update.items():
-                            # Sort lists before checking equality
-                            if isinstance(actual[k], list):
-                                self.assertEqual(v, sorted(actual[k]))
-                            else:
-                                self.assertEqual(v, actual[k])
-        self._delete('networks', subnet['subnet']['network_id'])
-
-    def test_show_ipsec_site_connection(self):
-        """Test case to show a ipsec_site_connection."""
-        ikename = "ikepolicy1"
-        ipsecname = "ipsecpolicy1"
-        vpnsname = "vpnservice1"
-        name = "connection1"
-        description = "my-ipsec-connection"
-        keys = {'name': name,
-                'description': "my-ipsec-connection",
-                'peer_address': '192.168.1.10',
-                'peer_id': '192.168.1.10',
-                'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
-                'initiator': 'bi-directional',
-                'mtu': 1500,
-                'tenant_id': self._tenant_id,
-                'psk': 'abcd',
-                'status': 'PENDING_CREATE',
-                'admin_state_up': True}
-        dpd = {'action': 'hold',
-               'interval': 40,
-               'timeout': 120}
-        with contextlib.nested(
-            self.ikepolicy(name=ikename),
-            self.ipsecpolicy(name=ipsecname),
-            self.subnet(),
-            self.router()) as (
-                ikepolicy, ipsecpolicy, subnet, router):
-            with self.vpnservice(name=vpnsname, subnet=subnet,
-                                 router=router) as vpnservice1:
-                keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
-                keys['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id']
-                keys['vpnservice_id'] = vpnservice1['vpnservice']['id']
-                with self.ipsec_site_connection(
-                    self.fmt,
-                    name,
-                    keys['peer_address'],
-                    keys['peer_id'],
-                    keys['peer_cidrs'],
-                    keys['mtu'],
-                    keys['psk'],
-                    keys['initiator'],
-                    dpd['action'],
-                    dpd['interval'],
-                    dpd['timeout'],
-                    vpnservice1,
-                    ikepolicy,
-                    ipsecpolicy,
-                    keys['admin_state_up'],
-                    description=description,
-                ) as ipsec_site_connection:
-
-                    req = self.new_show_request(
-                        'ipsec-site-connections',
-                        ipsec_site_connection[
-                            'ipsec_site_connection']['id'],
-                        fmt=self.fmt
-                    )
-                    res = self.deserialize(
-                        self.fmt,
-                        req.get_response(self.ext_api)
-                    )
-
-                    self._check_ipsec_site_connection(
-                        res['ipsec_site_connection'],
-                        keys,
-                        dpd)
-
-    def test_list_ipsec_site_connections_with_sort_emulated(self):
-        """Test case to list all ipsec_site_connections with sort."""
-        with self.subnet(cidr='10.2.0.0/24') as subnet:
-            with self.router() as router:
-                with self.vpnservice(subnet=subnet,
-                                     router=router
-                                     ) as vpnservice:
-                    with contextlib.nested(
-                        self.ipsec_site_connection(
-                            name='connection1', vpnservice=vpnservice
-                        ),
-                        self.ipsec_site_connection(
-                            name='connection2', vpnservice=vpnservice
-                        ),
-                        self.ipsec_site_connection(
-                            name='connection3', vpnservice=vpnservice
-                        )
-                    ) as(ipsec_site_connection1,
-                         ipsec_site_connection2,
-                         ipsec_site_connection3):
-                        self._test_list_with_sort('ipsec-site-connection',
-                                                  (ipsec_site_connection3,
-                                                   ipsec_site_connection2,
-                                                   ipsec_site_connection1),
-                                                  [('name', 'desc')])
-
-    def test_list_ipsec_site_connections_with_pagination_emulated(self):
-        """Test case to list all ipsec_site_connections with pagination."""
-        with self.subnet(cidr='10.2.0.0/24') as subnet:
-            with self.router() as router:
-                with self.vpnservice(subnet=subnet,
-                                     router=router
-                                     ) as vpnservice:
-                    with contextlib.nested(
-                        self.ipsec_site_connection(
-                            name='ipsec_site_connection1',
-                            vpnservice=vpnservice
-                        ),
-                        self.ipsec_site_connection(
-                            name='ipsec_site_connection2',
-                            vpnservice=vpnservice
-                        ),
-                        self.ipsec_site_connection(
-                            name='ipsec_site_connection3',
-                            vpnservice=vpnservice
-                        )
-                    ) as(ipsec_site_connection1,
-                         ipsec_site_connection2,
-                         ipsec_site_connection3):
-                        self._test_list_with_pagination(
-                            'ipsec-site-connection',
-                            (ipsec_site_connection1,
-                             ipsec_site_connection2,
-                             ipsec_site_connection3),
-                            ('name', 'asc'), 2, 2)
-
-    def test_list_ipsec_site_conns_with_pagination_reverse_emulated(self):
-        """Test to list all ipsec_site_connections with reverse pagination."""
-        with self.subnet(cidr='10.2.0.0/24') as subnet:
-            with self.router() as router:
-                with self.vpnservice(subnet=subnet,
-                                     router=router
-                                     ) as vpnservice:
-                    with contextlib.nested(
-                        self.ipsec_site_connection(
-                            name='connection1', vpnservice=vpnservice
-                        ),
-                        self.ipsec_site_connection(
-                            name='connection2', vpnservice=vpnservice
-                        ),
-                        self.ipsec_site_connection(
-                            name='connection3', vpnservice=vpnservice
-                        )
-                    ) as(ipsec_site_connection1,
-                         ipsec_site_connection2,
-                         ipsec_site_connection3):
-                        self._test_list_with_pagination_reverse(
-                            'ipsec-site-connection',
-                            (ipsec_site_connection1,
-                             ipsec_site_connection2,
-                             ipsec_site_connection3),
-                            ('name', 'asc'), 2, 2
-                        )
-
-    def test_create_vpn(self):
-        """Test case to create a vpn."""
-        vpns_name = "vpnservice1"
-        ike_name = "ikepolicy1"
-        ipsec_name = "ipsecpolicy1"
-        name1 = "ipsec_site_connection1"
-        with contextlib.nested(
-            self.ikepolicy(name=ike_name),
-            self.ipsecpolicy(name=ipsec_name),
-            self.vpnservice(name=vpns_name)) as (
-                ikepolicy, ipsecpolicy, vpnservice):
-            vpnservice_id = vpnservice['vpnservice']['id']
-            ikepolicy_id = ikepolicy['ikepolicy']['id']
-            ipsecpolicy_id = ipsecpolicy['ipsecpolicy']['id']
-            with self.ipsec_site_connection(
-                self.fmt,
-                name1,
-                '192.168.1.10',
-                '192.168.1.10',
-                ['192.168.2.0/24',
-                 '192.168.3.0/24'],
-                1500,
-                'abcdef',
-                'bi-directional',
-                'hold',
-                30,
-                120,
-                vpnservice,
-                ikepolicy,
-                ipsecpolicy,
-                True
-            ) as vpnconn1:
-
-                vpnservice_req = self.new_show_request(
-                    'vpnservices',
-                    vpnservice_id,
-                    fmt=self.fmt)
-                vpnservice_updated = self.deserialize(
-                    self.fmt,
-                    vpnservice_req.get_response(self.ext_api)
-                )
-                self.assertEqual(
-                    vpnservice_updated['vpnservice']['id'],
-                    vpnconn1['ipsec_site_connection']['vpnservice_id']
-                )
-                ikepolicy_req = self.new_show_request('ikepolicies',
-                                                      ikepolicy_id,
-                                                      fmt=self.fmt)
-                ikepolicy_res = self.deserialize(
-                    self.fmt,
-                    ikepolicy_req.get_response(self.ext_api)
-                )
-                self.assertEqual(
-                    ikepolicy_res['ikepolicy']['id'],
-                    vpnconn1['ipsec_site_connection']['ikepolicy_id'])
-                ipsecpolicy_req = self.new_show_request(
-                    'ipsecpolicies',
-                    ipsecpolicy_id,
-                    fmt=self.fmt)
-                ipsecpolicy_res = self.deserialize(
-                    self.fmt,
-                    ipsecpolicy_req.get_response(self.ext_api)
-                )
-                self.assertEqual(
-                    ipsecpolicy_res['ipsecpolicy']['id'],
-                    vpnconn1['ipsec_site_connection']['ipsecpolicy_id']
-                )
-
-    def test_delete_ikepolicy_inuse(self):
-        """Test case to delete an ikepolicy, that is in use."""
-        vpns_name = "vpnservice1"
-        ike_name = "ikepolicy1"
-        ipsec_name = "ipsecpolicy1"
-        name1 = "ipsec_site_connection1"
-        with self.ikepolicy(name=ike_name) as ikepolicy:
-            with self.ipsecpolicy(name=ipsec_name) as ipsecpolicy:
-                with self.vpnservice(name=vpns_name) as vpnservice:
-                    with self.ipsec_site_connection(
-                        self.fmt,
-                        name1,
-                        '192.168.1.10',
-                        '192.168.1.10',
-                        ['192.168.2.0/24',
-                         '192.168.3.0/24'],
-                        1500,
-                        'abcdef',
-                        'bi-directional',
-                        'hold',
-                        30,
-                        120,
-                        vpnservice,
-                        ikepolicy,
-                        ipsecpolicy,
-                        True
-                    ):
-                        delete_req = self.new_delete_request(
-                            'ikepolicies',
-                            ikepolicy['ikepolicy']['id']
-                        )
-                        delete_res = delete_req.get_response(self.ext_api)
-                        self.assertEqual(409, delete_res.status_int)
-
-    def test_delete_ipsecpolicy_inuse(self):
-        """Test case to delete an ipsecpolicy, that is in use."""
-        vpns_name = "vpnservice1"
-        ike_name = "ikepolicy1"
-        ipsec_name = "ipsecpolicy1"
-        name1 = "ipsec_site_connection1"
-        with self.ikepolicy(name=ike_name) as ikepolicy:
-            with self.ipsecpolicy(name=ipsec_name) as ipsecpolicy:
-                with self.vpnservice(name=vpns_name) as vpnservice:
-                    with self.ipsec_site_connection(
-                        self.fmt,
-                        name1,
-                        '192.168.1.10',
-                        '192.168.1.10',
-                        ['192.168.2.0/24',
-                         '192.168.3.0/24'],
-                        1500,
-                        'abcdef',
-                        'bi-directional',
-                        'hold',
-                        30,
-                        120,
-                        vpnservice,
-                        ikepolicy,
-                        ipsecpolicy,
-                        True
-                    ):
-
-                        delete_req = self.new_delete_request(
-                            'ipsecpolicies',
-                            ipsecpolicy['ipsecpolicy']['id']
-                        )
-                        delete_res = delete_req.get_response(self.ext_api)
-                        self.assertEqual(409, delete_res.status_int)
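
Note on the removed VPNaaS tests: they build their fixtures through contextlib.nested(), which exists only on Python 2. Below is a minimal sketch of the same "open several fixtures at once" pattern with contextlib.ExitStack, using hypothetical fixture names rather than the actual test mixin helpers.

import contextlib


@contextlib.contextmanager
def fake_resource(name):
    # Stands in for fixtures such as self.ikepolicy(), self.ipsecpolicy(),
    # self.subnet() and self.router() used by the deleted tests.
    yield {'name': name, 'id': 'fake-%s-id' % name}


def open_vpn_fixtures():
    # Enter several fixtures at once; ExitStack replaces contextlib.nested().
    with contextlib.ExitStack() as stack:
        ikepolicy = stack.enter_context(fake_resource('ikepolicy1'))
        ipsecpolicy = stack.enter_context(fake_resource('ipsecpolicy1'))
        subnet = stack.enter_context(fake_resource('subnet'))
        router = stack.enter_context(fake_resource('router'))
        # The vpnservice and ipsec_site_connection would be created here,
        # exactly where the nested "as (...)" unpacking used to begin.
        return [r['name'] for r in (ikepolicy, ipsecpolicy, subnet, router)]


if __name__ == '__main__':
    print(open_vpn_fixtures())

Each enter_context() call corresponds to one element of the old "as (ikepolicy, ipsecpolicy, subnet, router)" tuple, so the test bodies themselves need not change when the fixtures move into the split-out repo.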
diff --git a/neutron/tests/unit/services/firewall/__init__.py b/neutron/tests/unit/services/firewall/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/firewall/agents/__init__.py b/neutron/tests/unit/services/firewall/agents/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/firewall/agents/l3reference/__init__.py b/neutron/tests/unit/services/firewall/agents/l3reference/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/firewall/agents/l3reference/test_firewall_l3_agent.py b/neutron/tests/unit/services/firewall/agents/l3reference/test_firewall_l3_agent.py
deleted file mode 100644 (file)
index 22b4169..0000000
+++ /dev/null
@@ -1,393 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import uuid
-
-import mock
-from oslo.config import cfg
-
-from neutron.agent.common import config as agent_config
-from neutron.agent import l3_agent
-from neutron.agent import l3_ha_agent
-from neutron.agent.linux import ip_lib
-from neutron.common import config as base_config
-from neutron import context
-from neutron.plugins.common import constants
-from neutron.services.firewall.agents import firewall_agent_api
-from neutron.services.firewall.agents.l3reference import firewall_l3_agent
-from neutron.tests import base
-from neutron.tests.unit.services.firewall.agents import test_firewall_agent_api
-
-
-class FWaasHelper(object):
-    def __init__(self, host):
-        pass
-
-
-class FWaasAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, FWaasHelper):
-    neutron_service_plugins = []
-
-
-def _setup_test_agent_class(service_plugins):
-    class FWaasTestAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
-                         FWaasHelper):
-        neutron_service_plugins = service_plugins
-
-    return FWaasTestAgent
-
-
-class TestFwaasL3AgentRpcCallback(base.BaseTestCase):
-    def setUp(self):
-        super(TestFwaasL3AgentRpcCallback, self).setUp()
-
-        self.conf = cfg.ConfigOpts()
-        self.conf.register_opts(base_config.core_opts)
-        self.conf.register_opts(l3_agent.L3NATAgent.OPTS)
-        self.conf.register_opts(l3_ha_agent.OPTS)
-        agent_config.register_use_namespaces_opts_helper(self.conf)
-        agent_config.register_root_helper(self.conf)
-        self.conf.root_helper = 'sudo'
-        self.conf.register_opts(firewall_agent_api.FWaaSOpts, 'fwaas')
-        self.api = FWaasAgent(self.conf)
-        self.api.fwaas_driver = test_firewall_agent_api.NoopFwaasDriver()
-
-    def test_fw_config_match(self):
-        test_agent_class = _setup_test_agent_class([constants.FIREWALL])
-        cfg.CONF.set_override('enabled', True, 'fwaas')
-        with mock.patch('oslo.utils.importutils.import_object'):
-            test_agent_class(cfg.CONF)
-
-    def test_fw_config_mismatch_plugin_enabled_agent_disabled(self):
-        test_agent_class = _setup_test_agent_class([constants.FIREWALL])
-        cfg.CONF.set_override('enabled', False, 'fwaas')
-        self.assertRaises(SystemExit, test_agent_class, cfg.CONF)
-
-    def test_fw_plugin_list_unavailable(self):
-        test_agent_class = _setup_test_agent_class(None)
-        cfg.CONF.set_override('enabled', False, 'fwaas')
-        with mock.patch('oslo.utils.importutils.import_object'):
-            test_agent_class(cfg.CONF)
-
-    def test_create_firewall(self):
-        fake_firewall = {'id': 0}
-        with mock.patch.object(
-            self.api,
-            '_invoke_driver_for_plugin_api'
-        ) as mock_driver:
-            self.assertEqual(
-                self.api.create_firewall(
-                    mock.sentinel.context,
-                    fake_firewall,
-                    'host'),
-                mock_driver.return_value)
-
-    def test_update_firewall(self):
-        fake_firewall = {'id': 0}
-        with mock.patch.object(
-            self.api,
-            '_invoke_driver_for_plugin_api'
-        ) as mock_driver:
-            self.assertEqual(
-                self.api.update_firewall(
-                    mock.sentinel.context,
-                    fake_firewall,
-                    'host'),
-                mock_driver.return_value)
-
-    def test_delete_firewall(self):
-        fake_firewall = {'id': 0}
-        with mock.patch.object(
-            self.api,
-            '_invoke_driver_for_plugin_api'
-        ) as mock_driver:
-            self.assertEqual(
-                self.api.delete_firewall(
-                    mock.sentinel.context,
-                    fake_firewall,
-                    'host'),
-                mock_driver.return_value)
-
-    def test_invoke_driver_for_plugin_api(self):
-        fake_firewall = {'id': 0, 'tenant_id': 1,
-                         'admin_state_up': True}
-        self.api.plugin_rpc = mock.Mock()
-        with contextlib.nested(
-            mock.patch.object(self.api.plugin_rpc, 'get_routers'),
-            mock.patch.object(self.api, '_get_router_info_list_for_tenant'),
-            mock.patch.object(self.api.fwaas_driver, 'create_firewall'),
-            mock.patch.object(self.api.fwplugin_rpc, 'set_firewall_status')
-        ) as (
-            mock_get_routers,
-            mock_get_router_info_list_for_tenant,
-            mock_driver_create_firewall,
-            mock_set_firewall_status):
-
-            mock_driver_create_firewall.return_value = True
-            self.api.create_firewall(
-                context=mock.sentinel.context,
-                firewall=fake_firewall, host='host')
-
-            mock_get_routers.assert_called_once_with(
-                mock.sentinel.context)
-
-            mock_get_router_info_list_for_tenant.assert_called_once_with(
-                mock_get_routers.return_value, fake_firewall['tenant_id'])
-
-            mock_set_firewall_status.assert_called_once_with(
-                mock.sentinel.context,
-                fake_firewall['id'],
-                'ACTIVE')
-
-    def test_invoke_driver_for_plugin_api_admin_state_down(self):
-        fake_firewall = {'id': 0, 'tenant_id': 1,
-                         'admin_state_up': False}
-        self.api.plugin_rpc = mock.Mock()
-        with contextlib.nested(
-            mock.patch.object(self.api.plugin_rpc, 'get_routers'),
-            mock.patch.object(self.api, '_get_router_info_list_for_tenant'),
-            mock.patch.object(self.api.fwaas_driver, 'update_firewall'),
-            mock.patch.object(self.api.fwplugin_rpc,
-                              'get_firewalls_for_tenant'),
-            mock.patch.object(self.api.fwplugin_rpc, 'set_firewall_status')
-        ) as (
-            mock_get_routers,
-            mock_get_router_info_list_for_tenant,
-            mock_driver_update_firewall,
-            mock_get_firewalls_for_tenant,
-            mock_set_firewall_status):
-
-            mock_driver_update_firewall.return_value = True
-            self.api.update_firewall(
-                context=mock.sentinel.context,
-                firewall=fake_firewall, host='host')
-
-            mock_get_routers.assert_called_once_with(
-                mock.sentinel.context)
-
-            mock_get_router_info_list_for_tenant.assert_called_once_with(
-                mock_get_routers.return_value, fake_firewall['tenant_id'])
-
-            mock_set_firewall_status.assert_called_once_with(
-                mock.sentinel.context,
-                fake_firewall['id'],
-                'DOWN')
-
-    def test_invoke_driver_for_plugin_api_delete(self):
-        fake_firewall = {'id': 0, 'tenant_id': 1,
-                         'admin_state_up': True}
-        self.api.plugin_rpc = mock.Mock()
-        with contextlib.nested(
-            mock.patch.object(self.api.plugin_rpc, 'get_routers'),
-            mock.patch.object(self.api, '_get_router_info_list_for_tenant'),
-            mock.patch.object(self.api.fwaas_driver, 'delete_firewall'),
-            mock.patch.object(self.api.fwplugin_rpc, 'firewall_deleted')
-        ) as (
-            mock_get_routers,
-            mock_get_router_info_list_for_tenant,
-            mock_driver_delete_firewall,
-            mock_firewall_deleted):
-
-            mock_driver_delete_firewall.return_value = True
-            self.api.delete_firewall(
-                context=mock.sentinel.context,
-                firewall=fake_firewall, host='host')
-
-            mock_get_routers.assert_called_once_with(
-                mock.sentinel.context)
-
-            mock_get_router_info_list_for_tenant.assert_called_once_with(
-                mock_get_routers.return_value, fake_firewall['tenant_id'])
-
-            mock_firewall_deleted.assert_called_once_with(
-                mock.sentinel.context,
-                fake_firewall['id'])
-
-    def test_delete_firewall_no_router(self):
-        fake_firewall = {'id': 0, 'tenant_id': 1}
-        self.api.plugin_rpc = mock.Mock()
-        with contextlib.nested(
-            mock.patch.object(self.api.plugin_rpc, 'get_routers'),
-            mock.patch.object(self.api, '_get_router_info_list_for_tenant'),
-            mock.patch.object(self.api.fwplugin_rpc, 'firewall_deleted')
-        ) as (
-            mock_get_routers,
-            mock_get_router_info_list_for_tenant,
-            mock_firewall_deleted):
-
-            mock_get_router_info_list_for_tenant.return_value = []
-            self.api.delete_firewall(
-                context=mock.sentinel.context,
-                firewall=fake_firewall, host='host')
-
-            mock_get_routers.assert_called_once_with(
-                mock.sentinel.context)
-
-            mock_get_router_info_list_for_tenant.assert_called_once_with(
-                mock_get_routers.return_value, fake_firewall['tenant_id'])
-
-            mock_firewall_deleted.assert_called_once_with(
-                mock.sentinel.context,
-                fake_firewall['id'])
-
-    def test_process_router_add_fw_update(self):
-        fake_firewall_list = [{'id': 0, 'tenant_id': 1,
-                               'status': constants.PENDING_UPDATE,
-                               'admin_state_up': True}]
-        fake_router = {'id': 1111, 'tenant_id': 2}
-        self.api.plugin_rpc = mock.Mock()
-        agent_mode = 'legacy'
-        ri = mock.Mock()
-        ri.router = fake_router
-        routers = [ri.router]
-        with contextlib.nested(
-            mock.patch.object(self.api.plugin_rpc, 'get_routers'),
-            mock.patch.object(self.api, '_get_router_info_list_for_tenant'),
-            mock.patch.object(self.api.fwaas_driver, 'update_firewall'),
-            mock.patch.object(self.api.fwplugin_rpc, 'set_firewall_status'),
-            mock.patch.object(self.api.fwplugin_rpc,
-                              'get_firewalls_for_tenant'),
-            mock.patch.object(context, 'Context')
-        ) as (
-            mock_get_routers,
-            mock_get_router_info_list_for_tenant,
-            mock_driver_update_firewall,
-            mock_set_firewall_status,
-            mock_get_firewalls_for_tenant,
-            mock_Context):
-
-            mock_driver_update_firewall.return_value = True
-            ctx = mock.sentinel.context
-            mock_Context.return_value = ctx
-            mock_get_router_info_list_for_tenant.return_value = routers
-            mock_get_firewalls_for_tenant.return_value = fake_firewall_list
-
-            self.api._process_router_add(ri)
-            mock_get_router_info_list_for_tenant.assert_called_with(
-                routers,
-                ri.router['tenant_id'])
-            mock_get_firewalls_for_tenant.assert_called_once_with(ctx)
-            mock_driver_update_firewall.assert_called_once_with(
-                agent_mode,
-                routers,
-                fake_firewall_list[0])
-
-            mock_set_firewall_status.assert_called_once_with(
-                ctx,
-                fake_firewall_list[0]['id'],
-                constants.ACTIVE)
-
-    def test_process_router_add_fw_delete(self):
-        fake_firewall_list = [{'id': 0, 'tenant_id': 1,
-                               'status': constants.PENDING_DELETE}]
-        fake_router = {'id': 1111, 'tenant_id': 2}
-        agent_mode = 'legacy'
-        self.api.plugin_rpc = mock.Mock()
-        ri = mock.Mock()
-        ri.router = fake_router
-        routers = [ri.router]
-        with contextlib.nested(
-            mock.patch.object(self.api.plugin_rpc, 'get_routers'),
-            mock.patch.object(self.api, '_get_router_info_list_for_tenant'),
-            mock.patch.object(self.api.fwaas_driver, 'delete_firewall'),
-            mock.patch.object(self.api.fwplugin_rpc, 'firewall_deleted'),
-            mock.patch.object(self.api.fwplugin_rpc,
-                              'get_firewalls_for_tenant'),
-            mock.patch.object(context, 'Context')
-        ) as (
-            mock_get_routers,
-            mock_get_router_info_list_for_tenant,
-            mock_driver_delete_firewall,
-            mock_firewall_deleted,
-            mock_get_firewalls_for_tenant,
-            mock_Context):
-
-            mock_driver_delete_firewall.return_value = True
-            ctx = mock.sentinel.context
-            mock_Context.return_value = ctx
-            mock_get_router_info_list_for_tenant.return_value = routers
-            mock_get_firewalls_for_tenant.return_value = fake_firewall_list
-
-            self.api._process_router_add(ri)
-            mock_get_router_info_list_for_tenant.assert_called_with(
-                routers,
-                ri.router['tenant_id'])
-            mock_get_firewalls_for_tenant.assert_called_once_with(ctx)
-            mock_driver_delete_firewall.assert_called_once_with(
-                agent_mode,
-                routers,
-                fake_firewall_list[0])
-
-            mock_firewall_deleted.assert_called_once_with(
-                ctx,
-                fake_firewall_list[0]['id'])
-
-    def _prepare_router_data(self):
-        router = {'id': str(uuid.uuid4()), 'tenant_id': str(uuid.uuid4())}
-        ns = "ns-" + router['id']
-        return l3_agent.RouterInfo(router['id'], self.conf.root_helper,
-                                   router=router, ns_name=ns)
-
-    def _get_router_info_list_helper(self, use_namespaces):
-        self.conf.set_override('use_namespaces', use_namespaces)
-        ri = self._prepare_router_data()
-        routers = [ri.router]
-        self.api.router_info = {ri.router_id: ri}
-        with mock.patch.object(ip_lib.IPWrapper,
-                               'get_namespaces') as mock_get_namespaces:
-            mock_get_namespaces.return_value = []
-            router_info_list = self.api._get_router_info_list_for_tenant(
-                routers,
-                ri.router['tenant_id'])
-        if use_namespaces:
-            mock_get_namespaces.assert_called_once_with(self.conf.root_helper)
-            self.assertFalse(router_info_list)
-        else:
-            self.assertEqual([ri], router_info_list)
-
-    def test_get_router_info_list_for_tenant_for_namespaces_disabled(self):
-        self._get_router_info_list_helper(use_namespaces=False)
-
-    def test_get_router_info_list_for_tenant(self):
-        self._get_router_info_list_helper(use_namespaces=True)
-
-    def _get_router_info_list_router_without_router_info_helper(self,
-                                                                rtr_with_ri):
-        self.conf.set_override('use_namespaces', True)
-        # ri.router with associated router_info (ri)
-        # rtr2 has no router_info
-        ri = self._prepare_router_data()
-        rtr2 = {'id': str(uuid.uuid4()), 'tenant_id': ri.router['tenant_id']}
-        routers = [rtr2]
-        self.api.router_info = {}
-        ri_expected = []
-        if rtr_with_ri:
-            self.api.router_info[ri.router_id] = ri
-            routers.append(ri.router)
-            ri_expected.append(ri)
-        with mock.patch.object(ip_lib.IPWrapper,
-                               'get_namespaces') as mock_get_namespaces:
-            mock_get_namespaces.return_value = ri.ns_name
-            router_info_list = self.api._get_router_info_list_for_tenant(
-                routers,
-                ri.router['tenant_id'])
-            self.assertEqual(ri_expected, router_info_list)
-
-    def test_get_router_info_list_router_without_router_info(self):
-        self._get_router_info_list_router_without_router_info_helper(
-            rtr_with_ri=False)
-
-    def test_get_router_info_list_two_routers_one_without_router_info(self):
-        self._get_router_info_list_router_without_router_info_helper(
-            rtr_with_ri=True)
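
The firewall L3-agent tests above stub the agent's collaborators with mock.patch.object and then assert on the recorded calls. Here is a self-contained sketch of that pattern; FakePluginRpc and the patched method names are placeholders for illustration, not the real agent API.

import contextlib
from unittest import mock


class FakePluginRpc(object):
    # Hypothetical stand-in for the agent's plugin-side RPC proxy.
    def get_routers(self, context):
        return []

    def set_firewall_status(self, context, firewall_id, status):
        pass


def exercise(plugin_rpc):
    # Same pattern as the deleted tests: patch several collaborators at once
    # and assert how the agent-side code would have called them.
    with contextlib.ExitStack() as stack:
        get_routers = stack.enter_context(
            mock.patch.object(plugin_rpc, 'get_routers'))
        set_status = stack.enter_context(
            mock.patch.object(plugin_rpc, 'set_firewall_status'))
        get_routers.return_value = ['router-1']
        # ... the code under test would run here ...
        plugin_rpc.get_routers('ctx')
        plugin_rpc.set_firewall_status('ctx', 'fw-id', 'ACTIVE')
        get_routers.assert_called_once_with('ctx')
        set_status.assert_called_once_with('ctx', 'fw-id', 'ACTIVE')


if __name__ == '__main__':
    exercise(FakePluginRpc())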
diff --git a/neutron/tests/unit/services/firewall/agents/test_firewall_agent_api.py b/neutron/tests/unit/services/firewall/agents/test_firewall_agent_api.py
deleted file mode 100644 (file)
index 63266d9..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import mock
-
-from neutron.services.firewall.agents import firewall_agent_api as api
-from neutron.services.firewall.drivers import fwaas_base as base_driver
-from neutron.tests import base
-
-
-class NoopFwaasDriver(base_driver.FwaasDriverBase):
-    """Noop Fwaas Driver.
-
-    Firewall driver which does nothing.
-    This driver is for disabling Fwaas functionality.
-    """
-
-    def create_firewall(self, apply_list, firewall):
-        pass
-
-    def delete_firewall(self, apply_list, firewall):
-        pass
-
-    def update_firewall(self, apply_list, firewall):
-        pass
-
-    def apply_default_policy(self, apply_list, firewall):
-        pass
-
-
-class TestFWaaSAgentApi(base.BaseTestCase):
-    def setUp(self):
-        super(TestFWaaSAgentApi, self).setUp()
-
-        self.api = api.FWaaSPluginApiMixin(
-            'topic',
-            'host')
-
-    def test_init(self):
-        self.assertEqual(self.api.host, 'host')
-
-    def _test_firewall_method(self, method_name, **kwargs):
-        with contextlib.nested(
-            mock.patch.object(self.api.client, 'call'),
-            mock.patch.object(self.api.client, 'prepare'),
-        ) as (
-            rpc_mock, prepare_mock
-        ):
-            prepare_mock.return_value = self.api.client
-            getattr(self.api, method_name)(mock.sentinel.context, 'test',
-                                           **kwargs)
-
-        prepare_args = {}
-        prepare_mock.assert_called_once_with(**prepare_args)
-
-        rpc_mock.assert_called_once_with(mock.sentinel.context, method_name,
-                                         firewall_id='test', host='host',
-                                         **kwargs)
-
-    def test_set_firewall_status(self):
-        self._test_firewall_method('set_firewall_status', status='fake_status')
-
-    def test_firewall_deleted(self):
-        self._test_firewall_method('firewall_deleted')
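
The agent-API test above relies on patching client.prepare() to return the client itself so that client.call() can be asserted directly. A stand-alone sketch of that trick follows; FakePluginApi only loosely mirrors FWaaSPluginApiMixin and is not the real class.

from unittest import mock


class FakePluginApi(object):
    # Hypothetical RPC mixin: prepare() yields a call context, call() sends
    # the method name plus firewall_id/host keyword arguments.
    def __init__(self, client, host):
        self.client = client
        self.host = host

    def set_firewall_status(self, context, firewall_id, status):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'set_firewall_status',
                          firewall_id=firewall_id, host=self.host,
                          status=status)


if __name__ == '__main__':
    client = mock.Mock()
    # prepare() returns the client itself, so call() is recorded on one mock.
    client.prepare.return_value = client
    api = FakePluginApi(client, 'host')
    api.set_firewall_status('ctx', 'fw-1', 'ACTIVE')
    client.call.assert_called_once_with('ctx', 'set_firewall_status',
                                        firewall_id='fw-1', host='host',
                                        status='ACTIVE')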
diff --git a/neutron/tests/unit/services/firewall/agents/varmour/__init__.py b/neutron/tests/unit/services/firewall/agents/varmour/__init__.py
deleted file mode 100755 (executable)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/firewall/agents/varmour/test_varmour_router.py b/neutron/tests/unit/services/firewall/agents/varmour/test_varmour_router.py
deleted file mode 100644 (file)
index b48a301..0000000
+++ /dev/null
@@ -1,324 +0,0 @@
-# Copyright 2013 vArmour Networks Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-import mock
-
-from neutron.agent.common import config as agent_config
-from neutron.agent import l3_agent
-from neutron.agent import l3_ha_agent
-from neutron.agent.linux import interface
-from neutron.common import config as base_config
-from neutron.common import constants as l3_constants
-from neutron.openstack.common import uuidutils
-from neutron.services.firewall.agents.varmour import varmour_router
-from neutron.services.firewall.agents.varmour import varmour_utils
-from neutron.tests import base
-
-_uuid = uuidutils.generate_uuid
-HOSTNAME = 'myhost'
-FAKE_DIRECTOR = '1.1.1.1'
-
-
-class TestVarmourRouter(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestVarmourRouter, self).setUp()
-        self.conf = agent_config.setup_conf()
-        self.conf.register_opts(base_config.core_opts)
-        self.conf.register_opts(varmour_router.vArmourL3NATAgent.OPTS)
-        self.conf.register_opts(l3_ha_agent.OPTS)
-        agent_config.register_interface_driver_opts_helper(self.conf)
-        agent_config.register_use_namespaces_opts_helper(self.conf)
-        agent_config.register_root_helper(self.conf)
-        self.conf.register_opts(interface.OPTS)
-        self.conf.set_override('interface_driver',
-                               'neutron.agent.linux.interface.NullDriver')
-        self.conf.root_helper = 'sudo'
-        self.conf.state_path = ''
-
-        self.device_exists_p = mock.patch(
-            'neutron.agent.linux.ip_lib.device_exists')
-        self.device_exists = self.device_exists_p.start()
-
-        self.utils_exec_p = mock.patch(
-            'neutron.agent.linux.utils.execute')
-        self.utils_exec = self.utils_exec_p.start()
-
-        self.external_process_p = mock.patch(
-            'neutron.agent.linux.external_process.ProcessManager')
-        self.external_process = self.external_process_p.start()
-
-        self.makedirs_p = mock.patch('os.makedirs')
-        self.makedirs = self.makedirs_p.start()
-
-        self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
-        driver_cls = self.dvr_cls_p.start()
-        self.mock_driver = mock.MagicMock()
-        self.mock_driver.DEV_NAME_LEN = (
-            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
-        driver_cls.return_value = self.mock_driver
-
-        self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
-        ip_cls = self.ip_cls_p.start()
-        self.mock_ip = mock.MagicMock()
-        ip_cls.return_value = self.mock_ip
-
-        mock.patch('neutron.agent.l3_agent.L3PluginApi').start()
-
-        self.looping_call_p = mock.patch(
-            'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
-        self.looping_call_p.start()
-
-    def _create_router(self):
-        router = varmour_router.vArmourL3NATAgent(HOSTNAME, self.conf)
-        router.rest.server = FAKE_DIRECTOR
-        router.rest.user = 'varmour'
-        router.rest.passwd = 'varmour'
-        return router
-
-    def _del_all_internal_ports(self, router):
-        router[l3_constants.INTERFACE_KEY] = []
-
-    def _del_internal_ports(self, router, port_idx):
-        del router[l3_constants.INTERFACE_KEY][port_idx]
-
-    def _add_internal_ports(self, router, port_count=1):
-        self._del_all_internal_ports(router)
-        for i in range(port_count):
-            port = {'id': _uuid(),
-                    'network_id': _uuid(),
-                    'admin_state_up': True,
-                    'fixed_ips': [{'ip_address': '10.0.%s.4' % i,
-                                   'subnet_id': _uuid()}],
-                    'mac_address': 'ca:fe:de:ad:be:ef',
-                    'subnet': {'cidr': '10.0.%s.0/24' % i,
-                               'gateway_ip': '10.0.%s.1' % i}}
-            router[l3_constants.INTERFACE_KEY].append(port)
-
-    def _del_all_floating_ips(self, router):
-        router[l3_constants.FLOATINGIP_KEY] = []
-
-    def _del_floating_ips(self, router, port_idx):
-        del router[l3_constants.FLOATINGIP_KEY][port_idx]
-
-    def _add_floating_ips(self, router, port_count=1):
-        self._del_all_floating_ips(router)
-        for i in range(port_count):
-            fip = {'id': _uuid(),
-                   'port_id': router['gw_port']['id'],
-                   'floating_ip_address': '172.24.4.%s' % (100 + i),
-                   'fixed_ip_address': '10.0.0.%s' % (100 + i)}
-            router[l3_constants.FLOATINGIP_KEY].append(fip)
-
-    def _prepare_router_data(self, enable_snat=None):
-        router_id = _uuid()
-        ex_gw_port = {'id': _uuid(),
-                      'network_id': _uuid(),
-                      'fixed_ips': [{'ip_address': '172.24.4.2',
-                                     'subnet_id': _uuid()}],
-                      'subnet': {'cidr': '172.24.4.0/24',
-                                 'gateway_ip': '172.24.4.1'},
-                      'ip_cidr': '172.24.4.226/28'}
-        int_ports = []
-
-        router = {
-            'id': router_id,
-            l3_constants.INTERFACE_KEY: int_ports,
-            'routes': [],
-            'gw_port': ex_gw_port}
-        if enable_snat is not None:
-            router['enable_snat'] = enable_snat
-
-        ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
-                                 router=router)
-        return ri
-
-    def test_agent_add_internal_network(self):
-        router = self._create_router()
-        try:
-            router.rest.auth()
-        except Exception:
-            # skip the test, firewall is not deployed
-            return
-
-        ri = self._prepare_router_data(enable_snat=True)
-        router._router_added(ri.router['id'], ri.router)
-
-        url = varmour_utils.REST_URL_CONF_NAT_RULE
-        prefix = varmour_utils.get_snat_rule_name(ri)
-
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0, 'prefix %s' % prefix)
-
-        self._add_internal_ports(ri.router, port_count=1)
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 1, 'prefix %s' % prefix)
-
-        router._router_removed(ri.router['id'])
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0, 'prefix %s' % prefix)
-
-    def test_agent_remove_internal_network(self):
-        router = self._create_router()
-        try:
-            router.rest.auth()
-        except Exception:
-            # skip the test, firewall is not deployed
-            return
-
-        ri = self._prepare_router_data(enable_snat=True)
-        router._router_added(ri.router['id'], ri.router)
-
-        url = varmour_utils.REST_URL_CONF_NAT_RULE
-        prefix = varmour_utils.get_snat_rule_name(ri)
-
-        self._add_internal_ports(ri.router, port_count=2)
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 2, 'prefix %s' % prefix)
-
-        self._del_internal_ports(ri.router, 0)
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 1, 'prefix %s' % prefix)
-
-        self._del_all_internal_ports(ri.router)
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0, 'prefix %s' % prefix)
-
-        router._router_removed(ri.router['id'])
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0, 'prefix %s' % prefix)
-
-    def test_agent_add_floating_ips(self):
-        router = self._create_router()
-        try:
-            router.rest.auth()
-        except Exception:
-            # skip the test, firewall is not deployed
-            return
-
-        ri = self._prepare_router_data(enable_snat=True)
-        self._add_internal_ports(ri.router, port_count=1)
-        router._router_added(ri.router['id'], ri.router)
-
-        url = varmour_utils.REST_URL_CONF_NAT_RULE
-        prefix = varmour_utils.get_dnat_rule_name(ri)
-
-        self._add_floating_ips(ri.router, port_count=1)
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 1, 'prefix %s' % prefix)
-
-        self._add_floating_ips(ri.router, port_count=2)
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 2, 'prefix %s' % prefix)
-
-        router._router_removed(ri.router['id'])
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0, 'prefix %s' % prefix)
-
-    def test_agent_remove_floating_ips(self):
-        router = self._create_router()
-        try:
-            router.rest.auth()
-        except Exception:
-            # skip the test, firewall is not deployed
-            return
-
-        ri = self._prepare_router_data(enable_snat=True)
-        self._add_internal_ports(ri.router, port_count=1)
-        self._add_floating_ips(ri.router, port_count=2)
-        router._router_added(ri.router['id'], ri.router)
-
-        url = varmour_utils.REST_URL_CONF_NAT_RULE
-        prefix = varmour_utils.get_dnat_rule_name(ri)
-
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 2, 'prefix %s' % prefix)
-
-        self._del_floating_ips(ri.router, 0)
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 1, 'prefix %s' % prefix)
-
-        self._del_all_floating_ips(ri.router)
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0, 'prefix %s' % prefix)
-
-        router._router_removed(ri.router['id'])
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0, 'prefix %s' % prefix)
-
-    def test_agent_external_gateway(self):
-        router = self._create_router()
-        try:
-            router.rest.auth()
-        except Exception:
-            # skip the test, firewall is not deployed
-            return
-
-        ri = self._prepare_router_data(enable_snat=True)
-        router._router_added(ri.router['id'], ri.router)
-
-        url = varmour_utils.REST_URL_CONF_ZONE
-        prefix = varmour_utils.get_untrusted_zone_name(ri)
-
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 1, 'prefix %s' % prefix)
-
-        del ri.router['gw_port']
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 1, 'prefix %s' % prefix)
-
-        router._router_removed(ri.router['id'])
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0, 'prefix %s' % prefix)
-
-    def test_agent_snat_enable(self):
-        router = self._create_router()
-        try:
-            router.rest.auth()
-        except Exception:
-            # skip the test, firewall is not deployed
-            return
-
-        ri = self._prepare_router_data(enable_snat=True)
-        router._router_added(ri.router['id'], ri.router)
-
-        url = varmour_utils.REST_URL_CONF_NAT_RULE
-        prefix = varmour_utils.get_snat_rule_name(ri)
-
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0, 'prefix %s' % prefix)
-
-        ri.router['enable_snat'] = False
-        router.process_router(ri)
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0, 'prefix %s' % prefix)
-
-        router._router_removed(ri.router['id'])
-        n = router.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0, 'prefix %s' % prefix)
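
The vArmour agent tests above return silently when router.rest.auth() fails, so a run without a reachable director looks green rather than skipped. A sketch of the same guard made explicit with skipTest; this is an assumed pattern for illustration, not the vendor repo's code.

import unittest


class FakeRestClient(object):
    # Hypothetical stand-in for router.rest; auth() fails when no vArmour
    # director is reachable, as in the deleted tests.
    def auth(self):
        raise RuntimeError('no director reachable at 1.1.1.1')


class ExampleVarmourTest(unittest.TestCase):
    def setUp(self):
        super(ExampleVarmourTest, self).setUp()
        self.rest = FakeRestClient()

    def test_agent_add_internal_network(self):
        try:
            self.rest.auth()
        except Exception as exc:
            # Surface the skip instead of silently returning.
            self.skipTest('firewall is not deployed: %s' % exc)
        # ... the real NAT-rule assertions would run only against a live
        # director ...


if __name__ == '__main__':
    unittest.main()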
diff --git a/neutron/tests/unit/services/firewall/drivers/__init__.py b/neutron/tests/unit/services/firewall/drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/firewall/drivers/linux/__init__.py b/neutron/tests/unit/services/firewall/drivers/linux/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/firewall/drivers/linux/test_iptables_fwaas.py b/neutron/tests/unit/services/firewall/drivers/linux/test_iptables_fwaas.py
deleted file mode 100644 (file)
index c4e633a..0000000
+++ /dev/null
@@ -1,250 +0,0 @@
-# Copyright 2013 Dell Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo.config import cfg
-
-from neutron.agent.common import config as a_cfg
-import neutron.services.firewall.drivers.linux.iptables_fwaas as fwaas
-from neutron.tests import base
-from neutron.tests.unit import test_api_v2
-
-
-_uuid = test_api_v2._uuid
-FAKE_SRC_PREFIX = '10.0.0.0/24'
-FAKE_DST_PREFIX = '20.0.0.0/24'
-FAKE_PROTOCOL = 'tcp'
-FAKE_SRC_PORT = 5000
-FAKE_DST_PORT = 22
-FAKE_FW_ID = 'fake-fw-uuid'
-
-
-class IptablesFwaasTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(IptablesFwaasTestCase, self).setUp()
-        cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
-        self.utils_exec_p = mock.patch(
-            'neutron.agent.linux.utils.execute')
-        self.utils_exec = self.utils_exec_p.start()
-        self.iptables_cls_p = mock.patch(
-            'neutron.agent.linux.iptables_manager.IptablesManager')
-        self.iptables_cls_p.start()
-        self.firewall = fwaas.IptablesFwaasDriver()
-
-    def _fake_rules_v4(self, fwid, apply_list):
-        rule_list = []
-        rule1 = {'enabled': True,
-                 'action': 'allow',
-                 'ip_version': 4,
-                 'protocol': 'tcp',
-                 'destination_port': '80',
-                 'source_ip_address': '10.24.4.2'}
-        rule2 = {'enabled': True,
-                 'action': 'deny',
-                 'ip_version': 4,
-                 'protocol': 'tcp',
-                 'destination_port': '22'}
-        ingress_chain = ('iv4%s' % fwid)[:11]
-        egress_chain = ('ov4%s' % fwid)[:11]
-        for router_info_inst in apply_list:
-            v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
-            v4filter_inst.chains.append(ingress_chain)
-            v4filter_inst.chains.append(egress_chain)
-        rule_list.append(rule1)
-        rule_list.append(rule2)
-        return rule_list
-
-    def _fake_firewall_no_rule(self):
-        rule_list = []
-        fw_inst = {'id': FAKE_FW_ID,
-                   'admin_state_up': True,
-                   'tenant_id': 'tenant-uuid',
-                   'firewall_rule_list': rule_list}
-        return fw_inst
-
-    def _fake_firewall(self, rule_list):
-        fw_inst = {'id': FAKE_FW_ID,
-                   'admin_state_up': True,
-                   'tenant_id': 'tenant-uuid',
-                   'firewall_rule_list': rule_list}
-        return fw_inst
-
-    def _fake_firewall_with_admin_down(self, rule_list):
-        fw_inst = {'id': FAKE_FW_ID,
-                   'admin_state_up': False,
-                   'tenant_id': 'tenant-uuid',
-                   'firewall_rule_list': rule_list}
-        return fw_inst
-
-    def _fake_apply_list(self, router_count=1, distributed=False,
-            distributed_mode=None):
-        apply_list = []
-        while router_count > 0:
-            iptables_inst = mock.Mock()
-            router_inst = {'distributed': distributed}
-            v4filter_inst = mock.Mock()
-            v6filter_inst = mock.Mock()
-            v4filter_inst.chains = []
-            v6filter_inst.chains = []
-            iptables_inst.ipv4 = {'filter': v4filter_inst}
-            iptables_inst.ipv6 = {'filter': v6filter_inst}
-            router_info_inst = mock.Mock()
-            router_info_inst.iptables_manager = iptables_inst
-            router_info_inst.snat_iptables_manager = iptables_inst
-            if distributed_mode == 'dvr':
-                router_info_inst.dist_fip_count = 1
-            router_info_inst.router = router_inst
-            apply_list.append(router_info_inst)
-            router_count -= 1
-        return apply_list
-
-    def _setup_firewall_with_rules(self, func, router_count=1,
-            distributed=False, distributed_mode=None):
-        apply_list = self._fake_apply_list(router_count=router_count,
-            distributed=distributed, distributed_mode=distributed_mode)
-        rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
-        firewall = self._fake_firewall(rule_list)
-        if distributed:
-            if distributed_mode == 'dvr_snat':
-                if_prefix = 'sg-+'
-            if distributed_mode == 'dvr':
-                if_prefix = 'rfp-+'
-        else:
-            if_prefix = 'qr-+'
-            distributed_mode = 'legacy'
-        func(distributed_mode, apply_list, firewall)
-        invalid_rule = '-m state --state INVALID -j DROP'
-        est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
-        rule1 = '-p tcp --dport 80  -s 10.24.4.2  -j ACCEPT'
-        rule2 = '-p tcp --dport 22    -j DROP'
-        ingress_chain = 'iv4%s' % firewall['id']
-        egress_chain = 'ov4%s' % firewall['id']
-        bname = fwaas.iptables_manager.binary_name
-        ipt_mgr_ichain = '%s-%s' % (bname, ingress_chain[:11])
-        ipt_mgr_echain = '%s-%s' % (bname, egress_chain[:11])
-        for router_info_inst in apply_list:
-            v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
-            calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
-                     mock.call.remove_chain('ov4fake-fw-uuid'),
-                     mock.call.remove_chain('fwaas-default-policy'),
-                     mock.call.add_chain('fwaas-default-policy'),
-                     mock.call.add_rule('fwaas-default-policy', '-j DROP'),
-                     mock.call.add_chain(ingress_chain),
-                     mock.call.add_rule(ingress_chain, invalid_rule),
-                     mock.call.add_rule(ingress_chain, est_rule),
-                     mock.call.add_chain(egress_chain),
-                     mock.call.add_rule(egress_chain, invalid_rule),
-                     mock.call.add_rule(egress_chain, est_rule),
-                     mock.call.add_rule(ingress_chain, rule1),
-                     mock.call.add_rule(egress_chain, rule1),
-                     mock.call.add_rule(ingress_chain, rule2),
-                     mock.call.add_rule(egress_chain, rule2),
-                     mock.call.add_rule('FORWARD',
-                                        '-o %s -j %s' % (if_prefix,
-                                        ipt_mgr_ichain)),
-                     mock.call.add_rule('FORWARD',
-                                        '-i %s -j %s' % (if_prefix,
-                                        ipt_mgr_echain)),
-                     mock.call.add_rule('FORWARD',
-                                        '-o %s -j %s-fwaas-defau' % (if_prefix,
-                                        bname)),
-                     mock.call.add_rule('FORWARD',
-                                        '-i %s -j %s-fwaas-defau' % (if_prefix,
-                                        bname))]
-            v4filter_inst.assert_has_calls(calls)
-
-    def test_create_firewall_no_rules(self):
-        apply_list = self._fake_apply_list()
-        firewall = self._fake_firewall_no_rule()
-        self.firewall.create_firewall('legacy', apply_list, firewall)
-        invalid_rule = '-m state --state INVALID -j DROP'
-        est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
-        bname = fwaas.iptables_manager.binary_name
-
-        for ip_version in (4, 6):
-            ingress_chain = ('iv%s%s' % (ip_version, firewall['id']))
-            egress_chain = ('ov%s%s' % (ip_version, firewall['id']))
-            calls = [mock.call.remove_chain(
-                     'iv%sfake-fw-uuid' % ip_version),
-                     mock.call.remove_chain(
-                         'ov%sfake-fw-uuid' % ip_version),
-                     mock.call.remove_chain('fwaas-default-policy'),
-                     mock.call.add_chain('fwaas-default-policy'),
-                     mock.call.add_rule('fwaas-default-policy', '-j DROP'),
-                     mock.call.add_chain(ingress_chain),
-                     mock.call.add_rule(ingress_chain, invalid_rule),
-                     mock.call.add_rule(ingress_chain, est_rule),
-                     mock.call.add_chain(egress_chain),
-                     mock.call.add_rule(egress_chain, invalid_rule),
-                     mock.call.add_rule(egress_chain, est_rule),
-                     mock.call.add_rule('FORWARD',
-                                        '-o qr-+ -j %s-fwaas-defau' % bname),
-                     mock.call.add_rule('FORWARD',
-                                        '-i qr-+ -j %s-fwaas-defau' % bname)]
-            if ip_version == 4:
-                v4filter_inst = apply_list[0].iptables_manager.ipv4['filter']
-                v4filter_inst.assert_has_calls(calls)
-            else:
-                v6filter_inst = apply_list[0].iptables_manager.ipv6['filter']
-                v6filter_inst.assert_has_calls(calls)
-
-    def test_create_firewall_with_rules(self):
-        self._setup_firewall_with_rules(self.firewall.create_firewall)
-
-    def test_create_firewall_with_rules_two_routers(self):
-        self._setup_firewall_with_rules(self.firewall.create_firewall,
-                                        router_count=2)
-
-    def test_update_firewall_with_rules(self):
-        self._setup_firewall_with_rules(self.firewall.update_firewall)
-
-    def test_delete_firewall(self):
-        apply_list = self._fake_apply_list()
-        firewall = self._fake_firewall_no_rule()
-        self.firewall.delete_firewall('legacy', apply_list, firewall)
-        ingress_chain = 'iv4%s' % firewall['id']
-        egress_chain = 'ov4%s' % firewall['id']
-        calls = [mock.call.remove_chain(ingress_chain),
-                 mock.call.remove_chain(egress_chain),
-                 mock.call.remove_chain('fwaas-default-policy')]
-        apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
-
-    def test_create_firewall_with_admin_down(self):
-        apply_list = self._fake_apply_list()
-        rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
-        firewall = self._fake_firewall_with_admin_down(rule_list)
-        self.firewall.create_firewall('legacy', apply_list, firewall)
-        calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
-                 mock.call.remove_chain('ov4fake-fw-uuid'),
-                 mock.call.remove_chain('fwaas-default-policy'),
-                 mock.call.add_chain('fwaas-default-policy'),
-                 mock.call.add_rule('fwaas-default-policy', '-j DROP')]
-        apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
-
-    def test_create_firewall_with_rules_dvr_snat(self):
-        self._setup_firewall_with_rules(self.firewall.create_firewall,
-            distributed=True, distributed_mode='dvr_snat')
-
-    def test_update_firewall_with_rules_dvr_snat(self):
-        self._setup_firewall_with_rules(self.firewall.update_firewall,
-            distributed=True, distributed_mode='dvr_snat')
-
-    def test_create_firewall_with_rules_dvr(self):
-        self._setup_firewall_with_rules(self.firewall.create_firewall,
-            distributed=True, distributed_mode='dvr')
-
-    def test_update_firewall_with_rules_dvr(self):
-        self._setup_firewall_with_rules(self.firewall.update_firewall,
-            distributed=True, distributed_mode='dvr')
diff --git a/neutron/tests/unit/services/firewall/drivers/varmour/__init__.py b/neutron/tests/unit/services/firewall/drivers/varmour/__init__.py
deleted file mode 100755 (executable)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/firewall/drivers/varmour/test_varmour_fwaas.py b/neutron/tests/unit/services/firewall/drivers/varmour/test_varmour_fwaas.py
deleted file mode 100644 (file)
index c2d996e..0000000
+++ /dev/null
@@ -1,292 +0,0 @@
-# Copyright 2013 vArmour Networks Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-import mock
-
-from neutron.agent.common import config as agent_config
-from neutron.agent import l3_agent
-from neutron.agent import l3_ha_agent
-from neutron.agent.linux import interface
-from neutron.common import config as base_config
-from neutron.common import constants as l3_constants
-from neutron.openstack.common import uuidutils
-from neutron.services.firewall.agents.varmour import varmour_router
-from neutron.services.firewall.agents.varmour import varmour_utils
-from neutron.services.firewall.drivers.varmour import varmour_fwaas
-from neutron.tests import base
-
-_uuid = uuidutils.generate_uuid
-HOSTNAME = 'myhost'
-FAKE_DIRECTOR = '1.1.1.1'
-
-
-class TestBasicRouterOperations(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestBasicRouterOperations, self).setUp()
-        self.conf = agent_config.setup_conf()
-        self.conf.register_opts(base_config.core_opts)
-        self.conf.register_opts(varmour_router.vArmourL3NATAgent.OPTS)
-        self.conf.register_opts(l3_ha_agent.OPTS)
-        agent_config.register_interface_driver_opts_helper(self.conf)
-        agent_config.register_use_namespaces_opts_helper(self.conf)
-        agent_config.register_root_helper(self.conf)
-        self.conf.register_opts(interface.OPTS)
-        self.conf.set_override('interface_driver',
-                               'neutron.agent.linux.interface.NullDriver')
-        self.conf.root_helper = 'sudo'
-        self.conf.state_path = ''
-
-        self.device_exists_p = mock.patch(
-            'neutron.agent.linux.ip_lib.device_exists')
-        self.device_exists = self.device_exists_p.start()
-
-        self.utils_exec_p = mock.patch(
-            'neutron.agent.linux.utils.execute')
-        self.utils_exec = self.utils_exec_p.start()
-
-        self.external_process_p = mock.patch(
-            'neutron.agent.linux.external_process.ProcessManager')
-        self.external_process = self.external_process_p.start()
-
-        self.makedirs_p = mock.patch('os.makedirs')
-        self.makedirs = self.makedirs_p.start()
-
-        self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
-        driver_cls = self.dvr_cls_p.start()
-        self.mock_driver = mock.MagicMock()
-        self.mock_driver.DEV_NAME_LEN = (
-            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
-        driver_cls.return_value = self.mock_driver
-
-        self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
-        ip_cls = self.ip_cls_p.start()
-        self.mock_ip = mock.MagicMock()
-        ip_cls.return_value = self.mock_ip
-
-        mock.patch('neutron.agent.l3_agent.L3PluginApi').start()
-
-        self.looping_call_p = mock.patch(
-            'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
-        self.looping_call_p.start()
-
-    def _create_router(self):
-        router = varmour_router.vArmourL3NATAgent(HOSTNAME, self.conf)
-        router.rest.server = FAKE_DIRECTOR
-        router.rest.user = 'varmour'
-        router.rest.passwd = 'varmour'
-        return router
-
-    def _create_fwaas(self):
-        fwaas = varmour_fwaas.vArmourFwaasDriver()
-        fwaas.rest.server = FAKE_DIRECTOR
-        fwaas.rest.user = 'varmour'
-        fwaas.rest.passwd = 'varmour'
-        return fwaas
-
-    def _del_all_internal_ports(self, router):
-        router[l3_constants.INTERFACE_KEY] = []
-
-    def _del_internal_ports(self, router, port_idx):
-        del router[l3_constants.INTERFACE_KEY][port_idx]
-
-    def _add_internal_ports(self, router, port_count=1):
-        self._del_all_internal_ports(router)
-        for i in range(port_count):
-            port = {'id': _uuid(),
-                    'network_id': _uuid(),
-                    'admin_state_up': True,
-                    'fixed_ips': [{'ip_address': '10.0.%s.4' % i,
-                                   'subnet_id': _uuid()}],
-                    'mac_address': 'ca:fe:de:ad:be:ef',
-                    'subnet': {'cidr': '10.0.%s.0/24' % i,
-                               'gateway_ip': '10.0.%s.1' % i}}
-            router[l3_constants.INTERFACE_KEY].append(port)
-
-    def _del_all_floating_ips(self, router):
-        router[l3_constants.FLOATINGIP_KEY] = []
-
-    def _del_floating_ips(self, router, port_idx):
-        del router[l3_constants.FLOATINGIP_KEY][port_idx]
-
-    def _add_floating_ips(self, router, port_count=1):
-        self._del_all_floating_ips(router)
-        for i in range(port_count):
-            fip = {'id': _uuid(),
-                   'port_id': router['gw_port']['id'],
-                   'floating_ip_address': '172.24.4.%s' % (100 + i),
-                   'fixed_ip_address': '10.0.0.%s' % (100 + i)}
-            router[l3_constants.FLOATINGIP_KEY].append(fip)
-
-    def _prepare_router_data(self, enable_snat=None):
-        router_id = _uuid()
-        ex_gw_port = {'id': _uuid(),
-                      'network_id': _uuid(),
-                      'fixed_ips': [{'ip_address': '172.24.4.2',
-                                     'subnet_id': _uuid()}],
-                      'subnet': {'cidr': '172.24.4.0/24',
-                                 'gateway_ip': '172.24.4.1'},
-                      'ip_cidr': '172.24.4.226/28'}
-        int_ports = []
-
-        router = {
-            'id': router_id,
-            l3_constants.INTERFACE_KEY: int_ports,
-            'routes': [],
-            'gw_port': ex_gw_port}
-        if enable_snat is not None:
-            router['enable_snat'] = enable_snat
-
-        ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
-                                 router=router)
-        return ri
-
-    def _add_firewall_rules(self, fw, rule_count=1):
-        rules = []
-        for i in range(rule_count):
-            rule = {'id': _uuid(),
-                    'enabled': True,
-                    'action': 'deny' if (i % 2 == 0) else 'allow',
-                    'ip_version': 4,
-                    'protocol': 'tcp',
-                    'source_ip_address': '10.0.0.%s/24' % (100 + i),
-                    'destination_port': '%s' % (100 + i)}
-            rules.append(rule)
-        fw['firewall_rule_list'] = rules
-
-    def _prepare_firewall_data(self):
-        fw = {'id': _uuid(),
-              'admin_state_up': True,
-              'firewall_rule_list': []}
-        return fw
-
-    def test_firewall_without_rule(self):
-        router = self._create_router()
-        fwaas = self._create_fwaas()
-        try:
-            router.rest.auth()
-        except Exception:
-            # skip the test; the vArmour firewall is not deployed
-            return
-
-        ri = self._prepare_router_data(enable_snat=True)
-        self._add_internal_ports(ri.router, port_count=1)
-        self._add_floating_ips(ri.router, port_count=1)
-        router._router_added(ri.router['id'], ri.router)
-
-        rl = [ri]
-
-        fw = self._prepare_firewall_data()
-        fwaas.create_firewall(rl, fw)
-
-        url = varmour_utils.REST_URL_CONF_POLICY
-        prefix = varmour_utils.get_firewall_object_prefix(ri, fw)
-
-        n = fwaas.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0)
-
-        fwaas.delete_firewall(rl, fw)
-        n = fwaas.rest.count_cfg_objs(url, prefix)
-        self.assertEqual(n, 0)
-
-        router._router_removed(ri.router['id'])
-
-    def test_firewall_with_rules(self):
-        router = self._create_router()
-        fwaas = self._create_fwaas()
-        try:
-            router.rest.auth()
-        except Exception:
-            # skip the test; the vArmour firewall is not deployed
-            return
-
-        ri = self._prepare_router_data(enable_snat=True)
-        self._add_internal_ports(ri.router, port_count=1)
-        self._add_floating_ips(ri.router, port_count=1)
-        router._router_added(ri.router['id'], ri.router)
-
-        rl = [ri]
-
-        fw = self._prepare_firewall_data()
-        self._add_firewall_rules(fw, 2)
-        fwaas.create_firewall(rl, fw)
-
-        prefix = varmour_utils.get_firewall_object_prefix(ri, fw)
-        pol_url = varmour_utils.REST_URL_CONF_POLICY
-        serv_url = varmour_utils.REST_URL_CONF_SERVICE
-        addr_url = varmour_utils.REST_URL_CONF_ADDR
-
-        # three policies are created per firewall rule
-        n = fwaas.rest.count_cfg_objs(pol_url, prefix)
-        self.assertEqual(n, 6)
-        n = fwaas.rest.count_cfg_objs(addr_url, prefix)
-        self.assertEqual(n, 2)
-        n = fwaas.rest.count_cfg_objs(serv_url, prefix)
-        self.assertEqual(n, 2)
-
-        fwaas.delete_firewall(rl, fw)
-        n = fwaas.rest.count_cfg_objs(pol_url, prefix)
-        self.assertEqual(n, 0)
-
-        router._router_removed(ri.router['id'])
-
-    def test_firewall_add_remove_rules(self):
-        router = self._create_router()
-        fwaas = self._create_fwaas()
-        try:
-            router.rest.auth()
-        except Exception:
-            # skip the test; the vArmour firewall is not deployed
-            return
-
-        ri = self._prepare_router_data(enable_snat=True)
-        self._add_internal_ports(ri.router, port_count=1)
-        self._add_floating_ips(ri.router, port_count=1)
-        router._router_added(ri.router['id'], ri.router)
-
-        rl = [ri]
-
-        fw = self._prepare_firewall_data()
-        self._add_firewall_rules(fw, 2)
-        fwaas.create_firewall(rl, fw)
-
-        prefix = varmour_utils.get_firewall_object_prefix(ri, fw)
-        pol_url = varmour_utils.REST_URL_CONF_POLICY
-        serv_url = varmour_utils.REST_URL_CONF_SERVICE
-        addr_url = varmour_utils.REST_URL_CONF_ADDR
-
-        # three policies are created per firewall rule
-        n = fwaas.rest.count_cfg_objs(pol_url, prefix)
-        self.assertEqual(n, 6)
-        n = fwaas.rest.count_cfg_objs(addr_url, prefix)
-        self.assertEqual(n, 2)
-        n = fwaas.rest.count_cfg_objs(serv_url, prefix)
-        self.assertEqual(n, 2)
-
-        self._add_firewall_rules(fw, 1)
-        fwaas.create_firewall(rl, fw)
-        n = fwaas.rest.count_cfg_objs(pol_url, prefix)
-        self.assertEqual(n, 3)
-        n = fwaas.rest.count_cfg_objs(addr_url, prefix)
-        self.assertEqual(n, 1)
-        n = fwaas.rest.count_cfg_objs(serv_url, prefix)
-        self.assertEqual(n, 1)
-
-        fwaas.delete_firewall(rl, fw)
-        n = fwaas.rest.count_cfg_objs(pol_url, prefix)
-        self.assertEqual(n, 0)
-
-        router._router_removed(ri.router['id'])
diff --git a/neutron/tests/unit/services/firewall/test_fwaas_plugin.py b/neutron/tests/unit/services/firewall/test_fwaas_plugin.py
deleted file mode 100644 (file)
index dd162f0..0000000
+++ /dev/null
@@ -1,403 +0,0 @@
-# Copyright 2013 Big Switch Networks, Inc.
-# All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License"); you may
-#  not use this file except in compliance with the License. You may obtain
-#  a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#  License for the specific language governing permissions and limitations
-#  under the License.
-
-
-import contextlib
-
-import mock
-from webob import exc
-
-from neutron import context
-from neutron.extensions import firewall
-from neutron.plugins.common import constants as const
-from neutron.services.firewall import fwaas_plugin
-from neutron.tests import base
-from neutron.tests.unit.db.firewall import test_db_firewall
-
-
-FW_PLUGIN_KLASS = (
-    "neutron.services.firewall.fwaas_plugin.FirewallPlugin"
-)
-
-
-class TestFirewallCallbacks(test_db_firewall.FirewallPluginDbTestCase):
-
-    def setUp(self):
-        super(TestFirewallCallbacks,
-              self).setUp(fw_plugin=FW_PLUGIN_KLASS)
-        self.callbacks = self.plugin.endpoints[0]
-
-    def test_set_firewall_status(self):
-        ctx = context.get_admin_context()
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            with self.firewall(
-                firewall_policy_id=fwp_id,
-                admin_state_up=test_db_firewall.ADMIN_STATE_UP
-            ) as fw:
-                fw_id = fw['firewall']['id']
-                res = self.callbacks.set_firewall_status(ctx, fw_id,
-                                                         const.ACTIVE,
-                                                         host='dummy')
-                fw_db = self.plugin.get_firewall(ctx, fw_id)
-                self.assertEqual(fw_db['status'], const.ACTIVE)
-                self.assertTrue(res)
-                res = self.callbacks.set_firewall_status(ctx, fw_id,
-                                                         const.ERROR)
-                fw_db = self.plugin.get_firewall(ctx, fw_id)
-                self.assertEqual(fw_db['status'], const.ERROR)
-                self.assertFalse(res)
-
-    def test_set_firewall_status_pending_delete(self):
-        ctx = context.get_admin_context()
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            with self.firewall(
-                firewall_policy_id=fwp_id,
-                admin_state_up=test_db_firewall.ADMIN_STATE_UP
-            ) as fw:
-                fw_id = fw['firewall']['id']
-                fw_db = self.plugin._get_firewall(ctx, fw_id)
-                fw_db['status'] = const.PENDING_DELETE
-                ctx.session.flush()
-                res = self.callbacks.set_firewall_status(ctx, fw_id,
-                                                         const.ACTIVE,
-                                                         host='dummy')
-                fw_db = self.plugin.get_firewall(ctx, fw_id)
-                self.assertEqual(fw_db['status'], const.PENDING_DELETE)
-                self.assertFalse(res)
-
-    def test_firewall_deleted(self):
-        ctx = context.get_admin_context()
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            with self.firewall(firewall_policy_id=fwp_id,
-                               admin_state_up=test_db_firewall.ADMIN_STATE_UP,
-                               do_delete=False) as fw:
-                fw_id = fw['firewall']['id']
-                with ctx.session.begin(subtransactions=True):
-                    fw_db = self.plugin._get_firewall(ctx, fw_id)
-                    fw_db['status'] = const.PENDING_DELETE
-                    ctx.session.flush()
-                    res = self.callbacks.firewall_deleted(ctx, fw_id,
-                                                          host='dummy')
-                    self.assertTrue(res)
-                    self.assertRaises(firewall.FirewallNotFound,
-                                      self.plugin.get_firewall,
-                                      ctx, fw_id)
-
-    def test_firewall_deleted_error(self):
-        ctx = context.get_admin_context()
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            with self.firewall(
-                firewall_policy_id=fwp_id,
-                admin_state_up=test_db_firewall.ADMIN_STATE_UP,
-            ) as fw:
-                fw_id = fw['firewall']['id']
-                res = self.callbacks.firewall_deleted(ctx, fw_id,
-                                                      host='dummy')
-                self.assertFalse(res)
-                fw_db = self.plugin._get_firewall(ctx, fw_id)
-                self.assertEqual(fw_db['status'], const.ERROR)
-
-    def test_get_firewall_for_tenant(self):
-        tenant_id = 'test-tenant'
-        ctx = context.Context('', tenant_id)
-        with contextlib.nested(self.firewall_rule(name='fwr1',
-                                                  tenant_id=tenant_id),
-                               self.firewall_rule(name='fwr2',
-                                                  tenant_id=tenant_id),
-                               self.firewall_rule(name='fwr3',
-                                                  tenant_id=tenant_id)
-                               ) as fr:
-            with self.firewall_policy(tenant_id=tenant_id) as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp_id)
-                res = req.get_response(self.ext_api)
-                attrs = self._get_test_firewall_attrs()
-                attrs['firewall_policy_id'] = fwp_id
-                with self.firewall(
-                        firewall_policy_id=fwp_id,
-                        tenant_id=tenant_id,
-                        admin_state_up=test_db_firewall.ADMIN_STATE_UP) as fw:
-                    fw_id = fw['firewall']['id']
-                    res = self.callbacks.get_firewalls_for_tenant(ctx,
-                                                                  host='dummy')
-                    fw_rules = (
-                        self.plugin._make_firewall_dict_with_rules(ctx,
-                                                                   fw_id)
-                    )
-                    self.assertEqual(res[0], fw_rules)
-                    self._compare_firewall_rule_lists(
-                        fwp_id, fr, res[0]['firewall_rule_list'])
-
-    def test_get_firewall_for_tenant_without_rules(self):
-        tenant_id = 'test-tenant'
-        ctx = context.Context('', tenant_id)
-        with self.firewall_policy(tenant_id=tenant_id) as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            attrs = self._get_test_firewall_attrs()
-            attrs['firewall_policy_id'] = fwp_id
-            with self.firewall(firewall_policy_id=fwp_id, tenant_id=tenant_id,
-                               admin_state_up=test_db_firewall.ADMIN_STATE_UP
-                               ) as fw:
-                fw_list = [fw['firewall']]
-                f = self.callbacks.get_firewalls_for_tenant_without_rules
-                res = f(ctx, host='dummy')
-                for fw in res:
-                    del fw['shared']
-                self.assertEqual(res, fw_list)
-
-
-class TestFirewallAgentApi(base.BaseTestCase):
-    def setUp(self):
-        super(TestFirewallAgentApi, self).setUp()
-
-        self.api = fwaas_plugin.FirewallAgentApi('topic', 'host')
-
-    def test_init(self):
-        self.assertEqual(self.api.client.target.topic, 'topic')
-        self.assertEqual(self.api.host, 'host')
-
-    def _call_test_helper(self, method_name):
-        with contextlib.nested(
-            mock.patch.object(self.api.client, 'cast'),
-            mock.patch.object(self.api.client, 'prepare'),
-        ) as (
-            rpc_mock, prepare_mock
-        ):
-            prepare_mock.return_value = self.api.client
-            getattr(self.api, method_name)(mock.sentinel.context, 'test')
-
-        prepare_args = {'fanout': True}
-        prepare_mock.assert_called_once_with(**prepare_args)
-
-        rpc_mock.assert_called_once_with(mock.sentinel.context, method_name,
-                                         firewall='test', host='host')
-
-    def test_create_firewall(self):
-        self._call_test_helper('create_firewall')
-
-    def test_update_firewall(self):
-        self._call_test_helper('update_firewall')
-
-    def test_delete_firewall(self):
-        self._call_test_helper('delete_firewall')
-
-
-class TestFirewallPluginBase(test_db_firewall.TestFirewallDBPlugin):
-
-    def setUp(self):
-        super(TestFirewallPluginBase, self).setUp(fw_plugin=FW_PLUGIN_KLASS)
-        self.callbacks = self.plugin.endpoints[0]
-
-    def test_create_second_firewall_not_permitted(self):
-        with self.firewall():
-            res = self._create_firewall(
-                None, 'firewall2', description='test',
-                firewall_policy_id=None, admin_state_up=True)
-            self.assertEqual(res.status_int, exc.HTTPConflict.code)
-
-    def test_create_firewall_admin_not_affected_by_other_tenant(self):
-        # Create fw with admin after creating fw with other tenant
-        with self.firewall(tenant_id='other-tenant') as fw1:
-            with self.firewall() as fw2:
-                self.assertEqual('other-tenant', fw1['firewall']['tenant_id'])
-                self.assertEqual(self._tenant_id, fw2['firewall']['tenant_id'])
-
-    def test_update_firewall(self):
-        ctx = context.get_admin_context()
-        name = "new_firewall1"
-        attrs = self._get_test_firewall_attrs(name)
-
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            attrs['firewall_policy_id'] = fwp_id
-            with self.firewall(
-                firewall_policy_id=fwp_id,
-                admin_state_up=test_db_firewall.ADMIN_STATE_UP
-            ) as firewall:
-                fw_id = firewall['firewall']['id']
-                res = self.callbacks.set_firewall_status(ctx, fw_id,
-                                                         const.ACTIVE)
-                data = {'firewall': {'name': name}}
-                req = self.new_update_request('firewalls', data, fw_id)
-                res = self.deserialize(self.fmt,
-                                       req.get_response(self.ext_api))
-                attrs = self._replace_firewall_status(attrs,
-                                                      const.PENDING_CREATE,
-                                                      const.PENDING_UPDATE)
-                for k, v in attrs.iteritems():
-                    self.assertEqual(res['firewall'][k], v)
-
-    def test_update_firewall_fails_when_firewall_pending(self):
-        name = "new_firewall1"
-        attrs = self._get_test_firewall_attrs(name)
-
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            attrs['firewall_policy_id'] = fwp_id
-            with self.firewall(
-                firewall_policy_id=fwp_id,
-                admin_state_up=test_db_firewall.ADMIN_STATE_UP
-            ) as firewall:
-                fw_id = firewall['firewall']['id']
-                data = {'firewall': {'name': name}}
-                req = self.new_update_request('firewalls', data, fw_id)
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, exc.HTTPConflict.code)
-
-    def test_update_firewall_shared_fails_for_non_admin(self):
-        ctx = context.get_admin_context()
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            with self.firewall(
-                firewall_policy_id=fwp_id,
-                admin_state_up=test_db_firewall.ADMIN_STATE_UP,
-                tenant_id='noadmin'
-            ) as firewall:
-                fw_id = firewall['firewall']['id']
-                self.callbacks.set_firewall_status(ctx, fw_id,
-                                                   const.ACTIVE)
-                data = {'firewall': {'shared': True}}
-                req = self.new_update_request(
-                    'firewalls', data, fw_id,
-                    context=context.Context('', 'noadmin'))
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, exc.HTTPForbidden.code)
-
-    def test_update_firewall_policy_fails_when_firewall_pending(self):
-        name = "new_firewall1"
-        attrs = self._get_test_firewall_attrs(name)
-
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            attrs['firewall_policy_id'] = fwp_id
-            with self.firewall(
-                firewall_policy_id=fwp_id,
-                admin_state_up=test_db_firewall.ADMIN_STATE_UP
-            ):
-                data = {'firewall_policy': {'name': name}}
-                req = self.new_update_request('firewall_policies',
-                                              data, fwp_id)
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, exc.HTTPConflict.code)
-
-    def test_update_firewall_rule_fails_when_firewall_pending(self):
-        with self.firewall_rule(name='fwr1') as fr:
-            with self.firewall_policy() as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                fr_id = fr['firewall_rule']['id']
-                fw_rule_ids = [fr_id]
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp_id)
-                req.get_response(self.ext_api)
-                with self.firewall(
-                    firewall_policy_id=fwp_id,
-                    admin_state_up=test_db_firewall.ADMIN_STATE_UP
-                ):
-                    data = {'firewall_rule': {'protocol': 'udp'}}
-                    req = self.new_update_request('firewall_rules',
-                                                  data, fr_id)
-                    res = req.get_response(self.ext_api)
-                    self.assertEqual(res.status_int, exc.HTTPConflict.code)
-
-    def test_delete_firewall(self):
-        ctx = context.get_admin_context()
-        attrs = self._get_test_firewall_attrs()
-        # stop the AgentRPC patch for this one to test pending states
-        self.agentapi_delf_p.stop()
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            attrs['firewall_policy_id'] = fwp_id
-            with self.firewall(
-                firewall_policy_id=fwp_id,
-                admin_state_up=test_db_firewall.ADMIN_STATE_UP
-            ) as firewall:
-                fw_id = firewall['firewall']['id']
-                attrs = self._replace_firewall_status(attrs,
-                                                      const.PENDING_CREATE,
-                                                      const.PENDING_DELETE)
-                req = self.new_delete_request('firewalls', fw_id)
-                req.get_response(self.ext_api)
-                fw_db = self.plugin._get_firewall(ctx, fw_id)
-                for k, v in attrs.iteritems():
-                    self.assertEqual(fw_db[k], v)
-            # clean up the pending firewall
-            self.plugin.endpoints[0].firewall_deleted(ctx, fw_id)
-
-    def test_delete_firewall_after_agent_delete(self):
-        ctx = context.get_admin_context()
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            with self.firewall(firewall_policy_id=fwp_id,
-                               do_delete=False) as fw:
-                fw_id = fw['firewall']['id']
-                req = self.new_delete_request('firewalls', fw_id)
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, exc.HTTPNoContent.code)
-                self.assertRaises(firewall.FirewallNotFound,
-                                  self.plugin.get_firewall,
-                                  ctx, fw_id)
-
-    def test_make_firewall_dict_with_in_place_rules(self):
-        ctx = context.get_admin_context()
-        with contextlib.nested(self.firewall_rule(name='fwr1'),
-                               self.firewall_rule(name='fwr2'),
-                               self.firewall_rule(name='fwr3')) as fr:
-            with self.firewall_policy() as fwp:
-                fwp_id = fwp['firewall_policy']['id']
-                fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
-                data = {'firewall_policy':
-                        {'firewall_rules': fw_rule_ids}}
-                req = self.new_update_request('firewall_policies', data,
-                                              fwp_id)
-                req.get_response(self.ext_api)
-                attrs = self._get_test_firewall_attrs()
-                attrs['firewall_policy_id'] = fwp_id
-                with self.firewall(
-                    firewall_policy_id=fwp_id,
-                    admin_state_up=test_db_firewall.ADMIN_STATE_UP
-                ) as fw:
-                    fw_id = fw['firewall']['id']
-                    fw_rules = (
-                        self.plugin._make_firewall_dict_with_rules(ctx,
-                                                                   fw_id)
-                    )
-                    self.assertEqual(fw_rules['id'], fw_id)
-                    self._compare_firewall_rule_lists(
-                        fwp_id, fr, fw_rules['firewall_rule_list'])
-
-    def test_make_firewall_dict_with_in_place_rules_no_policy(self):
-        ctx = context.get_admin_context()
-        with self.firewall() as fw:
-            fw_id = fw['firewall']['id']
-            fw_rules = self.plugin._make_firewall_dict_with_rules(ctx, fw_id)
-            self.assertEqual(fw_rules['firewall_rule_list'], [])
-
-    def test_list_firewalls(self):
-        with self.firewall_policy() as fwp:
-            fwp_id = fwp['firewall_policy']['id']
-            with self.firewall(name='fw1', firewall_policy_id=fwp_id,
-                               description='fw') as fwalls:
-                self._test_list_resources('firewall', [fwalls],
-                                          query_params='description=fw')
diff --git a/neutron/tests/unit/services/loadbalancer/agent/__init__.py b/neutron/tests/unit/services/loadbalancer/agent/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/loadbalancer/agent/test_agent.py b/neutron/tests/unit/services/loadbalancer/agent/test_agent.py
deleted file mode 100644 (file)
index ae1afe3..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import mock
-from oslo.config import cfg
-
-from neutron.services.loadbalancer.agent import agent
-from neutron.tests import base
-
-
-class TestLbaasService(base.BaseTestCase):
-    def test_start(self):
-        with mock.patch.object(
-            agent.n_rpc.Service, 'start'
-        ) as mock_start:
-
-            mgr = mock.Mock()
-            cfg.CONF.periodic_interval = mock.Mock(return_value=10)
-            agent_service = agent.LbaasAgentService('host', 'topic', mgr)
-            agent_service.start()
-
-            self.assertTrue(mock_start.called)
-
-    def test_main(self):
-        logging_str = 'neutron.agent.common.config.setup_logging'
-        with contextlib.nested(
-            mock.patch(logging_str),
-            mock.patch.object(agent.service, 'launch'),
-            mock.patch('sys.argv'),
-            mock.patch.object(agent.manager, 'LbaasAgentManager'),
-            mock.patch.object(cfg.CONF, 'register_opts')
-        ) as (mock_logging, mock_launch, sys_argv, mgr_cls, ro):
-            agent.main()
-
-            mock_launch.assert_called_once_with(mock.ANY)
diff --git a/neutron/tests/unit/services/loadbalancer/agent/test_agent_manager.py b/neutron/tests/unit/services/loadbalancer/agent/test_agent_manager.py
deleted file mode 100644 (file)
index 8f63b62..0000000
+++ /dev/null
@@ -1,370 +0,0 @@
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-import mock
-
-from neutron.plugins.common import constants
-from neutron.services.loadbalancer.agent import agent_manager as manager
-from neutron.tests import base
-
-
-class TestManager(base.BaseTestCase):
-    def setUp(self):
-        super(TestManager, self).setUp()
-
-        mock_conf = mock.Mock()
-        mock_conf.device_driver = ['devdriver']
-
-        self.mock_importer = mock.patch.object(manager, 'importutils').start()
-
-        rpc_mock_cls = mock.patch(
-            'neutron.services.loadbalancer.agent.agent_api.LbaasAgentApi'
-        ).start()
-
-        # disable setting up periodic state reporting
-        mock_conf.AGENT.report_interval = 0
-
-        self.mgr = manager.LbaasAgentManager(mock_conf)
-        self.rpc_mock = rpc_mock_cls.return_value
-        self.log = mock.patch.object(manager, 'LOG').start()
-        self.driver_mock = mock.Mock()
-        self.mgr.device_drivers = {'devdriver': self.driver_mock}
-        self.mgr.instance_mapping = {'1': 'devdriver', '2': 'devdriver'}
-        self.mgr.needs_resync = False
-
-    def test_initialize_service_hook(self):
-        with mock.patch.object(self.mgr, 'sync_state') as sync:
-            self.mgr.initialize_service_hook(mock.Mock())
-            sync.assert_called_once_with()
-
-    def test_periodic_resync_needs_sync(self):
-        with mock.patch.object(self.mgr, 'sync_state') as sync:
-            self.mgr.needs_resync = True
-            self.mgr.periodic_resync(mock.Mock())
-            sync.assert_called_once_with()
-
-    def test_periodic_resync_no_sync(self):
-        with mock.patch.object(self.mgr, 'sync_state') as sync:
-            self.mgr.needs_resync = False
-            self.mgr.periodic_resync(mock.Mock())
-            self.assertFalse(sync.called)
-
-    def test_collect_stats(self):
-        self.mgr.collect_stats(mock.Mock())
-        self.rpc_mock.update_pool_stats.assert_has_calls([
-            mock.call('1', mock.ANY),
-            mock.call('2', mock.ANY)
-        ], any_order=True)
-
-    def test_collect_stats_exception(self):
-        self.driver_mock.get_stats.side_effect = Exception
-
-        self.mgr.collect_stats(mock.Mock())
-
-        self.assertFalse(self.rpc_mock.called)
-        self.assertTrue(self.mgr.needs_resync)
-        self.assertTrue(self.log.exception.called)
-
-    def _sync_state_helper(self, ready, reloaded, destroyed):
-        with contextlib.nested(
-            mock.patch.object(self.mgr, '_reload_pool'),
-            mock.patch.object(self.mgr, '_destroy_pool')
-        ) as (reload, destroy):
-
-            self.rpc_mock.get_ready_devices.return_value = ready
-
-            self.mgr.sync_state()
-
-            self.assertEqual(len(reloaded), len(reload.mock_calls))
-            self.assertEqual(len(destroyed), len(destroy.mock_calls))
-
-            reload.assert_has_calls([mock.call(i) for i in reloaded],
-                                    any_order=True)
-            destroy.assert_has_calls([mock.call(i) for i in destroyed],
-                                     any_order=True)
-            self.assertFalse(self.mgr.needs_resync)
-
-    def test_sync_state_all_known(self):
-        self._sync_state_helper(['1', '2'], ['1', '2'], [])
-
-    def test_sync_state_all_unknown(self):
-        self.mgr.instance_mapping = {}
-        self._sync_state_helper(['1', '2'], ['1', '2'], [])
-
-    def test_sync_state_destroy_all(self):
-        self._sync_state_helper([], [], ['1', '2'])
-
-    def test_sync_state_both(self):
-        self.mgr.instance_mapping = {'1': 'devdriver'}
-        self._sync_state_helper(['2'], ['2'], ['1'])
-
-    def test_sync_state_exception(self):
-        self.rpc_mock.get_ready_devices.side_effect = Exception
-
-        self.mgr.sync_state()
-
-        self.assertTrue(self.log.exception.called)
-        self.assertTrue(self.mgr.needs_resync)
-
-    def test_reload_pool(self):
-        config = {'driver': 'devdriver'}
-        self.rpc_mock.get_logical_device.return_value = config
-        pool_id = 'new_id'
-        self.assertNotIn(pool_id, self.mgr.instance_mapping)
-
-        self.mgr._reload_pool(pool_id)
-
-        self.driver_mock.deploy_instance.assert_called_once_with(config)
-        self.assertIn(pool_id, self.mgr.instance_mapping)
-        self.rpc_mock.pool_deployed.assert_called_once_with(pool_id)
-
-    def test_reload_pool_driver_not_found(self):
-        config = {'driver': 'unknown_driver'}
-        self.rpc_mock.get_logical_device.return_value = config
-        pool_id = 'new_id'
-        self.assertNotIn(pool_id, self.mgr.instance_mapping)
-
-        self.mgr._reload_pool(pool_id)
-
-        self.assertTrue(self.log.error.called)
-        self.assertFalse(self.driver_mock.deploy_instance.called)
-        self.assertNotIn(pool_id, self.mgr.instance_mapping)
-        self.assertFalse(self.rpc_mock.pool_deployed.called)
-
-    def test_reload_pool_exception_on_driver(self):
-        config = {'driver': 'devdriver'}
-        self.rpc_mock.get_logical_device.return_value = config
-        self.driver_mock.deploy_instance.side_effect = Exception
-        pool_id = 'new_id'
-        self.assertNotIn(pool_id, self.mgr.instance_mapping)
-
-        self.mgr._reload_pool(pool_id)
-
-        self.driver_mock.deploy_instance.assert_called_once_with(config)
-        self.assertNotIn(pool_id, self.mgr.instance_mapping)
-        self.assertFalse(self.rpc_mock.pool_deployed.called)
-        self.assertTrue(self.log.exception.called)
-        self.assertTrue(self.mgr.needs_resync)
-
-    def test_destroy_pool(self):
-        pool_id = '1'
-        self.assertIn(pool_id, self.mgr.instance_mapping)
-
-        self.mgr._destroy_pool(pool_id)
-
-        self.driver_mock.undeploy_instance.assert_called_once_with(pool_id)
-        self.assertNotIn(pool_id, self.mgr.instance_mapping)
-        self.rpc_mock.pool_destroyed.assert_called_once_with(pool_id)
-        self.assertFalse(self.mgr.needs_resync)
-
-    def test_destroy_pool_exception_on_driver(self):
-        pool_id = '1'
-        self.assertIn(pool_id, self.mgr.instance_mapping)
-        self.driver_mock.undeploy_instance.side_effect = Exception
-
-        self.mgr._destroy_pool(pool_id)
-
-        self.driver_mock.undeploy_instance.assert_called_once_with(pool_id)
-        self.assertIn(pool_id, self.mgr.instance_mapping)
-        self.assertFalse(self.rpc_mock.pool_destroyed.called)
-        self.assertTrue(self.log.exception.called)
-        self.assertTrue(self.mgr.needs_resync)
-
-    def test_get_driver_unknown_device(self):
-        self.assertRaises(manager.DeviceNotFoundOnAgent,
-                          self.mgr._get_driver, 'unknown')
-
-    def test_remove_orphans(self):
-        self.mgr.remove_orphans()
-        orphans = {'1': "Fake", '2': "Fake"}
-        self.driver_mock.remove_orphans.assert_called_once_with(orphans.keys())
-
-    def test_create_vip(self):
-        vip = {'id': 'id1', 'pool_id': '1'}
-        self.mgr.create_vip(mock.Mock(), vip)
-        self.driver_mock.create_vip.assert_called_once_with(vip)
-        self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
-                                                            constants.ACTIVE)
-
-    def test_create_vip_failed(self):
-        vip = {'id': 'id1', 'pool_id': '1'}
-        self.driver_mock.create_vip.side_effect = Exception
-        self.mgr.create_vip(mock.Mock(), vip)
-        self.driver_mock.create_vip.assert_called_once_with(vip)
-        self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
-                                                            constants.ERROR)
-
-    def test_update_vip(self):
-        old_vip = {'id': 'id1'}
-        vip = {'id': 'id1', 'pool_id': '1'}
-        self.mgr.update_vip(mock.Mock(), old_vip, vip)
-        self.driver_mock.update_vip.assert_called_once_with(old_vip, vip)
-        self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
-                                                            constants.ACTIVE)
-
-    def test_update_vip_failed(self):
-        old_vip = {'id': 'id1'}
-        vip = {'id': 'id1', 'pool_id': '1'}
-        self.driver_mock.update_vip.side_effect = Exception
-        self.mgr.update_vip(mock.Mock(), old_vip, vip)
-        self.driver_mock.update_vip.assert_called_once_with(old_vip, vip)
-        self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
-                                                            constants.ERROR)
-
-    def test_delete_vip(self):
-        vip = {'id': 'id1', 'pool_id': '1'}
-        self.mgr.delete_vip(mock.Mock(), vip)
-        self.driver_mock.delete_vip.assert_called_once_with(vip)
-
-    def test_create_pool(self):
-        pool = {'id': 'id1'}
-        self.assertNotIn(pool['id'], self.mgr.instance_mapping)
-        self.mgr.create_pool(mock.Mock(), pool, 'devdriver')
-        self.driver_mock.create_pool.assert_called_once_with(pool)
-        self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
-                                                            constants.ACTIVE)
-        self.assertIn(pool['id'], self.mgr.instance_mapping)
-
-    def test_create_pool_failed(self):
-        pool = {'id': 'id1'}
-        self.assertNotIn(pool['id'], self.mgr.instance_mapping)
-        self.driver_mock.create_pool.side_effect = Exception
-        self.mgr.create_pool(mock.Mock(), pool, 'devdriver')
-        self.driver_mock.create_pool.assert_called_once_with(pool)
-        self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
-                                                            constants.ERROR)
-        self.assertNotIn(pool['id'], self.mgr.instance_mapping)
-
-    def test_update_pool(self):
-        old_pool = {'id': '1'}
-        pool = {'id': '1'}
-        self.mgr.update_pool(mock.Mock(), old_pool, pool)
-        self.driver_mock.update_pool.assert_called_once_with(old_pool, pool)
-        self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
-                                                            constants.ACTIVE)
-
-    def test_update_pool_failed(self):
-        old_pool = {'id': '1'}
-        pool = {'id': '1'}
-        self.driver_mock.update_pool.side_effect = Exception
-        self.mgr.update_pool(mock.Mock(), old_pool, pool)
-        self.driver_mock.update_pool.assert_called_once_with(old_pool, pool)
-        self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
-                                                            constants.ERROR)
-
-    def test_delete_pool(self):
-        pool = {'id': '1'}
-        self.assertIn(pool['id'], self.mgr.instance_mapping)
-        self.mgr.delete_pool(mock.Mock(), pool)
-        self.driver_mock.delete_pool.assert_called_once_with(pool)
-        self.assertNotIn(pool['id'], self.mgr.instance_mapping)
-
-    def test_create_member(self):
-        member = {'id': 'id1', 'pool_id': '1'}
-        self.mgr.create_member(mock.Mock(), member)
-        self.driver_mock.create_member.assert_called_once_with(member)
-        self.rpc_mock.update_status.assert_called_once_with('member',
-                                                            member['id'],
-                                                            constants.ACTIVE)
-
-    def test_create_member_failed(self):
-        member = {'id': 'id1', 'pool_id': '1'}
-        self.driver_mock.create_member.side_effect = Exception
-        self.mgr.create_member(mock.Mock(), member)
-        self.driver_mock.create_member.assert_called_once_with(member)
-        self.rpc_mock.update_status.assert_called_once_with('member',
-                                                            member['id'],
-                                                            constants.ERROR)
-
-    def test_update_member(self):
-        old_member = {'id': 'id1'}
-        member = {'id': 'id1', 'pool_id': '1'}
-        self.mgr.update_member(mock.Mock(), old_member, member)
-        self.driver_mock.update_member.assert_called_once_with(old_member,
-                                                               member)
-        self.rpc_mock.update_status.assert_called_once_with('member',
-                                                            member['id'],
-                                                            constants.ACTIVE)
-
-    def test_update_member_failed(self):
-        old_member = {'id': 'id1'}
-        member = {'id': 'id1', 'pool_id': '1'}
-        self.driver_mock.update_member.side_effect = Exception
-        self.mgr.update_member(mock.Mock(), old_member, member)
-        self.driver_mock.update_member.assert_called_once_with(old_member,
-                                                               member)
-        self.rpc_mock.update_status.assert_called_once_with('member',
-                                                            member['id'],
-                                                            constants.ERROR)
-
-    def test_delete_member(self):
-        member = {'id': 'id1', 'pool_id': '1'}
-        self.mgr.delete_member(mock.Mock(), member)
-        self.driver_mock.delete_member.assert_called_once_with(member)
-
-    def test_create_monitor(self):
-        monitor = {'id': 'id1'}
-        assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
-        self.mgr.create_pool_health_monitor(mock.Mock(), monitor, '1')
-        self.driver_mock.create_pool_health_monitor.assert_called_once_with(
-            monitor, '1')
-        self.rpc_mock.update_status.assert_called_once_with('health_monitor',
-                                                            assoc_id,
-                                                            constants.ACTIVE)
-
-    def test_create_monitor_failed(self):
-        monitor = {'id': 'id1'}
-        assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
-        self.driver_mock.create_pool_health_monitor.side_effect = Exception
-        self.mgr.create_pool_health_monitor(mock.Mock(), monitor, '1')
-        self.driver_mock.create_pool_health_monitor.assert_called_once_with(
-            monitor, '1')
-        self.rpc_mock.update_status.assert_called_once_with('health_monitor',
-                                                            assoc_id,
-                                                            constants.ERROR)
-
-    def test_update_monitor(self):
-        monitor = {'id': 'id1'}
-        assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
-        self.mgr.update_pool_health_monitor(mock.Mock(), monitor, monitor, '1')
-        self.driver_mock.update_pool_health_monitor.assert_called_once_with(
-            monitor, monitor, '1')
-        self.rpc_mock.update_status.assert_called_once_with('health_monitor',
-                                                            assoc_id,
-                                                            constants.ACTIVE)
-
-    def test_update_monitor_failed(self):
-        monitor = {'id': 'id1'}
-        assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
-        self.driver_mock.update_pool_health_monitor.side_effect = Exception
-        self.mgr.update_pool_health_monitor(mock.Mock(), monitor, monitor, '1')
-        self.driver_mock.update_pool_health_monitor.assert_called_once_with(
-            monitor, monitor, '1')
-        self.rpc_mock.update_status.assert_called_once_with('health_monitor',
-                                                            assoc_id,
-                                                            constants.ERROR)
-
-    def test_delete_monitor(self):
-        monitor = {'id': 'id1'}
-        self.mgr.delete_pool_health_monitor(mock.Mock(), monitor, '1')
-        self.driver_mock.delete_pool_health_monitor.assert_called_once_with(
-            monitor, '1')
-
-    def test_agent_disabled(self):
-        payload = {'admin_state_up': False}
-        self.mgr.agent_updated(mock.Mock(), payload)
-        self.driver_mock.undeploy_instance.assert_has_calls(
-            [mock.call('1'), mock.call('2')], any_order=True)
diff --git a/neutron/tests/unit/services/loadbalancer/agent/test_api.py b/neutron/tests/unit/services/loadbalancer/agent/test_api.py
deleted file mode 100644 (file)
index 332250a..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import copy
-import mock
-
-from neutron.services.loadbalancer.agent import agent_api as api
-from neutron.tests import base
-
-
-class TestApiCache(base.BaseTestCase):
-    def setUp(self):
-        super(TestApiCache, self).setUp()
-
-        self.api = api.LbaasAgentApi('topic', mock.sentinel.context, 'host')
-
-    def test_init(self):
-        self.assertEqual(self.api.host, 'host')
-        self.assertEqual(self.api.context, mock.sentinel.context)
-
-    def _test_method(self, method, **kwargs):
-        add_host = ('get_ready_devices', 'plug_vip_port', 'unplug_vip_port',
-                    'update_pool_stats')
-        expected_kwargs = copy.copy(kwargs)
-        if method in add_host:
-            expected_kwargs['host'] = self.api.host
-
-        with contextlib.nested(
-            mock.patch.object(self.api.client, 'call'),
-            mock.patch.object(self.api.client, 'prepare'),
-        ) as (
-            rpc_mock, prepare_mock
-        ):
-            prepare_mock.return_value = self.api.client
-            rpc_mock.return_value = 'foo'
-            rv = getattr(self.api, method)(**kwargs)
-
-        self.assertEqual(rv, 'foo')
-
-        prepare_args = {}
-        prepare_mock.assert_called_once_with(**prepare_args)
-
-        rpc_mock.assert_called_once_with(mock.sentinel.context, method,
-                                         **expected_kwargs)
-
-    def test_get_ready_devices(self):
-        self._test_method('get_ready_devices')
-
-    def test_get_logical_device(self):
-        self._test_method('get_logical_device', pool_id='pool_id')
-
-    def test_pool_destroyed(self):
-        self._test_method('pool_destroyed', pool_id='pool_id')
-
-    def test_pool_deployed(self):
-        self._test_method('pool_deployed', pool_id='pool_id')
-
-    def test_update_status(self):
-        self._test_method('update_status', obj_type='type', obj_id='id',
-                          status='status')
-
-    def test_plug_vip_port(self):
-        self._test_method('plug_vip_port', port_id='port_id')
-
-    def test_unplug_vip_port(self):
-        self._test_method('unplug_vip_port', port_id='port_id')
-
-    def test_update_pool_stats(self):
-        self._test_method('update_pool_stats', pool_id='id', stats='stats')
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/__init__.py b/neutron/tests/unit/services/loadbalancer/drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/a10networks/__init__.py b/neutron/tests/unit/services/loadbalancer/drivers/a10networks/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/a10networks/test_driver_v1.py b/neutron/tests/unit/services/loadbalancer/drivers/a10networks/test_driver_v1.py
deleted file mode 100644 (file)
index 7517c64..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2014, Doug Wiegley (dougwig), A10 Networks
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-import mock
-
-from neutron import context
-from neutron.db.loadbalancer import loadbalancer_db as lb_db
-with mock.patch.dict(sys.modules, {'a10_neutron_lbaas': mock.Mock()}):
-    from neutron.services.loadbalancer.drivers.a10networks import driver_v1
-from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
-
-
-def fake_model(id):
-    return {
-        'id': id,
-        'tenant_id': "tennant-was-a-great-doctor"
-    }
-
-
-def fake_member(id):
-    return {
-        'id': id,
-        'tenant_id': "vippyvip",
-        'address': '1.1.1.1'
-    }
-
-
-class TestA10ThunderDriver(test_db_loadbalancer.LoadBalancerPluginDbTestCase):
-
-    def setUp(self):
-        super(TestA10ThunderDriver, self).setUp()
-        self.context = context.get_admin_context()
-        self.plugin = mock.Mock()
-        self.driver = driver_v1.ThunderDriver(self.plugin)
-        self.driver.a10 = mock.Mock()
-        self.m = fake_model('p1')
-
-    def test__hm_binding_count(self):
-        n = self.driver._hm_binding_count(self.context, 'hm01')
-        self.assertEqual(n, 0)
-
-    def test__member_count(self):
-        self.m = fake_member('mem1')
-        n = self.driver._member_count(self.context, self.m)
-        self.assertEqual(n, 0)
-
-    def test__member_get_ip(self):
-        self.m = fake_member('mem1')
-        z = self.driver._member_get_ip(self.context, self.m, False)
-        self.assertEqual(z, '1.1.1.1')
-        z = self.driver._member_get_ip(self.context, self.m, True)
-        self.assertEqual(z, '1.1.1.1')
-
-    def test__pool_get_hm(self):
-        self.driver._pool_get_hm(self.context, 'hm01')
-        self.plugin.get_health_monitor.assert_called_once_with(
-            self.context, 'hm01')
-
-    def test__pool_get_tenant_id(self):
-        z = self.driver._pool_get_tenant_id(self.context, 'pool1')
-        self.assertEqual(z, '')
-
-    def test__pool_get_vip_id(self):
-        z = self.driver._pool_get_vip_id(self.context, 'pool1')
-        self.assertEqual(z, '')
-
-    def test__pool_total(self):
-        n = self.driver._pool_total(self.context,
-                                    tenant_id='whatareyoudoingdave')
-        self.assertEqual(n, 0)
-
-    def test__active(self):
-        self.driver._active(self.context, 'vip', 'vip1')
-        self.plugin.update_status.assert_called_once_with(
-            self.context, lb_db.Vip, 'vip1', 'ACTIVE')
-
-    def test__failed(self):
-        self.driver._failed(self.context, 'vip', 'vip2-1-2')
-        self.plugin.update_status.assert_called_once_with(
-            self.context, lb_db.Vip, 'vip2-1-2', 'ERROR')
-
-    def test__db_delete(self):
-        self.driver._db_delete(self.context, 'pool', 'myid0101')
-        self.plugin._delete_db_pool.assert_called_once_with(
-            self.context, 'myid0101')
-
-    def test__hm_active(self):
-        self.driver._hm_active(self.context, 'hm01', 'pool1')
-        self.plugin.update_pool_health_monitor.assert_called_once_with(
-            self.context, 'hm01', 'pool1', 'ACTIVE')
-
-    def test__hm_failed(self):
-        self.driver._hm_failed(self.context, 'hm01', 'pool1')
-        self.plugin.update_pool_health_monitor.assert_called_once_with(
-            self.context, 'hm01', 'pool1', 'ERROR')
-
-    def test__hm_db_delete(self):
-        self.driver._hm_db_delete(self.context, 'hm01', 'pool2')
-        self.plugin._delete_db_pool_health_monitor.assert_called_once_with(
-            self.context, 'hm01', 'pool2')
-
-    def test_create_vip(self):
-        self.driver.create_vip(self.context, self.m)
-        self.driver.a10.vip.create.assert_called_once_with(
-            self.context, self.m)
-
-    def test_update_vip(self):
-        self.driver.update_vip(self.context, self.m, self.m)
-        self.driver.a10.vip.update.assert_called_once_with(
-            self.context, self.m, self.m)
-
-    def test_delete_vip(self):
-        self.driver.delete_vip(self.context, self.m)
-        self.driver.a10.vip.delete.assert_called_once_with(
-            self.context, self.m)
-
-    def test_create_pool(self):
-        self.driver.create_pool(self.context, self.m)
-        self.driver.a10.pool.create.assert_called_once_with(
-            self.context, self.m)
-
-    def test_update_pool(self):
-        self.driver.update_pool(self.context, self.m, self.m)
-        self.driver.a10.pool.update.assert_called_once_with(
-            self.context, self.m, self.m)
-
-    def test_delete_pool(self):
-        self.driver.delete_pool(self.context, self.m)
-        self.driver.a10.pool.delete.assert_called_once_with(
-            self.context, self.m)
-
-    def test_stats(self):
-        self.driver.stats(self.context, self.m['id'])
-        self.driver.a10.pool.stats.assert_called_once_with(
-            self.context, self.m['id'])
-
-    def test_create_member(self):
-        self.driver.create_member(self.context, self.m)
-        self.driver.a10.member.create.assert_called_once_with(
-            self.context, self.m)
-
-    def test_update_member(self):
-        self.driver.update_member(self.context, self.m, self.m)
-        self.driver.a10.member.update.assert_called_once_with(
-            self.context, self.m, self.m)
-
-    def test_delete_member(self):
-        self.driver.delete_member(self.context, self.m)
-        self.driver.a10.member.delete.assert_called_once_with(
-            self.context, self.m)
-
-    def test_update_pool_health_monitor(self):
-        self.driver.update_pool_health_monitor(self.context, self.m, self.m,
-                                               'pool1')
-        self.driver.a10.hm.update.assert_called_once_with(
-            self.context, self.m, self.m, 'pool1')
-
-    def test_create_pool_health_monitor(self):
-        self.driver.create_pool_health_monitor(self.context, self.m, 'pool1')
-        self.driver.a10.hm.create.assert_called_once_with(
-            self.context, self.m, 'pool1')
-
-    def test_delete_pool_health_monitor(self):
-        self.driver.delete_pool_health_monitor(self.context, self.m, 'pool1')
-        self.driver.a10.hm.delete.assert_called_once_with(
-            self.context, self.m, 'pool1')
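
Every assertion in TestA10ThunderDriver checks the same thin delegation: the plugin-facing driver method forwards its arguments unchanged to the matching handler on driver.a10 (vip, pool, member, hm). A minimal sketch of that shape, with the class name and backend attribute assumed from the tests rather than taken from driver_v1 itself:

class DelegatingLbaasDriver(object):
    """Forwards LBaaS operations to a vendor backend object unchanged."""

    def __init__(self, backend):
        # backend bundles per-object handlers, e.g. backend.vip, backend.pool
        self.a10 = backend

    def create_vip(self, context, vip):
        self.a10.vip.create(context, vip)

    def update_vip(self, context, old_vip, vip):
        self.a10.vip.update(context, old_vip, vip)

    def delete_vip(self, context, vip):
        self.a10.vip.delete(context, vip)

    def stats(self, context, pool_id):
        return self.a10.pool.stats(context, pool_id)
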
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/embrane/__init__.py b/neutron/tests/unit/services/loadbalancer/drivers/embrane/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/embrane/test_embrane_defaults.py b/neutron/tests/unit/services/loadbalancer/drivers/embrane/test_embrane_defaults.py
deleted file mode 100644 (file)
index d061adc..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2013 Embrane, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo.config import cfg
-
-from neutron.services.loadbalancer.drivers.embrane import config  # noqa
-from neutron.tests import base
-
-
-class ConfigurationTest(base.BaseTestCase):
-
-    def test_defaults(self):
-        self.assertEqual('small', cfg.CONF.heleoslb.lb_flavor)
-        self.assertEqual(60, cfg.CONF.heleoslb.sync_interval)
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/embrane/test_plugin_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/embrane/test_plugin_driver.py
deleted file mode 100644 (file)
index 00771d7..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2013 Embrane, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-import mock
-from oslo.config import cfg
-from oslo.db import exception as n_exc
-
-from neutron import context
-from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
-
-HELEOSAPIMOCK = mock.Mock()
-sys.modules["heleosapi"] = HELEOSAPIMOCK
-from neutron.services.loadbalancer.drivers.embrane import config  # noqa
-from neutron.services.loadbalancer.drivers.embrane import constants as h_con
-from neutron.services.loadbalancer.drivers.embrane import db as h_db
-# Stop the mock from persisting indefinitely in the global modules space
-del sys.modules["heleosapi"]
-
-EMBRANE_PROVIDER = ('LOADBALANCER:lbaas:neutron.services.'
-                    'loadbalancer.drivers.embrane.driver.'
-                    'EmbraneLbaas:default')
-
-
-class TestLoadBalancerPluginBase(
-        test_db_loadbalancer.LoadBalancerPluginDbTestCase):
-
-    def setUp(self):
-        cfg.CONF.set_override('admin_password', "admin123", 'heleoslb')
-        cfg.CONF.set_override('sync_interval', 0, 'heleoslb')
-        mock.patch.dict(sys.modules, {'heleosapi': HELEOSAPIMOCK}).start()
-        super(TestLoadBalancerPluginBase, self).setUp(
-            lbaas_provider=EMBRANE_PROVIDER)
-        self.driver = self.plugin.drivers['lbaas']
-        # prevent module mock from saving calls between tests
-        self.addCleanup(HELEOSAPIMOCK.reset_mock)
-
-
-class TestLoadBalancerPlugin(test_db_loadbalancer.TestLoadBalancer,
-                             TestLoadBalancerPluginBase):
-
-    def test_create_vip_with_session_persistence_with_app_cookie(self):
-        self.skip("App cookie persistence not supported.")
-
-    def test_pool_port(self):
-        with self.port() as port:
-            with self.pool() as pool:
-                h_db.add_pool_port(context.get_admin_context(),
-                                   pool['pool']['id'], port['port']['id'])
-                pool_port = h_db.get_pool_port(context.get_admin_context(),
-                                               pool['pool']['id'])
-                self.assertIsNotNone(pool_port)
-            pool_port = h_db.get_pool_port(context.get_admin_context(),
-                                           pool['pool']['id'])
-            self.assertIsNone(pool_port)
-
-    def test_create_pool_port_no_port(self):
-        with self.pool() as pool:
-            self.assertRaises(n_exc.DBError,
-                              h_db.add_pool_port,
-                              context.get_admin_context(),
-                              pool['pool']['id'], None)
-
-    def test_lb_operations_handlers(self):
-        h = self.driver._dispatcher.handlers
-        self.assertIsNotNone(h[h_con.Events.ADD_OR_UPDATE_MEMBER])
-        self.assertIsNotNone(h[h_con.Events.CREATE_VIP])
-        self.assertIsNotNone(h[h_con.Events.DELETE_MEMBER])
-        self.assertIsNotNone(h[h_con.Events.DELETE_VIP])
-        self.assertIsNotNone(h[h_con.Events.POLL_GRAPH])
-        self.assertIsNotNone(h[h_con.Events.REMOVE_MEMBER])
-        self.assertIsNotNone(h[h_con.Events.UPDATE_POOL])
-        self.assertIsNotNone(h[h_con.Events.UPDATE_VIP])
-        self.assertIsNotNone(h[h_con.Events.UPDATE_POOL_HM])
-        self.assertIsNotNone(h[h_con.Events.DELETE_POOL_HM])
-        self.assertIsNotNone(h[h_con.Events.ADD_POOL_HM])
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/__init__.py b/neutron/tests/unit/services/loadbalancer/drivers/haproxy/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py b/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py
deleted file mode 100644 (file)
index 1e38822..0000000
+++ /dev/null
@@ -1,224 +0,0 @@
-# Copyright 2013 Mirantis, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-import mock
-
-from neutron.services.loadbalancer.drivers.haproxy import cfg
-from neutron.tests import base
-
-
-class TestHaproxyCfg(base.BaseTestCase):
-    def test_save_config(self):
-        with contextlib.nested(
-                mock.patch('neutron.services.loadbalancer.'
-                           'drivers.haproxy.cfg._build_global'),
-                mock.patch('neutron.services.loadbalancer.'
-                           'drivers.haproxy.cfg._build_defaults'),
-                mock.patch('neutron.services.loadbalancer.'
-                           'drivers.haproxy.cfg._build_frontend'),
-                mock.patch('neutron.services.loadbalancer.'
-                           'drivers.haproxy.cfg._build_backend'),
-                mock.patch('neutron.agent.linux.utils.replace_file')
-        ) as (b_g, b_d, b_f, b_b, replace):
-            test_config = ['globals', 'defaults', 'frontend', 'backend']
-            b_g.return_value = [test_config[0]]
-            b_d.return_value = [test_config[1]]
-            b_f.return_value = [test_config[2]]
-            b_b.return_value = [test_config[3]]
-
-            cfg.save_config('test_path', mock.Mock())
-            replace.assert_called_once_with('test_path',
-                                            '\n'.join(test_config))
-
-    def test_build_global(self):
-        expected_opts = ['global',
-                         '\tdaemon',
-                         '\tuser nobody',
-                         '\tgroup test_group',
-                         '\tlog /dev/log local0',
-                         '\tlog /dev/log local1 notice',
-                         '\tstats socket test_path mode 0666 level user']
-        opts = cfg._build_global(mock.Mock(), 'test_path', 'test_group')
-        self.assertEqual(expected_opts, list(opts))
-
-    def test_build_defaults(self):
-        expected_opts = ['defaults',
-                         '\tlog global',
-                         '\tretries 3',
-                         '\toption redispatch',
-                         '\ttimeout connect 5000',
-                         '\ttimeout client 50000',
-                         '\ttimeout server 50000']
-        opts = cfg._build_defaults(mock.Mock())
-        self.assertEqual(expected_opts, list(opts))
-
-    def test_build_frontend(self):
-        test_config = {'vip': {'id': 'vip_id',
-                               'protocol': 'HTTP',
-                               'port': {'fixed_ips': [
-                                   {'ip_address': '10.0.0.2'}]
-                               },
-                               'protocol_port': 80,
-                               'connection_limit': 2000,
-                               },
-                       'pool': {'id': 'pool_id'}}
-        expected_opts = ['frontend vip_id',
-                         '\toption tcplog',
-                         '\tbind 10.0.0.2:80',
-                         '\tmode http',
-                         '\tdefault_backend pool_id',
-                         '\tmaxconn 2000',
-                         '\toption forwardfor']
-        opts = cfg._build_frontend(test_config)
-        self.assertEqual(expected_opts, list(opts))
-
-        test_config['vip']['connection_limit'] = -1
-        expected_opts.remove('\tmaxconn 2000')
-        opts = cfg._build_frontend(test_config)
-        self.assertEqual(expected_opts, list(opts))
-
-    def test_build_backend(self):
-        test_config = {'pool': {'id': 'pool_id',
-                                'protocol': 'HTTP',
-                                'lb_method': 'ROUND_ROBIN'},
-                       'members': [{'status': 'ACTIVE',
-                                    'admin_state_up': True,
-                                    'id': 'member1_id',
-                                    'address': '10.0.0.3',
-                                    'protocol_port': 80,
-                                    'weight': 1},
-                                   {'status': 'INACTIVE',
-                                    'admin_state_up': True,
-                                    'id': 'member2_id',
-                                    'address': '10.0.0.4',
-                                    'protocol_port': 80,
-                                    'weight': 1},
-                                   {'status': 'PENDING_CREATE',
-                                    'admin_state_up': True,
-                                    'id': 'member3_id',
-                                    'address': '10.0.0.5',
-                                    'protocol_port': 80,
-                                    'weight': 1}],
-                       'healthmonitors': [{'admin_state_up': True,
-                                           'delay': 3,
-                                           'max_retries': 4,
-                                           'timeout': 2,
-                                           'type': 'TCP'}],
-                       'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
-        expected_opts = ['backend pool_id',
-                         '\tmode http',
-                         '\tbalance roundrobin',
-                         '\toption forwardfor',
-                         '\ttimeout check 2s',
-                         '\tcookie SRV insert indirect nocache',
-                         '\tserver member1_id 10.0.0.3:80 weight 1 '
-                         'check inter 3s fall 4 cookie 0',
-                         '\tserver member2_id 10.0.0.4:80 weight 1 '
-                         'check inter 3s fall 4 cookie 1',
-                         '\tserver member3_id 10.0.0.5:80 weight 1 '
-                         'check inter 3s fall 4 cookie 2']
-        opts = cfg._build_backend(test_config)
-        self.assertEqual(expected_opts, list(opts))
-
-    def test_get_server_health_option(self):
-        test_config = {'healthmonitors': [{'admin_state_up': False,
-                                           'delay': 3,
-                                           'max_retries': 4,
-                                           'timeout': 2,
-                                           'type': 'TCP',
-                                           'http_method': 'GET',
-                                           'url_path': '/',
-                                           'expected_codes': '200'}]}
-        self.assertEqual(('', []), cfg._get_server_health_option(test_config))
-
-        self.assertEqual(('', []), cfg._get_server_health_option(test_config))
-
-        test_config['healthmonitors'][0]['admin_state_up'] = True
-        expected = (' check inter 3s fall 4', ['timeout check 2s'])
-        self.assertEqual(expected, cfg._get_server_health_option(test_config))
-
-        test_config['healthmonitors'][0]['type'] = 'HTTPS'
-        expected = (' check inter 3s fall 4',
-                    ['timeout check 2s',
-                     'option httpchk GET /',
-                     'http-check expect rstatus 200',
-                     'option ssl-hello-chk'])
-        self.assertEqual(expected, cfg._get_server_health_option(test_config))
-
-    def test_has_http_cookie_persistence(self):
-        config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
-        self.assertTrue(cfg._has_http_cookie_persistence(config))
-
-        config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}}
-        self.assertFalse(cfg._has_http_cookie_persistence(config))
-
-        config = {'vip': {'session_persistence': {}}}
-        self.assertFalse(cfg._has_http_cookie_persistence(config))
-
-    def test_get_session_persistence(self):
-        config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}}
-        self.assertEqual(cfg._get_session_persistence(config),
-                         ['stick-table type ip size 10k', 'stick on src'])
-
-        config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}},
-                  'members': []}
-        self.assertEqual([], cfg._get_session_persistence(config))
-
-        config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
-        self.assertEqual([], cfg._get_session_persistence(config))
-
-        config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}},
-                  'members': [{'id': 'member1_id'}]}
-        self.assertEqual(cfg._get_session_persistence(config),
-                         ['cookie SRV insert indirect nocache'])
-
-        config = {'vip': {'session_persistence': {'type': 'APP_COOKIE',
-                                                  'cookie_name': 'test'}}}
-        self.assertEqual(cfg._get_session_persistence(config),
-                         ['appsession test len 56 timeout 3h'])
-
-        config = {'vip': {'session_persistence': {'type': 'APP_COOKIE'}}}
-        self.assertEqual(cfg._get_session_persistence(config), [])
-
-        config = {'vip': {'session_persistence': {'type': 'UNSUPPORTED'}}}
-        self.assertEqual(cfg._get_session_persistence(config), [])
-
-    def test_expand_expected_codes(self):
-        exp_codes = ''
-        self.assertEqual(cfg._expand_expected_codes(exp_codes), set([]))
-        exp_codes = '200'
-        self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['200']))
-        exp_codes = '200, 201'
-        self.assertEqual(cfg._expand_expected_codes(exp_codes),
-                         set(['200', '201']))
-        exp_codes = '200, 201,202'
-        self.assertEqual(cfg._expand_expected_codes(exp_codes),
-                         set(['200', '201', '202']))
-        exp_codes = '200-202'
-        self.assertEqual(cfg._expand_expected_codes(exp_codes),
-                         set(['200', '201', '202']))
-        exp_codes = '200-202, 205'
-        self.assertEqual(cfg._expand_expected_codes(exp_codes),
-                         set(['200', '201', '202', '205']))
-        exp_codes = '200, 201-203'
-        self.assertEqual(cfg._expand_expected_codes(exp_codes),
-                         set(['200', '201', '202', '203']))
-        exp_codes = '200, 201-203, 205'
-        self.assertEqual(cfg._expand_expected_codes(exp_codes),
-                         set(['200', '201', '202', '203', '205']))
-        exp_codes = '201-200, 205'
-        self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['205']))
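
test_expand_expected_codes spells out the parser's contract completely: comma-separated entries, optional whitespace, inclusive low-high ranges, and an inverted range such as 201-200 expanding to nothing. A standalone sketch consistent with those assertions (not a copy of the deleted module):

def expand_expected_codes(codes):
    """Expand '200, 202-204' style strings into a set of code strings."""
    retval = set()
    for code in codes.replace(',', ' ').split(' '):
        code = code.strip()
        if not code:
            continue
        elif '-' in code:
            low, hi = code.split('-')[:2]
            # an inverted range yields an empty range(), so nothing is added
            retval.update(str(i) for i in range(int(low), int(hi) + 1))
        else:
            retval.add(code)
    return retval
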
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py
deleted file mode 100644 (file)
index 0f8a2fd..0000000
+++ /dev/null
@@ -1,549 +0,0 @@
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-import mock
-
-from neutron.common import exceptions
-from neutron.services.loadbalancer.drivers.haproxy import namespace_driver
-from neutron.tests import base
-
-
-class TestHaproxyNSDriver(base.BaseTestCase):
-    def setUp(self):
-        super(TestHaproxyNSDriver, self).setUp()
-
-        conf = mock.Mock()
-        conf.haproxy.loadbalancer_state_path = '/the/path'
-        conf.interface_driver = 'intdriver'
-        conf.haproxy.user_group = 'test_group'
-        conf.haproxy.send_gratuitous_arp = 3
-        conf.AGENT.root_helper = 'sudo_test'
-        self.conf = conf
-        self.mock_importer = mock.patch.object(namespace_driver,
-                                               'importutils').start()
-
-        self.rpc_mock = mock.Mock()
-        self.driver = namespace_driver.HaproxyNSDriver(
-            conf,
-            self.rpc_mock
-        )
-        self.vif_driver = mock.Mock()
-        self.driver.vif_driver = self.vif_driver
-
-        self.fake_config = {
-            'pool': {'id': 'pool_id', 'status': 'ACTIVE',
-                     'admin_state_up': True},
-            'vip': {'id': 'vip_id', 'port': {'id': 'port_id'},
-                    'status': 'ACTIVE', 'admin_state_up': True}
-        }
-
-    def test_get_name(self):
-        self.assertEqual(self.driver.get_name(), namespace_driver.DRIVER_NAME)
-
-    def test_create(self):
-        with mock.patch.object(self.driver, '_plug') as plug:
-            with mock.patch.object(self.driver, '_spawn') as spawn:
-                self.driver.create(self.fake_config)
-
-                plug.assert_called_once_with(
-                    'qlbaas-pool_id', {'id': 'port_id'}
-                )
-                spawn.assert_called_once_with(self.fake_config)
-
-    def test_update(self):
-        with contextlib.nested(
-            mock.patch.object(self.driver, '_get_state_file_path'),
-            mock.patch.object(self.driver, '_spawn'),
-            mock.patch('__builtin__.open')
-        ) as (gsp, spawn, mock_open):
-            mock_open.return_value = ['5']
-
-            self.driver.update(self.fake_config)
-
-            mock_open.assert_called_once_with(gsp.return_value, 'r')
-            spawn.assert_called_once_with(self.fake_config, ['-sf', '5'])
-
-    def test_spawn(self):
-        with contextlib.nested(
-            mock.patch.object(namespace_driver.hacfg, 'save_config'),
-            mock.patch.object(self.driver, '_get_state_file_path'),
-            mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
-        ) as (mock_save, gsp, ip_wrap):
-            gsp.side_effect = lambda x, y: y
-
-            self.driver._spawn(self.fake_config)
-
-            mock_save.assert_called_once_with('conf', self.fake_config,
-                                              'sock', 'test_group')
-            cmd = ['haproxy', '-f', 'conf', '-p', 'pid']
-            ip_wrap.assert_has_calls([
-                mock.call('sudo_test', 'qlbaas-pool_id'),
-                mock.call().netns.execute(cmd)
-            ])
-
-    def test_undeploy_instance(self):
-        with contextlib.nested(
-            mock.patch.object(self.driver, '_get_state_file_path'),
-            mock.patch.object(namespace_driver, 'kill_pids_in_file'),
-            mock.patch.object(self.driver, '_unplug'),
-            mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
-            mock.patch('os.path.isdir'),
-            mock.patch('shutil.rmtree')
-        ) as (gsp, kill, unplug, ip_wrap, isdir, rmtree):
-            gsp.side_effect = lambda x, y: '/pool/' + y
-
-            self.driver.pool_to_port_id['pool_id'] = 'port_id'
-            isdir.return_value = True
-
-            self.driver.undeploy_instance('pool_id')
-
-            kill.assert_called_once_with('sudo_test', '/pool/pid')
-            unplug.assert_called_once_with('qlbaas-pool_id', 'port_id')
-            isdir.assert_called_once_with('/pool')
-            rmtree.assert_called_once_with('/pool')
-            ip_wrap.assert_has_calls([
-                mock.call('sudo_test', 'qlbaas-pool_id'),
-                mock.call().garbage_collect_namespace()
-            ])
-
-    def test_undeploy_instance_with_ns_cleanup(self):
-        with contextlib.nested(
-            mock.patch.object(self.driver, '_get_state_file_path'),
-            mock.patch.object(self.driver, 'vif_driver'),
-            mock.patch.object(namespace_driver, 'kill_pids_in_file'),
-            mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
-            mock.patch('os.path.isdir'),
-            mock.patch('shutil.rmtree')
-        ) as (gsp, vif, kill, ip_wrap, isdir, rmtree):
-            device = mock.Mock()
-            device_name = 'port_device'
-            device.name = device_name
-            ip_wrap.return_value.get_devices.return_value = [device]
-
-            self.driver.undeploy_instance('pool_id', cleanup_namespace=True)
-            vif.unplug.assert_called_once_with(device_name,
-                                               namespace='qlbaas-pool_id')
-
-    def test_remove_orphans(self):
-        with contextlib.nested(
-            mock.patch.object(self.driver, 'exists'),
-            mock.patch.object(self.driver, 'undeploy_instance'),
-            mock.patch('os.listdir'),
-            mock.patch('os.path.exists')
-        ) as (exists, undeploy, listdir, path_exists):
-            known = ['known1', 'known2']
-            unknown = ['unknown1', 'unknown2']
-            listdir.return_value = known + unknown
-            exists.side_effect = lambda x: x == 'unknown2'
-
-            self.driver.remove_orphans(known)
-
-            undeploy.assert_called_once_with('unknown2',
-                                             cleanup_namespace=True)
-
-    def test_exists(self):
-        with contextlib.nested(
-            mock.patch.object(self.driver, '_get_state_file_path'),
-            mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
-            mock.patch('socket.socket'),
-            mock.patch('os.path.exists'),
-        ) as (gsp, ip_wrap, socket, path_exists):
-            gsp.side_effect = lambda x, y, z: '/pool/' + y
-
-            ip_wrap.return_value.netns.exists.return_value = True
-            path_exists.return_value = True
-
-            self.driver.exists('pool_id')
-
-            ip_wrap.assert_has_calls([
-                mock.call('sudo_test'),
-                mock.call().netns.exists('qlbaas-pool_id')
-            ])
-
-            self.assertTrue(self.driver.exists('pool_id'))
-
-    def test_get_stats(self):
-        raw_stats = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,'
-                     'dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,'
-                     'act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,'
-                     'sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,'
-                     'check_status,check_code,check_duration,hrsp_1xx,'
-                     'hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,'
-                     'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,\n'
-                     '8e271901-69ed-403e-a59b-f53cf77ef208,BACKEND,1,2,3,4,0,'
-                     '10,7764,2365,0,0,,0,0,0,0,UP,1,1,0,,0,103780,0,,1,2,0,,0'
-                     ',,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,\n\n'
-                     'a557019b-dc07-4688-9af4-f5cf02bb6d4b,'
-                     '32a6c2a3-420a-44c3-955d-86bd2fc6871e,0,0,0,1,,7,1120,'
-                     '224,,0,,0,0,0,0,UP,1,1,0,0,1,2623,303,,1,2,1,,7,,2,0,,'
-                     '1,L7OK,200,98,0,7,0,0,0,0,0,,,,0,0,\n'
-                     'a557019b-dc07-4688-9af4-f5cf02bb6d4b,'
-                     'd9aea044-8867-4e80-9875-16fb808fa0f9,0,0,0,2,,12,0,0,,'
-                     '0,,0,0,8,4,DOWN,1,1,0,9,2,308,675,,1,2,2,,4,,2,0,,2,'
-                     'L4CON,,2999,0,0,0,0,0,0,0,,,,0,0,\n')
-        raw_stats_empty = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,'
-                           'bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,'
-                           'status,weight,act,bck,chkfail,chkdown,lastchg,'
-                           'downtime,qlimit,pid,iid,sid,throttle,lbtot,'
-                           'tracked,type,rate,rate_lim,rate_max,check_status,'
-                           'check_code,check_duration,hrsp_1xx,hrsp_2xx,'
-                           'hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,'
-                           'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,'
-                           '\n')
-        with contextlib.nested(
-                mock.patch.object(self.driver, '_get_state_file_path'),
-                mock.patch('socket.socket'),
-                mock.patch('os.path.exists'),
-        ) as (gsp, socket, path_exists):
-            gsp.side_effect = lambda x, y, z: '/pool/' + y
-            path_exists.return_value = True
-            socket.return_value = socket
-            socket.recv.return_value = raw_stats
-
-            exp_stats = {'connection_errors': '0',
-                         'active_connections': '3',
-                         'current_sessions': '3',
-                         'bytes_in': '7764',
-                         'max_connections': '4',
-                         'max_sessions': '4',
-                         'bytes_out': '2365',
-                         'response_errors': '0',
-                         'total_sessions': '10',
-                         'total_connections': '10',
-                         'members': {
-                             '32a6c2a3-420a-44c3-955d-86bd2fc6871e': {
-                                 'status': 'ACTIVE',
-                                 'health': 'L7OK',
-                                 'failed_checks': '0'
-                             },
-                             'd9aea044-8867-4e80-9875-16fb808fa0f9': {
-                                 'status': 'INACTIVE',
-                                 'health': 'L4CON',
-                                 'failed_checks': '9'
-                             }
-                         }
-                         }
-            stats = self.driver.get_stats('pool_id')
-            self.assertEqual(exp_stats, stats)
-
-            socket.recv.return_value = raw_stats_empty
-            self.assertEqual({'members': {}}, self.driver.get_stats('pool_id'))
-
-            path_exists.return_value = False
-            socket.reset_mock()
-            self.assertEqual({}, self.driver.get_stats('pool_id'))
-            self.assertFalse(socket.called)
-
-    def test_plug(self):
-        test_port = {'id': 'port_id',
-                     'network_id': 'net_id',
-                     'mac_address': 'mac_addr',
-                     'fixed_ips': [{'ip_address': '10.0.0.2',
-                                    'subnet': {'cidr': '10.0.0.0/24',
-                                               'gateway_ip': '10.0.0.1'}}]}
-        with contextlib.nested(
-                mock.patch('neutron.agent.linux.ip_lib.device_exists'),
-                mock.patch('netaddr.IPNetwork'),
-                mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
-        ) as (dev_exists, ip_net, ip_wrap):
-            self.vif_driver.get_device_name.return_value = 'test_interface'
-            dev_exists.return_value = False
-            ip_net.return_value = ip_net
-            ip_net.prefixlen = 24
-
-            self.driver._plug('test_ns', test_port)
-            self.rpc_mock.plug_vip_port.assert_called_once_with(
-                test_port['id'])
-            self.assertTrue(dev_exists.called)
-            self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
-                                                         'test_interface',
-                                                         'mac_addr',
-                                                         namespace='test_ns')
-            self.vif_driver.init_l3.assert_called_once_with(
-                'test_interface',
-                ['10.0.0.2/24'],
-                namespace='test_ns'
-            )
-            cmd = ['route', 'add', 'default', 'gw', '10.0.0.1']
-            cmd_arping = ['arping', '-U', '-I',
-                          'test_interface', '-c',
-                          self.conf.haproxy.send_gratuitous_arp, '10.0.0.2']
-            ip_wrap.assert_has_calls([
-                mock.call('sudo_test', namespace='test_ns'),
-                mock.call().netns.execute(cmd, check_exit_code=False),
-                mock.call().netns.execute(cmd_arping, check_exit_code=False),
-            ])
-
-            dev_exists.return_value = True
-            self.assertRaises(exceptions.PreexistingDeviceFailure,
-                              self.driver._plug, 'test_ns', test_port, False)
-
-    def test_plug_not_send_gratuitous_arp(self):
-        self.conf.haproxy.send_gratuitous_arp = 0
-        test_port = {'id': 'port_id',
-                     'network_id': 'net_id',
-                     'mac_address': 'mac_addr',
-                     'fixed_ips': [{'ip_address': '10.0.0.2',
-                                    'subnet': {'cidr': '10.0.0.0/24',
-                                               'gateway_ip': '10.0.0.1'}}]}
-        with contextlib.nested(
-                mock.patch('neutron.agent.linux.ip_lib.device_exists'),
-                mock.patch('netaddr.IPNetwork'),
-                mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
-        ) as (dev_exists, ip_net, ip_wrap):
-            self.vif_driver.get_device_name.return_value = 'test_interface'
-            dev_exists.return_value = False
-            ip_net.return_value = ip_net
-            ip_net.prefixlen = 24
-
-            self.driver._plug('test_ns', test_port)
-            cmd = ['route', 'add', 'default', 'gw', '10.0.0.1']
-            expected = [
-                mock.call('sudo_test', namespace='test_ns'),
-                mock.call().netns.execute(cmd, check_exit_code=False)]
-            self.assertEqual(expected, ip_wrap.mock_calls)
-
-    def test_plug_no_gw(self):
-        test_port = {'id': 'port_id',
-                     'network_id': 'net_id',
-                     'mac_address': 'mac_addr',
-                     'fixed_ips': [{'ip_address': '10.0.0.2',
-                                    'subnet': {'cidr': '10.0.0.0/24'}}]}
-        with contextlib.nested(
-                mock.patch('neutron.agent.linux.ip_lib.device_exists'),
-                mock.patch('netaddr.IPNetwork'),
-                mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
-        ) as (dev_exists, ip_net, ip_wrap):
-            self.vif_driver.get_device_name.return_value = 'test_interface'
-            dev_exists.return_value = False
-            ip_net.return_value = ip_net
-            ip_net.prefixlen = 24
-
-            self.driver._plug('test_ns', test_port)
-            self.rpc_mock.plug_vip_port.assert_called_once_with(
-                test_port['id'])
-            self.assertTrue(dev_exists.called)
-            self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
-                                                         'test_interface',
-                                                         'mac_addr',
-                                                         namespace='test_ns')
-            self.vif_driver.init_l3.assert_called_once_with(
-                'test_interface',
-                ['10.0.0.2/24'],
-                namespace='test_ns'
-            )
-            self.assertFalse(ip_wrap.called)
-            dev_exists.return_value = True
-            self.assertRaises(exceptions.PreexistingDeviceFailure,
-                              self.driver._plug, 'test_ns', test_port, False)
-
-    def test_plug_gw_in_host_routes(self):
-        test_port = {'id': 'port_id',
-                     'network_id': 'net_id',
-                     'mac_address': 'mac_addr',
-                     'fixed_ips': [{'ip_address': '10.0.0.2',
-                                    'subnet': {'cidr': '10.0.0.0/24',
-                                               'host_routes':
-                                               [{'destination': '0.0.0.0/0',
-                                                 'nexthop': '10.0.0.1'}]}}]}
-        with contextlib.nested(
-                mock.patch('neutron.agent.linux.ip_lib.device_exists'),
-                mock.patch('netaddr.IPNetwork'),
-                mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
-        ) as (dev_exists, ip_net, ip_wrap):
-            self.vif_driver.get_device_name.return_value = 'test_interface'
-            dev_exists.return_value = False
-            ip_net.return_value = ip_net
-            ip_net.prefixlen = 24
-
-            self.driver._plug('test_ns', test_port)
-            self.rpc_mock.plug_vip_port.assert_called_once_with(
-                test_port['id'])
-            self.assertTrue(dev_exists.called)
-            self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
-                                                         'test_interface',
-                                                         'mac_addr',
-                                                         namespace='test_ns')
-            self.vif_driver.init_l3.assert_called_once_with(
-                'test_interface',
-                ['10.0.0.2/24'],
-                namespace='test_ns'
-            )
-            cmd = ['route', 'add', 'default', 'gw', '10.0.0.1']
-            ip_wrap.assert_has_calls([
-                mock.call('sudo_test', namespace='test_ns'),
-                mock.call().netns.execute(cmd, check_exit_code=False),
-            ])
-
-    def test_unplug(self):
-        self.vif_driver.get_device_name.return_value = 'test_interface'
-
-        self.driver._unplug('test_ns', 'port_id')
-        self.rpc_mock.unplug_vip_port.assert_called_once_with('port_id')
-        self.vif_driver.unplug('test_interface', namespace='test_ns')
-
-    def test_kill_pids_in_file(self):
-        with contextlib.nested(
-            mock.patch('os.path.exists'),
-            mock.patch('__builtin__.open'),
-            mock.patch('neutron.agent.linux.utils.execute'),
-            mock.patch.object(namespace_driver.LOG, 'exception'),
-        ) as (path_exists, mock_open, mock_execute, mock_log):
-            file_mock = mock.MagicMock()
-            mock_open.return_value = file_mock
-            file_mock.__enter__.return_value = file_mock
-            file_mock.__iter__.return_value = iter(['123'])
-
-            path_exists.return_value = False
-            namespace_driver.kill_pids_in_file('sudo_test', 'test_path')
-            path_exists.assert_called_once_with('test_path')
-            self.assertFalse(mock_open.called)
-            self.assertFalse(mock_execute.called)
-
-            path_exists.return_value = True
-            mock_execute.side_effect = RuntimeError
-            namespace_driver.kill_pids_in_file('sudo_test', 'test_path')
-            self.assertTrue(mock_log.called)
-            mock_execute.assert_called_once_with(
-                ['kill', '-9', '123'], 'sudo_test')
-
-    def test_get_state_file_path(self):
-        with mock.patch('os.makedirs') as mkdir:
-            path = self.driver._get_state_file_path('pool_id', 'conf')
-            self.assertEqual('/the/path/pool_id/conf', path)
-            mkdir.assert_called_once_with('/the/path/pool_id', 0o755)
-
-    def test_deploy_instance(self):
-        with mock.patch.object(self.driver, 'exists') as exists:
-            with mock.patch.object(self.driver, 'update') as update:
-                self.driver.deploy_instance(self.fake_config)
-                exists.assert_called_once_with(self.fake_config['pool']['id'])
-                update.assert_called_once_with(self.fake_config)
-
-    def test_deploy_instance_non_existing(self):
-        with mock.patch.object(self.driver, 'exists') as exists:
-            with mock.patch.object(self.driver, 'create') as create:
-                exists.return_value = False
-                self.driver.deploy_instance(self.fake_config)
-                exists.assert_called_once_with(self.fake_config['pool']['id'])
-                create.assert_called_once_with(self.fake_config)
-
-    def test_deploy_instance_vip_status_non_active(self):
-        with mock.patch.object(self.driver, 'exists') as exists:
-            self.fake_config['vip']['status'] = 'NON_ACTIVE'
-            self.driver.deploy_instance(self.fake_config)
-            self.assertFalse(exists.called)
-
-    def test_deploy_instance_vip_admin_state_down(self):
-        with mock.patch.object(self.driver, 'exists') as exists:
-            self.fake_config['vip']['admin_state_up'] = False
-            self.driver.deploy_instance(self.fake_config)
-            self.assertFalse(exists.called)
-
-    def test_deploy_instance_no_vip(self):
-        with mock.patch.object(self.driver, 'exists') as exists:
-            del self.fake_config['vip']
-            self.driver.deploy_instance(self.fake_config)
-            self.assertFalse(exists.called)
-
-    def test_deploy_instance_pool_status_non_active(self):
-        with mock.patch.object(self.driver, 'exists') as exists:
-            self.fake_config['pool']['status'] = 'NON_ACTIVE'
-            self.driver.deploy_instance(self.fake_config)
-            self.assertFalse(exists.called)
-
-    def test_deploy_instance_pool_admin_state_down(self):
-        with mock.patch.object(self.driver, 'exists') as exists:
-            self.fake_config['pool']['admin_state_up'] = False
-            self.driver.deploy_instance(self.fake_config)
-            self.assertFalse(exists.called)
-
-    def test_refresh_device(self):
-        with mock.patch.object(self.driver, 'deploy_instance') as deploy:
-            pool_id = 'pool_id1'
-            self.driver._refresh_device(pool_id)
-            self.rpc_mock.get_logical_device.assert_called_once_with(pool_id)
-            deploy.assert_called_once_with(
-                self.rpc_mock.get_logical_device.return_value)
-
-    def test_create_vip(self):
-        with mock.patch.object(self.driver, '_refresh_device') as refresh:
-            self.driver.create_vip({'pool_id': '1'})
-            refresh.assert_called_once_with('1')
-
-    def test_update_vip(self):
-        with mock.patch.object(self.driver, '_refresh_device') as refresh:
-            self.driver.update_vip({}, {'pool_id': '1'})
-            refresh.assert_called_once_with('1')
-
-    def test_delete_vip(self):
-        with mock.patch.object(self.driver, 'undeploy_instance') as undeploy:
-            self.driver.delete_vip({'pool_id': '1'})
-            undeploy.assert_called_once_with('1')
-
-    def test_create_pool(self):
-        with mock.patch.object(self.driver, '_refresh_device') as refresh:
-            self.driver.create_pool({'id': '1'})
-            self.assertFalse(refresh.called)
-
-    def test_update_pool(self):
-        with mock.patch.object(self.driver, '_refresh_device') as refresh:
-            self.driver.update_pool({}, {'id': '1'})
-            refresh.assert_called_once_with('1')
-
-    def test_delete_pool_existing(self):
-        with mock.patch.object(self.driver, 'undeploy_instance') as undeploy:
-            with mock.patch.object(self.driver, 'exists') as exists:
-                exists.return_value = True
-                self.driver.delete_pool({'id': '1'})
-                undeploy.assert_called_once_with('1')
-
-    def test_delete_pool_non_existing(self):
-        with mock.patch.object(self.driver, 'undeploy_instance') as undeploy:
-            with mock.patch.object(self.driver, 'exists') as exists:
-                exists.return_value = False
-                self.driver.delete_pool({'id': '1'})
-                self.assertFalse(undeploy.called)
-
-    def test_create_member(self):
-        with mock.patch.object(self.driver, '_refresh_device') as refresh:
-            self.driver.create_member({'pool_id': '1'})
-            refresh.assert_called_once_with('1')
-
-    def test_update_member(self):
-        with mock.patch.object(self.driver, '_refresh_device') as refresh:
-            self.driver.update_member({}, {'pool_id': '1'})
-            refresh.assert_called_once_with('1')
-
-    def test_delete_member(self):
-        with mock.patch.object(self.driver, '_refresh_device') as refresh:
-            self.driver.delete_member({'pool_id': '1'})
-            refresh.assert_called_once_with('1')
-
-    def test_create_pool_health_monitor(self):
-        with mock.patch.object(self.driver, '_refresh_device') as refresh:
-            self.driver.create_pool_health_monitor('', '1')
-            refresh.assert_called_once_with('1')
-
-    def test_update_pool_health_monitor(self):
-        with mock.patch.object(self.driver, '_refresh_device') as refresh:
-            self.driver.update_pool_health_monitor('', '', '1')
-            refresh.assert_called_once_with('1')
-
-    def test_delete_pool_health_monitor(self):
-        with mock.patch.object(self.driver, '_refresh_device') as refresh:
-            self.driver.delete_pool_health_monitor('', '1')
-            refresh.assert_called_once_with('1')
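
test_kill_pids_in_file fixes the helper's behaviour precisely: do nothing when the pid file is absent, otherwise kill -9 every pid listed in it through the root helper, and log (rather than propagate) a RuntimeError raised by the kill. A sketch matching those assertions, assuming the neutron.agent.linux.utils.execute helper that the test patches:

import logging
import os

from neutron.agent.linux import utils

LOG = logging.getLogger(__name__)


def kill_pids_in_file(root_helper, pid_path):
    """Send SIGKILL to every pid listed in pid_path, if the file exists."""
    if not os.path.exists(pid_path):
        return
    with open(pid_path, 'r') as pids:
        for pid in pids:
            pid = pid.strip()
            try:
                utils.execute(['kill', '-9', pid], root_helper)
            except RuntimeError:
                # a dead or foreign pid should not abort the cleanup loop
                LOG.exception('Unable to kill haproxy process: %s', pid)
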
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/logging_noop/__init__.py b/neutron/tests/unit/services/loadbalancer/drivers/logging_noop/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/logging_noop/test_logging_noop_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/logging_noop/test_logging_noop_driver.py
deleted file mode 100644 (file)
index be695e8..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright 2014, Doug Wiegley (dougwig), A10 Networks
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron import context
-from neutron.services.loadbalancer.drivers.logging_noop import driver
-from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
-
-log_path = 'neutron.services.loadbalancer.drivers.logging_noop.driver.LOG'
-
-
-class FakeModel(object):
-    def __init__(self, id):
-        self.id = id
-
-
-def patch_manager(func):
-    @mock.patch(log_path)
-    def wrapper(*args):
-        log_mock = args[-1]
-        manager_test = args[0]
-        model = args[1]
-        parent = manager_test.parent
-        driver = parent.driver
-        driver.plugin.reset_mock()
-
-        func(*args[:-1])
-
-        s = str(log_mock.mock_calls[0])
-        parent.assertEqual(s[:11], "call.debug(")
-        parent.assertTrue(s.index(model.id) != -1,
-                          msg="Model ID not found in log")
-
-    return wrapper
-
-
-class ManagerTest(object):
-    def __init__(self, parent, manager, model):
-        self.parent = parent
-        self.manager = manager
-
-        self.create(model)
-        self.update(model, model)
-        self.delete(model)
-
-    @patch_manager
-    def create(self, model):
-        self.manager.create(self.parent.context, model)
-
-    @patch_manager
-    def update(self, old_model, model):
-        self.manager.update(self.parent.context, old_model, model)
-
-    @patch_manager
-    def delete(self, model):
-        self.manager.delete(self.parent.context, model)
-
-
-class ManagerTestWithUpdates(ManagerTest):
-    def __init__(self, parent, manager, model):
-        self.parent = parent
-        self.manager = manager
-
-        self.create(model)
-        self.update(model, model)
-        self.delete(model)
-
-    @patch_manager
-    def create(self, model):
-        self.manager.create(self.parent.context, model)
-        if self.manager.model_class is not None:
-            self.parent.assertEqual(
-                             str(self.parent.driver.plugin.mock_calls[0])[:18],
-                             "call.update_status")
-
-    @patch_manager
-    def update(self, old_model, model):
-        self.manager.update(self.parent.context, old_model, model)
-        if self.manager.model_class is not None:
-            self.parent.assertEqual(
-                             str(self.parent.driver.plugin.mock_calls[0])[:18],
-                             "call.update_status")
-
-    @patch_manager
-    def delete(self, model):
-        self.manager.delete(self.parent.context, model)
-
-
-class LoadBalancerManagerTest(ManagerTestWithUpdates):
-    def __init__(self, parent, manager, model):
-        super(LoadBalancerManagerTest, self).__init__(parent, manager, model)
-
-        self.refresh(model)
-        self.stats(model)
-
-    @patch_manager
-    def refresh(self, model):
-        self.manager.refresh(self.parent.context, model)
-
-    @patch_manager
-    def stats(self, model):
-        dummy_stats = {
-            "bytes_in": 0,
-            "bytes_out": 0,
-            "active_connections": 0,
-            "total_connections": 0
-        }
-        h = self.manager.stats(self.parent.context, model)
-        self.parent.assertEqual(h, dummy_stats)
-
-
-class TestLoggingNoopLoadBalancerDriver(
-        test_db_loadbalancer.LoadBalancerPluginDbTestCase):
-
-    def setUp(self):
-        super(TestLoggingNoopLoadBalancerDriver, self).setUp()
-        self.context = context.get_admin_context()
-        self.plugin = mock.Mock()
-        self.driver = driver.LoggingNoopLoadBalancerDriver(self.plugin)
-
-    def test_load_balancer_ops(self):
-        LoadBalancerManagerTest(self, self.driver.load_balancer,
-                                FakeModel("loadbalancer-001"))
-
-    def test_listener_ops(self):
-        ManagerTest(self, self.driver.listener, FakeModel("listener-001"))
-
-    def test_pool_ops(self):
-        ManagerTestWithUpdates(self, self.driver.pool, FakeModel("pool-001"))
-
-    def test_member_ops(self):
-        ManagerTestWithUpdates(self, self.driver.member,
-                               FakeModel("member-001"))
-
-    def test_health_monitor_ops(self):
-        ManagerTest(self, self.driver.health_monitor, FakeModel("hm-001"))
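
The manager harnesses above capture what the logging noop driver is expected to do: emit a debug log containing the model id for every operation, call plugin.update_status after create/update when the manager is bound to a model class, and return an all-zero stats dictionary. A minimal sketch of one such manager, with names assumed from the assertions rather than lifted from the driver module:

import logging

LOG = logging.getLogger(__name__)


class FakeNoopManager(object):
    # set to a DB model class to enable status updates after create/update
    model_class = None

    def __init__(self, driver):
        self.driver = driver

    def create(self, context, obj):
        LOG.debug("create called for %s", obj.id)
        self._complete(context, obj)

    def update(self, context, old_obj, obj):
        LOG.debug("update called for %s", obj.id)
        self._complete(context, obj)

    def delete(self, context, obj):
        LOG.debug("delete called for %s", obj.id)

    def stats(self, context, obj):
        LOG.debug("stats called for %s", obj.id)
        return {"bytes_in": 0, "bytes_out": 0,
                "active_connections": 0, "total_connections": 0}

    def _complete(self, context, obj):
        if self.model_class is not None:
            self.driver.plugin.update_status(
                context, self.model_class, obj.id, 'ACTIVE')
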
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/netscaler/__init__.py b/neutron/tests/unit/services/loadbalancer/drivers/netscaler/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client.py b/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client.py
deleted file mode 100644 (file)
index 6585e60..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2014 Citrix Systems
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-import requests
-
-from neutron.services.loadbalancer.drivers.netscaler import ncc_client
-from neutron.services.loadbalancer.drivers.netscaler import netscaler_driver
-from neutron.tests.unit import testlib_api
-
-NCC_CLIENT_CLASS = ('neutron.services.loadbalancer.drivers'
-                    '.netscaler.ncc_client.NSClient')
-
-TESTURI_SCHEME = 'http'
-TESTURI_HOSTNAME = '1.1.1.1'
-TESTURI_PORT = 4433
-TESTURI_PATH = '/ncc_service/1.0'
-TESTURI = '%s://%s:%s%s' % (TESTURI_SCHEME, TESTURI_HOSTNAME,
-                            TESTURI_PORT, TESTURI_PATH)
-TEST_USERNAME = 'user211'
-TEST_PASSWORD = '@30xHl5cT'
-TEST_TENANT_ID = '9c5245a2-0432-9d4c-4829-9bd7028603a1'
-TESTVIP_ID = '52ab5d71-6bb2-457f-8414-22a4ba55efec'
-
-
-class TestNSClient(testlib_api.WebTestCase):
-
-    """A Unit test for the NetScaler NCC client module."""
-
-    def setUp(self):
-        self.log = mock.patch.object(ncc_client, 'LOG').start()
-        super(TestNSClient, self).setUp()
-        # mock the requests.request function call
-        self.request_method_mock = mock.Mock()
-        requests.request = self.request_method_mock
-        self.testclient = self._get_nsclient()
-
-    def test_instantiate_nsclient_with_empty_uri(self):
-        """Asserts that a call with empty URI will raise an exception."""
-        self.assertRaises(ncc_client.NCCException, ncc_client.NSClient,
-                          '', TEST_USERNAME, TEST_PASSWORD)
-
-    def test_create_resource_with_no_connection(self):
-        """Asserts that a call with no connection will raise an exception."""
-        # mock a connection object that fails to establish a connection
-        self.request_method_mock.side_effect = (
-            requests.exceptions.ConnectionError())
-        resource_path = netscaler_driver.VIPS_RESOURCE
-        resource_name = netscaler_driver.VIP_RESOURCE
-        resource_body = self._get_testvip_httpbody_for_create()
-        # call method under test: create_resource() and assert that
-        # it raises an exception
-        self.assertRaises(ncc_client.NCCException,
-                          self.testclient.create_resource,
-                          TEST_TENANT_ID, resource_path,
-                          resource_name, resource_body)
-
-    def test_create_resource_with_error(self):
-        """Asserts that a failed create call raises an exception."""
-        # create a mock object to represent a valid http response
-        # with a failure status code.
-        fake_response = requests.Response()
-        fake_response.status_code = requests.codes.unauthorized
-        fake_response.headers = []
-        requests.request.return_value = fake_response
-        resource_path = netscaler_driver.VIPS_RESOURCE
-        resource_name = netscaler_driver.VIP_RESOURCE
-        resource_body = self._get_testvip_httpbody_for_create()
-        # call method under test: create_resource
-        # and assert that it raises the expected exception.
-        self.assertRaises(ncc_client.NCCException,
-                          self.testclient.create_resource,
-                          TEST_TENANT_ID, resource_path,
-                          resource_name, resource_body)
-
-    def test_create_resource(self):
-        """Asserts that a correct call will succeed."""
-        # create a valid http response with a successful status code.
-        fake_response = requests.Response()
-        fake_response.status_code = requests.codes.created
-        fake_response.headers = []
-        # obtain the mock object that corresponds to the call of request()
-        self.request_method_mock.return_value = fake_response
-        resource_path = netscaler_driver.VIPS_RESOURCE
-        resource_name = netscaler_driver.VIP_RESOURCE
-        resource_body = self._get_testvip_httpbody_for_create()
-        # call method under test: create_resource()
-        self.testclient.create_resource(TEST_TENANT_ID, resource_path,
-                                        resource_name, resource_body)
-        # assert that request() was called
-        # with the expected params.
-        resource_url = "%s/%s" % (self.testclient.service_uri, resource_path)
-        self.request_method_mock.assert_called_once_with(
-            'POST',
-            url=resource_url,
-            headers=mock.ANY,
-            data=mock.ANY)
-
-    def test_update_resource_with_error(self):
-        """Asserts that a failed update call raises an exception."""
-        # create a valid http response with a failure status code.
-        fake_response = requests.Response()
-        fake_response.status_code = requests.codes.unauthorized
-        fake_response.headers = []
-        # obtain the mock object that corresponds to the call of request()
-        self.request_method_mock.return_value = fake_response
-        resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
-                                   TESTVIP_ID)
-        resource_name = netscaler_driver.VIP_RESOURCE
-        resource_body = self._get_testvip_httpbody_for_update()
-        # call method under test: update_resource() and
-        # assert that it raises the expected exception.
-        self.assertRaises(ncc_client.NCCException,
-                          self.testclient.update_resource,
-                          TEST_TENANT_ID, resource_path,
-                          resource_name, resource_body)
-
-    def test_update_resource(self):
-        """Asserts that a correct update call will succeed."""
-        # create a valid http response with a successful status code.
-        fake_response = requests.Response()
-        fake_response.status_code = requests.codes.ok
-        fake_response.headers = []
-        # obtain the mock object that corresponds to the call of request()
-        self.request_method_mock.return_value = fake_response
-        resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
-                                   TESTVIP_ID)
-        resource_name = netscaler_driver.VIP_RESOURCE
-        resource_body = self._get_testvip_httpbody_for_update()
-        # call method under test: update_resource.
-        self.testclient.update_resource(TEST_TENANT_ID, resource_path,
-                                        resource_name, resource_body)
-        resource_url = "%s/%s" % (self.testclient.service_uri, resource_path)
-        # assert that requests.request() was called with the
-        # expected params.
-        self.request_method_mock.assert_called_once_with(
-            'PUT',
-            url=resource_url,
-            headers=mock.ANY,
-            data=mock.ANY)
-
-    def test_delete_resource_with_error(self):
-        """Asserts that a failed delete call raises an exception."""
-        # create a valid http response with a failure status code.
-        fake_response = requests.Response()
-        fake_response.status_code = requests.codes.unauthorized
-        fake_response.headers = []
-        # obtain the mock object that corresponds to the call of request()
-        self.request_method_mock.return_value = fake_response
-        resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
-                                   TESTVIP_ID)
-        # call method under test: remove_resource() and assert that
-        # it raises the expected exception.
-        self.assertRaises(ncc_client.NCCException,
-                          self.testclient.remove_resource,
-                          TEST_TENANT_ID, resource_path)
-
-    def test_delete_resource(self):
-        """Asserts that a correct delete call will succeed."""
-        # create a valid http response with a successful status code.
-        fake_response = requests.Response()
-        fake_response.status_code = requests.codes.ok
-        fake_response.headers = []
-        # obtain the mock object that corresponds to the call of request()
-        self.request_method_mock.return_value = fake_response
-        resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
-                                   TESTVIP_ID)
-        resource_url = "%s/%s" % (self.testclient.service_uri, resource_path)
-        # call method under test: remove_resource
-        self.testclient.remove_resource(TEST_TENANT_ID, resource_path)
-        # assert that requests.request() was called with the
-        # expected params
-        self.request_method_mock.assert_called_once_with(
-            'DELETE',
-            url=resource_url,
-            headers=mock.ANY,
-            data=mock.ANY)
-
-    def _get_nsclient(self):
-        return ncc_client.NSClient(TESTURI, TEST_USERNAME, TEST_PASSWORD)
-
-    def _get_testvip_httpbody_for_create(self):
-        body = {
-            'name': 'vip1',
-            'address': '10.0.0.3',
-            'pool_id': 'da477c13-24cd-4c9f-8c19-757a61ef3b9d',
-            'protocol': 'HTTP',
-            'protocol_port': 80,
-            'admin_state_up': True,
-        }
-        return body
-
-    def _get_testvip_httpbody_for_update(self):
-        body = {}
-        body['name'] = 'updated vip1'
-        body['admin_state_up'] = False
-        return body
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_driver.py
deleted file mode 100644 (file)
index e10c1a3..0000000
+++ /dev/null
@@ -1,802 +0,0 @@
-# Copyright 2014 Citrix Systems
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-import mock
-
-from neutron.common import exceptions
-from neutron import context
-from neutron.db.loadbalancer import loadbalancer_db
-from neutron import manager
-from neutron.plugins.common import constants
-from neutron.services.loadbalancer.drivers.netscaler import ncc_client
-from neutron.services.loadbalancer.drivers.netscaler import netscaler_driver
-from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
-
-
-LBAAS_DRIVER_CLASS = ('neutron.services.loadbalancer.drivers'
-                      '.netscaler.netscaler_driver'
-                      '.NetScalerPluginDriver')
-
-NCC_CLIENT_CLASS = ('neutron.services.loadbalancer.drivers'
-                    '.netscaler.ncc_client'
-                    '.NSClient')
-
-LBAAS_PROVIDER_NAME = 'netscaler'
-LBAAS_PROVIDER = ('LOADBALANCER:%s:%s:default' %
-                  (LBAAS_PROVIDER_NAME, LBAAS_DRIVER_CLASS))
-
-#Test data
-TESTVIP_ID = '52ab5d71-6bb2-457f-8414-22a4ba55efec'
-TESTPOOL_ID = 'da477c13-24cd-4c9f-8c19-757a61ef3b9d'
-TESTMEMBER_ID = '84dea8bc-3416-4fb0-83f9-2ca6e7173bee'
-TESTMONITOR_ID = '9b9245a2-0413-4f15-87ef-9a41ef66048c'
-
-TESTVIP_PORT_ID = '327d9662-ade9-4c74-aaf6-c76f145c1180'
-TESTPOOL_PORT_ID = '132c1dbb-d3d8-45aa-96e3-71f2ea51651e'
-TESTPOOL_SNATIP_ADDRESS = '10.0.0.50'
-TESTPOOL_SNAT_PORT = {
-    'id': TESTPOOL_PORT_ID,
-    'fixed_ips': [{'ip_address': TESTPOOL_SNATIP_ADDRESS}]
-}
-TESTVIP_IP = '10.0.1.100'
-TESTMEMBER_IP = '10.0.0.5'
-
-
-class TestLoadBalancerPluginBase(test_db_loadbalancer
-                                 .LoadBalancerPluginDbTestCase):
-
-    def setUp(self):
-        super(TestLoadBalancerPluginBase, self).setUp(
-            lbaas_provider=LBAAS_PROVIDER)
-        loaded_plugins = manager.NeutronManager().get_service_plugins()
-        self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
-
-
-class TestNetScalerPluginDriver(TestLoadBalancerPluginBase):
-
-    """Unit tests for the NetScaler LBaaS driver module."""
-
-    def setUp(self):
-        mock.patch.object(netscaler_driver, 'LOG').start()
-
-        # mock the NSClient class (REST client)
-        client_mock_cls = mock.patch(NCC_CLIENT_CLASS).start()
-
-        #mock the REST methods of the NSClient class
-        self.client_mock_instance = client_mock_cls.return_value
-        self.create_resource_mock = self.client_mock_instance.create_resource
-        self.create_resource_mock.side_effect = mock_create_resource_func
-        self.update_resource_mock = self.client_mock_instance.update_resource
-        self.update_resource_mock.side_effect = mock_update_resource_func
-        self.retrieve_resource_mock = (self.client_mock_instance
-                                           .retrieve_resource)
-        self.retrieve_resource_mock.side_effect = mock_retrieve_resource_func
-        self.remove_resource_mock = self.client_mock_instance.remove_resource
-        self.remove_resource_mock.side_effect = mock_remove_resource_func
-        super(TestNetScalerPluginDriver, self).setUp()
-        self.plugin_instance.drivers[LBAAS_PROVIDER_NAME] = (
-            netscaler_driver.NetScalerPluginDriver(self.plugin_instance))
-        self.driver = self.plugin_instance.drivers[LBAAS_PROVIDER_NAME]
-        self.context = context.get_admin_context()
-
-    def test_create_vip(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
-        ) as (subnet, mock_get_subnet):
-            mock_get_subnet.return_value = subnet['subnet']
-            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
-                testvip = self._build_testvip_contents(subnet['subnet'],
-                                                       pool['pool'])
-                expectedvip = self._build_expectedvip_contents(
-                    testvip,
-                    subnet['subnet'])
-                # mock the LBaaS plugin update_status().
-                self._mock_update_status()
-                # reset the create_resource() mock
-                self.create_resource_mock.reset_mock()
-                # execute the method under test
-                self.driver.create_vip(self.context, testvip)
-                # First, assert that create_resource was called once
-                # with expected params.
-                self.create_resource_mock.assert_called_once_with(
-                    None,
-                    netscaler_driver.VIPS_RESOURCE,
-                    netscaler_driver.VIP_RESOURCE,
-                    expectedvip)
-                #Finally, assert that the vip object is now ACTIVE
-                self.mock_update_status_obj.assert_called_once_with(
-                    mock.ANY,
-                    loadbalancer_db.Vip,
-                    expectedvip['id'],
-                    constants.ACTIVE)
-
-    def test_create_vip_without_connection(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
-        ) as (subnet, mock_get_subnet):
-            mock_get_subnet.return_value = subnet['subnet']
-            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
-                testvip = self._build_testvip_contents(subnet['subnet'],
-                                                       pool['pool'])
-                expectedvip = self._build_expectedvip_contents(
-                    testvip,
-                    subnet['subnet'])
-                errorcode = ncc_client.NCCException.CONNECTION_ERROR
-                self.create_resource_mock.side_effect = (
-                    ncc_client.NCCException(errorcode))
-                # mock the plugin's update_status()
-                self._mock_update_status()
-                # reset the create_resource() mock
-                self.create_resource_mock.reset_mock()
-                # execute the method under test.
-                self.driver.create_vip(self.context, testvip)
-                # First, assert that create_resource was called once
-                # with expected params.
-                self.create_resource_mock.assert_called_once_with(
-                    None,
-                    netscaler_driver.VIPS_RESOURCE,
-                    netscaler_driver.VIP_RESOURCE,
-                    expectedvip)
-                #Finally, assert that the vip object is in ERROR state
-                self.mock_update_status_obj.assert_called_once_with(
-                    mock.ANY,
-                    loadbalancer_db.Vip,
-                    testvip['id'],
-                    constants.ERROR)
-
-    def test_update_vip(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
-        ) as (subnet, mock_get_subnet):
-            mock_get_subnet.return_value = subnet['subnet']
-            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
-                with self.vip(pool=pool, subnet=subnet) as vip:
-                    updated_vip = self._build_updated_testvip_contents(
-                        vip['vip'],
-                        subnet['subnet'],
-                        pool['pool'])
-                    expectedvip = self._build_updated_expectedvip_contents(
-                        updated_vip,
-                        subnet['subnet'],
-                        pool['pool'])
-                    # mock the plugin's update_status()
-                    self._mock_update_status()
-                    # reset the update_resource() mock
-                    self.update_resource_mock.reset_mock()
-                    # execute the method under test
-                    self.driver.update_vip(self.context, updated_vip,
-                                           updated_vip)
-                    vip_resource_path = "%s/%s" % (
-                        (netscaler_driver.VIPS_RESOURCE,
-                         vip['vip']['id']))
-                    # First, assert that update_resource was called once
-                    # with expected params.
-                    (self.update_resource_mock
-                         .assert_called_once_with(
-                             None,
-                             vip_resource_path,
-                             netscaler_driver.VIP_RESOURCE,
-                             expectedvip))
-                    #Finally, assert that the vip object is now ACTIVE
-                    self.mock_update_status_obj.assert_called_once_with(
-                        mock.ANY,
-                        loadbalancer_db.Vip,
-                        vip['vip']['id'],
-                        constants.ACTIVE)
-
-    def test_delete_vip(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
-        ) as (subnet, mock_get_subnet):
-            mock_get_subnet.return_value = subnet['subnet']
-            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
-                with contextlib.nested(
-                    self.vip(pool=pool, subnet=subnet),
-                    mock.patch.object(self.driver.plugin, '_delete_db_vip')
-                ) as (vip, mock_delete_db_vip):
-                    mock_delete_db_vip.return_value = None
-                    #reset the remove_resource() mock
-                    self.remove_resource_mock.reset_mock()
-                    # execute the method under test
-                    self.driver.delete_vip(self.context, vip['vip'])
-                    vip_resource_path = "%s/%s" % (
-                                        (netscaler_driver.VIPS_RESOURCE,
-                                         vip['vip']['id']))
-                    # Assert that remove_resource() was called once
-                    # with expected params.
-                    (self.remove_resource_mock
-                         .assert_called_once_with(None, vip_resource_path))
-
-    def test_create_pool(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'),
-            mock.patch.object(self.driver.plugin._core_plugin, 'create_port')
-        ) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port):
-            mock_get_subnet.return_value = subnet['subnet']
-            mock_get_ports.return_value = None
-            mock_create_port.return_value = TESTPOOL_SNAT_PORT
-            testpool = self._build_testpool_contents(subnet['subnet'])
-            expectedpool = self._build_expectedpool_contents(testpool,
-                                                             subnet['subnet'])
-            #reset the create_resource() mock
-            self.create_resource_mock.reset_mock()
-            # mock the plugin's update_status()
-            self._mock_update_status()
-            # execute the method under test
-            self.driver.create_pool(self.context, testpool)
-            # First, assert that create_resource was called once
-            # with expected params.
-            (self.create_resource_mock
-                 .assert_called_once_with(None,
-                                          netscaler_driver.POOLS_RESOURCE,
-                                          netscaler_driver.POOL_RESOURCE,
-                                          expectedpool))
-            #Finally, assert that the pool object is now ACTIVE
-            self.mock_update_status_obj.assert_called_once_with(
-                mock.ANY,
-                loadbalancer_db.Pool,
-                expectedpool['id'],
-                constants.ACTIVE)
-
-    def test_create_pool_with_error(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'),
-            mock.patch.object(self.driver.plugin._core_plugin, 'create_port')
-        ) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port):
-            mock_get_subnet.return_value = subnet['subnet']
-            mock_get_ports.return_value = None
-            mock_create_port.return_value = TESTPOOL_SNAT_PORT
-            errorcode = ncc_client.NCCException.CONNECTION_ERROR
-            self.create_resource_mock.side_effect = (ncc_client
-                                                     .NCCException(errorcode))
-            testpool = self._build_testpool_contents(subnet['subnet'])
-            expectedpool = self._build_expectedpool_contents(testpool,
-                                                             subnet['subnet'])
-            # mock the plugin's update_status()
-            self._mock_update_status()
-            #reset the create_resource() mock
-            self.create_resource_mock.reset_mock()
-            # execute the method under test.
-            self.driver.create_pool(self.context, testpool)
-            # First, assert that create_resource was called once
-            # with expected params.
-            (self.create_resource_mock
-                 .assert_called_once_with(None,
-                                          netscaler_driver.POOLS_RESOURCE,
-                                          netscaler_driver.POOL_RESOURCE,
-                                          expectedpool))
-            #Finally, assert that the pool object is in ERROR state
-            self.mock_update_status_obj.assert_called_once_with(
-                mock.ANY,
-                loadbalancer_db.Pool,
-                expectedpool['id'],
-                constants.ERROR)
-
-    def test_create_pool_with_snatportcreate_failure(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'),
-            mock.patch.object(self.driver.plugin._core_plugin, 'create_port')
-        ) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port):
-            mock_get_subnet.return_value = subnet['subnet']
-            mock_get_ports.return_value = None
-            mock_create_port.side_effect = exceptions.NeutronException()
-            testpool = self._build_testpool_contents(subnet['subnet'])
-            #reset the create_resource() mock
-            self.create_resource_mock.reset_mock()
-            # execute the method under test.
-            self.assertRaises(exceptions.NeutronException,
-                              self.driver.create_pool,
-                              self.context, testpool)
-
-    def test_update_pool(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
-        ) as (subnet, mock_get_subnet):
-            mock_get_subnet.return_value = subnet['subnet']
-            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
-                updated_pool = self._build_updated_testpool_contents(
-                    pool['pool'],
-                    subnet['subnet'])
-                expectedpool = self._build_updated_expectedpool_contents(
-                    updated_pool,
-                    subnet['subnet'])
-                # mock the plugin's update_status()
-                self._mock_update_status()
-                # reset the update_resource() mock
-                self.update_resource_mock.reset_mock()
-                # execute the method under test.
-                self.driver.update_pool(self.context, pool['pool'],
-                                        updated_pool)
-                pool_resource_path = "%s/%s" % (
-                    (netscaler_driver.POOLS_RESOURCE,
-                     pool['pool']['id']))
-                # First, assert that update_resource was called once
-                # with expected params.
-                (self.update_resource_mock
-                     .assert_called_once_with(None,
-                                              pool_resource_path,
-                                              netscaler_driver.POOL_RESOURCE,
-                                              expectedpool))
-                #Finally, assert that the pool object is now ACTIVE
-                self.mock_update_status_obj.assert_called_once_with(
-                    mock.ANY,
-                    loadbalancer_db.Pool,
-                    pool['pool']['id'],
-                    constants.ACTIVE)
-
-    def test_delete_pool(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
-        ) as (subnet, mock_get_subnet):
-            mock_get_subnet.return_value = subnet['subnet']
-            with contextlib.nested(
-                self.pool(provider=LBAAS_PROVIDER_NAME),
-                mock.patch.object(self.driver.plugin._core_plugin,
-                                  'delete_port'),
-                mock.patch.object(self.driver.plugin._core_plugin,
-                                  'get_ports'),
-                mock.patch.object(self.driver.plugin,
-                                  'get_pools'),
-                mock.patch.object(self.driver.plugin,
-                                  '_delete_db_pool')
-            ) as (pool, mock_delete_port, mock_get_ports, mock_get_pools,
-                  mock_delete_db_pool):
-                mock_delete_port.return_value = None
-                mock_get_ports.return_value = [{'id': TESTPOOL_PORT_ID}]
-                mock_get_pools.return_value = []
-                mock_delete_db_pool.return_value = None
-                #reset the remove_resource() mock
-                self.remove_resource_mock.reset_mock()
-                # execute the method under test.
-                self.driver.delete_pool(self.context, pool['pool'])
-                pool_resource_path = "%s/%s" % (
-                    (netscaler_driver.POOLS_RESOURCE,
-                     pool['pool']['id']))
-                # Assert that delete_resource was called
-                # once with expected params.
-                (self.remove_resource_mock
-                     .assert_called_once_with(None, pool_resource_path))
-
-    def test_create_member(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin,
-                              'get_subnet')
-        ) as (subnet, mock_get_subnet):
-            mock_get_subnet.return_value = subnet['subnet']
-            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
-                testmember = self._build_testmember_contents(pool['pool'])
-                expectedmember = self._build_expectedmember_contents(
-                    testmember)
-                # mock the plugin's update_status()
-                self._mock_update_status()
-                #reset the create_resource() mock
-                self.create_resource_mock.reset_mock()
-                # execute the method under test.
-                self.driver.create_member(self.context, testmember)
-                # First, assert that create_resource was called once
-                # with expected params.
-                (self.create_resource_mock
-                     .assert_called_once_with(
-                         None,
-                         netscaler_driver.POOLMEMBERS_RESOURCE,
-                         netscaler_driver.POOLMEMBER_RESOURCE,
-                         expectedmember))
-                #Finally, assert that the member object is now ACTIVE
-                self.mock_update_status_obj.assert_called_once_with(
-                    mock.ANY,
-                    loadbalancer_db.Member,
-                    expectedmember['id'],
-                    constants.ACTIVE)
-
-    def test_update_member(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
-        ) as (subnet, mock_get_subnet):
-            mock_get_subnet.return_value = subnet['subnet']
-            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
-                with self.member(pool_id=pool['pool']['id']) as member:
-                    updatedmember = (self._build_updated_testmember_contents(
-                        member['member']))
-                    expectedmember = (self
-                                      ._build_updated_expectedmember_contents(
-                                          updatedmember))
-                    # mock the plugin's update_status()
-                    self._mock_update_status()
-                    # reset the update_resource() mock
-                    self.update_resource_mock.reset_mock()
-                    # execute the method under test
-                    self.driver.update_member(self.context,
-                                              member['member'],
-                                              updatedmember)
-                    member_resource_path = "%s/%s" % (
-                        (netscaler_driver.POOLMEMBERS_RESOURCE,
-                         member['member']['id']))
-                    # First, assert that update_resource was called once
-                    # with expected params.
-                    (self.update_resource_mock
-                         .assert_called_once_with(
-                             None,
-                             member_resource_path,
-                             netscaler_driver.POOLMEMBER_RESOURCE,
-                             expectedmember))
-                    #Finally, assert that the member object is now ACTIVE
-                    self.mock_update_status_obj.assert_called_once_with(
-                        mock.ANY,
-                        loadbalancer_db.Member,
-                        member['member']['id'],
-                        constants.ACTIVE)
-
-    def test_delete_member(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
-        ) as (subnet, mock_get_subnet):
-            mock_get_subnet.return_value = subnet['subnet']
-            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
-                with contextlib.nested(
-                    self.member(pool_id=pool['pool']['id']),
-                    mock.patch.object(self.driver.plugin, '_delete_db_member')
-                ) as (member, mock_delete_db_member):
-                    mock_delete_db_member.return_value = None
-                    # reset the remove_resource() mock
-                    self.remove_resource_mock.reset_mock()
-                    # execute the method under test
-                    self.driver.delete_member(self.context,
-                                              member['member'])
-                    member_resource_path = "%s/%s" % (
-                        (netscaler_driver.POOLMEMBERS_RESOURCE,
-                         member['member']['id']))
-                    # Assert that delete_resource was called once
-                    # with expected params.
-                    (self.remove_resource_mock
-                         .assert_called_once_with(None, member_resource_path))
-
-    def test_create_pool_health_monitor(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
-        ) as (subnet, mock_get_subnet):
-            mock_get_subnet.return_value = subnet['subnet']
-            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
-                testhealthmonitor = self._build_testhealthmonitor_contents(
-                    pool['pool'])
-                expectedhealthmonitor = (
-                    self._build_expectedhealthmonitor_contents(
-                        testhealthmonitor))
-                with mock.patch.object(self.driver.plugin,
-                                       'update_pool_health_monitor') as mhm:
-                    # reset the create_resource() mock
-                    self.create_resource_mock.reset_mock()
-                    # execute the method under test.
-                    self.driver.create_pool_health_monitor(self.context,
-                                                           testhealthmonitor,
-                                                           pool['pool']['id'])
-                    # First, assert that create_resource was called once
-                    # with expected params.
-                    resource_path = "%s/%s/%s" % (
-                        netscaler_driver.POOLS_RESOURCE,
-                        pool['pool']['id'],
-                        netscaler_driver.MONITORS_RESOURCE)
-                    (self.create_resource_mock
-                         .assert_called_once_with(
-                             None,
-                             resource_path,
-                             netscaler_driver.MONITOR_RESOURCE,
-                             expectedhealthmonitor))
-                    # Finally, assert that the healthmonitor object is
-                    # now ACTIVE.
-                    (mhm.assert_called_once_with(
-                        mock.ANY,
-                        expectedhealthmonitor['id'],
-                        pool['pool']['id'],
-                        constants.ACTIVE, ""))
-
-    def test_update_pool_health_monitor(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
-        ) as (subnet, mock_get_subnet):
-            mock_get_subnet.return_value = subnet['subnet']
-            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
-                with self.health_monitor(
-                    pool_id=pool['pool']['id']
-                ) as (health_monitor):
-                    updatedhealthmonitor = (
-                        self._build_updated_testhealthmonitor_contents(
-                            health_monitor['health_monitor']))
-                    expectedhealthmonitor = (
-                        self._build_updated_expectedhealthmonitor_contents(
-                            updatedhealthmonitor))
-                    with mock.patch.object(
-                            self.driver.plugin,
-                            'update_pool_health_monitor') as mhm:
-                        # reset the update_resource() mock
-                        self.update_resource_mock.reset_mock()
-                        # execute the method under test.
-                        self.driver.update_pool_health_monitor(
-                            self.context,
-                            health_monitor['health_monitor'],
-                            updatedhealthmonitor,
-                            pool['pool']['id'])
-                        monitor_resource_path = "%s/%s" % (
-                            (netscaler_driver.MONITORS_RESOURCE,
-                             health_monitor['health_monitor']['id']))
-                        # First, assert that update_resource was called once
-                        # with expected params.
-                        self.update_resource_mock.assert_called_once_with(
-                            None,
-                            monitor_resource_path,
-                            netscaler_driver.MONITOR_RESOURCE,
-                            expectedhealthmonitor)
-                        # Finally, assert that the health monitor object
-                        # is now ACTIVE.
-                        (mhm.assert_called_once_with(
-                            mock.ANY,
-                            health_monitor['health_monitor']['id'],
-                            pool['pool']['id'],
-                            constants.ACTIVE, ""))
-
-    def test_delete_pool_health_monitor(self):
-        with contextlib.nested(
-            self.subnet(),
-            mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
-        ) as (subnet, mock_get_subnet):
-            mock_get_subnet.return_value = subnet['subnet']
-            with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
-                with contextlib.nested(
-                    self.health_monitor(pool_id=pool['pool']['id']),
-                    mock.patch.object(self.driver.plugin,
-                                      '_delete_db_pool_health_monitor')
-                ) as (health_monitor, mock_delete_db_monitor):
-                    mock_delete_db_monitor.return_value = None
-                    # reset the remove_resource() mock
-                    self.remove_resource_mock.reset_mock()
-                    # execute the method under test.
-                    self.driver.delete_pool_health_monitor(
-                        self.context,
-                        health_monitor['health_monitor'],
-                        pool['pool']['id'])
-                    monitor_resource_path = "%s/%s/%s/%s" % (
-                        netscaler_driver.POOLS_RESOURCE,
-                        pool['pool']['id'],
-                        netscaler_driver.MONITORS_RESOURCE,
-                        health_monitor['health_monitor']['id'])
-                    # Assert that delete_resource was called once
-                    # with expected params.
-                    self.remove_resource_mock.assert_called_once_with(
-                        None,
-                        monitor_resource_path)
-
-    def _build_testvip_contents(self, subnet, pool):
-        vip_obj = dict(id=TESTVIP_ID,
-                       name='testvip',
-                       description='a test vip',
-                       tenant_id=self._tenant_id,
-                       subnet_id=subnet['id'],
-                       address=TESTVIP_IP,
-                       port_id=TESTVIP_PORT_ID,
-                       pool_id=pool['id'],
-                       protocol='HTTP',
-                       protocol_port=80,
-                       connection_limit=1000,
-                       admin_state_up=True,
-                       status='PENDING_CREATE',
-                       status_description='')
-        return vip_obj
-
-    def _build_expectedvip_contents(self, testvip, subnet):
-        expectedvip = dict(id=testvip['id'],
-                           name=testvip['name'],
-                           description=testvip['description'],
-                           tenant_id=testvip['tenant_id'],
-                           subnet_id=testvip['subnet_id'],
-                           address=testvip['address'],
-                           network_id=subnet['network_id'],
-                           port_id=testvip['port_id'],
-                           pool_id=testvip['pool_id'],
-                           protocol=testvip['protocol'],
-                           protocol_port=testvip['protocol_port'],
-                           connection_limit=testvip['connection_limit'],
-                           admin_state_up=testvip['admin_state_up'])
-        return expectedvip
-
-    def _build_updated_testvip_contents(self, testvip, subnet, pool):
-        #update some updateable fields of the vip
-        testvip['name'] = 'updated testvip'
-        testvip['description'] = 'An updated version of test vip'
-        testvip['connection_limit'] = 2000
-        return testvip
-
-    def _build_updated_expectedvip_contents(self, testvip, subnet, pool):
-        expectedvip = dict(name=testvip['name'],
-                           description=testvip['description'],
-                           connection_limit=testvip['connection_limit'],
-                           admin_state_up=testvip['admin_state_up'],
-                           pool_id=testvip['pool_id'])
-        return expectedvip
-
-    def _build_testpool_contents(self, subnet):
-        pool_obj = dict(id=TESTPOOL_ID,
-                        name='testpool',
-                        description='a test pool',
-                        tenant_id=self._tenant_id,
-                        subnet_id=subnet['id'],
-                        protocol='HTTP',
-                        vip_id=None,
-                        admin_state_up=True,
-                        lb_method='ROUND_ROBIN',
-                        status='PENDING_CREATE',
-                        status_description='',
-                        members=[],
-                        health_monitors=[],
-                        health_monitors_status=None,
-                        provider=LBAAS_PROVIDER_NAME)
-        return pool_obj
-
-    def _build_expectedpool_contents(self, testpool, subnet):
-        expectedpool = dict(id=testpool['id'],
-                            name=testpool['name'],
-                            description=testpool['description'],
-                            tenant_id=testpool['tenant_id'],
-                            subnet_id=testpool['subnet_id'],
-                            network_id=subnet['network_id'],
-                            protocol=testpool['protocol'],
-                            vip_id=testpool['vip_id'],
-                            lb_method=testpool['lb_method'],
-                            snat_ip=TESTPOOL_SNATIP_ADDRESS,
-                            port_id=TESTPOOL_PORT_ID,
-                            admin_state_up=testpool['admin_state_up'])
-        return expectedpool
-
-    def _build_updated_testpool_contents(self, testpool, subnet):
-        updated_pool = dict(testpool.items())
-        updated_pool['name'] = 'updated testpool'
-        updated_pool['description'] = 'An updated version of test pool'
-        updated_pool['lb_method'] = 'LEAST_CONNECTIONS'
-        updated_pool['admin_state_up'] = True
-        updated_pool['provider'] = LBAAS_PROVIDER_NAME
-        updated_pool['status'] = 'PENDING_UPDATE'
-        updated_pool['status_description'] = ''
-        updated_pool['members'] = []
-        updated_pool["health_monitors"] = []
-        updated_pool["health_monitors_status"] = None
-        return updated_pool
-
-    def _build_updated_expectedpool_contents(self, testpool, subnet):
-        expectedpool = dict(name=testpool['name'],
-                            description=testpool['description'],
-                            lb_method=testpool['lb_method'],
-                            admin_state_up=testpool['admin_state_up'])
-        return expectedpool
-
-    def _build_testmember_contents(self, pool):
-        member_obj = dict(
-            id=TESTMEMBER_ID,
-            tenant_id=self._tenant_id,
-            pool_id=pool['id'],
-            address=TESTMEMBER_IP,
-            protocol_port=8080,
-            weight=2,
-            admin_state_up=True,
-            status='PENDING_CREATE',
-            status_description='')
-        return member_obj
-
-    def _build_expectedmember_contents(self, testmember):
-        expectedmember = dict(
-            id=testmember['id'],
-            tenant_id=testmember['tenant_id'],
-            pool_id=testmember['pool_id'],
-            address=testmember['address'],
-            protocol_port=testmember['protocol_port'],
-            weight=testmember['weight'],
-            admin_state_up=testmember['admin_state_up'])
-        return expectedmember
-
-    def _build_updated_testmember_contents(self, testmember):
-        updated_member = dict(testmember.items())
-        updated_member.update(
-            weight=3,
-            admin_state_up=True,
-            status='PENDING_CREATE',
-            status_description=''
-        )
-        return updated_member
-
-    def _build_updated_expectedmember_contents(self, testmember):
-        expectedmember = dict(weight=testmember['weight'],
-                              pool_id=testmember['pool_id'],
-                              admin_state_up=testmember['admin_state_up'])
-        return expectedmember
-
-    def _build_testhealthmonitor_contents(self, pool):
-        monitor_obj = dict(
-            id=TESTMONITOR_ID,
-            tenant_id=self._tenant_id,
-            type='TCP',
-            delay=10,
-            timeout=5,
-            max_retries=3,
-            admin_state_up=True,
-            pools=[])
-        pool_obj = dict(status='PENDING_CREATE',
-                        status_description=None,
-                        pool_id=pool['id'])
-        monitor_obj['pools'].append(pool_obj)
-        return monitor_obj
-
-    def _build_expectedhealthmonitor_contents(self, testhealthmonitor):
-        expectedmonitor = dict(id=testhealthmonitor['id'],
-                               tenant_id=testhealthmonitor['tenant_id'],
-                               type=testhealthmonitor['type'],
-                               delay=testhealthmonitor['delay'],
-                               timeout=testhealthmonitor['timeout'],
-                               max_retries=testhealthmonitor['max_retries'],
-                               admin_state_up=(
-                                   testhealthmonitor['admin_state_up']))
-        return expectedmonitor
-
-    def _build_updated_testhealthmonitor_contents(self, testmonitor):
-        updated_monitor = dict(testmonitor.items())
-        updated_monitor.update(
-            delay=30,
-            timeout=3,
-            max_retries=5,
-            admin_state_up=True
-        )
-        return updated_monitor
-
-    def _build_updated_expectedhealthmonitor_contents(self, testmonitor):
-        expectedmonitor = dict(delay=testmonitor['delay'],
-                               timeout=testmonitor['timeout'],
-                               max_retries=testmonitor['max_retries'],
-                               admin_state_up=testmonitor['admin_state_up'])
-        return expectedmonitor
-
-    def _mock_update_status(self):
-        #patch the plugin's update_status() method with a mock object
-        self.mock_update_status_patcher = mock.patch.object(
-            self.driver.plugin,
-            'update_status')
-        self.mock_update_status_obj = self.mock_update_status_patcher.start()
-
-
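-# Stub side effects for the mocked NSClient REST methods wired up in setUp();
-# each returns an (HTTP status code, response body) pair.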
-def mock_create_resource_func(*args, **kwargs):
-    return 201, {}
-
-
-def mock_update_resource_func(*args, **kwargs):
-    return 202, {}
-
-
-def mock_retrieve_resource_func(*args, **kwargs):
-    return 200, {}
-
-
-def mock_remove_resource_func(*args, **kwargs):
-    return 200, {}
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/radware/__init__.py b/neutron/tests/unit/services/loadbalancer/drivers/radware/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py
deleted file mode 100644 (file)
index e3bf644..0000000
+++ /dev/null
@@ -1,997 +0,0 @@
-# Copyright 2013 Radware LTD.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-
-import contextlib
-import mock
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from six.moves import queue as Queue
-
-from neutron.api.v2 import attributes
-from neutron import context
-from neutron.extensions import loadbalancer
-from neutron import manager
-from neutron.plugins.common import constants
-from neutron.services.loadbalancer.drivers.radware import driver
-from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc
-from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
-
-GET_200 = ('/api/workflow/', '/api/service/', '/api/workflowTemplate')
-SERVER_DOWN_CODES = (-1, 301, 307)
-
-
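-# Queue stand-in that runs each queued operation synchronously through the
-# completion handler instead of deferring it to a worker thread.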
-class QueueMock(Queue.Queue):
-    def __init__(self, completion_handler):
-        self.completion_handler = completion_handler
-        super(QueueMock, self).__init__()
-
-    def put_nowait(self, oper):
-        self.completion_handler(oper)
-
-
-def _recover_function_mock(action, resource, data, headers, binary=False):
-    pass
-
-
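-# Emulates the Radware backend REST API; behavior is driven by the
-# RESPOND_WITH_ERROR, RESPOND_WITH_SERVER_DOWN and TEMPLATES_MISSING flags
-# that the tests set on this function object.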
-def rest_call_function_mock(action, resource, data, headers, binary=False):
-    if rest_call_function_mock.RESPOND_WITH_ERROR:
-        return 400, 'error_status', 'error_description', None
-    if rest_call_function_mock.RESPOND_WITH_SERVER_DOWN in SERVER_DOWN_CODES:
-        val = rest_call_function_mock.RESPOND_WITH_SERVER_DOWN
-        return val, 'error_status', 'error_description', None
-    if action == 'GET':
-        return _get_handler(resource)
-    elif action == 'DELETE':
-        return _delete_handler(resource)
-    elif action == 'POST':
-        return _post_handler(resource, binary)
-    else:
-        return 0, None, None, None
-
-
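-# Canned GET responses: the workflow-template listing, a plain 200 for the
-# other known resources, and a 202 "operation complete" payload otherwise.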
-def _get_handler(resource):
-    if resource == GET_200[2]:
-        if rest_call_function_mock.TEMPLATES_MISSING:
-            data = jsonutils.loads('[]')
-        else:
-            data = jsonutils.loads(
-                '[{"name":"openstack_l2_l3"},{"name":"openstack_l4"}]'
-            )
-        return 200, '', '', data
-
-    if resource in GET_200:
-        return 200, '', '', ''
-    else:
-        data = jsonutils.loads('{"complete":"True", "success": "True"}')
-        return 202, '', '', data
-
-
-def _delete_handler(resource):
-    return 404, '', '', {'message': 'Not Found'}
-
-
-def _post_handler(resource, binary):
-    if re.search(r'/api/workflow/.+/action/.+', resource):
-        data = jsonutils.loads('{"uri":"some_uri"}')
-        return 202, '', '', data
-    elif re.search(r'/api/service\?name=.+', resource):
-        data = jsonutils.loads('{"links":{"actions":{"provision":"someuri"}}}')
-        return 201, '', '', data
-    elif binary:
-        return 201, '', '', ''
-    else:
-        return 202, '', '', ''
-
-RADWARE_PROVIDER = ('LOADBALANCER:radware:neutron.services.'
-                    'loadbalancer.drivers.radware.driver.'
-                    'LoadBalancerDriver:default')
-
-
-class TestLoadBalancerPluginBase(
-    test_db_loadbalancer.LoadBalancerPluginDbTestCase):
-
-    def setUp(self):
-        super(TestLoadBalancerPluginBase, self).setUp(
-            lbaas_provider=RADWARE_PROVIDER)
-
-        loaded_plugins = manager.NeutronManager().get_service_plugins()
-        self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
-
-
-class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
-    def setUp(self):
-        super(TestLoadBalancerPlugin, self).setUp()
-
-        rest_call_function_mock.__dict__.update(
-            {'RESPOND_WITH_ERROR': False})
-        rest_call_function_mock.__dict__.update(
-            {'TEMPLATES_MISSING': False})
-        rest_call_function_mock.__dict__.update(
-            {'RESPOND_WITH_SERVER_DOWN': 200})
-
-        self.operation_completer_start_mock = mock.Mock(
-            return_value=None)
-        self.operation_completer_join_mock = mock.Mock(
-            return_value=None)
-        self.driver_rest_call_mock = mock.Mock(
-            side_effect=rest_call_function_mock)
-        self.flip_servers_mock = mock.Mock(
-            return_value=None)
-        self.recover_mock = mock.Mock(
-            side_effect=_recover_function_mock)
-
-        radware_driver = self.plugin_instance.drivers['radware']
-        radware_driver.completion_handler.start = (
-            self.operation_completer_start_mock)
-        radware_driver.completion_handler.join = (
-            self.operation_completer_join_mock)
-        self.orig_call = radware_driver.rest_client.call
-        self.orig__call = radware_driver.rest_client._call
-        radware_driver.rest_client.call = self.driver_rest_call_mock
-        radware_driver.rest_client._call = self.driver_rest_call_mock
-        radware_driver.rest_client._flip_servers = self.flip_servers_mock
-        radware_driver.rest_client._recover = self.recover_mock
-        radware_driver.completion_handler.rest_client.call = (
-            self.driver_rest_call_mock)
-
-        radware_driver.queue = QueueMock(
-            radware_driver.completion_handler.handle_operation_completion)
-
-        self.addCleanup(radware_driver.completion_handler.join)
-
-    def test_get_pip(self):
-        """Call _get_pip twice and verify that a Port is created once."""
-        port_dict = {'fixed_ips': [{'subnet_id': '10.10.10.10',
-                                    'ip_address': '11.11.11.11'}]}
-        port_data = {
-            'tenant_id': 'tenant_id',
-            'name': 'port_name',
-            'network_id': 'network_id',
-            'mac_address': attributes.ATTR_NOT_SPECIFIED,
-            'admin_state_up': False,
-            'device_id': '',
-            'device_owner': 'neutron:' + constants.LOADBALANCER,
-            'fixed_ips': [{'subnet_id': '10.10.10.10'}]
-        }
-        self.plugin_instance._core_plugin.get_ports = mock.Mock(
-            return_value=[])
-        self.plugin_instance._core_plugin.create_port = mock.Mock(
-            return_value=port_dict)
-        radware_driver = self.plugin_instance.drivers['radware']
-        radware_driver._get_pip(context.get_admin_context(),
-                                'tenant_id', 'port_name',
-                                'network_id', '10.10.10.10')
-        self.plugin_instance._core_plugin.get_ports.assert_called_once_with(
-                mock.ANY, filters={'name': ['port_name']})
-        self.plugin_instance._core_plugin.create_port.assert_called_once_with(
-                mock.ANY, {'port': port_data})
-        self.plugin_instance._core_plugin.create_port.reset_mock()
-        self.plugin_instance._core_plugin.get_ports.reset_mock()
-        self.plugin_instance._core_plugin.get_ports.return_value = [port_dict]
-        radware_driver._get_pip(context.get_admin_context(),
-                                'tenant_id', 'port_name',
-                                'network_id', '10.10.10.10')
-        self.plugin_instance._core_plugin.get_ports.assert_called_once_with(
-                mock.ANY, filters={'name': ['port_name']})
-        self.assertFalse(self.plugin_instance._core_plugin.create_port.called)
-
-    def test_rest_client_recover_was_called(self):
-        """Call the real REST client and verify _recover is called."""
-        radware_driver = self.plugin_instance.drivers['radware']
-        radware_driver.rest_client.call = self.orig_call
-        radware_driver.rest_client._call = self.orig__call
-        self.assertRaises(r_exc.RESTRequestFailure,
-                          radware_driver._verify_workflow_templates)
-        self.recover_mock.assert_called_once_with('GET',
-                                                  '/api/workflowTemplate',
-                                                  None, None, False)
-
-    def test_rest_client_flip_servers(self):
-        radware_driver = self.plugin_instance.drivers['radware']
-        server = radware_driver.rest_client.server
-        sec_server = radware_driver.rest_client.secondary_server
-        radware_driver.rest_client._flip_servers()
-        self.assertEqual(server,
-                         radware_driver.rest_client.secondary_server)
-        self.assertEqual(sec_server,
-                         radware_driver.rest_client.server)
-
-    def test_verify_workflow_templates_server_down(self):
-        """Test the rest call failure when backend is down."""
-        for value in SERVER_DOWN_CODES:
-            rest_call_function_mock.__dict__.update(
-                {'RESPOND_WITH_SERVER_DOWN': value})
-            self.assertRaises(r_exc.RESTRequestFailure,
-                              self.plugin_instance.drivers['radware'].
-                              _verify_workflow_templates)
-
-    def test_verify_workflow_templates(self):
-        """Test the rest call failure handling by Exception raising."""
-        rest_call_function_mock.__dict__.update(
-            {'TEMPLATES_MISSING': True})
-
-        self.assertRaises(r_exc.WorkflowMissing,
-                          self.plugin_instance.drivers['radware'].
-                          _verify_workflow_templates)
-
-    def test_create_vip_failure(self):
-        """Test the rest call failure handling by Exception raising."""
-        with self.network() as network:
-            with self.subnet(network=network) as subnet:
-                with self.pool(do_delete=False,
-                               provider='radware',
-                               subnet_id=subnet['subnet']['id']) as pool:
-                    vip_data = {
-                        'name': 'vip1',
-                        'subnet_id': subnet['subnet']['id'],
-                        'pool_id': pool['pool']['id'],
-                        'description': '',
-                        'protocol_port': 80,
-                        'protocol': 'HTTP',
-                        'connection_limit': -1,
-                        'admin_state_up': True,
-                        'status': constants.PENDING_CREATE,
-                        'tenant_id': self._tenant_id,
-                        'session_persistence': ''
-                    }
-
-                    rest_call_function_mock.__dict__.update(
-                        {'RESPOND_WITH_ERROR': True})
-
-                    self.assertRaises(r_exc.RESTRequestFailure,
-                                      self.plugin_instance.create_vip,
-                                      context.get_admin_context(),
-                                      {'vip': vip_data})
-
-    def test_create_vip(self):
-        with self.subnet() as subnet:
-            with self.pool(provider='radware',
-                           subnet_id=subnet['subnet']['id']) as pool:
-                vip_data = {
-                    'name': 'vip1',
-                    'subnet_id': subnet['subnet']['id'],
-                    'pool_id': pool['pool']['id'],
-                    'description': '',
-                    'protocol_port': 80,
-                    'protocol': 'HTTP',
-                    'connection_limit': -1,
-                    'admin_state_up': True,
-                    'status': constants.PENDING_CREATE,
-                    'tenant_id': self._tenant_id,
-                    'session_persistence': ''
-                }
-
-                vip = self.plugin_instance.create_vip(
-                    context.get_admin_context(), {'vip': vip_data})
-
-                # Test creation REST calls
-                calls = [
-                    mock.call('GET', u'/api/service/srv_' +
-                              subnet['subnet']['network_id'], None, None),
-                    mock.call('POST', u'/api/service?name=srv_' +
-                              subnet['subnet']['network_id'] + '&tenant=' +
-                              vip['tenant_id'], mock.ANY,
-                              driver.CREATE_SERVICE_HEADER),
-                    mock.call('GET', u'/api/workflow/l2_l3_' +
-                              subnet['subnet']['network_id'], None, None),
-                    mock.call('POST', '/api/workflow/l2_l3_' +
-                              subnet['subnet']['network_id'] +
-                              '/action/setup_l2_l3',
-                              mock.ANY, driver.TEMPLATE_HEADER),
-                    mock.call('POST', 'someuri',
-                              None, driver.PROVISION_HEADER),
-
-                    mock.call('POST', '/api/workflowTemplate/' +
-                              'openstack_l4' +
-                              '?name=' + pool['pool']['id'],
-                              mock.ANY,
-                              driver.TEMPLATE_HEADER),
-                    mock.call('POST', '/api/workflowTemplate/' +
-                              'openstack_l2_l3' +
-                              '?name=l2_l3_' + subnet['subnet']['network_id'],
-                              mock.ANY,
-                              driver.TEMPLATE_HEADER),
-
-                    mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
-                              '/action/BaseCreate',
-                              mock.ANY, driver.TEMPLATE_HEADER),
-                    mock.call('GET', '/api/workflow/' +
-                              pool['pool']['id'], None, None)
-                ]
-                self.driver_rest_call_mock.assert_has_calls(calls,
-                                                            any_order=True)
-
-                # Test DB
-                new_vip = self.plugin_instance.get_vip(
-                    context.get_admin_context(),
-                    vip['id']
-                )
-                self.assertEqual(new_vip['status'], constants.ACTIVE)
-
-                # Delete VIP
-                self.plugin_instance.delete_vip(
-                    context.get_admin_context(), vip['id'])
-
-                # Test deletion REST calls
-                calls = [
-                    mock.call('DELETE', u'/api/workflow/' + pool['pool']['id'],
-                              None, None)
-                ]
-                self.driver_rest_call_mock.assert_has_calls(
-                    calls, any_order=True)
-
-    def test_create_vip_2_leg(self):
-        """Test creation of a VIP where Alteon VIP and PIP are different."""
-
-        with self.subnet(cidr='10.0.0.0/24') as subnet:
-            with self.subnet(cidr='10.0.1.0/24') as pool_sub:
-                with self.pool(provider='radware',
-                               subnet_id=pool_sub['subnet']['id']) as pool:
-                    vip_data = {
-                        'name': 'vip1',
-                        'subnet_id': subnet['subnet']['id'],
-                        'pool_id': pool['pool']['id'],
-                        'description': '',
-                        'protocol_port': 80,
-                        'protocol': 'HTTP',
-                        'connection_limit': -1,
-                        'admin_state_up': True,
-                        'status': constants.PENDING_CREATE,
-                        'tenant_id': self._tenant_id,
-                        'session_persistence': ''
-                    }
-
-                    vip = self.plugin_instance.create_vip(
-                        context.get_admin_context(), {'vip': vip_data})
-                    name_suffix = '%s_%s' % (subnet['subnet']['network_id'],
-                                             pool_sub['subnet']['network_id'])
-                    # Test creation REST calls
-                    calls = [
-                        mock.call('GET', '/api/workflowTemplate', None, None),
-                        mock.call('GET', '/api/service/srv_' + name_suffix,
-                                  None, None),
-                        mock.call('POST', '/api/service?name=srv_' +
-                                  name_suffix + '&tenant=' + vip['tenant_id'],
-                                  mock.ANY, driver.CREATE_SERVICE_HEADER),
-                        mock.call('POST', 'someuri',
-                                  None, driver.PROVISION_HEADER),
-                        mock.call('GET', '/api/workflow/l2_l3_' + name_suffix,
-                                  None, None),
-                        mock.call('POST', '/api/workflowTemplate/' +
-                                  'openstack_l2_l3' +
-                                  '?name=l2_l3_' + name_suffix,
-                                  mock.ANY,
-                                  driver.TEMPLATE_HEADER),
-                        mock.call('POST', '/api/workflow/l2_l3_' +
-                                  name_suffix + '/action/setup_l2_l3',
-                                  mock.ANY, driver.TEMPLATE_HEADER),
-                        mock.call('GET', '/api/workflow/' +
-                                  pool['pool']['id'], None, None),
-                        mock.call('POST', '/api/workflowTemplate/' +
-                                  'openstack_l4' +
-                                  '?name=' + pool['pool']['id'],
-                                  mock.ANY,
-                                  driver.TEMPLATE_HEADER),
-                        mock.call('POST', '/api/workflow/' +
-                                  pool['pool']['id'] + '/action/BaseCreate',
-                                  mock.ANY, driver.TEMPLATE_HEADER)
-                    ]
-                    self.driver_rest_call_mock.assert_has_calls(calls)
-                    # Test DB
-                    new_vip = self.plugin_instance.get_vip(
-                        context.get_admin_context(),
-                        vip['id']
-                    )
-                    self.assertEqual(new_vip['status'], constants.ACTIVE)
-
-                    # Test that PIP neutron port was created
-                    pip_port_filter = {
-                        'name': ['pip_' + vip['id']],
-                    }
-                    plugin = manager.NeutronManager.get_plugin()
-                    num_ports = plugin.get_ports_count(
-                        context.get_admin_context(), filters=pip_port_filter)
-                    self.assertTrue(num_ports > 0)
-
-                    # Delete VIP
-                    self.plugin_instance.delete_vip(
-                        context.get_admin_context(), vip['id'])
-
-                    # Test deletion REST calls
-                    calls = [
-                        mock.call('DELETE', u'/api/workflow/' +
-                                  pool['pool']['id'], None, None)
-                    ]
-                    self.driver_rest_call_mock.assert_has_calls(calls)
-
-    def test_update_vip(self):
-        with self.subnet() as subnet:
-            with self.pool(provider='radware',
-                           do_delete=False,
-                           subnet_id=subnet['subnet']['id']) as pool:
-                vip_data = {
-                    'name': 'vip1',
-                    'subnet_id': subnet['subnet']['id'],
-                    'pool_id': pool['pool']['id'],
-                    'description': '',
-                    'protocol_port': 80,
-                    'protocol': 'HTTP',
-                    'connection_limit': -1,
-                    'admin_state_up': True,
-                    'status': constants.PENDING_CREATE,
-                    'tenant_id': self._tenant_id,
-                    'session_persistence': ''
-                }
-
-                vip = self.plugin_instance.create_vip(
-                    context.get_admin_context(), {'vip': vip_data})
-
-                vip_data['status'] = constants.PENDING_UPDATE
-                self.plugin_instance.update_vip(
-                    context.get_admin_context(),
-                    vip['id'], {'vip': vip_data})
-
-                # Test REST calls
-                calls = [
-                    mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
-                              '/action/BaseCreate',
-                              mock.ANY, driver.TEMPLATE_HEADER),
-                ]
-                self.driver_rest_call_mock.assert_has_calls(
-                    calls, any_order=True)
-
-                updated_vip = self.plugin_instance.get_vip(
-                    context.get_admin_context(), vip['id'])
-                self.assertEqual(updated_vip['status'], constants.ACTIVE)
-
-                # delete VIP
-                self.plugin_instance.delete_vip(
-                    context.get_admin_context(), vip['id'])
-
-    def test_update_vip_2_leg(self):
-        """Test update of a VIP where Alteon VIP and PIP are different."""
-
-        with self.subnet(cidr='10.0.0.0/24') as subnet:
-            with self.subnet(cidr='10.0.1.0/24') as pool_subnet:
-                with self.pool(provider='radware',
-                               subnet_id=pool_subnet['subnet']['id']) as pool:
-                    vip_data = {
-                        'name': 'vip1',
-                        'subnet_id': subnet['subnet']['id'],
-                        'pool_id': pool['pool']['id'],
-                        'description': '',
-                        'protocol_port': 80,
-                        'protocol': 'HTTP',
-                        'connection_limit': -1,
-                        'admin_state_up': True,
-                        'status': constants.PENDING_CREATE,
-                        'tenant_id': self._tenant_id,
-                        'session_persistence': ''
-                    }
-
-                    vip = self.plugin_instance.create_vip(
-                        context.get_admin_context(), {'vip': vip_data})
-
-                    self.plugin_instance.update_vip(
-                        context.get_admin_context(),
-                        vip['id'], {'vip': vip_data})
-
-                    # Test REST calls
-                    calls = [
-                        mock.call('POST', '/api/workflow/' +
-                                  pool['pool']['id'] + '/action/BaseCreate',
-                                  mock.ANY, driver.TEMPLATE_HEADER),
-                    ]
-                    self.driver_rest_call_mock.assert_has_calls(calls)
-
-                    updated_vip = self.plugin_instance.get_vip(
-                        context.get_admin_context(), vip['id'])
-                    self.assertEqual(updated_vip['status'], constants.ACTIVE)
-
-                    # delete VIP
-                    self.plugin_instance.delete_vip(
-                        context.get_admin_context(), vip['id'])
-
-    def test_delete_vip_failure(self):
-        plugin = self.plugin_instance
-
-        with self.network() as network:
-            with self.subnet(network=network) as subnet:
-                with self.pool(do_delete=False,
-                               provider='radware',
-                               subnet_id=subnet['subnet']['id']) as pool:
-                    with contextlib.nested(
-                        self.member(pool_id=pool['pool']['id'],
-                                    do_delete=False),
-                        self.member(pool_id=pool['pool']['id'],
-                                    address='192.168.1.101',
-                                    do_delete=False),
-                        self.health_monitor(do_delete=False),
-                        self.vip(pool=pool, subnet=subnet, do_delete=False)
-                    ) as (mem1, mem2, hm, vip):
-
-                        plugin.create_pool_health_monitor(
-                            context.get_admin_context(), hm, pool['pool']['id']
-                        )
-
-                        rest_call_function_mock.__dict__.update(
-                            {'RESPOND_WITH_ERROR': True})
-
-                        plugin.delete_vip(
-                            context.get_admin_context(), vip['vip']['id'])
-
-                        u_vip = plugin.get_vip(
-                            context.get_admin_context(), vip['vip']['id'])
-                        u_pool = plugin.get_pool(
-                            context.get_admin_context(), pool['pool']['id'])
-                        u_mem1 = plugin.get_member(
-                            context.get_admin_context(), mem1['member']['id'])
-                        u_mem2 = plugin.get_member(
-                            context.get_admin_context(), mem2['member']['id'])
-                        u_phm = plugin.get_pool_health_monitor(
-                            context.get_admin_context(),
-                            hm['health_monitor']['id'], pool['pool']['id'])
-
-                        self.assertEqual(u_vip['status'], constants.ERROR)
-                        self.assertEqual(u_pool['status'], constants.ACTIVE)
-                        self.assertEqual(u_mem1['status'], constants.ACTIVE)
-                        self.assertEqual(u_mem2['status'], constants.ACTIVE)
-                        self.assertEqual(u_phm['status'], constants.ACTIVE)
-
-    def test_delete_vip(self):
-        with self.subnet() as subnet:
-            with self.pool(provider='radware',
-                           do_delete=False,
-                           subnet_id=subnet['subnet']['id']) as pool:
-                vip_data = {
-                    'name': 'vip1',
-                    'subnet_id': subnet['subnet']['id'],
-                    'pool_id': pool['pool']['id'],
-                    'description': '',
-                    'protocol_port': 80,
-                    'protocol': 'HTTP',
-                    'connection_limit': -1,
-                    'admin_state_up': True,
-                    'status': constants.PENDING_CREATE,
-                    'tenant_id': self._tenant_id,
-                    'session_persistence': ''
-                }
-
-                vip = self.plugin_instance.create_vip(
-                    context.get_admin_context(), {'vip': vip_data})
-
-                self.plugin_instance.delete_vip(
-                    context.get_admin_context(), vip['id'])
-
-                calls = [
-                    mock.call('DELETE', '/api/workflow/' + pool['pool']['id'],
-                              None, None)
-                ]
-                self.driver_rest_call_mock.assert_has_calls(
-                    calls, any_order=True)
-
-                self.assertRaises(loadbalancer.VipNotFound,
-                                  self.plugin_instance.get_vip,
-                                  context.get_admin_context(), vip['id'])
-
-    def test_delete_vip_2_leg(self):
-        """Test deletion of a VIP where Alteon VIP and PIP are different."""
-
-        self.driver_rest_call_mock.reset_mock()
-        with self.subnet(cidr='10.0.0.0/24') as subnet:
-            with self.subnet(cidr='10.0.1.0/24') as pool_subnet:
-                with self.pool(provider='radware',
-                               do_delete=False,
-                               subnet_id=pool_subnet['subnet']['id']) as pool:
-                    vip_data = {
-                        'name': 'vip1',
-                        'subnet_id': subnet['subnet']['id'],
-                        'pool_id': pool['pool']['id'],
-                        'description': '',
-                        'protocol_port': 80,
-                        'protocol': 'HTTP',
-                        'connection_limit': -1,
-                        'admin_state_up': True,
-                        'status': constants.PENDING_CREATE,
-                        'tenant_id': self._tenant_id,
-                        'session_persistence': ''
-                    }
-
-                    vip = self.plugin_instance.create_vip(
-                        context.get_admin_context(), {'vip': vip_data})
-
-                    self.plugin_instance.delete_vip(
-                        context.get_admin_context(), vip['id'])
-
-                    calls = [
-                        mock.call('DELETE', '/api/workflow/' +
-                                  pool['pool']['id'], None, None)
-                    ]
-                    self.driver_rest_call_mock.assert_has_calls(calls)
-
-                    # Test that PIP neutron port was deleted
-                    pip_port_filter = {
-                        'name': ['pip_' + vip['id']],
-                    }
-                    plugin = manager.NeutronManager.get_plugin()
-                    num_ports = plugin.get_ports_count(
-                        context.get_admin_context(), filters=pip_port_filter)
-                    self.assertEqual(0, num_ports)
-
-                    self.assertRaises(loadbalancer.VipNotFound,
-                                      self.plugin_instance.get_vip,
-                                      context.get_admin_context(), vip['id'])
-
-    def test_update_pool(self):
-        with self.subnet():
-            with self.pool() as pool:
-                del pool['pool']['provider']
-                del pool['pool']['status']
-                self.plugin_instance.update_pool(
-                    context.get_admin_context(),
-                    pool['pool']['id'], pool)
-                pool_db = self.plugin_instance.get_pool(
-                    context.get_admin_context(), pool['pool']['id'])
-                self.assertEqual(pool_db['status'], constants.PENDING_UPDATE)
-
-    def test_delete_pool_with_vip(self):
-        with self.subnet() as subnet:
-            with self.pool(provider='radware',
-                           do_delete=False,
-                           subnet_id=subnet['subnet']['id']) as pool:
-                with self.vip(pool=pool, subnet=subnet):
-                    self.assertRaises(loadbalancer.PoolInUse,
-                                      self.plugin_instance.delete_pool,
-                                      context.get_admin_context(),
-                                      pool['pool']['id'])
-
-    def test_create_member_with_vip(self):
-        with self.subnet() as subnet:
-            with self.pool(provider='radware',
-                           subnet_id=subnet['subnet']['id']) as p:
-                with self.vip(pool=p, subnet=subnet):
-                    with self.member(pool_id=p['pool']['id']):
-                        calls = [
-                            mock.call(
-                                'POST', '/api/workflow/' + p['pool']['id'] +
-                                '/action/BaseCreate',
-                                mock.ANY, driver.TEMPLATE_HEADER
-                            ),
-                            mock.call(
-                                'POST', '/api/workflow/' + p['pool']['id'] +
-                                '/action/BaseCreate',
-                                mock.ANY, driver.TEMPLATE_HEADER
-                            )
-                        ]
-                        self.driver_rest_call_mock.assert_has_calls(
-                            calls, any_order=True)
-
-    def test_create_member_on_different_subnets(self):
-        with contextlib.nested(
-            self.subnet(),
-            self.subnet(cidr='20.0.0.0/24'),
-            self.subnet(cidr='30.0.0.0/24')
-        ) as (vip_sub, pool_sub, member_sub):
-            with self.pool(provider='radware',
-                           subnet_id=pool_sub['subnet']['id']) as pool:
-                with contextlib.nested(
-                    self.port(subnet=vip_sub,
-                              fixed_ips=[{'ip_address': '10.0.0.2'}]),
-                    self.port(subnet=pool_sub,
-                              fixed_ips=[{'ip_address': '20.0.0.2'}]),
-                    self.port(subnet=member_sub,
-                              fixed_ips=[{'ip_address': '30.0.0.2'}])
-                ):
-                    with contextlib.nested(
-                        self.member(pool_id=pool['pool']['id'],
-                                    address='10.0.0.2'),
-                        self.member(pool_id=pool['pool']['id'],
-                                    address='20.0.0.2'),
-                        self.member(pool_id=pool['pool']['id'],
-                                    address='30.0.0.2')
-                    ) as (member_vip, member_pool, member_out):
-                        with self.vip(pool=pool, subnet=vip_sub):
-                            calls = [
-                                mock.call(
-                                    'POST', '/api/workflow/' +
-                                    pool['pool']['id'] +
-                                    '/action/BaseCreate',
-                                    mock.ANY, driver.TEMPLATE_HEADER
-                                )
-                            ]
-                            self.driver_rest_call_mock.assert_has_calls(
-                                calls, any_order=True)
-
-                            mock_calls = self.driver_rest_call_mock.mock_calls
-                            params = mock_calls[-2][1][2]['parameters']
-                            member_subnet_array = params['member_subnet_array']
-                            member_mask_array = params['member_mask_array']
-                            member_gw_array = params['member_gw_array']
-                            self.assertEqual(member_subnet_array,
-                                             ['10.0.0.0',
-                                              '255.255.255.255',
-                                              '30.0.0.0'])
-                            self.assertEqual(member_mask_array,
-                                             ['255.255.255.0',
-                                              '255.255.255.255',
-                                              '255.255.255.0'])
-                            self.assertEqual(
-                                member_gw_array,
-                                [pool_sub['subnet']['gateway_ip'],
-                                 '255.255.255.255',
-                                 pool_sub['subnet']['gateway_ip']])
-
-    def test_create_member_on_different_subnet_no_port(self):
-        with contextlib.nested(
-            self.subnet(),
-            self.subnet(cidr='20.0.0.0/24'),
-            self.subnet(cidr='30.0.0.0/24')
-        ) as (vip_sub, pool_sub, member_sub):
-            with self.pool(provider='radware',
-                           subnet_id=pool_sub['subnet']['id']) as pool:
-                with self.member(pool_id=pool['pool']['id'],
-                                 address='30.0.0.2'):
-                    with self.vip(pool=pool, subnet=vip_sub):
-                        calls = [
-                            mock.call(
-                                'POST', '/api/workflow/' +
-                                pool['pool']['id'] +
-                                '/action/BaseCreate',
-                                mock.ANY, driver.TEMPLATE_HEADER
-                            )
-                        ]
-                        self.driver_rest_call_mock.assert_has_calls(
-                            calls, any_order=True)
-
-                        mock_calls = self.driver_rest_call_mock.mock_calls
-                        params = mock_calls[-2][1][2]['parameters']
-                        member_subnet_array = params['member_subnet_array']
-                        member_mask_array = params['member_mask_array']
-                        member_gw_array = params['member_gw_array']
-                        self.assertEqual(member_subnet_array,
-                                         ['30.0.0.2'])
-                        self.assertEqual(member_mask_array,
-                                         ['255.255.255.255'])
-                        self.assertEqual(member_gw_array,
-                                         [pool_sub['subnet']['gateway_ip']])
-
-    def test_create_member_on_different_subnet_multiple_ports(self):
-        cfg.CONF.set_override("allow_overlapping_ips", 'true')
-        with self.network() as other_net:
-            with contextlib.nested(
-                self.subnet(),
-                self.subnet(cidr='20.0.0.0/24'),
-                self.subnet(cidr='30.0.0.0/24'),
-                self.subnet(network=other_net, cidr='30.0.0.0/24')
-            ) as (vip_sub, pool_sub, member_sub1, member_sub2):
-                with self.pool(provider='radware',
-                               subnet_id=pool_sub['subnet']['id']) as pool:
-                    with contextlib.nested(
-                        self.port(subnet=member_sub1,
-                                  fixed_ips=[{'ip_address': '30.0.0.2'}]),
-                        self.port(subnet=member_sub2,
-                                  fixed_ips=[{'ip_address': '30.0.0.2'}])):
-                        with self.member(pool_id=pool['pool']['id'],
-                                         address='30.0.0.2'):
-                            with self.vip(pool=pool, subnet=vip_sub):
-                                calls = [
-                                    mock.call(
-                                        'POST', '/api/workflow/' +
-                                        pool['pool']['id'] +
-                                        '/action/BaseCreate',
-                                        mock.ANY, driver.TEMPLATE_HEADER
-                                    )
-                                ]
-                                self.driver_rest_call_mock.assert_has_calls(
-                                    calls, any_order=True)
-
-                                calls = self.driver_rest_call_mock.mock_calls
-                                params = calls[-2][1][2]['parameters']
-                                m_sub_array = params['member_subnet_array']
-                                m_mask_array = params['member_mask_array']
-                                m_gw_array = params['member_gw_array']
-                                self.assertEqual(m_sub_array,
-                                                 ['30.0.0.2'])
-                                self.assertEqual(m_mask_array,
-                                                 ['255.255.255.255'])
-                                self.assertEqual(
-                                    m_gw_array,
-                                    [pool_sub['subnet']['gateway_ip']])
-
-    def test_update_member_with_vip(self):
-        with self.subnet() as subnet:
-            with self.pool(provider='radware',
-                           subnet_id=subnet['subnet']['id']) as p:
-                with self.member(pool_id=p['pool']['id']) as member:
-                    with self.vip(pool=p, subnet=subnet):
-                        self.plugin_instance.update_member(
-                            context.get_admin_context(),
-                            member['member']['id'], member
-                        )
-                        calls = [
-                            mock.call(
-                                'POST', '/api/workflow/' + p['pool']['id'] +
-                                '/action/BaseCreate',
-                                mock.ANY, driver.TEMPLATE_HEADER
-                            ),
-                            mock.call(
-                                'POST', '/api/workflow/' + p['pool']['id'] +
-                                '/action/BaseCreate',
-                                mock.ANY, driver.TEMPLATE_HEADER
-                            )
-                        ]
-                        self.driver_rest_call_mock.assert_has_calls(
-                            calls, any_order=True)
-
-                        updated_member = self.plugin_instance.get_member(
-                            context.get_admin_context(),
-                            member['member']['id']
-                        )
-
-                        self.assertEqual(updated_member['status'],
-                                         constants.ACTIVE)
-
-    def test_update_member_without_vip(self):
-        with self.subnet():
-            with self.pool(provider='radware') as pool:
-                with self.member(pool_id=pool['pool']['id']) as member:
-                    member['member']['status'] = constants.PENDING_UPDATE
-                    updated_member = self.plugin_instance.update_member(
-                        context.get_admin_context(),
-                        member['member']['id'], member
-                    )
-                    self.assertEqual(updated_member['status'],
-                                     constants.PENDING_UPDATE)
-
-    def test_delete_member_with_vip(self):
-        with self.subnet() as subnet:
-            with self.pool(provider='radware',
-                           subnet_id=subnet['subnet']['id']) as p:
-                with self.member(pool_id=p['pool']['id'],
-                                 do_delete=False) as m:
-                    with self.vip(pool=p, subnet=subnet):
-
-                        # Reset the mock and wait to make sure the member
-                        # has changed status from PENDING_CREATE to ACTIVE.
-
-                        self.plugin_instance.delete_member(
-                            context.get_admin_context(),
-                            m['member']['id']
-                        )
-
-                        name, args, kwargs = (
-                            self.driver_rest_call_mock.mock_calls[-2]
-                        )
-                        deletion_post_graph = str(args[2])
-
-                        self.assertTrue(re.search(
-                            r'.*\'member_address_array\': \[\].*',
-                            deletion_post_graph
-                        ))
-
-                        calls = [
-                            mock.call(
-                                'POST', '/api/workflow/' + p['pool']['id'] +
-                                '/action/BaseCreate',
-                                mock.ANY, driver.TEMPLATE_HEADER
-                            )
-                        ]
-                        self.driver_rest_call_mock.assert_has_calls(
-                            calls, any_order=True)
-
-                        self.assertRaises(loadbalancer.MemberNotFound,
-                                          self.plugin_instance.get_member,
-                                          context.get_admin_context(),
-                                          m['member']['id'])
-
-    def test_delete_member_without_vip(self):
-        with self.subnet():
-            with self.pool(provider='radware') as p:
-                with self.member(pool_id=p['pool']['id'],
-                                 do_delete=False) as m:
-                    self.plugin_instance.delete_member(
-                        context.get_admin_context(), m['member']['id']
-                    )
-                    self.assertRaises(loadbalancer.MemberNotFound,
-                                      self.plugin_instance.get_member,
-                                      context.get_admin_context(),
-                                      m['member']['id'])
-
-    def test_create_hm_with_vip(self):
-        with self.subnet() as subnet:
-            with self.health_monitor() as hm:
-                with self.pool(provider='radware',
-                               subnet_id=subnet['subnet']['id']) as pool:
-                    with self.vip(pool=pool, subnet=subnet):
-
-                        self.plugin_instance.create_pool_health_monitor(
-                            context.get_admin_context(),
-                            hm, pool['pool']['id']
-                        )
-
-                        # Test REST calls
-                        calls = [
-                            mock.call(
-                                'POST', '/api/workflow/' + pool['pool']['id'] +
-                                '/action/BaseCreate',
-                                mock.ANY, driver.TEMPLATE_HEADER
-                            ),
-                            mock.call(
-                                'POST', '/api/workflow/' + pool['pool']['id'] +
-                                '/action/BaseCreate',
-                                mock.ANY, driver.TEMPLATE_HEADER
-                            )
-                        ]
-                        self.driver_rest_call_mock.assert_has_calls(
-                            calls, any_order=True)
-
-                        phm = self.plugin_instance.get_pool_health_monitor(
-                            context.get_admin_context(),
-                            hm['health_monitor']['id'], pool['pool']['id']
-                        )
-                        self.assertEqual(phm['status'], constants.ACTIVE)
-
-    def test_delete_pool_hm_with_vip(self):
-        with self.subnet() as subnet:
-            with self.health_monitor(do_delete=False) as hm:
-                with self.pool(provider='radware',
-                               subnet_id=subnet['subnet']['id']) as pool:
-                    with self.vip(pool=pool, subnet=subnet):
-                        self.plugin_instance.create_pool_health_monitor(
-                            context.get_admin_context(),
-                            hm, pool['pool']['id']
-                        )
-
-                        self.plugin_instance.delete_pool_health_monitor(
-                            context.get_admin_context(),
-                            hm['health_monitor']['id'],
-                            pool['pool']['id']
-                        )
-
-                        name, args, kwargs = (
-                            self.driver_rest_call_mock.mock_calls[-2]
-                        )
-                        deletion_post_graph = str(args[2])
-
-                        self.assertTrue(re.search(
-                            r'.*\'hm_uuid_array\': \[\].*',
-                            deletion_post_graph
-                        ))
-
-                        calls = [
-                            mock.call(
-                                'POST', '/api/workflow/' + pool['pool']['id'] +
-                                '/action/BaseCreate',
-                                mock.ANY, driver.TEMPLATE_HEADER
-                            )
-                        ]
-                        self.driver_rest_call_mock.assert_has_calls(
-                            calls, any_order=True)
-
-                        self.assertRaises(
-                            loadbalancer.PoolMonitorAssociationNotFound,
-                            self.plugin_instance.get_pool_health_monitor,
-                            context.get_admin_context(),
-                            hm['health_monitor']['id'],
-                            pool['pool']['id']
-                        )
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py b/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py
deleted file mode 100644 (file)
index fa64fb9..0000000
+++ /dev/null
@@ -1,749 +0,0 @@
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-import mock
-from six import moves
-from webob import exc
-
-from neutron import context
-from neutron.db.loadbalancer import loadbalancer_db as ldb
-from neutron.db import servicetype_db as st_db
-from neutron.extensions import loadbalancer
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.openstack.common import uuidutils
-from neutron.plugins.common import constants
-from neutron.services.loadbalancer.drivers.common import agent_driver_base
-from neutron.tests import base
-from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
-from neutron.tests.unit import testlib_api
-
-
-class TestLoadBalancerPluginBase(
-    test_db_loadbalancer.LoadBalancerPluginDbTestCase):
-
-    def setUp(self):
-        def reset_device_driver():
-            agent_driver_base.AgentDriverBase.device_driver = None
-        self.addCleanup(reset_device_driver)
-
-        self.mock_importer = mock.patch.object(
-            agent_driver_base, 'importutils').start()
-
-        # needed to reload provider configuration
-        st_db.ServiceTypeManager._instance = None
-        agent_driver_base.AgentDriverBase.device_driver = 'dummy'
-        super(TestLoadBalancerPluginBase, self).setUp(
-            lbaas_provider=('LOADBALANCER:lbaas:neutron.services.'
-                            'loadbalancer.drivers.common.agent_driver_base.'
-                            'AgentDriverBase:default'))
-
-        # we need access to loaded plugins to modify models
-        loaded_plugins = manager.NeutronManager().get_service_plugins()
-
-        self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
-
-
-class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase):
-    def setUp(self):
-        super(TestLoadBalancerCallbacks, self).setUp()
-
-        self.callbacks = agent_driver_base.LoadBalancerCallbacks(
-            self.plugin_instance
-        )
-        get_lbaas_agents_patcher = mock.patch(
-            'neutron.services.loadbalancer.agent_scheduler'
-            '.LbaasAgentSchedulerDbMixin.get_lbaas_agents')
-        get_lbaas_agents_patcher.start()
-
-    def test_get_ready_devices(self):
-        with self.vip() as vip:
-            with mock.patch('neutron.services.loadbalancer.agent_scheduler'
-                            '.LbaasAgentSchedulerDbMixin.'
-                            'list_pools_on_lbaas_agent') as mock_agent_pools:
-                mock_agent_pools.return_value = {
-                    'pools': [{'id': vip['vip']['pool_id']}]}
-                ready = self.callbacks.get_ready_devices(
-                    context.get_admin_context(),
-                )
-                self.assertEqual(ready, [vip['vip']['pool_id']])
-
-    def test_get_ready_devices_multiple_vips_and_pools(self):
-        ctx = context.get_admin_context()
-
-        # add 3 pools and 2 vips directly to DB
-        # to create 2 "ready" devices and one pool without vip
-        pools = []
-        for i in moves.xrange(3):
-            pools.append(ldb.Pool(id=uuidutils.generate_uuid(),
-                                  subnet_id=self._subnet_id,
-                                  protocol="HTTP",
-                                  lb_method="ROUND_ROBIN",
-                                  status=constants.ACTIVE,
-                                  admin_state_up=True))
-            ctx.session.add(pools[i])
-
-        vip0 = ldb.Vip(id=uuidutils.generate_uuid(),
-                       protocol_port=80,
-                       protocol="HTTP",
-                       pool_id=pools[0].id,
-                       status=constants.ACTIVE,
-                       admin_state_up=True,
-                       connection_limit=3)
-        ctx.session.add(vip0)
-        pools[0].vip_id = vip0.id
-
-        vip1 = ldb.Vip(id=uuidutils.generate_uuid(),
-                       protocol_port=80,
-                       protocol="HTTP",
-                       pool_id=pools[1].id,
-                       status=constants.ACTIVE,
-                       admin_state_up=True,
-                       connection_limit=3)
-        ctx.session.add(vip1)
-        pools[1].vip_id = vip1.id
-
-        ctx.session.flush()
-
-        self.assertEqual(ctx.session.query(ldb.Pool).count(), 3)
-        self.assertEqual(ctx.session.query(ldb.Vip).count(), 2)
-        with mock.patch('neutron.services.loadbalancer.agent_scheduler'
-                        '.LbaasAgentSchedulerDbMixin'
-                        '.list_pools_on_lbaas_agent') as mock_agent_pools:
-            mock_agent_pools.return_value = {'pools': [{'id': pools[0].id},
-                                                       {'id': pools[1].id},
-                                                       {'id': pools[2].id}]}
-            ready = self.callbacks.get_ready_devices(ctx)
-            self.assertEqual(len(ready), 3)
-            self.assertIn(pools[0].id, ready)
-            self.assertIn(pools[1].id, ready)
-            self.assertIn(pools[2].id, ready)
-        # cleanup
-        ctx.session.query(ldb.Pool).delete()
-        ctx.session.query(ldb.Vip).delete()
-
-    def test_get_ready_devices_inactive_vip(self):
-        with self.vip() as vip:
-
-            # set the vip inactive; we need to use the plugin directly
-            # since status is not tenant mutable
-            self.plugin_instance.update_vip(
-                context.get_admin_context(),
-                vip['vip']['id'],
-                {'vip': {'status': constants.INACTIVE}}
-            )
-            with mock.patch('neutron.services.loadbalancer.agent_scheduler'
-                            '.LbaasAgentSchedulerDbMixin.'
-                            'list_pools_on_lbaas_agent') as mock_agent_pools:
-                mock_agent_pools.return_value = {
-                    'pools': [{'id': vip['vip']['pool_id']}]}
-                ready = self.callbacks.get_ready_devices(
-                    context.get_admin_context(),
-                )
-                self.assertEqual([vip['vip']['pool_id']], ready)
-
-    def test_get_ready_devices_inactive_pool(self):
-        with self.vip() as vip:
-
-            # set the pool inactive; we need to use the plugin directly
-            # since status is not tenant mutable
-            self.plugin_instance.update_pool(
-                context.get_admin_context(),
-                vip['vip']['pool_id'],
-                {'pool': {'status': constants.INACTIVE}}
-            )
-            with mock.patch('neutron.services.loadbalancer.agent_scheduler'
-                            '.LbaasAgentSchedulerDbMixin.'
-                            'list_pools_on_lbaas_agent') as mock_agent_pools:
-                mock_agent_pools.return_value = {
-                    'pools': [{'id': vip['vip']['pool_id']}]}
-                ready = self.callbacks.get_ready_devices(
-                    context.get_admin_context(),
-                )
-                self.assertFalse(ready)
-
-    def test_get_logical_device_non_active(self):
-        with self.pool() as pool:
-            ctx = context.get_admin_context()
-            for status in ('INACTIVE', 'PENDING_CREATE', 'PENDING_UPDATE'):
-                self.plugin_instance.update_status(
-                    ctx, ldb.Pool, pool['pool']['id'], status)
-                pool['pool']['status'] = status
-                expected = {
-                    'pool': pool['pool'],
-                    'members': [],
-                    'healthmonitors': [],
-                    'driver': 'dummy'
-                }
-
-                logical_config = self.callbacks.get_logical_device(
-                    ctx, pool['pool']['id']
-                )
-
-                self.assertEqual(expected, logical_config)
-
-    def test_get_logical_device_active(self):
-        with self.pool() as pool:
-            with self.vip(pool=pool) as vip:
-                with self.member(pool_id=vip['vip']['pool_id']) as member:
-                    ctx = context.get_admin_context()
-                    # activate objects
-                    self.plugin_instance.update_status(
-                        ctx, ldb.Pool, pool['pool']['id'], 'ACTIVE')
-                    self.plugin_instance.update_status(
-                        ctx, ldb.Member, member['member']['id'], 'ACTIVE')
-                    self.plugin_instance.update_status(
-                        ctx, ldb.Vip, vip['vip']['id'], 'ACTIVE')
-
-                    # build the expected logical device config
-                    port = self.plugin_instance._core_plugin.get_port(
-                        ctx, vip['vip']['port_id']
-                    )
-                    subnet = self.plugin_instance._core_plugin.get_subnet(
-                        ctx, vip['vip']['subnet_id']
-                    )
-                    port['fixed_ips'][0]['subnet'] = subnet
-
-                    # reload pool to add members and vip
-                    pool = self.plugin_instance.get_pool(
-                        ctx, pool['pool']['id']
-                    )
-
-                    pool['status'] = constants.ACTIVE
-                    vip['vip']['status'] = constants.ACTIVE
-                    vip['vip']['port'] = port
-                    member['member']['status'] = constants.ACTIVE
-
-                    expected = {
-                        'pool': pool,
-                        'vip': vip['vip'],
-                        'members': [member['member']],
-                        'healthmonitors': [],
-                        'driver': 'dummy'
-                    }
-
-                    logical_config = self.callbacks.get_logical_device(
-                        ctx, pool['id']
-                    )
-
-                    self.assertEqual(logical_config, expected)
-
-    def test_get_logical_device_inactive_member(self):
-        with self.pool() as pool:
-            with self.vip(pool=pool) as vip:
-                with self.member(pool_id=vip['vip']['pool_id']) as member:
-                    ctx = context.get_admin_context()
-                    self.plugin_instance.update_status(ctx, ldb.Pool,
-                                                       pool['pool']['id'],
-                                                       'ACTIVE')
-                    self.plugin_instance.update_status(ctx, ldb.Vip,
-                                                       vip['vip']['id'],
-                                                       'ACTIVE')
-                    self.plugin_instance.update_status(ctx, ldb.Member,
-                                                       member['member']['id'],
-                                                       'INACTIVE')
-
-                    logical_config = self.callbacks.get_logical_device(
-                        ctx, pool['pool']['id'])
-
-                    member['member']['status'] = constants.INACTIVE
-                    self.assertEqual([member['member']],
-                                     logical_config['members'])
-
-    def test_get_logical_device_pending_create_member(self):
-        with self.pool() as pool:
-            with self.vip(pool=pool) as vip:
-                with self.member(pool_id=vip['vip']['pool_id']) as member:
-                    ctx = context.get_admin_context()
-                    self.plugin_instance.update_status(ctx, ldb.Pool,
-                                                       pool['pool']['id'],
-                                                       'ACTIVE')
-                    self.plugin_instance.update_status(ctx, ldb.Vip,
-                                                       vip['vip']['id'],
-                                                       'ACTIVE')
-
-                    member = self.plugin_instance.get_member(
-                        ctx, member['member']['id'])
-                    self.assertEqual('PENDING_CREATE',
-                                     member['status'])
-                    logical_config = self.callbacks.get_logical_device(
-                        ctx, pool['pool']['id'])
-
-                    self.assertEqual([member], logical_config['members'])
-
-    def test_get_logical_device_pending_create_health_monitor(self):
-        with self.health_monitor() as monitor:
-            with self.pool() as pool:
-                with self.vip(pool=pool) as vip:
-                    ctx = context.get_admin_context()
-                    self.plugin_instance.update_status(ctx, ldb.Pool,
-                                                       pool['pool']['id'],
-                                                       'ACTIVE')
-                    self.plugin_instance.update_status(ctx, ldb.Vip,
-                                                       vip['vip']['id'],
-                                                       'ACTIVE')
-                    self.plugin_instance.create_pool_health_monitor(
-                        ctx, monitor, pool['pool']['id'])
-                    pool = self.plugin_instance.get_pool(
-                        ctx, pool['pool']['id'])
-                    monitor = self.plugin_instance.get_health_monitor(
-                        ctx, monitor['health_monitor']['id'])
-
-                    self.assertEqual(
-                        'PENDING_CREATE',
-                        pool['health_monitors_status'][0]['status'])
-                    logical_config = self.callbacks.get_logical_device(
-                        ctx, pool['id'])
-
-                    self.assertEqual([monitor],
-                                     logical_config['healthmonitors'])
-
-    def _update_port_test_helper(self, expected, func, **kwargs):
-        core = self.plugin_instance._core_plugin
-
-        with self.pool() as pool:
-            with self.vip(pool=pool) as vip:
-                with self.member(pool_id=vip['vip']['pool_id']):
-                    ctx = context.get_admin_context()
-                    func(ctx, port_id=vip['vip']['port_id'], **kwargs)
-
-                    db_port = core.get_port(ctx, vip['vip']['port_id'])
-
-                    for k, v in expected.iteritems():
-                        self.assertEqual(db_port[k], v)
-
-    def test_plug_vip_port(self):
-        exp = {
-            'device_owner': 'neutron:' + constants.LOADBALANCER,
-            'device_id': 'c596ce11-db30-5c72-8243-15acaae8690f',
-            'admin_state_up': True
-        }
-        self._update_port_test_helper(
-            exp,
-            self.callbacks.plug_vip_port,
-            host='host'
-        )
-
-    def test_plug_vip_port_mock_with_host(self):
-        exp = {
-            'device_owner': 'neutron:' + constants.LOADBALANCER,
-            'device_id': 'c596ce11-db30-5c72-8243-15acaae8690f',
-            'admin_state_up': True,
-            portbindings.HOST_ID: 'host'
-        }
-        with mock.patch.object(
-            self.plugin._core_plugin, 'update_port') as mock_update_port:
-            with self.pool() as pool:
-                with self.vip(pool=pool) as vip:
-                    ctx = context.get_admin_context()
-                    self.callbacks.plug_vip_port(
-                        ctx, port_id=vip['vip']['port_id'], host='host')
-            mock_update_port.assert_called_once_with(
-                ctx, vip['vip']['port_id'],
-                {'port': testlib_api.SubDictMatch(exp)})
-
-    def test_unplug_vip_port(self):
-        exp = {
-            'device_owner': '',
-            'device_id': '',
-            'admin_state_up': False
-        }
-        self._update_port_test_helper(
-            exp,
-            self.callbacks.unplug_vip_port,
-            host='host'
-        )
-
-    def test_pool_deployed(self):
-        with self.pool() as pool:
-            with self.vip(pool=pool) as vip:
-                with self.member(pool_id=vip['vip']['pool_id']) as member:
-                    ctx = context.get_admin_context()
-                    p = self.plugin_instance.get_pool(ctx, pool['pool']['id'])
-                    self.assertEqual('PENDING_CREATE', p['status'])
-                    v = self.plugin_instance.get_vip(ctx, vip['vip']['id'])
-                    self.assertEqual('PENDING_CREATE', v['status'])
-                    m = self.plugin_instance.get_member(
-                        ctx, member['member']['id'])
-                    self.assertEqual('PENDING_CREATE', m['status'])
-
-                    self.callbacks.pool_deployed(ctx, pool['pool']['id'])
-
-                    p = self.plugin_instance.get_pool(ctx, pool['pool']['id'])
-                    self.assertEqual('ACTIVE', p['status'])
-                    v = self.plugin_instance.get_vip(ctx, vip['vip']['id'])
-                    self.assertEqual('ACTIVE', v['status'])
-                    m = self.plugin_instance.get_member(
-                        ctx, member['member']['id'])
-                    self.assertEqual('ACTIVE', m['status'])
-
-    def test_update_status_pool(self):
-        with self.pool() as pool:
-            pool_id = pool['pool']['id']
-            ctx = context.get_admin_context()
-            p = self.plugin_instance.get_pool(ctx, pool_id)
-            self.assertEqual('PENDING_CREATE', p['status'])
-            self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE')
-            p = self.plugin_instance.get_pool(ctx, pool_id)
-            self.assertEqual('ACTIVE', p['status'])
-
-    def test_update_status_pool_deleted_already(self):
-        with mock.patch.object(agent_driver_base, 'LOG') as mock_log:
-            pool_id = 'deleted_pool'
-            ctx = context.get_admin_context()
-            self.assertRaises(loadbalancer.PoolNotFound,
-                              self.plugin_instance.get_pool, ctx, pool_id)
-            self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE')
-            self.assertTrue(mock_log.warning.called)
-
-    def test_update_status_health_monitor(self):
-        with contextlib.nested(
-            self.health_monitor(),
-            self.pool()
-        ) as (hm, pool):
-            pool_id = pool['pool']['id']
-            ctx = context.get_admin_context()
-            self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
-            hm_id = hm['health_monitor']['id']
-            h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id,
-                                                             pool_id)
-            self.assertEqual('PENDING_CREATE', h['status'])
-            self.callbacks.update_status(
-                ctx, 'health_monitor',
-                {'monitor_id': hm_id, 'pool_id': pool_id}, 'ACTIVE')
-            h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id,
-                                                             pool_id)
-            self.assertEqual('ACTIVE', h['status'])
-
-
-class TestLoadBalancerAgentApi(base.BaseTestCase):
-    def setUp(self):
-        super(TestLoadBalancerAgentApi, self).setUp()
-
-        self.api = agent_driver_base.LoadBalancerAgentApi('topic')
-
-    def test_init(self):
-        self.assertEqual(self.api.client.target.topic, 'topic')
-
-    def _call_test_helper(self, method_name, method_args):
-        with contextlib.nested(
-            mock.patch.object(self.api.client, 'cast'),
-            mock.patch.object(self.api.client, 'prepare'),
-        ) as (
-            rpc_mock, prepare_mock
-        ):
-            prepare_mock.return_value = self.api.client
-            getattr(self.api, method_name)(mock.sentinel.context,
-                                           host='host',
-                                           **method_args)
-
-        prepare_args = {'server': 'host'}
-        prepare_mock.assert_called_once_with(**prepare_args)
-
-        if method_name == 'agent_updated':
-            method_args = {'payload': method_args}
-        rpc_mock.assert_called_once_with(mock.sentinel.context, method_name,
-                                         **method_args)
-
-    def test_agent_updated(self):
-        self._call_test_helper('agent_updated', {'admin_state_up': 'test'})
-
-    def test_create_pool(self):
-        self._call_test_helper('create_pool', {'pool': 'test',
-                                               'driver_name': 'dummy'})
-
-    def test_update_pool(self):
-        self._call_test_helper('update_pool', {'old_pool': 'test',
-                                               'pool': 'test'})
-
-    def test_delete_pool(self):
-        self._call_test_helper('delete_pool', {'pool': 'test'})
-
-    def test_create_vip(self):
-        self._call_test_helper('create_vip', {'vip': 'test'})
-
-    def test_update_vip(self):
-        self._call_test_helper('update_vip', {'old_vip': 'test',
-                                              'vip': 'test'})
-
-    def test_delete_vip(self):
-        self._call_test_helper('delete_vip', {'vip': 'test'})
-
-    def test_create_member(self):
-        self._call_test_helper('create_member', {'member': 'test'})
-
-    def test_update_member(self):
-        self._call_test_helper('update_member', {'old_member': 'test',
-                                                 'member': 'test'})
-
-    def test_delete_member(self):
-        self._call_test_helper('delete_member', {'member': 'test'})
-
-    def test_create_monitor(self):
-        self._call_test_helper('create_pool_health_monitor',
-                               {'health_monitor': 'test', 'pool_id': 'test'})
-
-    def test_update_monitor(self):
-        self._call_test_helper('update_pool_health_monitor',
-                               {'old_health_monitor': 'test',
-                                'health_monitor': 'test',
-                                'pool_id': 'test'})
-
-    def test_delete_monitor(self):
-        self._call_test_helper('delete_pool_health_monitor',
-                               {'health_monitor': 'test', 'pool_id': 'test'})
-
-
-class TestLoadBalancerPluginNotificationWrapper(TestLoadBalancerPluginBase):
-    def setUp(self):
-        self.log = mock.patch.object(agent_driver_base, 'LOG')
-        api_cls = mock.patch.object(agent_driver_base,
-                                    'LoadBalancerAgentApi').start()
-        super(TestLoadBalancerPluginNotificationWrapper, self).setUp()
-        self.mock_api = api_cls.return_value
-
-        self.mock_get_driver = mock.patch.object(self.plugin_instance,
-                                                 '_get_driver')
-        self.mock_get_driver.return_value = (agent_driver_base.
-                                             AgentDriverBase(
-                                                 self.plugin_instance
-                                             ))
-
-    def test_create_vip(self):
-        with self.subnet() as subnet:
-            with self.pool(subnet=subnet) as pool:
-                with self.vip(pool=pool, subnet=subnet) as vip:
-                    self.mock_api.create_vip.assert_called_once_with(
-                        mock.ANY,
-                        vip['vip'],
-                        'host'
-                    )
-
-    def test_update_vip(self):
-        with self.subnet() as subnet:
-            with self.pool(subnet=subnet) as pool:
-                with self.vip(pool=pool, subnet=subnet) as vip:
-                    ctx = context.get_admin_context()
-                    old_vip = vip['vip'].copy()
-                    vip['vip'].pop('status')
-                    new_vip = self.plugin_instance.update_vip(
-                        ctx,
-                        vip['vip']['id'],
-                        vip
-                    )
-
-                    self.mock_api.update_vip.assert_called_once_with(
-                        mock.ANY,
-                        old_vip,
-                        new_vip,
-                        'host'
-                    )
-
-                    self.assertEqual(
-                        new_vip['status'],
-                        constants.PENDING_UPDATE
-                    )
-
-    def test_delete_vip(self):
-        with self.subnet() as subnet:
-            with self.pool(subnet=subnet) as pool:
-                with self.vip(pool=pool, subnet=subnet,
-                              do_delete=False) as vip:
-                    ctx = context.get_admin_context()
-                    self.plugin_instance.delete_vip(ctx, vip['vip']['id'])
-                    vip['vip']['status'] = 'PENDING_DELETE'
-                    self.mock_api.delete_vip.assert_called_once_with(
-                        mock.ANY,
-                        vip['vip'],
-                        'host'
-                    )
-
-    def test_create_pool(self):
-        with self.pool() as pool:
-            self.mock_api.create_pool.assert_called_once_with(
-                mock.ANY,
-                pool['pool'],
-                mock.ANY,
-                'dummy'
-            )
-
-    def test_update_pool_non_active(self):
-        with self.pool() as pool:
-            pool['pool']['status'] = 'INACTIVE'
-            ctx = context.get_admin_context()
-            orig_pool = pool['pool'].copy()
-            del pool['pool']['provider']
-            self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool)
-            self.mock_api.delete_pool.assert_called_once_with(
-                mock.ANY, orig_pool, 'host')
-
-    def test_update_pool_no_vip_id(self):
-        with self.pool() as pool:
-            ctx = context.get_admin_context()
-            orig_pool = pool['pool'].copy()
-            del pool['pool']['provider']
-            updated = self.plugin_instance.update_pool(
-                ctx, pool['pool']['id'], pool)
-            self.mock_api.update_pool.assert_called_once_with(
-                mock.ANY, orig_pool, updated, 'host')
-
-    def test_update_pool_with_vip_id(self):
-        with self.pool() as pool:
-            with self.vip(pool=pool) as vip:
-                ctx = context.get_admin_context()
-                old_pool = pool['pool'].copy()
-                old_pool['vip_id'] = vip['vip']['id']
-                del pool['pool']['provider']
-                updated = self.plugin_instance.update_pool(
-                    ctx, pool['pool']['id'], pool)
-                self.mock_api.update_pool.assert_called_once_with(
-                    mock.ANY, old_pool, updated, 'host')
-
-    def test_delete_pool(self):
-        with self.pool(do_delete=False) as pool:
-            req = self.new_delete_request('pools',
-                                          pool['pool']['id'])
-            res = req.get_response(self.ext_api)
-            self.assertEqual(res.status_int, exc.HTTPNoContent.code)
-            pool['pool']['status'] = 'PENDING_DELETE'
-            self.mock_api.delete_pool.assert_called_once_with(
-                mock.ANY, pool['pool'], 'host')
-
-    def test_create_member(self):
-        with self.pool() as pool:
-            pool_id = pool['pool']['id']
-            with self.member(pool_id=pool_id) as member:
-                self.mock_api.create_member.assert_called_once_with(
-                    mock.ANY, member['member'], 'host')
-
-    def test_update_member(self):
-        with self.pool() as pool:
-            pool_id = pool['pool']['id']
-            with self.member(pool_id=pool_id) as member:
-                ctx = context.get_admin_context()
-                updated = self.plugin_instance.update_member(
-                    ctx, member['member']['id'], member)
-                self.mock_api.update_member.assert_called_once_with(
-                    mock.ANY, member['member'], updated, 'host')
-
-    def test_update_member_new_pool(self):
-        with self.pool() as pool1:
-            pool1_id = pool1['pool']['id']
-            with self.pool() as pool2:
-                pool2_id = pool2['pool']['id']
-                with self.member(pool_id=pool1_id) as member:
-                    self.mock_api.create_member.reset_mock()
-                    ctx = context.get_admin_context()
-                    old_member = member['member'].copy()
-                    member['member']['pool_id'] = pool2_id
-                    updated = self.plugin_instance.update_member(
-                        ctx, member['member']['id'], member)
-                    self.mock_api.delete_member.assert_called_once_with(
-                        mock.ANY, old_member, 'host')
-                    self.mock_api.create_member.assert_called_once_with(
-                        mock.ANY, updated, 'host')
-
-    def test_delete_member(self):
-        with self.pool() as pool:
-            pool_id = pool['pool']['id']
-            with self.member(pool_id=pool_id,
-                             do_delete=False) as member:
-                req = self.new_delete_request('members',
-                                              member['member']['id'])
-                res = req.get_response(self.ext_api)
-                self.assertEqual(res.status_int, exc.HTTPNoContent.code)
-                member['member']['status'] = 'PENDING_DELETE'
-                self.mock_api.delete_member.assert_called_once_with(
-                    mock.ANY, member['member'], 'host')
-
-    def test_create_pool_health_monitor(self):
-        with contextlib.nested(
-            self.health_monitor(),
-            self.pool(),
-        ) as (hm, pool):
-            pool_id = pool['pool']['id']
-            ctx = context.get_admin_context()
-            self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
-            # hm now has a ref to the pool with which it is associated
-            hm = self.plugin.get_health_monitor(
-                ctx, hm['health_monitor']['id'])
-            self.mock_api.create_pool_health_monitor.assert_called_once_with(
-                mock.ANY, hm, pool_id, 'host')
-
-    def test_delete_pool_health_monitor(self):
-        with contextlib.nested(
-            self.pool(),
-            self.health_monitor()
-        ) as (pool, hm):
-            pool_id = pool['pool']['id']
-            ctx = context.get_admin_context()
-            self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
-            # hm now has a ref to the pool with which it is associated
-            hm = self.plugin.get_health_monitor(
-                ctx, hm['health_monitor']['id'])
-            hm['pools'][0]['status'] = 'PENDING_DELETE'
-            self.plugin_instance.delete_pool_health_monitor(
-                ctx, hm['id'], pool_id)
-            self.mock_api.delete_pool_health_monitor.assert_called_once_with(
-                mock.ANY, hm, pool_id, 'host')
-
-    def test_update_health_monitor_associated_with_pool(self):
-        with contextlib.nested(
-            self.health_monitor(type='HTTP'),
-            self.pool()
-        ) as (monitor, pool):
-            data = {
-                'health_monitor': {
-                    'id': monitor['health_monitor']['id'],
-                    'tenant_id': self._tenant_id
-                }
-            }
-            req = self.new_create_request(
-                'pools',
-                data,
-                fmt=self.fmt,
-                id=pool['pool']['id'],
-                subresource='health_monitors')
-            res = req.get_response(self.ext_api)
-            self.assertEqual(res.status_int, exc.HTTPCreated.code)
-            # hm now has a ref to the pool with which it is associated
-            ctx = context.get_admin_context()
-            hm = self.plugin.get_health_monitor(
-                ctx, monitor['health_monitor']['id'])
-            self.mock_api.create_pool_health_monitor.assert_called_once_with(
-                mock.ANY,
-                hm,
-                pool['pool']['id'],
-                'host'
-            )
-
-            self.mock_api.reset_mock()
-            data = {'health_monitor': {'delay': 20,
-                                       'timeout': 20,
-                                       'max_retries': 2,
-                                       'admin_state_up': False}}
-            updated = hm.copy()
-            updated.update(data['health_monitor'])
-            req = self.new_update_request("health_monitors",
-                                          data,
-                                          monitor['health_monitor']['id'])
-            req.get_response(self.ext_api)
-            self.mock_api.update_pool_health_monitor.assert_called_once_with(
-                mock.ANY,
-                hm,
-                updated,
-                pool['pool']['id'],
-                'host')
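The TestLoadBalancerAgentApi cases deleted above all exercise one pattern: prepare() the RPC client for the agent's host, then cast() the operation name with its arguments. Below is a minimal standalone sketch of that pattern and of the patch/assert style used by _call_test_helper, written with stdlib unittest.mock and invented names (FakeRpcClient, LbAgentProxy) rather than Neutron's real classes.

# Hedged sketch only; not part of this change and not Neutron's actual API.
from unittest import mock


class FakeRpcClient(object):
    """Minimal stand-in for an RPC client exposing prepare()/cast()."""

    def prepare(self, **kwargs):
        return self

    def cast(self, context, method, **kwargs):
        pass


class LbAgentProxy(object):
    """Casts each load-balancer operation to the agent identified by host."""

    def __init__(self, client):
        self.client = client

    def create_pool(self, context, pool, host, driver_name):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'create_pool', pool=pool, driver_name=driver_name)


def _demo():
    client = FakeRpcClient()
    proxy = LbAgentProxy(client)
    with mock.patch.object(client, 'cast') as cast, \
            mock.patch.object(client, 'prepare', return_value=client) as prep:
        proxy.create_pool(mock.sentinel.ctx, 'pool', host='host',
                          driver_name='dummy')
    # Same assertions the deleted helper made against the real proxy.
    prep.assert_called_once_with(server='host')
    cast.assert_called_once_with(mock.sentinel.ctx, 'create_pool',
                                 pool='pool', driver_name='dummy')


if __name__ == '__main__':
    _demo()
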
diff --git a/neutron/tests/unit/services/loadbalancer/test_agent_scheduler.py b/neutron/tests/unit/services/loadbalancer/test_agent_scheduler.py
deleted file mode 100644 (file)
index a08bca6..0000000
+++ /dev/null
@@ -1,217 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-from oslo.config import cfg
-from webob import exc
-
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron import context
-from neutron.db import servicetype_db as st_db
-from neutron.extensions import agent
-from neutron.extensions import lbaas_agentscheduler
-from neutron.extensions import loadbalancer
-from neutron import manager
-from neutron.plugins.common import constants as plugin_const
-from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
-from neutron.tests.unit.openvswitch import test_agent_scheduler
-from neutron.tests.unit import test_agent_ext_plugin
-from neutron.tests.unit import test_db_plugin as test_plugin
-from neutron.tests.unit import test_extensions
-
-LBAAS_HOSTA = 'hosta'
-
-
-class AgentSchedulerTestMixIn(test_agent_scheduler.AgentSchedulerTestMixIn):
-    def _list_pools_hosted_by_lbaas_agent(self, agent_id,
-                                          expected_code=exc.HTTPOk.code,
-                                          admin_context=True):
-        path = "/agents/%s/%s.%s" % (agent_id,
-                                     lbaas_agentscheduler.LOADBALANCER_POOLS,
-                                     self.fmt)
-        return self._request_list(path, expected_code=expected_code,
-                                  admin_context=admin_context)
-
-    def _get_lbaas_agent_hosting_pool(self, pool_id,
-                                      expected_code=exc.HTTPOk.code,
-                                      admin_context=True):
-        path = "/lb/pools/%s/%s.%s" % (pool_id,
-                                       lbaas_agentscheduler.LOADBALANCER_AGENT,
-                                       self.fmt)
-        return self._request_list(path, expected_code=expected_code,
-                                  admin_context=admin_context)
-
-
-class LBaaSAgentSchedulerTestCase(test_agent_ext_plugin.AgentDBTestMixIn,
-                                  AgentSchedulerTestMixIn,
-                                  test_db_loadbalancer.LoadBalancerTestMixin,
-                                  test_plugin.NeutronDbPluginV2TestCase):
-    fmt = 'json'
-    plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-
-    def setUp(self):
-        # Save the global RESOURCE_ATTRIBUTE_MAP
-        self.saved_attr_map = {}
-        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
-            self.saved_attr_map[resource] = attrs.copy()
-        service_plugins = {
-            'lb_plugin_name': test_db_loadbalancer.DB_LB_PLUGIN_KLASS}
-
-        #default provider should support agent scheduling
-        cfg.CONF.set_override(
-            'service_provider',
-            [('LOADBALANCER:lbaas:neutron.services.'
-              'loadbalancer.drivers.haproxy.plugin_driver.'
-              'HaproxyOnHostPluginDriver:default')],
-            'service_providers')
-
-        # need to reload provider configuration
-        st_db.ServiceTypeManager._instance = None
-
-        super(LBaaSAgentSchedulerTestCase, self).setUp(
-            self.plugin_str, service_plugins=service_plugins)
-        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
-        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
-        self.adminContext = context.get_admin_context()
-        # Add the resources to the global attribute map
-        # This is done here as the setup process won't
-        # initialize the main API router which extends
-        # the global attribute map
-        attributes.RESOURCE_ATTRIBUTE_MAP.update(
-            agent.RESOURCE_ATTRIBUTE_MAP)
-        self.addCleanup(self.restore_attribute_map)
-
-    def restore_attribute_map(self):
-        # Restore the original RESOURCE_ATTRIBUTE_MAP
-        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
-
-    def test_report_states(self):
-        self._register_agent_states(lbaas_agents=True)
-        agents = self._list_agents()
-        self.assertEqual(6, len(agents['agents']))
-
-    def test_pool_scheduling_on_pool_creation(self):
-        self._register_agent_states(lbaas_agents=True)
-        with self.pool() as pool:
-            lbaas_agent = self._get_lbaas_agent_hosting_pool(
-                pool['pool']['id'])
-            self.assertIsNotNone(lbaas_agent)
-            self.assertEqual(lbaas_agent['agent']['agent_type'],
-                             constants.AGENT_TYPE_LOADBALANCER)
-            pools = self._list_pools_hosted_by_lbaas_agent(
-                lbaas_agent['agent']['id'])
-            self.assertEqual(1, len(pools['pools']))
-            self.assertEqual(pool['pool'], pools['pools'][0])
-
-    def test_schedule_pool_with_disabled_agent(self):
-        lbaas_hosta = {
-            'binary': 'neutron-loadbalancer-agent',
-            'host': LBAAS_HOSTA,
-            'topic': 'LOADBALANCER_AGENT',
-            'configurations': {'device_drivers': ['haproxy_ns']},
-            'agent_type': constants.AGENT_TYPE_LOADBALANCER}
-        self._register_one_agent_state(lbaas_hosta)
-        with self.pool() as pool:
-            lbaas_agent = self._get_lbaas_agent_hosting_pool(
-                pool['pool']['id'])
-            self.assertIsNotNone(lbaas_agent)
-
-        agents = self._list_agents()
-        self._disable_agent(agents['agents'][0]['id'])
-        pool = {'pool': {'name': 'test',
-                         'subnet_id': 'test',
-                         'lb_method': 'ROUND_ROBIN',
-                         'protocol': 'HTTP',
-                         'admin_state_up': True,
-                         'tenant_id': 'test',
-                         'description': 'test'}}
-        lbaas_plugin = manager.NeutronManager.get_service_plugins()[
-            plugin_const.LOADBALANCER]
-        self.assertRaises(loadbalancer.NoEligibleBackend,
-                          lbaas_plugin.create_pool, self.adminContext, pool)
-        pools = lbaas_plugin.get_pools(self.adminContext)
-        self.assertEqual('ERROR', pools[0]['status'])
-        self.assertEqual('No eligible backend',
-                         pools[0]['status_description'])
-
-    def test_schedule_pool_with_down_agent(self):
-        lbaas_hosta = {
-            'binary': 'neutron-loadbalancer-agent',
-            'host': LBAAS_HOSTA,
-            'topic': 'LOADBALANCER_AGENT',
-            'configurations': {'device_drivers': ['haproxy_ns']},
-            'agent_type': constants.AGENT_TYPE_LOADBALANCER}
-        self._register_one_agent_state(lbaas_hosta)
-        is_agent_down_str = 'neutron.db.agents_db.AgentDbMixin.is_agent_down'
-        with mock.patch(is_agent_down_str) as mock_is_agent_down:
-            mock_is_agent_down.return_value = False
-            with self.pool() as pool:
-                lbaas_agent = self._get_lbaas_agent_hosting_pool(
-                    pool['pool']['id'])
-            self.assertIsNotNone(lbaas_agent)
-        with mock.patch(is_agent_down_str) as mock_is_agent_down:
-            mock_is_agent_down.return_value = True
-            pool = {'pool': {'name': 'test',
-                             'subnet_id': 'test',
-                             'lb_method': 'ROUND_ROBIN',
-                             'protocol': 'HTTP',
-                             'provider': 'lbaas',
-                             'admin_state_up': True,
-                             'tenant_id': 'test',
-                             'description': 'test'}}
-            lbaas_plugin = manager.NeutronManager.get_service_plugins()[
-                plugin_const.LOADBALANCER]
-            self.assertRaises(loadbalancer.NoEligibleBackend,
-                              lbaas_plugin.create_pool,
-                              self.adminContext, pool)
-            pools = lbaas_plugin.get_pools(self.adminContext)
-            self.assertEqual('ERROR', pools[0]['status'])
-            self.assertEqual('No eligible backend',
-                             pools[0]['status_description'])
-
-    def test_pool_unscheduling_on_pool_deletion(self):
-        self._register_agent_states(lbaas_agents=True)
-        with self.pool(do_delete=False) as pool:
-            lbaas_agent = self._get_lbaas_agent_hosting_pool(
-                pool['pool']['id'])
-            self.assertIsNotNone(lbaas_agent)
-            self.assertEqual(lbaas_agent['agent']['agent_type'],
-                             constants.AGENT_TYPE_LOADBALANCER)
-            pools = self._list_pools_hosted_by_lbaas_agent(
-                lbaas_agent['agent']['id'])
-            self.assertEqual(1, len(pools['pools']))
-            self.assertEqual(pool['pool'], pools['pools'][0])
-
-            req = self.new_delete_request('pools',
-                                          pool['pool']['id'])
-            res = req.get_response(self.ext_api)
-            self.assertEqual(res.status_int, exc.HTTPNoContent.code)
-            pools = self._list_pools_hosted_by_lbaas_agent(
-                lbaas_agent['agent']['id'])
-            self.assertEqual(0, len(pools['pools']))
-
-    def test_pool_scheduling_non_admin_access(self):
-        self._register_agent_states(lbaas_agents=True)
-        with self.pool() as pool:
-            self._get_lbaas_agent_hosting_pool(
-                pool['pool']['id'],
-                expected_code=exc.HTTPForbidden.code,
-                admin_context=False)
-            self._list_pools_hosted_by_lbaas_agent(
-                'fake_id',
-                expected_code=exc.HTTPForbidden.code,
-                admin_context=False)
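LBaaSAgentSchedulerTestCase above saves the global RESOURCE_ATTRIBUTE_MAP, patches in the agent extension's resources, and restores the original on cleanup, because the test setup never builds the real API router that would normally register them. A reduced, standalone sketch of that save/patch/restore pattern over a generic module-level dict follows; the names are illustrative, not Neutron's.

# Hedged sketch only; RESOURCE_MAP stands in for the real global attribute map.
import unittest

RESOURCE_MAP = {'networks': {'name': {}}}


class GlobalMapTestCase(unittest.TestCase):
    def setUp(self):
        super(GlobalMapTestCase, self).setUp()
        # Copy each resource's attribute dict so mutations can be undone.
        self._saved = {res: attrs.copy()
                       for res, attrs in RESOURCE_MAP.items()}
        # Patch in extension resources the API router would otherwise add.
        RESOURCE_MAP.update({'agents': {'agent_type': {}}})
        self.addCleanup(self._restore)

    def _restore(self):
        RESOURCE_MAP.clear()
        RESOURCE_MAP.update(self._saved)

    def test_extension_resources_visible(self):
        self.assertIn('agents', RESOURCE_MAP)


if __name__ == '__main__':
    unittest.main()
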
diff --git a/neutron/tests/unit/services/loadbalancer/test_loadbalancer_plugin.py b/neutron/tests/unit/services/loadbalancer/test_loadbalancer_plugin.py
deleted file mode 100644 (file)
index 8619b98..0000000
+++ /dev/null
@@ -1,458 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License"); you may
-#  not use this file except in compliance with the License. You may obtain
-#  a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#  License for the specific language governing permissions and limitations
-#  under the License.
-
-import copy
-
-import mock
-from webob import exc
-
-from neutron.api.v2 import attributes as attr
-from neutron.extensions import loadbalancer
-from neutron.openstack.common import uuidutils
-from neutron.plugins.common import constants
-from neutron.tests.unit import test_api_v2
-from neutron.tests.unit import test_api_v2_extension
-
-
-_uuid = uuidutils.generate_uuid
-_get_path = test_api_v2._get_path
-
-
-class LoadBalancerExtensionTestCase(test_api_v2_extension.ExtensionTestCase):
-    fmt = 'json'
-
-    def setUp(self):
-        super(LoadBalancerExtensionTestCase, self).setUp()
-        self._setUpExtension(
-            'neutron.extensions.loadbalancer.LoadBalancerPluginBase',
-            constants.LOADBALANCER, loadbalancer.RESOURCE_ATTRIBUTE_MAP,
-            loadbalancer.Loadbalancer, 'lb', use_quota=True)
-
-    def test_vip_create(self):
-        vip_id = _uuid()
-        data = {'vip': {'name': 'vip1',
-                        'description': 'descr_vip1',
-                        'subnet_id': _uuid(),
-                        'address': '127.0.0.1',
-                        'protocol_port': 80,
-                        'protocol': 'HTTP',
-                        'pool_id': _uuid(),
-                        'session_persistence': {'type': 'HTTP_COOKIE'},
-                        'connection_limit': 100,
-                        'admin_state_up': True,
-                        'tenant_id': _uuid()}}
-        return_value = copy.copy(data['vip'])
-        return_value.update({'status': "ACTIVE", 'id': vip_id})
-
-        instance = self.plugin.return_value
-        instance.create_vip.return_value = return_value
-        res = self.api.post(_get_path('lb/vips', fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/%s' % self.fmt)
-        instance.create_vip.assert_called_with(mock.ANY,
-                                               vip=data)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('vip', res)
-        self.assertEqual(res['vip'], return_value)
-
-    def test_vip_list(self):
-        vip_id = _uuid()
-        return_value = [{'name': 'vip1',
-                         'admin_state_up': True,
-                         'tenant_id': _uuid(),
-                         'id': vip_id}]
-
-        instance = self.plugin.return_value
-        instance.get_vips.return_value = return_value
-
-        res = self.api.get(_get_path('lb/vips', fmt=self.fmt))
-
-        instance.get_vips.assert_called_with(mock.ANY, fields=mock.ANY,
-                                             filters=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_vip_update(self):
-        vip_id = _uuid()
-        update_data = {'vip': {'admin_state_up': False}}
-        return_value = {'name': 'vip1',
-                        'admin_state_up': False,
-                        'tenant_id': _uuid(),
-                        'status': "ACTIVE",
-                        'id': vip_id}
-
-        instance = self.plugin.return_value
-        instance.update_vip.return_value = return_value
-
-        res = self.api.put(_get_path('lb/vips', id=vip_id, fmt=self.fmt),
-                           self.serialize(update_data))
-
-        instance.update_vip.assert_called_with(mock.ANY, vip_id,
-                                               vip=update_data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('vip', res)
-        self.assertEqual(res['vip'], return_value)
-
-    def test_vip_get(self):
-        vip_id = _uuid()
-        return_value = {'name': 'vip1',
-                        'admin_state_up': False,
-                        'tenant_id': _uuid(),
-                        'status': "ACTIVE",
-                        'id': vip_id}
-
-        instance = self.plugin.return_value
-        instance.get_vip.return_value = return_value
-
-        res = self.api.get(_get_path('lb/vips', id=vip_id, fmt=self.fmt))
-
-        instance.get_vip.assert_called_with(mock.ANY, vip_id,
-                                            fields=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('vip', res)
-        self.assertEqual(res['vip'], return_value)
-
-    def test_vip_delete(self):
-        self._test_entity_delete('vip')
-
-    def test_pool_create(self):
-        pool_id = _uuid()
-        hm_id = _uuid()
-        data = {'pool': {'name': 'pool1',
-                         'description': 'descr_pool1',
-                         'subnet_id': _uuid(),
-                         'protocol': 'HTTP',
-                         'lb_method': 'ROUND_ROBIN',
-                         'health_monitors': [hm_id],
-                         'admin_state_up': True,
-                         'tenant_id': _uuid()}}
-        return_value = copy.copy(data['pool'])
-        return_value['provider'] = 'lbaas'
-        return_value.update({'status': "ACTIVE", 'id': pool_id})
-
-        instance = self.plugin.return_value
-        instance.create_pool.return_value = return_value
-        res = self.api.post(_get_path('lb/pools', fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/%s' % self.fmt)
-        data['pool']['provider'] = attr.ATTR_NOT_SPECIFIED
-        instance.create_pool.assert_called_with(mock.ANY,
-                                                pool=data)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('pool', res)
-        self.assertEqual(res['pool'], return_value)
-
-    def test_pool_list(self):
-        pool_id = _uuid()
-        return_value = [{'name': 'pool1',
-                         'admin_state_up': True,
-                         'tenant_id': _uuid(),
-                         'id': pool_id}]
-
-        instance = self.plugin.return_value
-        instance.get_pools.return_value = return_value
-
-        res = self.api.get(_get_path('lb/pools', fmt=self.fmt))
-
-        instance.get_pools.assert_called_with(mock.ANY, fields=mock.ANY,
-                                              filters=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_pool_update(self):
-        pool_id = _uuid()
-        update_data = {'pool': {'admin_state_up': False}}
-        return_value = {'name': 'pool1',
-                        'admin_state_up': False,
-                        'tenant_id': _uuid(),
-                        'status': "ACTIVE",
-                        'id': pool_id}
-
-        instance = self.plugin.return_value
-        instance.update_pool.return_value = return_value
-
-        res = self.api.put(_get_path('lb/pools', id=pool_id, fmt=self.fmt),
-                           self.serialize(update_data))
-
-        instance.update_pool.assert_called_with(mock.ANY, pool_id,
-                                                pool=update_data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('pool', res)
-        self.assertEqual(res['pool'], return_value)
-
-    def test_pool_get(self):
-        pool_id = _uuid()
-        return_value = {'name': 'pool1',
-                        'admin_state_up': False,
-                        'tenant_id': _uuid(),
-                        'status': "ACTIVE",
-                        'id': pool_id}
-
-        instance = self.plugin.return_value
-        instance.get_pool.return_value = return_value
-
-        res = self.api.get(_get_path('lb/pools', id=pool_id, fmt=self.fmt))
-
-        instance.get_pool.assert_called_with(mock.ANY, pool_id,
-                                             fields=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('pool', res)
-        self.assertEqual(res['pool'], return_value)
-
-    def test_pool_delete(self):
-        self._test_entity_delete('pool')
-
-    def test_pool_stats(self):
-        pool_id = _uuid()
-
-        stats = {'stats': 'dummy'}
-        instance = self.plugin.return_value
-        instance.stats.return_value = stats
-
-        path = _get_path('lb/pools', id=pool_id,
-                         action="stats", fmt=self.fmt)
-        res = self.api.get(path)
-
-        instance.stats.assert_called_with(mock.ANY, pool_id)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('stats', res)
-        self.assertEqual(res['stats'], stats['stats'])
-
-    def test_member_create(self):
-        member_id = _uuid()
-        data = {'member': {'pool_id': _uuid(),
-                           'address': '127.0.0.1',
-                           'protocol_port': 80,
-                           'weight': 1,
-                           'admin_state_up': True,
-                           'tenant_id': _uuid()}}
-        return_value = copy.copy(data['member'])
-        return_value.update({'status': "ACTIVE", 'id': member_id})
-
-        instance = self.plugin.return_value
-        instance.create_member.return_value = return_value
-        res = self.api.post(_get_path('lb/members', fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/%s' % self.fmt)
-        instance.create_member.assert_called_with(mock.ANY,
-                                                  member=data)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('member', res)
-        self.assertEqual(res['member'], return_value)
-
-    def test_member_list(self):
-        member_id = _uuid()
-        return_value = [{'name': 'member1',
-                         'admin_state_up': True,
-                         'tenant_id': _uuid(),
-                         'id': member_id}]
-
-        instance = self.plugin.return_value
-        instance.get_members.return_value = return_value
-
-        res = self.api.get(_get_path('lb/members', fmt=self.fmt))
-
-        instance.get_members.assert_called_with(mock.ANY, fields=mock.ANY,
-                                                filters=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_member_update(self):
-        member_id = _uuid()
-        update_data = {'member': {'admin_state_up': False}}
-        return_value = {'admin_state_up': False,
-                        'tenant_id': _uuid(),
-                        'status': "ACTIVE",
-                        'id': member_id}
-
-        instance = self.plugin.return_value
-        instance.update_member.return_value = return_value
-
-        res = self.api.put(_get_path('lb/members', id=member_id,
-                                     fmt=self.fmt),
-                           self.serialize(update_data))
-
-        instance.update_member.assert_called_with(mock.ANY, member_id,
-                                                  member=update_data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('member', res)
-        self.assertEqual(res['member'], return_value)
-
-    def test_member_get(self):
-        member_id = _uuid()
-        return_value = {'admin_state_up': False,
-                        'tenant_id': _uuid(),
-                        'status': "ACTIVE",
-                        'id': member_id}
-
-        instance = self.plugin.return_value
-        instance.get_member.return_value = return_value
-
-        res = self.api.get(_get_path('lb/members', id=member_id,
-                                     fmt=self.fmt))
-
-        instance.get_member.assert_called_with(mock.ANY, member_id,
-                                               fields=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('member', res)
-        self.assertEqual(res['member'], return_value)
-
-    def test_member_delete(self):
-        self._test_entity_delete('member')
-
-    def test_health_monitor_create(self):
-        health_monitor_id = _uuid()
-        data = {'health_monitor': {'type': 'HTTP',
-                                   'delay': 2,
-                                   'timeout': 1,
-                                   'max_retries': 3,
-                                   'http_method': 'GET',
-                                   'url_path': '/path',
-                                   'expected_codes': '200-300',
-                                   'admin_state_up': True,
-                                   'tenant_id': _uuid()}}
-        return_value = copy.copy(data['health_monitor'])
-        return_value.update({'status': "ACTIVE", 'id': health_monitor_id})
-
-        instance = self.plugin.return_value
-        instance.create_health_monitor.return_value = return_value
-        res = self.api.post(_get_path('lb/health_monitors',
-                                      fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/%s' % self.fmt)
-        instance.create_health_monitor.assert_called_with(mock.ANY,
-                                                          health_monitor=data)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('health_monitor', res)
-        self.assertEqual(res['health_monitor'], return_value)
-
-    def test_health_monitor_create_with_timeout_negative(self):
-        data = {'health_monitor': {'type': 'HTTP',
-                                   'delay': 2,
-                                   'timeout': -1,
-                                   'max_retries': 3,
-                                   'http_method': 'GET',
-                                   'url_path': '/path',
-                                   'expected_codes': '200-300',
-                                   'admin_state_up': True,
-                                   'tenant_id': _uuid()}}
-        res = self.api.post(_get_path('lb/health_monitors',
-                                      fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/%s' % self.fmt,
-                            expect_errors=True)
-        self.assertEqual(400, res.status_int)
-
-    def test_health_monitor_list(self):
-        health_monitor_id = _uuid()
-        return_value = [{'type': 'HTTP',
-                         'admin_state_up': True,
-                         'tenant_id': _uuid(),
-                         'id': health_monitor_id}]
-
-        instance = self.plugin.return_value
-        instance.get_health_monitors.return_value = return_value
-
-        res = self.api.get(_get_path('lb/health_monitors', fmt=self.fmt))
-
-        instance.get_health_monitors.assert_called_with(
-            mock.ANY, fields=mock.ANY, filters=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_health_monitor_update(self):
-        health_monitor_id = _uuid()
-        update_data = {'health_monitor': {'admin_state_up': False}}
-        return_value = {'type': 'HTTP',
-                        'admin_state_up': False,
-                        'tenant_id': _uuid(),
-                        'status': "ACTIVE",
-                        'id': health_monitor_id}
-
-        instance = self.plugin.return_value
-        instance.update_health_monitor.return_value = return_value
-
-        res = self.api.put(_get_path('lb/health_monitors',
-                                     id=health_monitor_id,
-                                     fmt=self.fmt),
-                           self.serialize(update_data))
-
-        instance.update_health_monitor.assert_called_with(
-            mock.ANY, health_monitor_id, health_monitor=update_data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('health_monitor', res)
-        self.assertEqual(res['health_monitor'], return_value)
-
-    def test_health_monitor_get(self):
-        health_monitor_id = _uuid()
-        return_value = {'type': 'HTTP',
-                        'admin_state_up': False,
-                        'tenant_id': _uuid(),
-                        'status': "ACTIVE",
-                        'id': health_monitor_id}
-
-        instance = self.plugin.return_value
-        instance.get_health_monitor.return_value = return_value
-
-        res = self.api.get(_get_path('lb/health_monitors',
-                                     id=health_monitor_id,
-                                     fmt=self.fmt))
-
-        instance.get_health_monitor.assert_called_with(
-            mock.ANY, health_monitor_id, fields=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('health_monitor', res)
-        self.assertEqual(res['health_monitor'], return_value)
-
-    def test_health_monitor_delete(self):
-        self._test_entity_delete('health_monitor')
-
-    def test_create_pool_health_monitor(self):
-        health_monitor_id = _uuid()
-        data = {'health_monitor': {'id': health_monitor_id,
-                                   'tenant_id': _uuid()}}
-
-        return_value = copy.copy(data['health_monitor'])
-        instance = self.plugin.return_value
-        instance.create_pool_health_monitor.return_value = return_value
-        res = self.api.post('/lb/pools/id1/health_monitors',
-                            self.serialize(data),
-                            content_type='application/%s' % self.fmt)
-        instance.create_pool_health_monitor.assert_called_with(
-            mock.ANY, pool_id='id1', health_monitor=data)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('health_monitor', res)
-        self.assertEqual(res['health_monitor'], return_value)
-
-    def test_delete_pool_health_monitor(self):
-        health_monitor_id = _uuid()
-
-        res = self.api.delete('/lb/pools/id1/health_monitors/%s' %
-                              health_monitor_id)
-
-        instance = self.plugin.return_value
-        instance.delete_pool_health_monitor.assert_called_with(
-            mock.ANY, health_monitor_id, pool_id='id1')
-        self.assertEqual(res.status_int, exc.HTTPNoContent.code)
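The extension tests deleted above check one contract: the API layer hands the request body to the service plugin as a keyword argument named after the resource, and serializes whatever the plugin returns. Below is a hypothetical, dependency-free sketch of that contract; VipController is an invented stand-in for the API router, not Neutron code.

# Hedged sketch only; invented controller, real contract shape.
import json
from unittest import mock


class VipController(object):
    def __init__(self, plugin):
        self.plugin = plugin

    def create(self, context, body):
        data = json.loads(body)                     # {'vip': {...}}
        vip = self.plugin.create_vip(context, vip=data)
        return {'vip': vip}


def _demo():
    plugin = mock.Mock()
    plugin.create_vip.return_value = {'id': 'fake-id', 'status': 'ACTIVE'}
    ctrl = VipController(plugin)
    body = json.dumps({'vip': {'name': 'vip1', 'protocol': 'HTTP'}})

    res = ctrl.create(mock.sentinel.ctx, body)

    # The plugin sees the whole body under the resource keyword...
    plugin.create_vip.assert_called_with(
        mock.sentinel.ctx, vip={'vip': {'name': 'vip1', 'protocol': 'HTTP'}})
    # ...and the response echoes the plugin's return value.
    assert res['vip']['status'] == 'ACTIVE'


if __name__ == '__main__':
    _demo()
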
diff --git a/neutron/tests/unit/services/loadbalancer/test_loadbalancer_quota_ext.py b/neutron/tests/unit/services/loadbalancer/test_loadbalancer_quota_ext.py
deleted file mode 100644 (file)
index 7edbdbb..0000000
+++ /dev/null
@@ -1,158 +0,0 @@
-# Copyright 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo.config import cfg
-
-from neutron import context
-from neutron import quota
-from neutron.tests.unit import test_api_v2
-from neutron.tests.unit import test_quota_ext
-
-_get_path = test_api_v2._get_path
-
-
-class LBaaSQuotaExtensionTestCase(
-    test_quota_ext.QuotaExtensionTestCase):
-
-    def setUp(self):
-        super(LBaaSQuotaExtensionTestCase, self).setUp()
-        cfg.CONF.set_override(
-            'quota_items',
-            ['vip', 'pool', 'member', 'health_monitor', 'extra1'],
-            group='QUOTAS')
-        quota.register_resources_from_config()
-
-
-class LBaaSQuotaExtensionDbTestCase(LBaaSQuotaExtensionTestCase):
-    fmt = 'json'
-
-    def setUp(self):
-        cfg.CONF.set_override(
-            'quota_driver',
-            'neutron.db.quota_db.DbQuotaDriver',
-            group='QUOTAS')
-        super(LBaaSQuotaExtensionDbTestCase, self).setUp()
-
-    def test_quotas_loaded_right(self):
-        res = self.api.get(_get_path('quotas', fmt=self.fmt))
-        quota = self.deserialize(res)
-        self.assertEqual([], quota['quotas'])
-        self.assertEqual(200, res.status_int)
-
-    def test_quotas_default_values(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env)
-        quota = self.deserialize(res)
-        self.assertEqual(10, quota['quota']['vip'])
-        self.assertEqual(10, quota['quota']['pool'])
-        self.assertEqual(-1, quota['quota']['member'])
-        self.assertEqual(-1, quota['quota']['health_monitor'])
-        self.assertEqual(-1, quota['quota']['extra1'])
-
-    def test_show_quotas_with_admin(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id + '2',
-                                                  is_admin=True)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env)
-        self.assertEqual(200, res.status_int)
-        quota = self.deserialize(res)
-        self.assertEqual(10, quota['quota']['vip'])
-        self.assertEqual(10, quota['quota']['pool'])
-        self.assertEqual(-1, quota['quota']['member'])
-        self.assertEqual(-1, quota['quota']['health_monitor'])
-
-    def test_show_quotas_with_owner_tenant(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=False)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env)
-        self.assertEqual(200, res.status_int)
-        quota = self.deserialize(res)
-        self.assertEqual(10, quota['quota']['vip'])
-        self.assertEqual(10, quota['quota']['pool'])
-        self.assertEqual(-1, quota['quota']['member'])
-        self.assertEqual(-1, quota['quota']['health_monitor'])
-
-    def test_update_quotas_to_unlimited(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=True)}
-        quotas = {'quota': {'pool': -1}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env,
-                           expect_errors=False)
-        self.assertEqual(200, res.status_int)
-
-    def test_update_quotas_exceeding_current_limit(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id,
-                                                  is_admin=True)}
-        quotas = {'quota': {'pool': 120}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env,
-                           expect_errors=False)
-        self.assertEqual(200, res.status_int)
-
-    def test_update_quotas_with_admin(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id + '2',
-                                                  is_admin=True)}
-        quotas = {'quota': {'pool': 100}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas), extra_environ=env)
-        self.assertEqual(200, res.status_int)
-        env2 = {'neutron.context': context.Context('', tenant_id)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env2)
-        quota = self.deserialize(res)
-        self.assertEqual(10, quota['quota']['vip'])
-        self.assertEqual(100, quota['quota']['pool'])
-        self.assertEqual(-1, quota['quota']['member'])
-        self.assertEqual(-1, quota['quota']['health_monitor'])
-
-
-class LBaaSQuotaExtensionCfgTestCase(
-    LBaaSQuotaExtensionTestCase):
-
-    def setUp(self):
-        cfg.CONF.set_override(
-            'quota_driver',
-            'neutron.quota.ConfDriver',
-            group='QUOTAS')
-        super(LBaaSQuotaExtensionCfgTestCase, self).setUp()
-
-    def test_quotas_default_values(self):
-        tenant_id = 'tenant_id1'
-        env = {'neutron.context': context.Context('', tenant_id)}
-        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           extra_environ=env)
-        quota = self.deserialize(res)
-        self.assertEqual(10, quota['quota']['vip'])
-        self.assertEqual(10, quota['quota']['pool'])
-        self.assertEqual(-1, quota['quota']['member'])
-        self.assertEqual(-1, quota['quota']['health_monitor'])
-        self.assertEqual(-1, quota['quota']['extra1'])
-
-    def test_update_quotas_forbidden(self):
-        tenant_id = 'tenant_id1'
-        quotas = {'quota': {'pool': 100}}
-        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
-                           self.serialize(quotas),
-                           expect_errors=True)
-        self.assertEqual(403, res.status_int)
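The quota tests above boil down to per-tenant overrides layered on configured defaults, with -1 meaning unlimited. A toy sketch of that lookup behaviour is below; Neutron's DbQuotaDriver is database-backed and config-driven, so this in-memory driver and its names are illustrative only.

# Hedged sketch only; not Neutron's quota driver.
DEFAULTS = {'vip': 10, 'pool': 10, 'member': -1, 'health_monitor': -1}


class ToyQuotaDriver(object):
    def __init__(self, defaults):
        self.defaults = dict(defaults)
        self._overrides = {}        # tenant_id -> {resource: limit}

    def update_quota(self, tenant_id, resource, limit):
        self._overrides.setdefault(tenant_id, {})[resource] = limit

    def get_tenant_quotas(self, tenant_id):
        # Overrides win; anything not overridden falls back to the defaults.
        quotas = dict(self.defaults)
        quotas.update(self._overrides.get(tenant_id, {}))
        return quotas


if __name__ == '__main__':
    driver = ToyQuotaDriver(DEFAULTS)
    assert driver.get_tenant_quotas('tenant_id1')['pool'] == 10
    driver.update_quota('tenant_id1', 'pool', 100)   # admin PUT on quotas
    q = driver.get_tenant_quotas('tenant_id1')
    assert q['pool'] == 100 and q['vip'] == 10 and q['member'] == -1
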
diff --git a/neutron/tests/unit/services/vpn/__init__.py b/neutron/tests/unit/services/vpn/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/vpn/device_drivers/__init__.py b/neutron/tests/unit/services/vpn/device_drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/vpn/device_drivers/test_cisco_csr_rest.py b/neutron/tests/unit/services/vpn/device_drivers/test_cisco_csr_rest.py
deleted file mode 100644 (file)
index 5d86cea..0000000
+++ /dev/null
@@ -1,1634 +0,0 @@
-# Copyright 2014 Cisco Systems, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import random
-import re
-
-import requests
-from requests import exceptions as r_exc
-from requests_mock.contrib import fixture as mock_fixture
-
-from neutron.services.vpn.device_drivers import (
-    cisco_csr_rest_client as csr_client)
-from neutron.tests import base
-
-
-dummy_policy_id = 'dummy-ipsec-policy-id-name'
-TEST_VRF = 'nrouter-123456'
-BASE_URL = 'https://%s:55443/api/v1/'
-LOCAL_URL = 'https://localhost:55443/api/v1/'
-
-URI_HOSTNAME = 'global/host-name'
-URI_USERS = 'global/local-users'
-URI_AUTH = 'auth/token-services'
-URI_INTERFACE_GE1 = 'interfaces/GigabitEthernet1'
-URI_PSK = 'vrf/' + TEST_VRF + '/vpn-svc/ike/keyrings'
-URI_PSK_ID = URI_PSK + '/%s'
-URI_IKE_POLICY = 'vpn-svc/ike/policies'
-URI_IKE_POLICY_ID = URI_IKE_POLICY + '/%s'
-URI_IPSEC_POLICY = 'vpn-svc/ipsec/policies'
-URI_IPSEC_POLICY_ID = URI_IPSEC_POLICY + '/%s'
-URI_IPSEC_CONN = 'vrf/' + TEST_VRF + '/vpn-svc/site-to-site'
-URI_IPSEC_CONN_ID = URI_IPSEC_CONN + '/%s'
-URI_KEEPALIVE = 'vpn-svc/ike/keepalive'
-URI_ROUTES = 'vrf/' + TEST_VRF + '/routing-svc/static-routes'
-URI_ROUTES_ID = URI_ROUTES + '/%s'
-URI_SESSIONS = 'vrf/' + TEST_VRF + '/vpn-svc/site-to-site/active/sessions'
-
-
-# Note: Helper functions to test reuse of IDs.
-def generate_pre_shared_key_id():
-    return random.randint(100, 200)
-
-
-def generate_ike_policy_id():
-    return random.randint(200, 300)
-
-
-def generate_ipsec_policy_id():
-    return random.randint(300, 400)
-
-
-class CiscoCsrBaseTestCase(base.BaseTestCase):
-
-    """Helper methods to register mock intercepts - used by child classes."""
-
-    def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
-        super(CiscoCsrBaseTestCase, self).setUp()
-        self.base_url = BASE_URL % host
-        self.requests = self.useFixture(mock_fixture.Fixture())
-        info = {'rest_mgmt_ip': host, 'tunnel_ip': tunnel_ip,
-                'vrf': 'nrouter-123456',
-                'username': 'stack', 'password': 'cisco', 'timeout': timeout}
-        self.csr = csr_client.CsrRestClient(info)
-
-    def _register_local_get(self, uri, json=None,
-                            result_code=requests.codes.OK):
-        self.requests.register_uri(
-            'GET',
-            LOCAL_URL + uri,
-            status_code=result_code,
-            json=json)
-
-    def _register_local_post(self, uri, resource_id,
-                             result_code=requests.codes.CREATED):
-        self.requests.register_uri(
-            'POST',
-            LOCAL_URL + uri,
-            status_code=result_code,
-            headers={'location': LOCAL_URL + uri + '/' + str(resource_id)})
-
-    def _register_local_delete(self, uri, resource_id, json=None,
-                               result_code=requests.codes.NO_CONTENT):
-        self.requests.register_uri(
-            'DELETE',
-            LOCAL_URL + uri + '/' + str(resource_id),
-            status_code=result_code,
-            json=json)
-
-    def _register_local_delete_by_id(self, resource_id,
-                                     result_code=requests.codes.NO_CONTENT):
-        local_resource_re = re.compile(LOCAL_URL + '.+%s$' % resource_id)
-        self.requests.register_uri(
-            'DELETE',
-            local_resource_re,
-            status_code=result_code)
-
-    def _register_local_put(self, uri, resource_id,
-                            result_code=requests.codes.NO_CONTENT):
-        self.requests.register_uri('PUT',
-                                   LOCAL_URL + uri + '/' + resource_id,
-                                   status_code=result_code)
-
-    def _register_local_get_not_found(self, uri, resource_id,
-                                      result_code=requests.codes.NOT_FOUND):
-        self.requests.register_uri(
-            'GET',
-            LOCAL_URL + uri + '/' + str(resource_id),
-            status_code=result_code)
-
-    def _helper_register_auth_request(self):
-        self.requests.register_uri('POST',
-                                   LOCAL_URL + URI_AUTH,
-                                   status_code=requests.codes.OK,
-                                   json={'token-id': 'dummy-token'})
-
-    def _helper_register_psk_post(self, psk_id):
-        self._register_local_post(URI_PSK, psk_id)
-
-    def _helper_register_ike_policy_post(self, policy_id):
-        self._register_local_post(URI_IKE_POLICY, policy_id)
-
-    def _helper_register_ipsec_policy_post(self, policy_id):
-        self._register_local_post(URI_IPSEC_POLICY, policy_id)
-
-    def _helper_register_tunnel_post(self, tunnel):
-        self._register_local_post(URI_IPSEC_CONN, tunnel)
-
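# A minimal, self-contained sketch of the requests_mock interception that the
# _register_local_* helpers above wrap; the URL and JSON body here are
# illustrative assumptions, not the module's BASE_URL/URI constants.
import requests
import requests_mock

with requests_mock.Mocker() as mocked:
    mocked.register_uri('GET',
                        'https://localhost:55443/api/v1/global/host-name',
                        status_code=200,
                        json={'kind': 'object#host-name',
                              'host-name': 'Router'})
    reply = requests.get('https://localhost:55443/api/v1/global/host-name')
    assert reply.json()['host-name'] == 'Router'
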
-
-class TestCsrLoginRestApi(CiscoCsrBaseTestCase):
-
-    """Test logging into CSR to obtain token-id."""
-
-    def test_get_token(self):
-        """Obtain the token and its expiration time."""
-        self._helper_register_auth_request()
-        self.assertTrue(self.csr.authenticate())
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertIsNotNone(self.csr.token)
-
-    def test_unauthorized_token_request(self):
-        """Negative test of invalid user/password."""
-        self.requests.register_uri('POST',
-                                   LOCAL_URL + URI_AUTH,
-                                   status_code=requests.codes.UNAUTHORIZED)
-        self.csr.auth = ('stack', 'bogus')
-        self.assertIsNone(self.csr.authenticate())
-        self.assertEqual(requests.codes.UNAUTHORIZED, self.csr.status)
-
-    def _simulate_wrong_host(self, request):
-        if 'wrong-host' in request.url:
-            raise r_exc.ConnectionError()
-
-    def test_non_existent_host(self):
-        """Negative test of request to non-existent host."""
-        self.requests.add_matcher(self._simulate_wrong_host)
-        self.csr.host = 'wrong-host'
-        self.csr.token = 'Set by some previously successful access'
-        self.assertIsNone(self.csr.authenticate())
-        self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
-        self.assertIsNone(self.csr.token)
-
-    def _simulate_token_timeout(self, request):
-        raise r_exc.Timeout()
-
-    def test_timeout_on_token_access(self):
-        """Negative test of a timeout on a request."""
-        self.requests.add_matcher(self._simulate_token_timeout)
-        self.assertIsNone(self.csr.authenticate())
-        self.assertEqual(requests.codes.REQUEST_TIMEOUT, self.csr.status)
-        self.assertIsNone(self.csr.token)
-
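# A sketch of the token exchange these login tests drive through the client:
# POST credentials to the auth endpoint, read 'token-id' from the JSON reply,
# and send it on later calls. The auth path and the 'X-auth-token' header
# name are assumptions about the CSR REST API, not taken from this module.
import requests
import requests_mock

with requests_mock.Mocker() as mocked:
    mocked.register_uri('POST',
                        'https://localhost:55443/api/v1/auth/token-services',
                        status_code=200,
                        json={'token-id': 'dummy-token'})
    reply = requests.post(
        'https://localhost:55443/api/v1/auth/token-services',
        auth=('stack', 'cisco'))
    # Later requests would carry the token instead of user/password.
    token_header = {'X-auth-token': reply.json()['token-id']}
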
-
-class TestCsrGetRestApi(CiscoCsrBaseTestCase):
-
-    """Test CSR GET REST API."""
-
-    def test_valid_rest_gets(self):
-        """Simple GET requests.
-
-        The first request will do a POST to get the token (login). Assumes
-        that there are two interfaces on the CSR.
-        """
-
-        self._helper_register_auth_request()
-        self._register_local_get(URI_HOSTNAME,
-                                 json={u'kind': u'object#host-name',
-                                       u'host-name': u'Router'})
-        self._register_local_get(URI_USERS,
-                                 json={u'kind': u'collection#local-user',
-                                       u'users': ['peter', 'paul', 'mary']})
-
-        actual = self.csr.get_request(URI_HOSTNAME)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertIn('host-name', actual)
-        self.assertIsNotNone(actual['host-name'])
-
-        actual = self.csr.get_request(URI_USERS)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertIn('users', actual)
-
-
-class TestCsrPostRestApi(CiscoCsrBaseTestCase):
-
-    """Test CSR POST REST API."""
-
-    def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
-        """Setup for each test in this suite.
-
-        Each test case will have a normal authentication mock response
-        registered here, although they may replace it, as needed.
-        """
-        super(TestCsrPostRestApi, self).setUp(host, tunnel_ip, timeout)
-        self._helper_register_auth_request()
-
-    def test_post_requests(self):
-        """Simple POST requests (repeatable).
-
-        The first request will do a POST to get the token (login). Assumes
-        that there are two interfaces (Ge1 and Ge2) on the CSR.
-        """
-
-        interface_re = re.compile('https://localhost:55443/.*/interfaces/'
-                                  'GigabitEthernet\d/statistics')
-        self.requests.register_uri('POST',
-                                   interface_re,
-                                   status_code=requests.codes.NO_CONTENT)
-
-        actual = self.csr.post_request(
-            'interfaces/GigabitEthernet1/statistics',
-            payload={'action': 'clear'})
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        self.assertIsNone(actual)
-        actual = self.csr.post_request(
-            'interfaces/GigabitEthernet2/statistics',
-            payload={'action': 'clear'})
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        self.assertIsNone(actual)
-
-    def test_post_with_location(self):
-        """Create a user and verify that location returned."""
-        self.requests.register_uri(
-            'POST',
-            LOCAL_URL + URI_USERS,
-            status_code=requests.codes.CREATED,
-            headers={'location': LOCAL_URL + URI_USERS + '/test-user'})
-        location = self.csr.post_request(
-            URI_USERS,
-            payload={'username': 'test-user',
-                     'password': 'pass12345',
-                     'privilege': 15})
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_USERS + '/test-user', location)
-
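# A small sketch of the Location-header round trip that the test above mocks:
# a 201 reply carries the new resource's URI in its headers, which the client
# then returns from post_request(). The user URI below is an illustrative
# assumption, not the module's URI_USERS constant.
import requests
import requests_mock

with requests_mock.Mocker() as mocked:
    mocked.register_uri(
        'POST',
        'https://localhost:55443/api/v1/global/local-users',
        status_code=201,
        headers={'location': 'https://localhost:55443/api/v1/'
                             'global/local-users/test-user'})
    reply = requests.post('https://localhost:55443/api/v1/global/local-users',
                          json={'username': 'test-user',
                                'password': 'pass12345',
                                'privilege': 15})
    assert reply.headers['location'].endswith('local-users/test-user')
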
-    def test_post_missing_required_attribute(self):
-        """Negative test of POST with missing mandatory info."""
-        self.requests.register_uri('POST',
-                                   LOCAL_URL + URI_USERS,
-                                   status_code=requests.codes.BAD_REQUEST)
-        self.csr.post_request(URI_USERS,
-                              payload={'password': 'pass12345',
-                                       'privilege': 15})
-        self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
-
-    def test_post_invalid_attribute(self):
-        """Negative test of POST with invalid info."""
-        self.requests.register_uri('POST',
-                                   LOCAL_URL + URI_USERS,
-                                   status_code=requests.codes.BAD_REQUEST)
-        self.csr.post_request(URI_USERS,
-                              payload={'username': 'test-user',
-                                       'password': 'pass12345',
-                                       'privilege': 20})
-        self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
-
-    def test_post_already_exists(self):
-        """Negative test of a duplicate POST.
-
-        Uses the lower level _do_request() API to just perform the POST and
-        obtain the response, without any error processing.
-        """
-
-        self.requests.register_uri(
-            'POST',
-            LOCAL_URL + URI_USERS,
-            status_code=requests.codes.CREATED,
-            headers={'location': LOCAL_URL + URI_USERS + '/test-user'})
-
-        location = self.csr._do_request(
-            'POST',
-            URI_USERS,
-            payload={'username': 'test-user',
-                     'password': 'pass12345',
-                     'privilege': 15},
-            more_headers=csr_client.HEADER_CONTENT_TYPE_JSON)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_USERS + '/test-user', location)
-        self.csr.post_request(URI_USERS,
-                              payload={'username': 'test-user',
-                                       'password': 'pass12345',
-                                       'privilege': 20})
-
-        self.requests.register_uri(
-            'POST',
-            LOCAL_URL + URI_USERS,
-            status_code=requests.codes.NOT_FOUND,
-            json={u'error-code': -1,
-                  u'error-message': u'user test-user already exists'})
-
-        self.csr._do_request(
-            'POST',
-            URI_USERS,
-            payload={'username': 'test-user',
-                     'password': 'pass12345',
-                     'privilege': 15},
-            more_headers=csr_client.HEADER_CONTENT_TYPE_JSON)
-        # Note: For local-user, a 404 error is returned. For a
-        # site-to-site connection, a 400 is returned.
-        self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
-
-    def test_post_changing_value(self):
-        """Negative test of a POST trying to change a value."""
-        self.requests.register_uri(
-            'POST',
-            LOCAL_URL + URI_USERS,
-            status_code=requests.codes.CREATED,
-            headers={'location': LOCAL_URL + URI_USERS + '/test-user'})
-
-        location = self.csr.post_request(
-            URI_USERS,
-            payload={'username': 'test-user',
-                     'password': 'pass12345',
-                     'privilege': 15})
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_USERS + '/test-user', location)
-
-        self.requests.register_uri(
-            'POST',
-            LOCAL_URL + URI_USERS,
-            status_code=requests.codes.NOT_FOUND,
-            json={u'error-code': -1,
-                  u'error-message': u'user test-user already exists'})
-
-        actual = self.csr.post_request(URI_USERS,
-                                       payload={'username': 'test-user',
-                                                'password': 'changed',
-                                                'privilege': 15})
-        self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
-        expected = {u'error-code': -1,
-                    u'error-message': u'user test-user already exists'}
-        self.assertDictSupersetOf(expected, actual)
-
-
-class TestCsrPutRestApi(CiscoCsrBaseTestCase):
-
-    """Test CSR PUT REST API."""
-
-    def _save_resources(self):
-        self._register_local_get(URI_HOSTNAME,
-                                 json={u'kind': u'object#host-name',
-                                       u'host-name': u'Router'})
-        interface_info = {u'kind': u'object#interface',
-                          u'description': u'Changed description',
-                          u'if-name': 'interfaces/GigabitEthernet1',
-                          u'proxy-arp': True,
-                          u'subnet-mask': u'255.255.255.0',
-                          u'icmp-unreachable': True,
-                          u'nat-direction': u'',
-                          u'icmp-redirects': True,
-                          u'ip-address': u'192.168.200.1',
-                          u'verify-unicast-source': False,
-                          u'type': u'ethernet'}
-        self._register_local_get(URI_INTERFACE_GE1,
-                                 json=interface_info)
-        details = self.csr.get_request(URI_HOSTNAME)
-        if self.csr.status != requests.codes.OK:
-            self.fail("Unable to save original host name")
-        self.original_host = details['host-name']
-        details = self.csr.get_request(URI_INTERFACE_GE1)
-        if self.csr.status != requests.codes.OK:
-            self.fail("Unable to save interface Ge1 description")
-        self.original_if = details
-        self.csr.token = None
-
-    def _restore_resources(self, user, password):
-        """Restore the host name and interface description.
-
-        Must restore the user and password, so that an authentication
-        token can be obtained (as some tests corrupt the auth info).
-        Also clears the token, so that a fresh one is obtained.
-        """
-
-        self._register_local_put('global', 'host-name')
-        self._register_local_put('interfaces', 'GigabitEthernet1')
-
-        self.csr.auth = (user, password)
-        self.csr.token = None
-        payload = {'host-name': self.original_host}
-        self.csr.put_request(URI_HOSTNAME, payload=payload)
-        if self.csr.status != requests.codes.NO_CONTENT:
-            self.fail("Unable to restore host name after test")
-        payload = {'description': self.original_if['description'],
-                   'if-name': self.original_if['if-name'],
-                   'ip-address': self.original_if['ip-address'],
-                   'subnet-mask': self.original_if['subnet-mask'],
-                   'type': self.original_if['type']}
-        self.csr.put_request(URI_INTERFACE_GE1,
-                             payload=payload)
-        if self.csr.status != requests.codes.NO_CONTENT:
-            self.fail("Unable to restore I/F Ge1 description after test")
-
-    def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
-        """Setup for each test in this suite.
-
-        Each test case will have a normal authentication mock response
-        registered here, although they may replace it, as needed. In
-        addition, resources are saved, before each test is run, and
-        restored, after each test completes.
-        """
-        super(TestCsrPutRestApi, self).setUp(host, tunnel_ip, timeout)
-        self._helper_register_auth_request()
-        self._save_resources()
-        self.addCleanup(self._restore_resources, 'stack', 'cisco')
-
-    def test_put_requests(self):
-        """Simple PUT requests (repeatable).
-
-        The first request will do a POST to get the token (login). Assumes
-        that there are two interfaces on the CSR (Ge1 and Ge2).
-        """
-
-        self._register_local_put('interfaces', 'GigabitEthernet1')
-        self._register_local_put('global', 'host-name')
-
-        actual = self.csr.put_request(URI_HOSTNAME,
-                                      payload={'host-name': 'TestHost'})
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        self.assertIsNone(actual)
-
-        actual = self.csr.put_request(URI_HOSTNAME,
-                                      payload={'host-name': 'TestHost2'})
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        self.assertIsNone(actual)
-
-    def test_change_interface_description(self):
-        """Test that interface description can be changed.
-
-        This was a problem with an earlier version of the CSR image and is
-        here to prevent regression.
-        """
-        self._register_local_put('interfaces', 'GigabitEthernet1')
-        payload = {'description': u'Changed description',
-                   'if-name': self.original_if['if-name'],
-                   'ip-address': self.original_if['ip-address'],
-                   'subnet-mask': self.original_if['subnet-mask'],
-                   'type': self.original_if['type']}
-        actual = self.csr.put_request(URI_INTERFACE_GE1, payload=payload)
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        self.assertIsNone(actual)
-        actual = self.csr.get_request(URI_INTERFACE_GE1)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertIn('description', actual)
-        self.assertEqual(u'Changed description',
-                         actual['description'])
-
-    def ignore_test_change_to_empty_interface_description(self):
-        """Test that interface description can be changed to empty string.
-
-        This is here to prevent regression, where the CSR was rejecting
-        an attempt to set the description to an empty string.
-        """
-        self._register_local_put('interfaces', 'GigabitEthernet1')
-        payload = {'description': '',
-                   'if-name': self.original_if['if-name'],
-                   'ip-address': self.original_if['ip-address'],
-                   'subnet-mask': self.original_if['subnet-mask'],
-                   'type': self.original_if['type']}
-        actual = self.csr.put_request(URI_INTERFACE_GE1, payload=payload)
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        self.assertIsNone(actual)
-        actual = self.csr.get_request(URI_INTERFACE_GE1)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertIn('description', actual)
-        self.assertEqual('', actual['description'])
-
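# A minimal sketch of the save/restore-via-addCleanup pattern used by
# TestCsrPutRestApi.setUp above; plain unittest is used here for brevity,
# whereas the tests above derive from base.BaseTestCase.
import unittest

class RestoreExample(unittest.TestCase):
    def setUp(self):
        super(RestoreExample, self).setUp()
        self.hostname = 'Router'
        saved = self.hostname                  # save before the test runs
        self.addCleanup(self._restore, saved)  # restore even if the test fails

    def _restore(self, value):
        self.hostname = value

    def test_change_hostname(self):
        self.hostname = 'TestHost'
        self.assertEqual('TestHost', self.hostname)
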
-
-class TestCsrDeleteRestApi(CiscoCsrBaseTestCase):
-
-    """Test CSR DELETE REST API."""
-
-    def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
-        """Setup for each test in this suite.
-
-        Each test case will have a normal authentication mock response
-        registered here, although they may replace it, as needed.
-        """
-        super(TestCsrDeleteRestApi, self).setUp(host, tunnel_ip, timeout)
-        self._helper_register_auth_request()
-
-    def _make_dummy_user(self):
-        """Create a user that will be later deleted."""
-        self.requests.register_uri(
-            'POST',
-            LOCAL_URL + URI_USERS,
-            status_code=requests.codes.CREATED,
-            headers={'location': LOCAL_URL + URI_USERS + '/dummy'})
-        self.csr.post_request(URI_USERS,
-                              payload={'username': 'dummy',
-                                       'password': 'dummy',
-                                       'privilege': 15})
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-
-    def test_delete_requests(self):
-        """Simple DELETE requests (creating entry first)."""
-        self._register_local_delete(URI_USERS, 'dummy')
-        self._make_dummy_user()
-        self.csr.token = None  # Force login
-        self.csr.delete_request(URI_USERS + '/dummy')
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        # Delete again, but without logging in this time
-        self._make_dummy_user()
-        self.csr.delete_request(URI_USERS + '/dummy')
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-
-    def test_delete_non_existent_entry(self):
-        """Negative test of trying to delete a non-existent user."""
-        expected = {u'error-code': -1,
-                    u'error-message': u'user unknown not found'}
-        self._register_local_delete(URI_USERS, 'unknown',
-                                    result_code=requests.codes.NOT_FOUND,
-                                    json=expected)
-        actual = self.csr.delete_request(URI_USERS + '/unknown')
-        self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
-        self.assertDictSupersetOf(expected, actual)
-
-    def test_delete_not_allowed(self):
-        """Negative test of trying to delete the host-name."""
-        self._register_local_delete(
-            'global', 'host-name',
-            result_code=requests.codes.METHOD_NOT_ALLOWED)
-        self.csr.delete_request(URI_HOSTNAME)
-        self.assertEqual(requests.codes.METHOD_NOT_ALLOWED,
-                         self.csr.status)
-
-
-class TestCsrRestApiFailures(CiscoCsrBaseTestCase):
-
-    """Test failure cases common for all REST APIs.
-
-    Uses the lower level _do_request() to just perform the operation and get
-    the result, without any error handling.
-    """
-
-    def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=0.1):
-        """Setup for each test in this suite.
-
-        Each test case will have a normal authentication mock response
-        registered here, although they may replace it, as needed.
-        """
-        super(TestCsrRestApiFailures, self).setUp(host, tunnel_ip, timeout)
-        self._helper_register_auth_request()
-
-    def _simulate_timeout(self, request):
-        if URI_HOSTNAME in request.path_url:
-            raise r_exc.Timeout()
-
-    def test_request_for_non_existent_resource(self):
-        """Negative test of non-existent resource on REST request."""
-        self.requests.register_uri('POST',
-                                   LOCAL_URL + 'no/such/request',
-                                   status_code=requests.codes.NOT_FOUND)
-        self.csr.post_request('no/such/request')
-        self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
-        # The result is HTTP 404 message, so no error content to check
-
-    def _simulate_get_timeout(self, request):
-        """Will raise exception for any host request to this resource."""
-        if URI_HOSTNAME in request.path_url:
-            raise r_exc.Timeout()
-
-    def test_timeout_during_request(self):
-        """Negative test of timeout during REST request."""
-        self.requests.add_matcher(self._simulate_get_timeout)
-        self.csr._do_request('GET', URI_HOSTNAME)
-        self.assertEqual(requests.codes.REQUEST_TIMEOUT, self.csr.status)
-
-    def _simulate_auth_failure(self, request):
-        """First time auth POST is done, re-report unauthorized."""
-        if URI_AUTH in request.path_url and not self.called_once:
-            self.called_once = True
-            resp = requests.Response()
-            resp.status_code = requests.codes.UNAUTHORIZED
-            return resp
-
-    def test_token_expired_on_request(self):
-        """Token expired before trying a REST request.
-
-        First, the token is set to a bogus value, to force it to
-        try to authenticate on the GET request. Second, a mock that
-        runs once will simulate an auth failure. Third, the normal
-        auth mock will simulate success.
-        """
-
-        self._register_local_get(URI_HOSTNAME,
-                                 json={u'kind': u'object#host-name',
-                                       u'host-name': u'Router'})
-        self.called_once = False
-        self.requests.add_matcher(self._simulate_auth_failure)
-        self.csr.token = '123'  # Real tokens are 44 chars; '123' won't match
-        actual = self.csr._do_request('GET', URI_HOSTNAME)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertIn('host-name', actual)
-        self.assertIsNotNone(actual['host-name'])
-
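# A sketch of the "re-authenticate on 401 and retry once" behaviour that
# test_token_expired_on_request exercises above; the helper below is
# hypothetical and is not the client's real API.
import requests

def get_with_reauth(session, url, authenticate):
    """GET a URL, re-authenticating once if the saved token has expired."""
    reply = session.get(url)
    if reply.status_code == requests.codes.UNAUTHORIZED:
        authenticate(session)     # fetch a fresh token
        reply = session.get(url)  # retry exactly once
    return reply
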
-    def test_failed_to_obtain_token_for_request(self):
-        """Negative test of unauthorized user for REST request."""
-        self.csr.auth = ('stack', 'bogus')
-        self._register_local_get(URI_HOSTNAME,
-                                 result_code=requests.codes.UNAUTHORIZED)
-        self.csr._do_request('GET', URI_HOSTNAME)
-        self.assertEqual(requests.codes.UNAUTHORIZED, self.csr.status)
-
-
-class TestCsrRestIkePolicyCreate(CiscoCsrBaseTestCase):
-
-    """Test IKE policy create REST requests."""
-
-    def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
-        """Setup for each test in this suite.
-
-        Each test case will have a normal authentication and post mock
-        response registered, although the test may replace them, if needed.
-        """
-        super(TestCsrRestIkePolicyCreate, self).setUp(host, tunnel_ip, timeout)
-        self._helper_register_auth_request()
-        self._helper_register_ike_policy_post(2)
-
-    def _helper_register_ike_policy_get(self):
-        content = {u'kind': u'object#ike-policy',
-                   u'priority-id': u'2',
-                   u'version': u'v1',
-                   u'local-auth-method': u'pre-share',
-                   u'encryption': u'aes256',
-                   u'hash': u'sha',
-                   u'dhGroup': 5,
-                   u'lifetime': 3600}
-        self._register_local_get(URI_IKE_POLICY_ID % '2', json=content)
-
-    def test_create_delete_ike_policy(self):
-        """Create and then delete IKE policy."""
-        self._helper_register_ike_policy_get()
-        policy_info = {u'priority-id': u'2',
-                       u'encryption': u'aes256',
-                       u'hash': u'sha',
-                       u'dhGroup': 5,
-                       u'lifetime': 3600}
-        location = self.csr.create_ike_policy(policy_info)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IKE_POLICY_ID % '2', location)
-        # Check the hard-coded items that get set as well...
-        actual = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        expected_policy = {u'kind': u'object#ike-policy',
-                           u'version': u'v1',
-                           u'local-auth-method': u'pre-share'}
-        expected_policy.update(policy_info)
-        self.assertEqual(expected_policy, actual)
-
-        # Now delete and verify the IKE policy is gone
-        self._register_local_delete(URI_IKE_POLICY, 2)
-        self._register_local_get_not_found(URI_IKE_POLICY, 2)
-
-        self.csr.delete_ike_policy(2)
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        actual = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
-
-    def test_create_ike_policy_with_defaults(self):
-        """Create IKE policy using defaults for all optional values."""
-        policy = {u'kind': u'object#ike-policy',
-                  u'priority-id': u'2',
-                  u'version': u'v1',
-                  u'local-auth-method': u'pre-share',
-                  u'encryption': u'des',
-                  u'hash': u'sha',
-                  u'dhGroup': 1,
-                  u'lifetime': 86400}
-        self._register_local_get(URI_IKE_POLICY_ID % '2', json=policy)
-        policy_info = {u'priority-id': u'2'}
-        location = self.csr.create_ike_policy(policy_info)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IKE_POLICY_ID % '2', location)
-
-        # Check the hard-coded items that get set as well...
-        actual = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        expected_policy = {u'kind': u'object#ike-policy',
-                           u'version': u'v1',
-                           u'encryption': u'des',
-                           u'hash': u'sha',
-                           u'dhGroup': 1,
-                           u'lifetime': 86400,
-                           # Lower level sets this, but it is the default
-                           u'local-auth-method': u'pre-share'}
-        expected_policy.update(policy_info)
-        self.assertEqual(expected_policy, actual)
-
-    def test_create_duplicate_ike_policy(self):
-        """Negative test of trying to create a duplicate IKE policy."""
-        self._helper_register_ike_policy_get()
-        policy_info = {u'priority-id': u'2',
-                       u'encryption': u'aes',
-                       u'hash': u'sha',
-                       u'dhGroup': 5,
-                       u'lifetime': 3600}
-        location = self.csr.create_ike_policy(policy_info)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IKE_POLICY_ID % '2', location)
-        self.requests.register_uri(
-            'POST',
-            LOCAL_URL + URI_IKE_POLICY,
-            status_code=requests.codes.BAD_REQUEST,
-            json={u'error-code': -1,
-                  u'error-message': u'policy 2 exist, not allow to '
-                                    u'update policy using POST method'})
-        location = self.csr.create_ike_policy(policy_info)
-        self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
-        expected = {u'error-code': -1,
-                    u'error-message': u'policy 2 exist, not allow to '
-                    u'update policy using POST method'}
-        self.assertDictSupersetOf(expected, location)
-
-
-class TestCsrRestIPSecPolicyCreate(CiscoCsrBaseTestCase):
-
-    """Test IPSec policy create REST requests."""
-
-    def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
-        """Set up for each test in this suite.
-
-        Each test case will have a normal authentication and post mock
-        response registered, although the test may replace them, if needed.
-        """
-        super(TestCsrRestIPSecPolicyCreate, self).setUp(host,
-                                                        tunnel_ip,
-                                                        timeout)
-        self._helper_register_auth_request()
-        self._helper_register_ipsec_policy_post(123)
-
-    def _helper_register_ipsec_policy_get(self, override=None):
-        content = {u'kind': u'object#ipsec-policy',
-                   u'mode': u'tunnel',
-                   u'policy-id': u'123',
-                   u'protection-suite': {
-                       u'esp-encryption': u'esp-256-aes',
-                       u'esp-authentication': u'esp-sha-hmac',
-                       u'ah': u'ah-sha-hmac',
-                   },
-                   u'anti-replay-window-size': u'Disable',
-                   u'lifetime-sec': 120,
-                   u'pfs': u'group5',
-                   u'lifetime-kb': 4608000,
-                   u'idle-time': None}
-        if override:
-            content.update(override)
-        self._register_local_get(URI_IPSEC_POLICY + '/123', json=content)
-
-    def test_create_delete_ipsec_policy(self):
-        """Create and then delete IPSec policy."""
-        policy_info = {
-            u'policy-id': u'123',
-            u'protection-suite': {
-                u'esp-encryption': u'esp-256-aes',
-                u'esp-authentication': u'esp-sha-hmac',
-                u'ah': u'ah-sha-hmac',
-            },
-            u'lifetime-sec': 120,
-            u'pfs': u'group5',
-            u'anti-replay-window-size': u'disable'
-        }
-        location = self.csr.create_ipsec_policy(policy_info)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IPSEC_POLICY + '/123', location)
-
-        # Check the hard-coded items that get set as well...
-        self._helper_register_ipsec_policy_get()
-        actual = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        expected_policy = {u'kind': u'object#ipsec-policy',
-                           u'mode': u'tunnel',
-                           u'lifetime-kb': 4608000,
-                           u'idle-time': None}
-        expected_policy.update(policy_info)
-        # CSR will respond with capitalized value
-        expected_policy[u'anti-replay-window-size'] = u'Disable'
-        self.assertEqual(expected_policy, actual)
-
-        # Now delete and verify the IPSec policy is gone
-        self._register_local_delete(URI_IPSEC_POLICY, 123)
-        self._register_local_get_not_found(URI_IPSEC_POLICY, 123)
-
-        self.csr.delete_ipsec_policy('123')
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        actual = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
-
-    def test_create_ipsec_policy_with_defaults(self):
-        """Create IPSec policy with default for all optional values."""
-        policy_info = {u'policy-id': u'123'}
-        location = self.csr.create_ipsec_policy(policy_info)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IPSEC_POLICY + '/123', location)
-
-        # Check the hard-coded items that get set as well...
-        expected_policy = {u'kind': u'object#ipsec-policy',
-                           u'mode': u'tunnel',
-                           u'policy-id': u'123',
-                           u'protection-suite': {},
-                           u'lifetime-sec': 3600,
-                           u'pfs': u'Disable',
-                           u'anti-replay-window-size': u'None',
-                           u'lifetime-kb': 4608000,
-                           u'idle-time': None}
-        self._register_local_get(URI_IPSEC_POLICY + '/123',
-                                 json=expected_policy)
-
-        actual = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertEqual(expected_policy, actual)
-
-    def test_create_ipsec_policy_with_uuid(self):
-        """Create IPSec policy using UUID for id."""
-        # Override normal POST response w/one that has a different policy ID
-        self._helper_register_ipsec_policy_post(dummy_policy_id)
-        policy_info = {
-            u'policy-id': u'%s' % dummy_policy_id,
-            u'protection-suite': {
-                u'esp-encryption': u'esp-256-aes',
-                u'esp-authentication': u'esp-sha-hmac',
-                u'ah': u'ah-sha-hmac',
-            },
-            u'lifetime-sec': 120,
-            u'pfs': u'group5',
-            u'anti-replay-window-size': u'disable'
-        }
-        location = self.csr.create_ipsec_policy(policy_info)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IPSEC_POLICY_ID % dummy_policy_id, location)
-
-        # Check the hard-coded items that get set as well...
-        expected_policy = {u'kind': u'object#ipsec-policy',
-                           u'mode': u'tunnel',
-                           u'lifetime-kb': 4608000,
-                           u'idle-time': None}
-        expected_policy.update(policy_info)
-        # CSR will respond with capitalized value
-        expected_policy[u'anti-replay-window-size'] = u'Disable'
-        self._register_local_get(URI_IPSEC_POLICY_ID % dummy_policy_id,
-                                 json=expected_policy)
-        actual = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertEqual(expected_policy, actual)
-
-    def test_create_ipsec_policy_without_ah(self):
-        """Create IPSec policy."""
-        policy_info = {
-            u'policy-id': u'123',
-            u'protection-suite': {
-                u'esp-encryption': u'esp-aes',
-                u'esp-authentication': u'esp-sha-hmac',
-            },
-            u'lifetime-sec': 120,
-            u'pfs': u'group5',
-            u'anti-replay-window-size': u'128'
-        }
-        location = self.csr.create_ipsec_policy(policy_info)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IPSEC_POLICY_ID % '123', location)
-
-        # Check the hard-coded items that get set as well...
-        self._helper_register_ipsec_policy_get(
-            override={u'anti-replay-window-size': u'128',
-                      u'protection-suite': {
-                          u'esp-encryption': u'esp-aes',
-                          u'esp-authentication': u'esp-sha-hmac'}})
-
-        actual = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        expected_policy = {u'kind': u'object#ipsec-policy',
-                           u'mode': u'tunnel',
-                           u'lifetime-kb': 4608000,
-                           u'idle-time': None}
-        expected_policy.update(policy_info)
-        self.assertEqual(expected_policy, actual)
-
-    def test_invalid_ipsec_policy_lifetime(self):
-        """Failure test of IPSec policy with unsupported lifetime."""
-        # Override normal POST response with one that indicates bad request
-        self.requests.register_uri('POST',
-                                   LOCAL_URL + URI_IPSEC_POLICY,
-                                   status_code=requests.codes.BAD_REQUEST)
-        policy_info = {
-            u'policy-id': u'123',
-            u'protection-suite': {
-                u'esp-encryption': u'esp-aes',
-                u'esp-authentication': u'esp-sha-hmac',
-                u'ah': u'ah-sha-hmac',
-            },
-            u'lifetime-sec': 119,
-            u'pfs': u'group5',
-            u'anti-replay-window-size': u'128'
-        }
-        self.csr.create_ipsec_policy(policy_info)
-        self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
-
-    def test_create_ipsec_policy_with_invalid_name(self):
-        """Failure test of creating IPSec policy with name too long."""
-        # Override normal POST response with one that indicates bad request
-        self.requests.register_uri('POST',
-                                   LOCAL_URL + URI_IPSEC_POLICY,
-                                   status_code=requests.codes.BAD_REQUEST)
-        policy_info = {u'policy-id': u'policy-name-is-too-long-32-chars'}
-        self.csr.create_ipsec_policy(policy_info)
-        self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
-
-
-class TestCsrRestPreSharedKeyCreate(CiscoCsrBaseTestCase):
-
-    """Test Pre-shared key (PSK) create REST requests."""
-
-    def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
-        """Set up for each test in this suite.
-
-        Each test case will have a normal authentication and post mock
-        response registered, although the test may replace them, if needed.
-        """
-        super(TestCsrRestPreSharedKeyCreate, self).setUp(host,
-                                                         tunnel_ip,
-                                                         timeout)
-        self._helper_register_auth_request()
-        self._helper_register_psk_post(5)
-
-    def _helper_register_psk_get(self, override=None):
-        content = {u'kind': u'object#ike-keyring',
-                   u'keyring-name': u'5',
-                   u'pre-shared-key-list': [
-                       {u'key': u'super-secret',
-                        u'encrypted': False,
-                        u'peer-address': u'10.10.10.20 255.255.255.0'}
-                   ]}
-        if override:
-            content.update(override)
-        self._register_local_get(URI_PSK_ID % '5', json=content)
-
-    def test_create_delete_pre_shared_key(self):
-        """Create and then delete a keyring entry for pre-shared key."""
-        psk_info = {u'keyring-name': u'5',
-                    u'pre-shared-key-list': [
-                        {u'key': u'super-secret',
-                         u'encrypted': False,
-                         u'peer-address': u'10.10.10.20/24'}
-                    ]}
-        location = self.csr.create_pre_shared_key(psk_info)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_PSK_ID % '5', location)
-
-        # Check the hard-coded items that get set as well...
-        self._helper_register_psk_get()
-        content = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        expected_policy = {u'kind': u'object#ike-keyring'}
-        expected_policy.update(psk_info)
-        # Note: the peer CIDR is returned as an IP and mask
-        expected_policy[u'pre-shared-key-list'][0][u'peer-address'] = (
-            u'10.10.10.20 255.255.255.0')
-        self.assertEqual(expected_policy, content)
-
-        # Now delete and verify pre-shared key is gone
-        self._register_local_delete(URI_PSK, 5)
-        self._register_local_get_not_found(URI_PSK, 5)
-
-        self.csr.delete_pre_shared_key('5')
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        content = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
-
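# A worked example of the CIDR-to-"address mask" rewrite noted in the test
# above (the CSR reports peer-address as an IP plus netmask rather than a
# CIDR); the stdlib ipaddress module is used here purely for illustration.
import ipaddress

peer = ipaddress.ip_interface(u'10.10.10.20/24')
assert '%s %s' % (peer.ip, peer.netmask) == '10.10.10.20 255.255.255.0'
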
-    def test_create_pre_shared_key_with_fqdn_peer(self):
-        """Create pre-shared key using FQDN for peer address."""
-        psk_info = {u'keyring-name': u'5',
-                    u'pre-shared-key-list': [
-                        {u'key': u'super-secret',
-                         u'encrypted': False,
-                         u'peer-address': u'cisco.com'}
-                    ]}
-        location = self.csr.create_pre_shared_key(psk_info)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_PSK_ID % '5', location)
-
-        # Check the hard-coded items that get set as well...
-        self._helper_register_psk_get(
-            override={u'pre-shared-key-list': [
-                          {u'key': u'super-secret',
-                           u'encrypted': False,
-                           u'peer-address': u'cisco.com'}
-                      ]}
-        )
-        content = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        expected_policy = {u'kind': u'object#ike-keyring'}
-        expected_policy.update(psk_info)
-        self.assertEqual(expected_policy, content)
-
-
-class TestCsrRestIPSecConnectionCreate(CiscoCsrBaseTestCase):
-
-    """Test IPSec site-to-site connection REST requests.
-
-    This requires us to have first created an IKE policy, IPSec policy,
-    and pre-shared key, so it's more of an integration test when used
-    with a real CSR (as we can't mock out these pre-conditions).
-    """
-
-    def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
-        """Setup for each test in this suite.
-
-        Each test case will have a normal authentication mock response
-        registered here, although they may replace it, as needed.
-        """
-        super(TestCsrRestIPSecConnectionCreate, self).setUp(host,
-                                                            tunnel_ip,
-                                                            timeout)
-        self._helper_register_auth_request()
-        self.route_id = '10.1.0.0_24_GigabitEthernet1'
-
-    def _make_psk_for_test(self):
-        psk_id = generate_pre_shared_key_id()
-        self._remove_resource_for_test(self.csr.delete_pre_shared_key,
-                                       psk_id)
-        self._helper_register_psk_post(psk_id)
-        psk_info = {u'keyring-name': u'%d' % psk_id,
-                    u'pre-shared-key-list': [
-                        {u'key': u'super-secret',
-                         u'encrypted': False,
-                         u'peer-address': u'10.10.10.20/24'}
-                    ]}
-        self.csr.create_pre_shared_key(psk_info)
-        if self.csr.status != requests.codes.CREATED:
-            self.fail("Unable to create PSK for test case")
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_pre_shared_key, psk_id)
-        return psk_id
-
-    def _make_ike_policy_for_test(self):
-        policy_id = generate_ike_policy_id()
-        self._remove_resource_for_test(self.csr.delete_ike_policy,
-                                       policy_id)
-        self._helper_register_ike_policy_post(policy_id)
-        policy_info = {u'priority-id': u'%d' % policy_id,
-                       u'encryption': u'aes',
-                       u'hash': u'sha',
-                       u'dhGroup': 5,
-                       u'lifetime': 3600}
-        self.csr.create_ike_policy(policy_info)
-        if self.csr.status != requests.codes.CREATED:
-            self.fail("Unable to create IKE policy for test case")
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_ike_policy, policy_id)
-        return policy_id
-
-    def _make_ipsec_policy_for_test(self):
-        policy_id = generate_ipsec_policy_id()
-        self._remove_resource_for_test(self.csr.delete_ipsec_policy,
-                                       policy_id)
-        self._helper_register_ipsec_policy_post(policy_id)
-        policy_info = {
-            u'policy-id': u'%d' % policy_id,
-            u'protection-suite': {
-                u'esp-encryption': u'esp-aes',
-                u'esp-authentication': u'esp-sha-hmac',
-                u'ah': u'ah-sha-hmac',
-            },
-            u'lifetime-sec': 120,
-            u'pfs': u'group5',
-            u'anti-replay-window-size': u'disable'
-        }
-        self.csr.create_ipsec_policy(policy_info)
-        if self.csr.status != requests.codes.CREATED:
-            self.fail("Unable to create IPSec policy for test case")
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_ipsec_policy, policy_id)
-        return policy_id
-
-    def _remove_resource_for_test(self, delete_resource, resource_id):
-        self._register_local_delete_by_id(resource_id)
-        delete_resource(resource_id)
-
-    def _prepare_for_site_conn_create(self, skip_psk=False, skip_ike=False,
-                                      skip_ipsec=False):
-        """Create the policies and PSK so can then create site conn."""
-        if not skip_psk:
-            ike_policy_id = self._make_psk_for_test()
-        else:
-            ike_policy_id = generate_ike_policy_id()
-        if not skip_ike:
-            self._make_ike_policy_for_test()
-        if not skip_ipsec:
-            ipsec_policy_id = self._make_ipsec_policy_for_test()
-        else:
-            ipsec_policy_id = generate_ipsec_policy_id()
-        # Note: Use the same ID number for the tunnel and the IPSec policy,
-        # so that when getting tunnel info the mocks can infer the IPSec
-        # policy ID from the tunnel number.
-        return (ike_policy_id, ipsec_policy_id, ipsec_policy_id)
-
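# A tiny example of the ID-inference convention noted above: the tunnel name
# embeds the IPSec policy ID, so a mock can recover it by stripping the
# 'Tunnel' prefix (the tunnel[6:] slice used in the helper below).
tunnel_name = u'Tunnel321'
assert tunnel_name[len('Tunnel'):] == '321'
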
-    def _helper_register_ipsec_conn_get(self, tunnel, override=None):
-        # Use same number, to allow mock to generate IPSec policy ID
-        ipsec_policy_id = tunnel[6:]
-        content = {u'kind': u'object#vpn-site-to-site',
-                   u'vpn-interface-name': u'%s' % tunnel,
-                   u'ip-version': u'ipv4',
-                   u'vpn-type': u'site-to-site',
-                   u'ipsec-policy-id': u'%s' % ipsec_policy_id,
-                   u'ike-profile-id': None,
-                   u'mtu': 1500,
-                   u'tunnel-vrf': TEST_VRF,
-                   u'local-device': {
-                       u'ip-address': '10.3.0.1/24',
-                       u'tunnel-ip-address': '10.10.10.10'
-                   },
-                   u'remote-device': {
-                       u'tunnel-ip-address': '10.10.10.20'
-                   }}
-        if override:
-            content.update(override)
-        self._register_local_get(URI_IPSEC_CONN_ID % tunnel, json=content)
-
-    def test_create_delete_ipsec_connection(self):
-        """Create and then delete an IPSec connection."""
-        ike_policy_id, ipsec_policy_id, tunnel_id = (
-            self._prepare_for_site_conn_create())
-        tunnel_name = u'Tunnel%s' % tunnel_id
-        self._helper_register_tunnel_post(tunnel_name)
-        self._register_local_post(URI_ROUTES, self.route_id)
-        connection_info = {
-            u'vpn-interface-name': tunnel_name,
-            u'ipsec-policy-id': u'%d' % ipsec_policy_id,
-            u'mtu': 1500,
-            u'local-device': {u'ip-address': u'10.3.0.1/24',
-                              u'tunnel-ip-address': u'10.10.10.10'},
-            u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
-        }
-        expected_connection = {u'kind': u'object#vpn-site-to-site',
-                               u'ike-profile-id': None,
-                               u'vpn-type': u'site-to-site',
-                               u'mtu': 1500,
-                               u'tunnel-vrf': TEST_VRF,
-                               u'ip-version': u'ipv4'}
-        expected_connection.update(connection_info)
-        location = self.csr.create_ipsec_connection(connection_info)
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_ipsec_connection,
-                        tunnel_name)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
-
-        # Check the hard-coded items that get set as well...
-        self._helper_register_ipsec_conn_get(tunnel_name)
-        content = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertEqual(expected_connection, content)
-
-        # Now delete and verify that site-to-site connection is gone
-        self._register_local_delete_by_id(tunnel_name)
-        self._register_local_delete_by_id(ipsec_policy_id)
-        self._register_local_delete_by_id(ike_policy_id)
-        self._register_local_get_not_found(URI_IPSEC_CONN,
-                                           tunnel_name)
-        # Only delete connection. Cleanup will take care of prerequisites
-        self.csr.delete_ipsec_connection(tunnel_name)
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        content = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
-
-    def test_create_ipsec_connection_with_no_tunnel_subnet(self):
-        """Create an IPSec connection without an IP address on tunnel."""
-        _, ipsec_policy_id, tunnel_id = (
-            self._prepare_for_site_conn_create())
-        tunnel_name = u'Tunnel%s' % tunnel_id
-        self._helper_register_tunnel_post(tunnel_name)
-        self._register_local_post(URI_ROUTES, self.route_id)
-        connection_info = {
-            u'vpn-interface-name': tunnel_name,
-            u'ipsec-policy-id': u'%d' % ipsec_policy_id,
-            u'local-device': {u'ip-address': u'GigabitEthernet3',
-                              u'tunnel-ip-address': u'10.10.10.10'},
-            u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
-        }
-        expected_connection = {u'kind': u'object#vpn-site-to-site',
-                               u'ike-profile-id': None,
-                               u'vpn-type': u'site-to-site',
-                               u'mtu': 1500,
-                               u'tunnel-vrf': TEST_VRF,
-                               u'ip-version': u'ipv4'}
-        expected_connection.update(connection_info)
-        location = self.csr.create_ipsec_connection(connection_info)
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_ipsec_connection,
-                        tunnel_name)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
-
-        # Check the hard-coded items that get set as well...
-        self._helper_register_ipsec_conn_get(tunnel_name, override={
-            u'local-device': {
-                u'ip-address': u'GigabitEthernet3',
-                u'tunnel-ip-address': u'10.10.10.10'
-            }})
-        content = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertEqual(expected_connection, content)
-
-    def test_create_ipsec_connection_no_pre_shared_key(self):
-        """Test of connection create without associated pre-shared key.
-
-        The CSR will create the connection, but will not be able to pass
-        traffic without the pre-shared key.
-        """
-
-        _, ipsec_policy_id, tunnel_id = (
-            self._prepare_for_site_conn_create(skip_psk=True))
-        tunnel_name = u'Tunnel%s' % tunnel_id
-        self._helper_register_tunnel_post(tunnel_name)
-        self._register_local_post(URI_ROUTES, self.route_id)
-        connection_info = {
-            u'vpn-interface-name': tunnel_name,
-            u'ipsec-policy-id': u'%d' % ipsec_policy_id,
-            u'mtu': 1500,
-            u'local-device': {u'ip-address': u'10.3.0.1/24',
-                              u'tunnel-ip-address': u'10.10.10.10'},
-            u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
-        }
-        expected_connection = {u'kind': u'object#vpn-site-to-site',
-                               u'ike-profile-id': None,
-                               u'tunnel-vrf': TEST_VRF,
-                               u'vpn-type': u'site-to-site',
-                               u'ip-version': u'ipv4'}
-        expected_connection.update(connection_info)
-        location = self.csr.create_ipsec_connection(connection_info)
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_ipsec_connection,
-                        tunnel_name)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
-
-        # Check the hard-coded items that get set as well...
-        self._helper_register_ipsec_conn_get(tunnel_name)
-        content = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertEqual(expected_connection, content)
-
-    def test_create_ipsec_connection_with_default_ike_policy(self):
-        """Test of connection create without IKE policy (uses default).
-
-        Without an IKE policy, the CSR will use a built-in default IKE
-        policy setting for the connection.
-        """
-
-        _, ipsec_policy_id, tunnel_id = (
-            self._prepare_for_site_conn_create(skip_ike=True))
-        tunnel_name = u'Tunnel%s' % tunnel_id
-        self._helper_register_tunnel_post(tunnel_name)
-        self._register_local_post(URI_ROUTES, self.route_id)
-        connection_info = {
-            u'vpn-interface-name': tunnel_name,
-            u'ipsec-policy-id': u'%d' % ipsec_policy_id,
-            u'mtu': 1500,
-            u'local-device': {u'ip-address': u'10.3.0.1/24',
-                              u'tunnel-ip-address': u'10.10.10.10'},
-            u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
-        }
-        expected_connection = {u'kind': u'object#vpn-site-to-site',
-                               u'ike-profile-id': None,
-                               u'tunnel-vrf': TEST_VRF,
-                               u'vpn-type': u'site-to-site',
-                               u'ip-version': u'ipv4'}
-        expected_connection.update(connection_info)
-        location = self.csr.create_ipsec_connection(connection_info)
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_ipsec_connection,
-                        tunnel_name)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
-
-        # Check the hard-coded items that get set as well...
-        self._helper_register_ipsec_conn_get(tunnel_name)
-        content = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertEqual(expected_connection, content)
-
-    def test_set_ipsec_connection_admin_state_changes(self):
-        """Create IPSec connection in admin down state."""
-        _, ipsec_policy_id, tunnel_id = (
-            self._prepare_for_site_conn_create())
-        tunnel_name = u'Tunnel%s' % tunnel_id
-        self._helper_register_tunnel_post(tunnel_name)
-        self._register_local_post(URI_ROUTES, self.route_id)
-        connection_info = {
-            u'vpn-interface-name': tunnel_name,
-            u'ipsec-policy-id': u'%d' % ipsec_policy_id,
-            u'mtu': 1500,
-            u'local-device': {u'ip-address': u'10.3.0.1/24',
-                              u'tunnel-ip-address': u'10.10.10.10'},
-            u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
-        }
-        location = self.csr.create_ipsec_connection(connection_info)
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_ipsec_connection,
-                        tunnel_name)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
-
-        state_url = location + "/state"
-        state_uri = URI_IPSEC_CONN_ID % tunnel_name + '/state'
-        # Note: When created, the tunnel will be in the admin 'up' state.
-        # Note: Line protocol will be down unless a connection is active.
-        expected_state = {u'kind': u'object#vpn-site-to-site-state',
-                          u'vpn-interface-name': tunnel_name,
-                          u'line-protocol-state': u'down',
-                          u'enabled': False}
-        self._register_local_put(URI_IPSEC_CONN_ID % tunnel_name, 'state')
-        self.csr.set_ipsec_connection_state(tunnel_name, admin_up=False)
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-
-        self._register_local_get(state_uri, json=expected_state)
-        content = self.csr.get_request(state_url, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertEqual(expected_state, content)
-
-        self.csr.set_ipsec_connection_state(tunnel_name, admin_up=True)
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        expected_state = {u'kind': u'object#vpn-site-to-site-state',
-                          u'vpn-interface-name': tunnel_name,
-                          u'line-protocol-state': u'down',
-                          u'enabled': True}
-        self._register_local_get(state_uri, json=expected_state)
-        content = self.csr.get_request(state_url, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertEqual(expected_state, content)
-
-    def test_create_ipsec_connection_missing_ipsec_policy(self):
-        """Negative test of connection create without IPSec policy."""
-        _, ipsec_policy_id, tunnel_id = (
-            self._prepare_for_site_conn_create(skip_ipsec=True))
-        tunnel_name = u'Tunnel%s' % tunnel_id
-        self._register_local_post(URI_IPSEC_CONN, tunnel_name,
-                                  result_code=requests.codes.BAD_REQUEST)
-        connection_info = {
-            u'vpn-interface-name': tunnel_name,
-            u'ipsec-policy-id': u'%d' % ipsec_policy_id,
-            u'local-device': {u'ip-address': u'10.3.0.1/24',
-                              u'tunnel-ip-address': u'10.10.10.10'},
-            u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
-        }
-        self.csr.create_ipsec_connection(connection_info)
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_ipsec_connection,
-                        'Tunnel%d' % tunnel_id)
-        self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
-
-    def _determine_conflicting_ip(self):
-        content = {u'kind': u'object#interface',
-                   u'subnet-mask': u'255.255.255.0',
-                   u'ip-address': u'10.5.0.2'}
-        self._register_local_get('interfaces/GigabitEthernet3', json=content)
-        details = self.csr.get_request('interfaces/GigabitEthernet3')
-        if self.csr.status != requests.codes.OK:
-            self.fail("Unable to obtain interface GigabitEthernet3's IP")
-        if_ip = details.get('ip-address')
-        if not if_ip:
-            self.fail("No IP address for GigabitEthernet3 interface")
-        return '.'.join(if_ip.split('.')[:3]) + '.10'
-
-    def test_create_ipsec_connection_conflicting_tunnel_ip(self):
-        """Negative test of connection create with conflicting tunnel IP.
-
-        Find out the IP of a local interface (GigabitEthernet3) and create an
-        IP that is on the same subnet. Note: this interface needs to be up.
-        """
-
-        conflicting_ip = self._determine_conflicting_ip()
-        _, ipsec_policy_id, tunnel_id = (
-            self._prepare_for_site_conn_create())
-        tunnel_name = u'Tunnel%s' % tunnel_id
-        self._register_local_post(URI_IPSEC_CONN, tunnel_name,
-                                  result_code=requests.codes.BAD_REQUEST)
-        connection_info = {
-            u'vpn-interface-name': tunnel_name,
-            u'ipsec-policy-id': u'%d' % ipsec_policy_id,
-            u'local-device': {u'ip-address': u'%s/24' % conflicting_ip,
-                              u'tunnel-ip-address': u'10.10.10.10'},
-            u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
-        }
-        self.csr.create_ipsec_connection(connection_info)
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_ipsec_connection,
-                        tunnel_name)
-        self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
-
-    def test_create_ipsec_connection_with_max_mtu(self):
-        """Create an IPSec connection with max MTU value."""
-        _, ipsec_policy_id, tunnel_id = (
-            self._prepare_for_site_conn_create())
-        tunnel_name = u'Tunnel%s' % tunnel_id
-        self._helper_register_tunnel_post(tunnel_name)
-        self._register_local_post(URI_ROUTES, self.route_id)
-        connection_info = {
-            u'vpn-interface-name': tunnel_name,
-            u'ipsec-policy-id': u'%d' % ipsec_policy_id,
-            u'mtu': 9192,
-            u'local-device': {u'ip-address': u'10.3.0.1/24',
-                              u'tunnel-ip-address': u'10.10.10.10'},
-            u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
-        }
-        expected_connection = {u'kind': u'object#vpn-site-to-site',
-                               u'ike-profile-id': None,
-                               u'tunnel-vrf': TEST_VRF,
-                               u'vpn-type': u'site-to-site',
-                               u'ip-version': u'ipv4'}
-        expected_connection.update(connection_info)
-        location = self.csr.create_ipsec_connection(connection_info)
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_ipsec_connection,
-                        tunnel_name)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
-
-        # Check the hard-coded items that get set as well...
-        self._helper_register_ipsec_conn_get(tunnel_name, override={
-            u'mtu': 9192})
-        content = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertEqual(expected_connection, content)
-
-    def test_create_ipsec_connection_with_bad_mtu(self):
-        """Negative test of connection create with unsupported MTU value."""
-        _, ipsec_policy_id, tunnel_id = (
-            self._prepare_for_site_conn_create())
-        tunnel_name = u'Tunnel%s' % tunnel_id
-        self._register_local_post(URI_IPSEC_CONN, tunnel_name,
-                                  result_code=requests.codes.BAD_REQUEST)
-        connection_info = {
-            u'vpn-interface-name': tunnel_name,
-            u'ipsec-policy-id': u'%d' % ipsec_policy_id,
-            u'mtu': 9193,
-            u'local-device': {u'ip-address': u'10.3.0.1/24',
-                              u'tunnel-ip-address': u'10.10.10.10'},
-            u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
-        }
-        self.csr.create_ipsec_connection(connection_info)
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_ipsec_connection,
-                        tunnel_name)
-        self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
-
-    def test_status_when_no_tunnels_exist(self):
-        """Get status when there are no tunnels."""
-        content = {u'kind': u'collection#vpn-active-sessions',
-                   u'items': []}
-        self._register_local_get(URI_SESSIONS, json=content)
-        tunnels = self.csr.read_tunnel_statuses()
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertEqual([], tunnels)
-
-    def test_status_for_one_tunnel(self):
-        """Get status of one tunnel."""
-        # Create the IPsec site-to-site connection first
-        _, ipsec_policy_id, tunnel_id = (
-            self._prepare_for_site_conn_create())
-        tunnel_name = u'Tunnel%s' % tunnel_id
-        self._helper_register_tunnel_post(tunnel_name)
-        self._register_local_post(URI_ROUTES, self.route_id)
-        connection_info = {
-            u'vpn-interface-name': tunnel_name,
-            u'ipsec-policy-id': u'%d' % ipsec_policy_id,
-            u'local-device': {u'ip-address': u'10.3.0.1/24',
-                              u'tunnel-ip-address': u'10.10.10.10'},
-            u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
-        }
-        location = self.csr.create_ipsec_connection(connection_info)
-        self.addCleanup(self._remove_resource_for_test,
-                        self.csr.delete_ipsec_connection,
-                        tunnel_name)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
-
-        # Now, check the status
-        content = {u'kind': u'collection#vpn-active-sessions',
-                   u'items': [{u'status': u'DOWN-NEGOTIATING',
-                               u'vpn-interface-name': tunnel_name}, ]}
-        self._register_local_get(URI_SESSIONS, json=content)
-        self._helper_register_ipsec_conn_get(tunnel_name)
-        tunnels = self.csr.read_tunnel_statuses()
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertEqual([(tunnel_name, u'DOWN-NEGOTIATING'), ], tunnels)
-
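For reference, read_tunnel_statuses() is expected to reduce the mocked 'vpn-active-sessions' collection to (tunnel name, status) pairs. A minimal sketch of that reduction, assuming only the payload shape used by these mocks:

def sessions_to_tunnel_statuses(sessions):
    # Sketch only: pull (interface name, status) pairs out of the
    # active-sessions collection returned by the CSR.
    return [(item[u'vpn-interface-name'], item[u'status'])
            for item in sessions.get(u'items', [])]


# Mirrors the mocked payload above:
# sessions_to_tunnel_statuses(
#     {u'items': [{u'status': u'DOWN-NEGOTIATING',
#                  u'vpn-interface-name': u'Tunnel123'}]})
# -> [(u'Tunnel123', u'DOWN-NEGOTIATING')]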
-
-class TestCsrRestIkeKeepaliveCreate(CiscoCsrBaseTestCase):
-
-    """Test IKE keepalive REST requests.
-
-    Note: On the Cisco CSR, the IKE keepalive for v1 is a global configuration
-    that applies to all VPN tunnels to specify Dead Peer Detection information.
-    As a result, this REST API is not used in the OpenStack device driver, and
-    the keepalive will default to zero (disabled).
-    """
-
-    def _save_dpd_info(self):
-        details = self.csr.get_request(URI_KEEPALIVE)
-        if self.csr.status == requests.codes.OK:
-            self.dpd = details
-            self.addCleanup(self._restore_dpd_info)
-        elif self.csr.status != requests.codes.NOT_FOUND:
-            self.fail("Unable to save original DPD info")
-
-    def _restore_dpd_info(self):
-        payload = {'interval': self.dpd['interval'],
-                   'retry': self.dpd['retry']}
-        self.csr.put_request(URI_KEEPALIVE, payload=payload)
-        if self.csr.status != requests.codes.NO_CONTENT:
-            self.fail("Unable to restore DPD info after test")
-
-    def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
-        """Set up for each test in this suite.
-
-        Each test case will have normal authentication, get, and put mock
-        responses registered, although the test may replace them if needed.
-        Dead Peer Detection settings will be saved for each test, and
-        restored afterwards.
-        """
-        super(TestCsrRestIkeKeepaliveCreate, self).setUp(host,
-                                                         tunnel_ip,
-                                                         timeout)
-        self._helper_register_auth_request()
-        self._helper_register_keepalive_get()
-        self._register_local_put('vpn-svc/ike', 'keepalive')
-        self._save_dpd_info()
-        self.csr.token = None
-
-    def _helper_register_keepalive_get(self, override=None):
-        content = {u'interval': 60,
-                   u'retry': 4,
-                   u'periodic': True}
-        if override:
-            content.update(override)
-        self._register_local_get(URI_KEEPALIVE, json=content)
-
-    def test_configure_ike_keepalive(self):
-        """Set IKE keep-alive (aka Dead Peer Detection) for the CSR."""
-        keepalive_info = {'interval': 60, 'retry': 4}
-        self.csr.configure_ike_keepalive(keepalive_info)
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        content = self.csr.get_request(URI_KEEPALIVE)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        expected = {'periodic': False}
-        expected.update(keepalive_info)
-        self.assertDictSupersetOf(expected, content)
-
-    def test_disable_ike_keepalive(self):
-        """Disable IKE keep-alive (aka Dead Peer Detection) for the CSR."""
-        keepalive_info = {'interval': 0, 'retry': 4}
-        self.csr.configure_ike_keepalive(keepalive_info)
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-
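As the class docstring notes, the v1 keepalive is a global CSR setting, so the OpenStack device driver leaves it disabled. A minimal sketch of what "disabled" amounts to, reusing configure_ike_keepalive() exactly as the test above does (the wrapper name disable_dpd is hypothetical):

import requests


def disable_dpd(csr):
    # Sketch only: a zero interval disables IKE keepalive (DPD) globally,
    # matching the behaviour exercised by test_disable_ike_keepalive.
    csr.configure_ike_keepalive({'interval': 0, 'retry': 4})
    return csr.status == requests.codes.NO_CONTENT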
-
-class TestCsrRestStaticRoute(CiscoCsrBaseTestCase):
-
-    """Test static route REST requests.
-
-    A static route is added for the peer's private network. One route is
-    created for each of the peer CIDRs specified for the VPN connection.
-    """
-
-    def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
-        """Set up for each test in this suite.
-
-        Each test case will have a normal authentication mock response
-        registered, although the test may replace it if needed.
-        """
-        super(TestCsrRestStaticRoute, self).setUp(host, tunnel_ip, timeout)
-        self._helper_register_auth_request()
-
-    def test_create_delete_static_route(self):
-        """Create and then delete a static route for the tunnel."""
-        expected_id = '10.1.0.0_24_GigabitEthernet1'
-        self._register_local_post(URI_ROUTES, resource_id=expected_id)
-        cidr = u'10.1.0.0/24'
-        interface = u'GigabitEthernet1'
-        route_info = {u'destination-network': cidr,
-                      u'outgoing-interface': interface}
-        location = self.csr.create_static_route(route_info)
-        self.assertEqual(requests.codes.CREATED, self.csr.status)
-        self.assertIn(URI_ROUTES_ID % expected_id, location)
-
-        # Check the hard-coded items that get set as well...
-        expected_route = {u'destination-network': u'10.1.0.0/24',
-                          u'kind': u'object#static-route',
-                          u'next-hop-router': None,
-                          u'outgoing-interface': u'GigabitEthernet1',
-                          u'admin-distance': 1}
-        self._register_local_get(URI_ROUTES_ID % expected_id,
-                                 json=expected_route)
-        content = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.OK, self.csr.status)
-        self.assertEqual(expected_route, content)
-
-        # Now delete and verify that static route is gone
-        self._register_local_delete(URI_ROUTES, expected_id)
-        self._register_local_get_not_found(URI_ROUTES, expected_id)
-        route_id = csr_client.make_route_id(cidr, interface)
-        self.csr.delete_static_route(route_id)
-        self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
-        content = self.csr.get_request(location, full_url=True)
-        self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
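The route ID asserted above ('10.1.0.0_24_GigabitEthernet1') suggests that csr_client.make_route_id() simply joins the CIDR (with '/' replaced by '_') and the outgoing interface. A sketch of that mapping, inferred from the test data rather than from the client source:

def make_route_id(cidr, interface):
    # Inferred from the expected_id in the test above; the real
    # csr_client.make_route_id() may differ in detail.
    return '%s_%s' % (cidr.replace('/', '_'), interface)


# make_route_id(u'10.1.0.0/24', u'GigabitEthernet1')
# -> u'10.1.0.0_24_GigabitEthernet1'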
diff --git a/neutron/tests/unit/services/vpn/device_drivers/test_cisco_ipsec.py b/neutron/tests/unit/services/vpn/device_drivers/test_cisco_ipsec.py
deleted file mode 100644 (file)
index f9bd534..0000000
+++ /dev/null
@@ -1,1553 +0,0 @@
-# Copyright 2014 Cisco Systems, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-import httplib
-import operator
-
-import mock
-
-from neutron import context
-from neutron.openstack.common import uuidutils
-from neutron.plugins.common import constants
-from neutron.services.vpn.device_drivers import (
-    cisco_csr_rest_client as csr_client)
-from neutron.services.vpn.device_drivers import cisco_ipsec as ipsec_driver
-from neutron.tests import base
-
-_uuid = uuidutils.generate_uuid
-FAKE_HOST = 'fake_host'
-FAKE_ROUTER_ID = _uuid()
-FAKE_VPN_SERVICE = {
-    'id': _uuid(),
-    'router_id': FAKE_ROUTER_ID,
-    'admin_state_up': True,
-    'status': constants.PENDING_CREATE,
-    'subnet': {'cidr': '10.0.0.0/24'},
-    'ipsec_site_connections': [
-        {'peer_cidrs': ['20.0.0.0/24',
-                        '30.0.0.0/24']},
-        {'peer_cidrs': ['40.0.0.0/24',
-                        '50.0.0.0/24']}]
-}
-FIND_CFG_FOR_CSRS = ('neutron.services.vpn.device_drivers.cisco_ipsec.'
-                     'find_available_csrs_from_config')
-
-
-class TestCiscoCsrIPSecConnection(base.BaseTestCase):
-    def setUp(self):
-        super(TestCiscoCsrIPSecConnection, self).setUp()
-        self.conn_info = {
-            u'id': '123',
-            u'status': constants.PENDING_CREATE,
-            u'admin_state_up': True,
-            'psk': 'secret',
-            'peer_address': '192.168.1.2',
-            'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'],
-            'mtu': 1500,
-            'ike_policy': {'auth_algorithm': 'sha1',
-                           'encryption_algorithm': 'aes-128',
-                           'pfs': 'Group5',
-                           'ike_version': 'v1',
-                           'lifetime_units': 'seconds',
-                           'lifetime_value': 3600},
-            'ipsec_policy': {'transform_protocol': 'ah',
-                             'encryption_algorithm': 'aes-128',
-                             'auth_algorithm': 'sha1',
-                             'pfs': 'group5',
-                             'lifetime_units': 'seconds',
-                             'lifetime_value': 3600},
-            'cisco': {'site_conn_id': 'Tunnel0',
-                      'ike_policy_id': 222,
-                      'ipsec_policy_id': 333}
-        }
-        self.csr = mock.Mock(spec=csr_client.CsrRestClient)
-        self.csr.status = 201  # All calls to CSR REST API succeed
-        self.ipsec_conn = ipsec_driver.CiscoCsrIPSecConnection(self.conn_info,
-                                                               self.csr)
-
-    def test_create_ipsec_site_connection(self):
-        """Ensure all steps are done to create an IPSec site connection.
-
-        Verify that each of the driver calls occur (in order), and
-        the right information is stored for later deletion.
-        """
-        expected = ['create_pre_shared_key',
-                    'create_ike_policy',
-                    'create_ipsec_policy',
-                    'create_ipsec_connection',
-                    'create_static_route',
-                    'create_static_route']
-        expected_rollback_steps = [
-            ipsec_driver.RollbackStep(action='pre_shared_key',
-                                      resource_id='123',
-                                      title='Pre-Shared Key'),
-            ipsec_driver.RollbackStep(action='ike_policy',
-                                      resource_id=222,
-                                      title='IKE Policy'),
-            ipsec_driver.RollbackStep(action='ipsec_policy',
-                                      resource_id=333,
-                                      title='IPSec Policy'),
-            ipsec_driver.RollbackStep(action='ipsec_connection',
-                                      resource_id='Tunnel0',
-                                      title='IPSec Connection'),
-            ipsec_driver.RollbackStep(action='static_route',
-                                      resource_id='10.1.0.0_24_Tunnel0',
-                                      title='Static Route'),
-            ipsec_driver.RollbackStep(action='static_route',
-                                      resource_id='10.2.0.0_24_Tunnel0',
-                                      title='Static Route')]
-        self.ipsec_conn.create_ipsec_site_connection(mock.Mock(),
-                                                     self.conn_info)
-        client_calls = [c[0] for c in self.csr.method_calls]
-        self.assertEqual(expected, client_calls)
-        self.assertEqual(expected_rollback_steps, self.ipsec_conn.steps)
-
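The test checks both the order of the CSR client calls and the rollback bookkeeping left behind. A rough sketch of that bookkeeping pattern, illustrative only (create_and_record is a hypothetical helper, not the driver's actual method):

import collections

RollbackStep = collections.namedtuple('RollbackStep',
                                      ['action', 'resource_id', 'title'])


def create_and_record(csr, steps, action, info, resource_id, title):
    # Sketch only: call the client's create_<action>() and remember what
    # was made, so a later failure can be rolled back in reverse order.
    getattr(csr, 'create_%s' % action)(info)
    steps.append(RollbackStep(action, resource_id, title))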
-    def test_create_ipsec_site_connection_with_rollback(self):
-        """Failure test of IPSec site conn creation that fails and rolls back.
-
-        Simulate a failure in the last create step (making routes for the
-        peer networks), and ensure that the create steps are called in
-        order (except for create_static_route), and that the delete
-        steps are called in reverse order. At the end, there should be no
-        rollback information for the connection.
-        """
-        def fake_route_check_fails(*args):
-            if args[0] == 'Static Route':
-                # So that subsequent calls to CSR rest client (for rollback)
-                # will fake as passing.
-                self.csr.status = httplib.NO_CONTENT
-                raise ipsec_driver.CsrResourceCreateFailure(resource=args[0],
-                                                            which=args[1])
-
-        with mock.patch.object(ipsec_driver.CiscoCsrIPSecConnection,
-                               '_check_create',
-                               side_effect=fake_route_check_fails):
-
-            expected = ['create_pre_shared_key',
-                        'create_ike_policy',
-                        'create_ipsec_policy',
-                        'create_ipsec_connection',
-                        'create_static_route',
-                        'delete_ipsec_connection',
-                        'delete_ipsec_policy',
-                        'delete_ike_policy',
-                        'delete_pre_shared_key']
-            self.ipsec_conn.create_ipsec_site_connection(mock.Mock(),
-                                                         self.conn_info)
-            client_calls = [c[0] for c in self.csr.method_calls]
-            self.assertEqual(expected, client_calls)
-            self.assertEqual([], self.ipsec_conn.steps)
-
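The rollback expectation above is just the recorded steps replayed as deletes in reverse order. A minimal sketch of that reversal (illustrative, not the driver's do_rollback() itself):

def rollback(csr, steps):
    # Sketch only: undo recorded create steps newest-first and clear the
    # bookkeeping, matching the reversed delete order asserted above.
    for step in reversed(steps):
        getattr(csr, 'delete_%s' % step.action)(step.resource_id)
    del steps[:]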
-    def test_create_verification_with_error(self):
-        """Negative test of a failed create check step."""
-        self.csr.status = httplib.NOT_FOUND
-        self.assertRaises(ipsec_driver.CsrResourceCreateFailure,
-                          self.ipsec_conn._check_create, 'name', 'id')
-
-    def test_failure_with_invalid_create_step(self):
-        """Negative test of invalid create step (programming error)."""
-        self.ipsec_conn.steps = []
-        try:
-            self.ipsec_conn.do_create_action('bogus', None, '123', 'Bad Step')
-        except ipsec_driver.CsrResourceCreateFailure:
-            pass
-        else:
-            self.fail('Expected exception with invalid create step')
-
-    def test_failure_with_invalid_delete_step(self):
-        """Negative test of invalid delete step (programming error)."""
-        self.ipsec_conn.steps = [ipsec_driver.RollbackStep(action='bogus',
-                                                           resource_id='123',
-                                                           title='Bogus Step')]
-        try:
-            self.ipsec_conn.do_rollback()
-        except ipsec_driver.CsrResourceCreateFailure:
-            pass
-        else:
-            self.fail('Expected exception with invalid delete step')
-
-    def test_delete_ipsec_connection(self):
-        """Perform delete of IPSec site connection and check steps done."""
-        # Simulate that a create was done with rollback steps stored
-        self.ipsec_conn.steps = [
-            ipsec_driver.RollbackStep(action='pre_shared_key',
-                                      resource_id='123',
-                                      title='Pre-Shared Key'),
-            ipsec_driver.RollbackStep(action='ike_policy',
-                                      resource_id=222,
-                                      title='IKE Policy'),
-            ipsec_driver.RollbackStep(action='ipsec_policy',
-                                      resource_id=333,
-                                      title='IPSec Policy'),
-            ipsec_driver.RollbackStep(action='ipsec_connection',
-                                      resource_id='Tunnel0',
-                                      title='IPSec Connection'),
-            ipsec_driver.RollbackStep(action='static_route',
-                                      resource_id='10.1.0.0_24_Tunnel0',
-                                      title='Static Route'),
-            ipsec_driver.RollbackStep(action='static_route',
-                                      resource_id='10.2.0.0_24_Tunnel0',
-                                      title='Static Route')]
-        expected = ['delete_static_route',
-                    'delete_static_route',
-                    'delete_ipsec_connection',
-                    'delete_ipsec_policy',
-                    'delete_ike_policy',
-                    'delete_pre_shared_key']
-        self.ipsec_conn.delete_ipsec_site_connection(mock.Mock(), 123)
-        client_calls = [c[0] for c in self.csr.method_calls]
-        self.assertEqual(expected, client_calls)
-
-
-class TestCiscoCsrIPsecConnectionCreateTransforms(base.BaseTestCase):
-
-    """Verifies that config info is prepared/transformed correctly."""
-
-    def setUp(self):
-        super(TestCiscoCsrIPsecConnectionCreateTransforms, self).setUp()
-        self.conn_info = {
-            u'id': '123',
-            u'status': constants.PENDING_CREATE,
-            u'admin_state_up': True,
-            'psk': 'secret',
-            'peer_address': '192.168.1.2',
-            'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'],
-            'mtu': 1500,
-            'ike_policy': {'auth_algorithm': 'sha1',
-                           'encryption_algorithm': 'aes-128',
-                           'pfs': 'Group5',
-                           'ike_version': 'v1',
-                           'lifetime_units': 'seconds',
-                           'lifetime_value': 3600},
-            'ipsec_policy': {'transform_protocol': 'ah',
-                             'encryption_algorithm': 'aes-128',
-                             'auth_algorithm': 'sha1',
-                             'pfs': 'group5',
-                             'lifetime_units': 'seconds',
-                             'lifetime_value': 3600},
-            'cisco': {'site_conn_id': 'Tunnel0',
-                      'ike_policy_id': 222,
-                      'ipsec_policy_id': 333}
-        }
-        self.csr = mock.Mock(spec=csr_client.CsrRestClient)
-        self.csr.tunnel_ip = '172.24.4.23'
-        self.ipsec_conn = ipsec_driver.CiscoCsrIPSecConnection(self.conn_info,
-                                                               self.csr)
-
-    def test_invalid_attribute(self):
-        """Negative test of unknown attribute - programming error."""
-        self.assertRaises(ipsec_driver.CsrDriverMismatchError,
-                          self.ipsec_conn.translate_dialect,
-                          'ike_policy', 'unknown_attr', self.conn_info)
-
-    def test_driver_unknown_mapping(self):
-        """Negative test of service driver providing unknown value to map."""
-        self.conn_info['ike_policy']['pfs'] = "unknown_value"
-        self.assertRaises(ipsec_driver.CsrUnknownMappingError,
-                          self.ipsec_conn.translate_dialect,
-                          'ike_policy', 'pfs', self.conn_info['ike_policy'])
-
-    def test_psk_create_info(self):
-        """Ensure that pre-shared key info is created correctly."""
-        expected = {u'keyring-name': '123',
-                    u'pre-shared-key-list': [
-                        {u'key': 'secret',
-                         u'encrypted': False,
-                         u'peer-address': '192.168.1.2'}]}
-        psk_id = self.conn_info['id']
-        psk_info = self.ipsec_conn.create_psk_info(psk_id, self.conn_info)
-        self.assertEqual(expected, psk_info)
-
-    def test_create_ike_policy_info(self):
-        """Ensure that IKE policy info is mapped/created correctly."""
-        expected = {u'priority-id': 222,
-                    u'encryption': u'aes',
-                    u'hash': u'sha',
-                    u'dhGroup': 5,
-                    u'version': u'v1',
-                    u'lifetime': 3600}
-        policy_id = self.conn_info['cisco']['ike_policy_id']
-        policy_info = self.ipsec_conn.create_ike_policy_info(policy_id,
-                                                             self.conn_info)
-        self.assertEqual(expected, policy_info)
-
-    def test_create_ike_policy_info_different_encryption(self):
-        """Ensure that IKE policy info is mapped/created correctly."""
-        self.conn_info['ike_policy']['encryption_algorithm'] = 'aes-192'
-        expected = {u'priority-id': 222,
-                    u'encryption': u'aes192',
-                    u'hash': u'sha',
-                    u'dhGroup': 5,
-                    u'version': u'v1',
-                    u'lifetime': 3600}
-        policy_id = self.conn_info['cisco']['ike_policy_id']
-        policy_info = self.ipsec_conn.create_ike_policy_info(policy_id,
-                                                             self.conn_info)
-        self.assertEqual(expected, policy_info)
-
-    def test_create_ike_policy_info_non_defaults(self):
-        """Ensure that IKE policy info is created with non-default values."""
-        self.conn_info['ike_policy'] = {
-            'auth_algorithm': 'sha1',
-            'encryption_algorithm': 'aes-256',
-            'pfs': 'Group14',
-            'ike_version': 'v1',
-            'lifetime_units': 'seconds',
-            'lifetime_value': 60
-        }
-        expected = {u'priority-id': 222,
-                    u'encryption': u'aes256',
-                    u'hash': u'sha',
-                    u'dhGroup': 14,
-                    u'version': u'v1',
-                    u'lifetime': 60}
-        policy_id = self.conn_info['cisco']['ike_policy_id']
-        policy_info = self.ipsec_conn.create_ike_policy_info(policy_id,
-                                                             self.conn_info)
-        self.assertEqual(expected, policy_info)
-
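Taken together, the three IKE policy tests above imply a small dialect-translation table from the OpenStack attribute values to the CSR's vocabulary. A sketch of that mapping, limited to the values these tests exercise (the driver's real tables may be broader):

IKE_DIALECT_MAP = {
    'encryption_algorithm': {'aes-128': u'aes',
                             'aes-192': u'aes192',
                             'aes-256': u'aes256'},
    'auth_algorithm': {'sha1': u'sha'},
    'pfs': {'Group5': 5, 'Group14': 14},
}


def to_csr_ike_value(attribute, value):
    # e.g. to_csr_ike_value('pfs', 'Group5') -> 5
    return IKE_DIALECT_MAP[attribute][value]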
-    def test_ipsec_policy_info(self):
-        """Ensure that IPSec policy info is mapped/created correctly.
-
-        Note: Although the default for anti-replay-window-size on the
-        CSR is 64, we force it to disabled for OpenStack use.
-        """
-        expected = {u'policy-id': 333,
-                    u'protection-suite': {
-                        u'esp-encryption': u'esp-aes',
-                        u'esp-authentication': u'esp-sha-hmac',
-                        u'ah': u'ah-sha-hmac'
-                    },
-                    u'lifetime-sec': 3600,
-                    u'pfs': u'group5',
-                    u'anti-replay-window-size': u'disable'}
-        ipsec_policy_id = self.conn_info['cisco']['ipsec_policy_id']
-        policy_info = self.ipsec_conn.create_ipsec_policy_info(ipsec_policy_id,
-                                                               self.conn_info)
-        self.assertEqual(expected, policy_info)
-
-    def test_ipsec_policy_info_different_encryption(self):
-        """Create IPSec policy with different settings."""
-        self.conn_info['ipsec_policy']['transform_protocol'] = 'ah-esp'
-        self.conn_info['ipsec_policy']['encryption_algorithm'] = 'aes-192'
-        expected = {u'policy-id': 333,
-                    u'protection-suite': {
-                        u'esp-encryption': u'esp-192-aes',
-                        u'esp-authentication': u'esp-sha-hmac',
-                        u'ah': u'ah-sha-hmac'
-                    },
-                    u'lifetime-sec': 3600,
-                    u'pfs': u'group5',
-                    u'anti-replay-window-size': u'disable'}
-        ipsec_policy_id = self.conn_info['cisco']['ipsec_policy_id']
-        policy_info = self.ipsec_conn.create_ipsec_policy_info(ipsec_policy_id,
-                                                               self.conn_info)
-        self.assertEqual(expected, policy_info)
-
-    def test_ipsec_policy_info_non_defaults(self):
-        """Create/map IPSec policy info with different values."""
-        self.conn_info['ipsec_policy'] = {'transform_protocol': 'esp',
-                                          'encryption_algorithm': '3des',
-                                          'auth_algorithm': 'sha1',
-                                          'pfs': 'group14',
-                                          'lifetime_units': 'seconds',
-                                          'lifetime_value': 120,
-                                          'anti-replay-window-size': 'disable'}
-        expected = {u'policy-id': 333,
-                    u'protection-suite': {
-                        u'esp-encryption': u'esp-3des',
-                        u'esp-authentication': u'esp-sha-hmac'
-                    },
-                    u'lifetime-sec': 120,
-                    u'pfs': u'group14',
-                    u'anti-replay-window-size': u'disable'}
-        ipsec_policy_id = self.conn_info['cisco']['ipsec_policy_id']
-        policy_info = self.ipsec_conn.create_ipsec_policy_info(ipsec_policy_id,
-                                                               self.conn_info)
-        self.assertEqual(expected, policy_info)
-
-    def test_site_connection_info(self):
-        """Ensure site-to-site connection info is created/mapped correctly."""
-        expected = {u'vpn-interface-name': 'Tunnel0',
-                    u'ipsec-policy-id': 333,
-                    u'remote-device': {
-                        u'tunnel-ip-address': '192.168.1.2'
-                    },
-                    u'mtu': 1500}
-        ipsec_policy_id = self.conn_info['cisco']['ipsec_policy_id']
-        site_conn_id = self.conn_info['cisco']['site_conn_id']
-        conn_info = self.ipsec_conn.create_site_connection_info(
-            site_conn_id, ipsec_policy_id, self.conn_info)
-        self.assertEqual(expected, conn_info)
-
-    def test_static_route_info(self):
-        """Create static route info for peer CIDRs."""
-        expected = [('10.1.0.0_24_Tunnel0',
-                     {u'destination-network': '10.1.0.0/24',
-                      u'outgoing-interface': 'Tunnel0'}),
-                    ('10.2.0.0_24_Tunnel0',
-                     {u'destination-network': '10.2.0.0/24',
-                      u'outgoing-interface': 'Tunnel0'})]
-        site_conn_id = self.conn_info['cisco']['site_conn_id']
-        routes_info = self.ipsec_conn.create_routes_info(site_conn_id,
-                                                         self.conn_info)
-        self.assertEqual(2, len(routes_info))
-        self.assertEqual(expected, routes_info)
-
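create_routes_info() is expected to expand each peer CIDR into a (route ID, route info) pair pointing at the tunnel interface. A sketch of that expansion, reusing the '<cidr>_<interface>' route-ID shape seen in the static route tests:

def routes_for_peer_cidrs(site_conn_id, peer_cidrs):
    # Sketch only: one static route per peer CIDR, keyed by a route ID of
    # the form '<cidr with / replaced by _>_<interface>'.
    return [('%s_%s' % (cidr.replace('/', '_'), site_conn_id),
             {u'destination-network': cidr,
              u'outgoing-interface': site_conn_id})
            for cidr in peer_cidrs]


# routes_for_peer_cidrs('Tunnel0', ['10.1.0.0/24', '10.2.0.0/24'])
# reproduces the expected list in test_static_route_info above.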
-
-class TestCiscoCsrIPsecDeviceDriverSyncStatuses(base.BaseTestCase):
-
-    """Test status/state of services and connections, after sync."""
-
-    def setUp(self):
-        super(TestCiscoCsrIPsecDeviceDriverSyncStatuses, self).setUp()
-        for klass in ['neutron.common.rpc.create_connection',
-                      'neutron.context.get_admin_context_without_session',
-                      'neutron.openstack.common.'
-                      'loopingcall.FixedIntervalLoopingCall']:
-            mock.patch(klass).start()
-        self.context = context.Context('some_user', 'some_tenant')
-        self.agent = mock.Mock()
-        self.driver = ipsec_driver.CiscoCsrIPsecDriver(self.agent, FAKE_HOST)
-        self.driver.agent_rpc = mock.Mock()
-        self.conn_create = mock.patch.object(
-            ipsec_driver.CiscoCsrIPSecConnection,
-            'create_ipsec_site_connection').start()
-        self.conn_delete = mock.patch.object(
-            ipsec_driver.CiscoCsrIPSecConnection,
-            'delete_ipsec_site_connection').start()
-        self.admin_state = mock.patch.object(
-            ipsec_driver.CiscoCsrIPSecConnection,
-            'set_admin_state').start()
-        self.csr = mock.Mock()
-        self.router_info = {
-            u'router_info': {'rest_mgmt_ip': '2.2.2.2',
-                             'tunnel_ip': '1.1.1.3',
-                             'username': 'me',
-                             'password': 'password',
-                             'timeout': 120,
-                             'outer_if_name': u'GigabitEthernet3.102',
-                             'inner_if_name': u'GigabitEthernet3.101'}}
-        self.service123_data = {u'id': u'123',
-                                u'status': constants.DOWN,
-                                u'admin_state_up': False}
-        self.service123_data.update(self.router_info)
-        self.conn1_data = {u'id': u'1',
-                           u'status': constants.ACTIVE,
-                           u'admin_state_up': True,
-                           u'mtu': 1500,
-                           u'psk': u'secret',
-                           u'peer_address': '192.168.1.2',
-                           u'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'],
-                           u'ike_policy': {
-                               u'auth_algorithm': u'sha1',
-                               u'encryption_algorithm': u'aes-128',
-                               u'pfs': u'Group5',
-                               u'ike_version': u'v1',
-                               u'lifetime_units': u'seconds',
-                               u'lifetime_value': 3600},
-                           u'ipsec_policy': {
-                               u'transform_protocol': u'ah',
-                               u'encryption_algorithm': u'aes-128',
-                               u'auth_algorithm': u'sha1',
-                               u'pfs': u'group5',
-                               u'lifetime_units': u'seconds',
-                               u'lifetime_value': 3600},
-                           u'cisco': {u'site_conn_id': u'Tunnel0'}}
-
-    # NOTE: For sync, there are mark (trivial), update (tested),
-    # sweep (tested), and report (tested) phases.
-
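For orientation, the mark/update/sweep cycle referenced in the note above marks all known state dirty, lets the notification refresh (and un-dirty) whatever it mentions, and then sweeps away what is still dirty. A schematic sketch over a plain dict of services (the real driver spreads this across mark_existing_connections_as_dirty(), update_service()/update_connection() and remove_unknown_connections()):

def sync_services(known_services, reported_services):
    # Schematic sketch only: mark, update, then sweep.
    for service in known_services.values():
        service['dirty'] = True
    for reported in reported_services:
        service = known_services.setdefault(reported['id'], {})
        service.update(reported)
        service['dirty'] = False
    stale = [sid for sid, svc in known_services.items() if svc['dirty']]
    for service_id in stale:
        del known_services[service_id]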
-    def test_update_ipsec_connection_create_notify(self):
-        """Notified of connection create request - create."""
-        # Make the (existing) service
-        self.driver.create_vpn_service(self.service123_data)
-        conn_data = copy.deepcopy(self.conn1_data)
-        conn_data[u'status'] = constants.PENDING_CREATE
-
-        connection = self.driver.update_connection(self.context,
-                                                   u'123', conn_data)
-        self.assertFalse(connection.is_dirty)
-        self.assertEqual(u'Tunnel0', connection.tunnel)
-        self.assertEqual(constants.PENDING_CREATE, connection.last_status)
-        self.assertEqual(1, self.conn_create.call_count)
-
-    def test_detect_no_change_to_ipsec_connection(self):
-        """No change to IPSec connection - nop."""
-        # Make existing service, and connection that was active
-        vpn_service = self.driver.create_vpn_service(self.service123_data)
-        connection = vpn_service.create_connection(self.conn1_data)
-
-        self.assertFalse(connection.check_for_changes(self.conn1_data))
-
-    def test_detect_state_only_change_to_ipsec_connection(self):
-        """Only IPSec connection state changed - update."""
-        # Make existing service, and connection that was active
-        vpn_service = self.driver.create_vpn_service(self.service123_data)
-        connection = vpn_service.create_connection(self.conn1_data)
-
-        conn_data = copy.deepcopy(self.conn1_data)
-        conn_data[u'admin_state_up'] = False
-        self.assertFalse(connection.check_for_changes(conn_data))
-
-    def test_detect_non_state_change_to_ipsec_connection(self):
-        """Connection change instead of/in addition to state - update."""
-        # Make existing service, and connection that was active
-        vpn_service = self.driver.create_vpn_service(self.service123_data)
-        connection = vpn_service.create_connection(self.conn1_data)
-
-        conn_data = copy.deepcopy(self.conn1_data)
-        conn_data[u'ipsec_policy'][u'encryption_algorithm'] = u'aes-256'
-        self.assertTrue(connection.check_for_changes(conn_data))
-
-    def test_update_ipsec_connection_changed_admin_down(self):
-        """Notified of connection state change - update.
-
-        For a connection that was previously created, expect to
-        force connection down on an admin down (only) change.
-        """
-
-        # Make existing service, and connection that was active
-        vpn_service = self.driver.create_vpn_service(self.service123_data)
-        vpn_service.create_connection(self.conn1_data)
-
-        # Simulate that notification of connection update received
-        self.driver.mark_existing_connections_as_dirty()
-        # Modify the connection data for the 'sync'
-        conn_data = copy.deepcopy(self.conn1_data)
-        conn_data[u'admin_state_up'] = False
-
-        connection = self.driver.update_connection(self.context,
-                                                   '123', conn_data)
-        self.assertFalse(connection.is_dirty)
-        self.assertEqual(u'Tunnel0', connection.tunnel)
-        self.assertEqual(constants.ACTIVE, connection.last_status)
-        self.assertFalse(self.conn_create.called)
-        self.assertFalse(connection.is_admin_up)
-        self.assertTrue(connection.forced_down)
-        self.assertEqual(1, self.admin_state.call_count)
-
-    def test_update_ipsec_connection_changed_config(self):
-        """Notified of connection changing config - update.
-
-        Goal here is to detect that the connection is deleted and then
-        created, but not that the specific values have changed, so picking
-        arbitrary value (MTU).
-        """
-        # Make existing service, and connection that was active
-        vpn_service = self.driver.create_vpn_service(self.service123_data)
-        vpn_service.create_connection(self.conn1_data)
-
-        # Simulate that notification of connection update received
-        self.driver.mark_existing_connections_as_dirty()
-        # Modify the connection data for the 'sync'
-        conn_data = copy.deepcopy(self.conn1_data)
-        conn_data[u'mtu'] = 9200
-
-        connection = self.driver.update_connection(self.context,
-                                                   '123', conn_data)
-        self.assertFalse(connection.is_dirty)
-        self.assertEqual(u'Tunnel0', connection.tunnel)
-        self.assertEqual(constants.ACTIVE, connection.last_status)
-        self.assertEqual(1, self.conn_create.call_count)
-        self.assertEqual(1, self.conn_delete.call_count)
-        self.assertTrue(connection.is_admin_up)
-        self.assertFalse(connection.forced_down)
-        self.assertFalse(self.admin_state.called)
-
-    def test_update_of_unknown_ipsec_connection(self):
-        """Notified of update of unknown connection - create.
-
-        Occurs if the agent restarts and receives a notification of a
-        change to a connection, but has no previous record of it. The
-        result is that the connection is rebuilt.
-        """
-        # Will have previously created service, but don't know of connection
-        self.driver.create_vpn_service(self.service123_data)
-
-        # Simulate that notification of connection update received
-        self.driver.mark_existing_connections_as_dirty()
-        conn_data = copy.deepcopy(self.conn1_data)
-        conn_data[u'status'] = constants.DOWN
-
-        connection = self.driver.update_connection(self.context,
-                                                   u'123', conn_data)
-        self.assertFalse(connection.is_dirty)
-        self.assertEqual(u'Tunnel0', connection.tunnel)
-        self.assertEqual(constants.DOWN, connection.last_status)
-        self.assertEqual(1, self.conn_create.call_count)
-        self.assertTrue(connection.is_admin_up)
-        self.assertFalse(connection.forced_down)
-        self.assertFalse(self.admin_state.called)
-
-    def test_update_missing_connection_admin_down(self):
-        """Connection not previously known and in admin down state - create.
-
-        If the agent has restarted and a sync notification arrives with a
-        connection that is in admin down state, recreate the connection,
-        but mark it as forced down.
-        """
-        # Make existing service, but no connection
-        self.driver.create_vpn_service(self.service123_data)
-
-        conn_data = copy.deepcopy(self.conn1_data)
-        conn_data.update({u'status': constants.DOWN,
-                          u'admin_state_up': False})
-        connection = self.driver.update_connection(self.context,
-                                                   u'123', conn_data)
-        self.assertIsNotNone(connection)
-        self.assertFalse(connection.is_dirty)
-        self.assertEqual(1, self.conn_create.call_count)
-        self.assertFalse(connection.is_admin_up)
-        self.assertTrue(connection.forced_down)
-        self.assertEqual(1, self.admin_state.call_count)
-
-    def test_update_connection_admin_up(self):
-        """Connection updated to admin up state - record."""
-        # Make existing service, and connection that was admin down
-        conn_data = copy.deepcopy(self.conn1_data)
-        conn_data.update({u'status': constants.DOWN, u'admin_state_up': False})
-        service_data = {u'id': u'123',
-                        u'status': constants.DOWN,
-                        u'admin_state_up': True,
-                        u'ipsec_conns': [conn_data]}
-        service_data.update(self.router_info)
-        self.driver.update_service(self.context, service_data)
-
-        # Simulate that notification of connection update received
-        self.driver.mark_existing_connections_as_dirty()
-        # Now simulate that the notification shows the connection admin up
-        new_conn_data = copy.deepcopy(conn_data)
-        new_conn_data[u'admin_state_up'] = True
-
-        connection = self.driver.update_connection(self.context,
-                                                   u'123', new_conn_data)
-        self.assertFalse(connection.is_dirty)
-        self.assertEqual(u'Tunnel0', connection.tunnel)
-        self.assertEqual(constants.DOWN, connection.last_status)
-        self.assertTrue(connection.is_admin_up)
-        self.assertFalse(connection.forced_down)
-        self.assertEqual(2, self.admin_state.call_count)
-
-    def test_update_for_vpn_service_create(self):
-        """Creation of new IPSec connection on new VPN service - create.
-
-        Service will be created and marked as 'clean', and update
-        processing for connection will occur (create).
-        """
-        conn_data = copy.deepcopy(self.conn1_data)
-        conn_data[u'status'] = constants.PENDING_CREATE
-        service_data = {u'id': u'123',
-                        u'status': constants.PENDING_CREATE,
-                        u'admin_state_up': True,
-                        u'ipsec_conns': [conn_data]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        self.assertFalse(vpn_service.is_dirty)
-        self.assertEqual(constants.PENDING_CREATE, vpn_service.last_status)
-        connection = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(connection)
-        self.assertFalse(connection.is_dirty)
-        self.assertEqual(u'Tunnel0', connection.tunnel)
-        self.assertEqual(constants.PENDING_CREATE, connection.last_status)
-        self.assertEqual(1, self.conn_create.call_count)
-        self.assertTrue(connection.is_admin_up)
-        self.assertFalse(connection.forced_down)
-        self.assertFalse(self.admin_state.called)
-
-    def test_update_for_new_connection_on_existing_service(self):
-        """Creating a new IPSec connection on an existing service."""
-        # Create the service before testing, and mark it dirty
-        prev_vpn_service = self.driver.create_vpn_service(self.service123_data)
-        self.driver.mark_existing_connections_as_dirty()
-        conn_data = copy.deepcopy(self.conn1_data)
-        conn_data[u'status'] = constants.PENDING_CREATE
-        service_data = {u'id': u'123',
-                        u'status': constants.ACTIVE,
-                        u'admin_state_up': True,
-                        u'ipsec_conns': [conn_data]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        # Should reuse the entry and update the status
-        self.assertEqual(prev_vpn_service, vpn_service)
-        self.assertFalse(vpn_service.is_dirty)
-        self.assertEqual(constants.ACTIVE, vpn_service.last_status)
-        connection = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(connection)
-        self.assertFalse(connection.is_dirty)
-        self.assertEqual(u'Tunnel0', connection.tunnel)
-        self.assertEqual(constants.PENDING_CREATE, connection.last_status)
-        self.assertEqual(1, self.conn_create.call_count)
-
-    def test_update_for_vpn_service_with_one_unchanged_connection(self):
-        """Existing VPN service and IPSec connection without any changes - nop.
-
-        Service and connection will be marked clean. No processing for
-        either, as there are no changes.
-        """
-        # Create a service and add in a connection that is active
-        prev_vpn_service = self.driver.create_vpn_service(self.service123_data)
-        prev_vpn_service.create_connection(self.conn1_data)
-
-        self.driver.mark_existing_connections_as_dirty()
-        # Create notification with conn unchanged and service already created
-        service_data = {u'id': u'123',
-                        u'status': constants.ACTIVE,
-                        u'admin_state_up': True,
-                        u'ipsec_conns': [self.conn1_data]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        # Should reuse the entry and update the status
-        self.assertEqual(prev_vpn_service, vpn_service)
-        self.assertFalse(vpn_service.is_dirty)
-        self.assertEqual(constants.ACTIVE, vpn_service.last_status)
-        connection = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(connection)
-        self.assertFalse(connection.is_dirty)
-        self.assertEqual(u'Tunnel0', connection.tunnel)
-        self.assertEqual(constants.ACTIVE, connection.last_status)
-        self.assertFalse(self.conn_create.called)
-
-    def test_update_service_admin_down(self):
-        """VPN service updated to admin down state - force all down.
-
-        If service is down, then all connections are forced down.
-        """
-        # Create an "existing" service, prior to notification
-        prev_vpn_service = self.driver.create_vpn_service(self.service123_data)
-
-        self.driver.mark_existing_connections_as_dirty()
-        service_data = {u'id': u'123',
-                        u'status': constants.DOWN,
-                        u'admin_state_up': False,
-                        u'ipsec_conns': [self.conn1_data]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        self.assertEqual(prev_vpn_service, vpn_service)
-        self.assertFalse(vpn_service.is_dirty)
-        self.assertFalse(vpn_service.is_admin_up)
-        self.assertEqual(constants.DOWN, vpn_service.last_status)
-        conn = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(conn)
-        self.assertFalse(conn.is_dirty)
-        self.assertTrue(conn.forced_down)
-        self.assertTrue(conn.is_admin_up)
-
-    def test_update_new_service_admin_down(self):
-        """Unknown VPN service updated to admin down state - nop.
-
-        Can happen if agent restarts and then gets its first notificaiton
-        of a service that is in the admin down state. Structures will be
-        created, but forced down.
-        """
-        service_data = {u'id': u'123',
-                        u'status': constants.DOWN,
-                        u'admin_state_up': False,
-                        u'ipsec_conns': [self.conn1_data]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        self.assertIsNotNone(vpn_service)
-        self.assertFalse(vpn_service.is_dirty)
-        self.assertFalse(vpn_service.is_admin_up)
-        self.assertEqual(constants.DOWN, vpn_service.last_status)
-        conn = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(conn)
-        self.assertFalse(conn.is_dirty)
-        self.assertTrue(conn.forced_down)
-        self.assertTrue(conn.is_admin_up)
-
-    def test_update_service_admin_up(self):
-        """VPN service updated to admin up state - restore.
-
-        If service is up now, then connections that are admin up will come
-        up and connections that are admin down, will remain down.
-        """
-        # Create an "existing" service, prior to notification
-        prev_vpn_service = self.driver.create_vpn_service(self.service123_data)
-        self.driver.mark_existing_connections_as_dirty()
-        conn_data1 = {u'id': u'1', u'status': constants.DOWN,
-                      u'admin_state_up': False,
-                      u'cisco': {u'site_conn_id': u'Tunnel0'}}
-        conn_data2 = {u'id': u'2', u'status': constants.ACTIVE,
-                      u'admin_state_up': True,
-                      u'cisco': {u'site_conn_id': u'Tunnel1'}}
-        service_data = {u'id': u'123',
-                        u'status': constants.DOWN,
-                        u'admin_state_up': True,
-                        u'ipsec_conns': [conn_data1, conn_data2]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        self.assertEqual(prev_vpn_service, vpn_service)
-        self.assertFalse(vpn_service.is_dirty)
-        self.assertTrue(vpn_service.is_admin_up)
-        self.assertEqual(constants.DOWN, vpn_service.last_status)
-        conn1 = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(conn1)
-        self.assertFalse(conn1.is_dirty)
-        self.assertTrue(conn1.forced_down)
-        self.assertFalse(conn1.is_admin_up)
-        conn2 = vpn_service.get_connection(u'2')
-        self.assertIsNotNone(conn2)
-        self.assertFalse(conn2.is_dirty)
-        self.assertFalse(conn2.forced_down)
-        self.assertTrue(conn2.is_admin_up)
-
-    def test_update_of_unknown_service_create(self):
-        """Create of VPN service that is currently unknown - record.
-
-        If agent is restarted or user changes VPN service to admin up, the
-        notification may contain a VPN service with an IPSec connection
-        that is not in PENDING_CREATE state.
-        """
-        conn_data = {u'id': u'1', u'status': constants.DOWN,
-                     u'admin_state_up': True,
-                     u'cisco': {u'site_conn_id': u'Tunnel0'}}
-        service_data = {u'id': u'123',
-                        u'status': constants.ACTIVE,
-                        u'admin_state_up': True,
-                        u'ipsec_conns': [conn_data]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        self.assertFalse(vpn_service.is_dirty)
-        self.assertEqual(constants.ACTIVE, vpn_service.last_status)
-        connection = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(connection)
-        self.assertFalse(connection.is_dirty)
-        self.assertEqual(u'Tunnel0', connection.tunnel)
-        self.assertEqual(constants.DOWN, connection.last_status)
-        self.assertEqual(1, self.conn_create.call_count)
-
-    def _check_connection_for_service(self, count, vpn_service):
-        """Helper to check the connection information for a service."""
-        connection = vpn_service.get_connection(u'%d' % count)
-        self.assertIsNotNone(connection, "for connection %d" % count)
-        self.assertFalse(connection.is_dirty, "for connection %d" % count)
-        self.assertEqual(u'Tunnel%d' % count, connection.tunnel,
-                         "for connection %d" % count)
-        self.assertEqual(constants.PENDING_CREATE, connection.last_status,
-                         "for connection %d" % count)
-        return count + 1
-
-    def notification_for_two_services_with_two_conns(self):
-        """Helper used by tests to create two services, each with two conns."""
-        conn1_data = {u'id': u'1', u'status': constants.PENDING_CREATE,
-                      u'admin_state_up': True,
-                      u'cisco': {u'site_conn_id': u'Tunnel1'}}
-        conn2_data = {u'id': u'2', u'status': constants.PENDING_CREATE,
-                      u'admin_state_up': True,
-                      u'cisco': {u'site_conn_id': u'Tunnel2'}}
-        service1_data = {u'id': u'123',
-                         u'status': constants.PENDING_CREATE,
-                         u'admin_state_up': True,
-                         u'ipsec_conns': [conn1_data, conn2_data]}
-        service1_data.update(self.router_info)
-        conn3_data = {u'id': u'3', u'status': constants.PENDING_CREATE,
-                      u'admin_state_up': True,
-                      u'cisco': {u'site_conn_id': u'Tunnel3'}}
-        conn4_data = {u'id': u'4', u'status': constants.PENDING_CREATE,
-                      u'admin_state_up': True,
-                      u'cisco': {u'site_conn_id': u'Tunnel4'}}
-        service2_data = {u'id': u'456',
-                         u'status': constants.PENDING_CREATE,
-                         u'admin_state_up': True,
-                         u'ipsec_conns': [conn3_data, conn4_data]}
-        service2_data.update(self.router_info)
-        return service1_data, service2_data
-
-    def test_create_two_connections_on_two_services(self):
-        """High level test of multiple VPN services with connections."""
-        # Build notification message
-        (service1_data,
-         service2_data) = self.notification_for_two_services_with_two_conns()
-        # Simulate plugin returning notification, when requested
-        self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
-            service1_data, service2_data]
-        vpn_services = self.driver.update_all_services_and_connections(
-            self.context)
-        self.assertEqual(2, len(vpn_services))
-        count = 1
-        for vpn_service in vpn_services:
-            self.assertFalse(vpn_service.is_dirty,
-                             "for service %s" % vpn_service)
-            self.assertEqual(constants.PENDING_CREATE, vpn_service.last_status,
-                             "for service %s" % vpn_service)
-            count = self._check_connection_for_service(count, vpn_service)
-            count = self._check_connection_for_service(count, vpn_service)
-        self.assertEqual(4, self.conn_create.call_count)
-
-    def test_sweep_connection_marked_as_clean(self):
-        """Sync updated connection - no action."""
-        # Create a service and connection
-        vpn_service = self.driver.create_vpn_service(self.service123_data)
-        connection = vpn_service.create_connection(self.conn1_data)
-        self.driver.mark_existing_connections_as_dirty()
-        # Simulate that the update phase visited both of them
-        vpn_service.is_dirty = False
-        connection.is_dirty = False
-        self.driver.remove_unknown_connections(self.context)
-        vpn_service = self.driver.service_state.get(u'123')
-        self.assertIsNotNone(vpn_service)
-        self.assertFalse(vpn_service.is_dirty)
-        connection = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(connection)
-        self.assertFalse(connection.is_dirty)
-
-    def test_sweep_connection_dirty(self):
-        """Sync did not update connection - delete."""
-        # Create a service and connection
-        vpn_service = self.driver.create_vpn_service(self.service123_data)
-        vpn_service.create_connection(self.conn1_data)
-        self.driver.mark_existing_connections_as_dirty()
-        # Simulate that the update phase only visited the service
-        vpn_service.is_dirty = False
-        self.driver.remove_unknown_connections(self.context)
-        vpn_service = self.driver.service_state.get(u'123')
-        self.assertIsNotNone(vpn_service)
-        self.assertFalse(vpn_service.is_dirty)
-        connection = vpn_service.get_connection(u'1')
-        self.assertIsNone(connection)
-        self.assertEqual(1, self.conn_delete.call_count)
-
-    def test_sweep_service_dirty(self):
-        """Sync did not update service - delete it and all conns."""
-        # Create a service and connection
-        vpn_service = self.driver.create_vpn_service(self.service123_data)
-        vpn_service.create_connection(self.conn1_data)
-        self.driver.mark_existing_connections_as_dirty()
-        # Both the service and the connection are still 'dirty'
-        self.driver.remove_unknown_connections(self.context)
-        self.assertIsNone(self.driver.service_state.get(u'123'))
-        self.assertEqual(1, self.conn_delete.call_count)
-
-    def test_sweep_multiple_services(self):
-        """One service and conn updated, one service and conn not."""
-        # Create two services, each with a connection
-        vpn_service1 = self.driver.create_vpn_service(self.service123_data)
-        vpn_service1.create_connection(self.conn1_data)
-        service456_data = {u'id': u'456',
-                           u'status': constants.ACTIVE,
-                           u'admin_state_up': False}
-        service456_data.update(self.router_info)
-        conn2_data = {u'id': u'2', u'status': constants.ACTIVE,
-                      u'admin_state_up': True,
-                      u'cisco': {u'site_conn_id': u'Tunnel0'}}
-        prev_vpn_service2 = self.driver.create_vpn_service(service456_data)
-        prev_connection2 = prev_vpn_service2.create_connection(conn2_data)
-        self.driver.mark_existing_connections_as_dirty()
-        # Simulate that the update phase visited the second service and conn
-        prev_vpn_service2.is_dirty = False
-        prev_connection2.is_dirty = False
-        self.driver.remove_unknown_connections(self.context)
-        self.assertIsNone(self.driver.service_state.get(u'123'))
-        vpn_service2 = self.driver.service_state.get(u'456')
-        self.assertEqual(prev_vpn_service2, vpn_service2)
-        self.assertFalse(vpn_service2.is_dirty)
-        connection2 = vpn_service2.get_connection(u'2')
-        self.assertEqual(prev_connection2, connection2)
-        self.assertFalse(connection2.is_dirty)
-        self.assertEqual(1, self.conn_delete.call_count)
-
-    def simulate_mark_update_sweep_for_service_with_conn(self, service_state,
-                                                         connection_state):
-        """Create internal structures for single service with connection.
-
-        Creates a service and corresponding connection. Then, simulates
-        the mark/update/sweep operation by marking both the service and
-        connection as clean and updating their status. Override the REST
-        client created for the service, with a mock, so that all calls
-        can be mocked out.
-        """
-        conn_data = {u'id': u'1', u'status': connection_state,
-                     u'admin_state_up': True,
-                     u'cisco': {u'site_conn_id': u'Tunnel0'}}
-        service_data = {u'id': u'123',
-                        u'admin_state_up': True}
-        service_data.update(self.router_info)
-        # Create a service and connection
-        vpn_service = self.driver.create_vpn_service(service_data)
-        vpn_service.csr = self.csr  # Mocked REST client
-        connection = vpn_service.create_connection(conn_data)
-        # Simulate that the update phase visited both of them
-        vpn_service.is_dirty = False
-        vpn_service.connections_removed = False
-        vpn_service.last_status = service_state
-        vpn_service.is_admin_up = True
-        connection.is_dirty = False
-        connection.last_status = connection_state
-        connection.is_admin_up = True
-        connection.forced_down = False
-        return vpn_service
-
-    def test_report_fragment_connection_created(self):
-        """Generate report section for a created connection."""
-        # Prepare service and connection in PENDING_CREATE state
-        vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
-            constants.PENDING_CREATE, constants.PENDING_CREATE)
-        # Simulate that CSR has reported the connection is now up
-        self.csr.read_tunnel_statuses.return_value = [
-            (u'Tunnel0', u'UP-ACTIVE'), ]
-
-        # Get the statuses for connections existing on CSR
-        tunnels = vpn_service.get_ipsec_connections_status()
-        self.assertEqual({u'Tunnel0': constants.ACTIVE}, tunnels)
-
-        # Check that there is a status for this connection
-        connection = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(connection)
-        current_status = connection.find_current_status_in(tunnels)
-        self.assertEqual(constants.ACTIVE, current_status)
-
-        # Create report fragment due to change
-        self.assertNotEqual(connection.last_status, current_status)
-        report_frag = connection.update_status_and_build_report(current_status)
-        self.assertEqual(current_status, connection.last_status)
-        expected = {'1': {'status': constants.ACTIVE,
-                    'updated_pending_status': True}}
-        self.assertEqual(expected, report_frag)
-
-    def test_report_fragment_connection_unchanged_status(self):
-        """No report section generated for a created connection."""
-        # Prepare service and connection in ACTIVE state
-        vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
-            constants.ACTIVE, constants.ACTIVE)
-        # Simulate that CSR has reported the connection is up
-        self.csr.read_tunnel_statuses.return_value = [
-            (u'Tunnel0', u'UP-IDLE'), ]
-
-        # Get the statuses for connections existing on CSR
-        tunnels = vpn_service.get_ipsec_connections_status()
-        self.assertEqual({u'Tunnel0': constants.ACTIVE}, tunnels)
-
-        # Check that there is a status for this connection
-        connection = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(connection)
-        current_status = connection.find_current_status_in(tunnels)
-        self.assertEqual(constants.ACTIVE, current_status)
-
-        # Should be no report, as no change
-        self.assertEqual(connection.last_status, current_status)
-        report_frag = connection.update_status_and_build_report(current_status)
-        self.assertEqual(current_status, connection.last_status)
-        self.assertEqual({}, report_frag)
-
-    def test_report_fragment_connection_changed_status(self):
-        """Generate report section for connection with changed state."""
-        # Prepare service in ACTIVE state and connection in DOWN state
-        vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
-            constants.ACTIVE, constants.DOWN)
-        # Simulate that CSR has reported the connection is now up
-        self.csr.read_tunnel_statuses.return_value = [
-            (u'Tunnel0', u'UP-NO-IKE'), ]
-
-        # Get the statuses for connections existing on CSR
-        tunnels = vpn_service.get_ipsec_connections_status()
-        self.assertEqual({u'Tunnel0': constants.ACTIVE}, tunnels)
-
-        # Check that there is a status for this connection
-        connection = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(connection)
-        current_status = connection.find_current_status_in(tunnels)
-        self.assertEqual(constants.ACTIVE, current_status)
-
-        # Create report fragment due to change
-        self.assertNotEqual(connection.last_status, current_status)
-        report_frag = connection.update_status_and_build_report(current_status)
-        self.assertEqual(current_status, connection.last_status)
-        expected = {'1': {'status': constants.ACTIVE,
-                    'updated_pending_status': False}}
-        self.assertEqual(expected, report_frag)
-
-    def test_report_fragment_connection_failed_create(self):
-        """Failure test of report fragment for conn that failed creation.
-
-        Normally, without any status from the CSR, the connection report would
-        be skipped, but we need to report back failures.
-        """
-        # Prepare service and connection in PENDING_CREATE state
-        vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
-            constants.PENDING_CREATE, constants.PENDING_CREATE)
-        # Simulate that CSR does NOT report the status (no tunnel)
-        self.csr.read_tunnel_statuses.return_value = []
-
-        # Get the statuses for connections existing on CSR
-        tunnels = vpn_service.get_ipsec_connections_status()
-        self.assertEqual({}, tunnels)
-
-        # Check that there is a status for this connection
-        connection = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(connection)
-        current_status = connection.find_current_status_in(tunnels)
-        self.assertEqual(constants.ERROR, current_status)
-
-        # Create report fragment due to change
-        self.assertNotEqual(connection.last_status, current_status)
-        report_frag = connection.update_status_and_build_report(current_status)
-        self.assertEqual(current_status, connection.last_status)
-        expected = {'1': {'status': constants.ERROR,
-                    'updated_pending_status': True}}
-        self.assertEqual(expected, report_frag)
-
-    def test_report_fragment_connection_admin_down(self):
-        """Report for a connection that is in admin down state."""
-        # Prepare service and connection with previous status ACTIVE, but
-        # with connection admin down
-        conn_data = {u'id': u'1', u'status': constants.ACTIVE,
-                     u'admin_state_up': False,
-                     u'cisco': {u'site_conn_id': u'Tunnel0'}}
-        service_data = {u'id': u'123',
-                        u'status': constants.ACTIVE,
-                        u'admin_state_up': True,
-                        u'ipsec_conns': [conn_data]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        vpn_service.csr = self.csr  # Mocked REST client
-        # Tunnel would have been deleted, so simulate no status
-        self.csr.read_tunnel_statuses.return_value = []
-
-        connection = vpn_service.get_connection(u'1')
-        self.assertIsNotNone(connection)
-        self.assertTrue(connection.forced_down)
-        self.assertEqual(constants.ACTIVE, connection.last_status)
-
-        # Create report fragment due to change
-        report_frag = self.driver.build_report_for_connections_on(vpn_service)
-        self.assertEqual(constants.DOWN, connection.last_status)
-        expected = {'1': {'status': constants.DOWN,
-                    'updated_pending_status': False}}
-        self.assertEqual(expected, report_frag)
-
-    def test_report_fragment_two_connections(self):
-        """Generate report fragment for two connections on a service."""
-        # Prepare service with two connections, one ACTIVE, one DOWN
-        conn1_data = {u'id': u'1', u'status': constants.DOWN,
-                      u'admin_state_up': True,
-                      u'cisco': {u'site_conn_id': u'Tunnel1'}}
-        conn2_data = {u'id': u'2', u'status': constants.ACTIVE,
-                      u'admin_state_up': True,
-                      u'cisco': {u'site_conn_id': u'Tunnel2'}}
-        service_data = {u'id': u'123',
-                        u'status': constants.ACTIVE,
-                        u'admin_state_up': True,
-                        u'ipsec_conns': [conn1_data, conn2_data]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        vpn_service.csr = self.csr  # Mocked REST client
-        # Simulate that CSR has reported the connections with diff status
-        self.csr.read_tunnel_statuses.return_value = [
-            (u'Tunnel1', u'UP-IDLE'), (u'Tunnel2', u'DOWN-NEGOTIATING')]
-
-        # Get the report fragments for the connections
-        report_frag = self.driver.build_report_for_connections_on(vpn_service)
-        expected = {u'1': {u'status': constants.ACTIVE,
-                           u'updated_pending_status': False},
-                    u'2': {u'status': constants.DOWN,
-                           u'updated_pending_status': False}}
-        self.assertEqual(expected, report_frag)
-
-    def test_report_service_create(self):
-        """VPN service and IPSec connection created - report."""
-        # Simulate creation of the service and connection
-        vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
-            constants.PENDING_CREATE, constants.PENDING_CREATE)
-        # Simulate that the CSR has created the connection
-        self.csr.read_tunnel_statuses.return_value = [
-            (u'Tunnel0', u'UP-ACTIVE'), ]
-
-        report = self.driver.build_report_for_service(vpn_service)
-        expected_report = {
-            u'id': u'123',
-            u'updated_pending_status': True,
-            u'status': constants.ACTIVE,
-            u'ipsec_site_connections': {
-                u'1': {u'status': constants.ACTIVE,
-                       u'updated_pending_status': True}
-            }
-        }
-        self.assertEqual(expected_report, report)
-        # Check that service and connection statuses are updated
-        self.assertEqual(constants.ACTIVE, vpn_service.last_status)
-        self.assertEqual(constants.ACTIVE,
-                         vpn_service.get_connection(u'1').last_status)
-
-    def test_report_service_create_of_first_conn_fails(self):
-        """VPN service and IPSec conn created, but conn failed - report.
-
-        Since this is the sole IPSec connection on the service, and the
-        create failed (connection in ERROR state), the VPN service's
-        status will be set to DOWN.
-        """
-        # Simulate creation of the service and connection
-        vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
-            constants.PENDING_CREATE, constants.PENDING_CREATE)
-        # Simulate that the CSR has no info due to failed create
-        self.csr.read_tunnel_statuses.return_value = []
-
-        report = self.driver.build_report_for_service(vpn_service)
-        expected_report = {
-            u'id': u'123',
-            u'updated_pending_status': True,
-            u'status': constants.DOWN,
-            u'ipsec_site_connections': {
-                u'1': {u'status': constants.ERROR,
-                       u'updated_pending_status': True}
-            }
-        }
-        self.assertEqual(expected_report, report)
-        # Check that service and connection statuses are updated
-        self.assertEqual(constants.DOWN, vpn_service.last_status)
-        self.assertEqual(constants.ERROR,
-                         vpn_service.get_connection(u'1').last_status)
-
-    def test_report_connection_created_on_existing_service(self):
-        """Creating connection on existing service - report."""
-        # Simulate existing service and connection create
-        vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
-            constants.ACTIVE, constants.PENDING_CREATE)
-        # Simulate that the CSR has created the connection
-        self.csr.read_tunnel_statuses.return_value = [
-            (u'Tunnel0', u'UP-IDLE'), ]
-
-        report = self.driver.build_report_for_service(vpn_service)
-        expected_report = {
-            u'id': u'123',
-            u'updated_pending_status': False,
-            u'status': constants.ACTIVE,
-            u'ipsec_site_connections': {
-                u'1': {u'status': constants.ACTIVE,
-                       u'updated_pending_status': True}
-            }
-        }
-        self.assertEqual(expected_report, report)
-        # Check that service and connection statuses are updated
-        self.assertEqual(constants.ACTIVE, vpn_service.last_status)
-        self.assertEqual(constants.ACTIVE,
-                         vpn_service.get_connection(u'1').last_status)
-
-    def test_no_report_no_changes(self):
-        """VPN service with unchanged IPSec connection - no report.
-
-        Note: No report will be generated if the last connection on the
-        service is deleted. The service (and connection) objects will
-        have been removed by the sweep operation and thus not reported.
-        On the plugin, the service should be changed to DOWN. Likewise,
-        if the service goes to admin down state.
-        """
-        # Simulate an existing service and connection that are ACTIVE
-        vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
-            constants.ACTIVE, constants.ACTIVE)
-        # Simulate that the CSR reports the connection still active
-        self.csr.read_tunnel_statuses.return_value = [
-            (u'Tunnel0', u'UP-ACTIVE'), ]
-
-        report = self.driver.build_report_for_service(vpn_service)
-        self.assertEqual({}, report)
-        # Check that service and connection statuses are still same
-        self.assertEqual(constants.ACTIVE, vpn_service.last_status)
-        self.assertEqual(constants.ACTIVE,
-                         vpn_service.get_connection(u'1').last_status)
-
-    def test_report_sole_connection_goes_down(self):
-        """Only connection on VPN service goes down - report.
-
-        In addition to reporting the status change and recording the new
-        state for the IPSec connection, the VPN service status will be
-        DOWN.
-        """
-        # Simulate an existing service and connection that are ACTIVE
-        vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
-            constants.ACTIVE, constants.ACTIVE)
-        # Simulate that the CSR reports the connection went down
-        self.csr.read_tunnel_statuses.return_value = [
-            (u'Tunnel0', u'DOWN-NEGOTIATING'), ]
-
-        report = self.driver.build_report_for_service(vpn_service)
-        expected_report = {
-            u'id': u'123',
-            u'updated_pending_status': False,
-            u'status': constants.DOWN,
-            u'ipsec_site_connections': {
-                u'1': {u'status': constants.DOWN,
-                       u'updated_pending_status': False}
-            }
-        }
-        self.assertEqual(expected_report, report)
-        # Check that service and connection statuses are updated
-        self.assertEqual(constants.DOWN, vpn_service.last_status)
-        self.assertEqual(constants.DOWN,
-                         vpn_service.get_connection(u'1').last_status)
-
-    def test_report_sole_connection_comes_up(self):
-        """Only connection on VPN service comes up - report.
-
-        In addition to reporting the status change and recording the new
-        state for the IPSec connection, the VPN service status will be
-        ACTIVE.
-        """
-        # Simulate an existing service and connection that are DOWN
-        vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
-            constants.DOWN, constants.DOWN)
-        # Simulate that the CSR reports the connection came up
-        self.csr.read_tunnel_statuses.return_value = [
-            (u'Tunnel0', u'UP-NO-IKE'), ]
-
-        report = self.driver.build_report_for_service(vpn_service)
-        expected_report = {
-            u'id': u'123',
-            u'updated_pending_status': False,
-            u'status': constants.ACTIVE,
-            u'ipsec_site_connections': {
-                u'1': {u'status': constants.ACTIVE,
-                       u'updated_pending_status': False}
-            }
-        }
-        self.assertEqual(expected_report, report)
-        # Check that service and connection statuses are updated
-        self.assertEqual(constants.ACTIVE, vpn_service.last_status)
-        self.assertEqual(constants.ACTIVE,
-                         vpn_service.get_connection(u'1').last_status)
-
-    def test_report_service_with_two_connections_gone_down(self):
-        """One service with two connections that went down - report.
-
-        Shows the case where all the connections are down, so that the
-        service should report as DOWN, as well.
-        """
-        # Simulate one service with two ACTIVE connections
-        conn1_data = {u'id': u'1', u'status': constants.ACTIVE,
-                      u'admin_state_up': True,
-                      u'cisco': {u'site_conn_id': u'Tunnel1'}}
-        conn2_data = {u'id': u'2', u'status': constants.ACTIVE,
-                      u'admin_state_up': True,
-                      u'cisco': {u'site_conn_id': u'Tunnel2'}}
-        service_data = {u'id': u'123',
-                        u'status': constants.ACTIVE,
-                        u'admin_state_up': True,
-                        u'ipsec_conns': [conn1_data, conn2_data]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        vpn_service.csr = self.csr  # Mocked REST client
-        # Simulate that the CSR has reported that the connections are DOWN
-        self.csr.read_tunnel_statuses.return_value = [
-            (u'Tunnel1', u'DOWN-NEGOTIATING'), (u'Tunnel2', u'DOWN')]
-
-        report = self.driver.build_report_for_service(vpn_service)
-        expected_report = {
-            u'id': u'123',
-            u'updated_pending_status': False,
-            u'status': constants.DOWN,
-            u'ipsec_site_connections': {
-                u'1': {u'status': constants.DOWN,
-                       u'updated_pending_status': False},
-                u'2': {u'status': constants.DOWN,
-                       u'updated_pending_status': False}}
-        }
-        self.assertEqual(expected_report, report)
-        # Check that service and connection statuses are updated
-        self.assertEqual(constants.DOWN, vpn_service.last_status)
-        self.assertEqual(constants.DOWN,
-                         vpn_service.get_connection(u'1').last_status)
-        self.assertEqual(constants.DOWN,
-                         vpn_service.get_connection(u'2').last_status)
-
-    def test_report_service_with_connection_removed(self):
-        """One service with two connections where one is removed - report.
-
-        With a connection removed and the other connection unchanged,
-        normally there would be nothing to report for the connections, but
-        we need to report any possible change to the service state. In this
-        case, the service was ACTIVE, but since the only ACTIVE connection
-        is deleted and the remaining connection is DOWN, the service will
-        indicate as DOWN.
-        """
-        # Simulate one service with one connection up, one down
-        conn1_data = {u'id': u'1', u'status': constants.ACTIVE,
-                      u'admin_state_up': True,
-                      u'mtu': 1500,
-                      u'psk': u'secret',
-                      u'peer_address': '192.168.1.2',
-                      u'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'],
-                      u'ike_policy': {u'auth_algorithm': u'sha1',
-                                      u'encryption_algorithm': u'aes-128',
-                                      u'pfs': u'Group5',
-                                      u'ike_version': u'v1',
-                                      u'lifetime_units': u'seconds',
-                                      u'lifetime_value': 3600},
-                      u'ipsec_policy': {u'transform_protocol': u'ah',
-                                        u'encryption_algorithm': u'aes-128',
-                                        u'auth_algorithm': u'sha1',
-                                        u'pfs': u'group5',
-                                        u'lifetime_units': u'seconds',
-                                        u'lifetime_value': 3600},
-                      u'cisco': {u'site_conn_id': u'Tunnel1'}}
-        conn2_data = {u'id': u'2', u'status': constants.DOWN,
-                      u'admin_state_up': True,
-                      u'mtu': 1500,
-                      u'psk': u'secret',
-                      u'peer_address': '192.168.1.2',
-                      u'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'],
-                      u'ike_policy': {u'auth_algorithm': u'sha1',
-                                      u'encryption_algorithm': u'aes-128',
-                                      u'pfs': u'Group5',
-                                      u'ike_version': u'v1',
-                                      u'lifetime_units': u'seconds',
-                                      u'lifetime_value': 3600},
-                      u'ipsec_policy': {u'transform_protocol': u'ah',
-                                        u'encryption_algorithm': u'aes-128',
-                                        u'auth_algorithm': u'sha1',
-                                        u'pfs': u'group5',
-                                        u'lifetime_units': u'seconds',
-                                        u'lifetime_value': 3600},
-                      u'cisco': {u'site_conn_id': u'Tunnel2'}}
-        service_data = {u'id': u'123',
-                        u'status': constants.ACTIVE,
-                        u'admin_state_up': True,
-                        u'ipsec_conns': [conn1_data, conn2_data]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        self.assertEqual(constants.ACTIVE, vpn_service.last_status)
-        self.assertEqual(constants.ACTIVE,
-                         vpn_service.get_connection(u'1').last_status)
-        self.assertEqual(constants.DOWN,
-                         vpn_service.get_connection(u'2').last_status)
-
-        # Simulate that one is deleted
-        self.driver.mark_existing_connections_as_dirty()
-        service_data = {u'id': u'123',
-                        u'status': constants.ACTIVE,
-                        u'admin_state_up': True,
-                        u'ipsec_conns': [conn2_data]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        vpn_service.csr = self.csr  # Mocked REST client
-        self.driver.remove_unknown_connections(self.context)
-        self.assertTrue(vpn_service.connections_removed)
-        self.assertEqual(constants.ACTIVE, vpn_service.last_status)
-        self.assertIsNone(vpn_service.get_connection(u'1'))
-        self.assertEqual(constants.DOWN,
-                         vpn_service.get_connection(u'2').last_status)
-
-        # Simulate that only one connection reports and status is unchanged,
-        # so there will be NO connection info to report.
-        self.csr.read_tunnel_statuses.return_value = [(u'Tunnel2', u'DOWN')]
-        report = self.driver.build_report_for_service(vpn_service)
-        expected_report = {
-            u'id': u'123',
-            u'updated_pending_status': False,
-            u'status': constants.DOWN,
-            u'ipsec_site_connections': {}
-        }
-        self.assertEqual(expected_report, report)
-        # Check that service and connection statuses are updated
-        self.assertEqual(constants.DOWN, vpn_service.last_status)
-        self.assertEqual(constants.DOWN,
-                         vpn_service.get_connection(u'2').last_status)
-
-    def test_report_service_admin_down_with_two_connections(self):
-        """One service admin down, with two connections - report.
-
-        When the service is admin down, all the connections will report
-        as DOWN.
-        """
-        # Simulate one service (admin down) with two ACTIVE connections
-        conn1_data = {u'id': u'1', u'status': constants.ACTIVE,
-                      u'admin_state_up': True,
-                      u'cisco': {u'site_conn_id': u'Tunnel1'}}
-        conn2_data = {u'id': u'2', u'status': constants.ACTIVE,
-                      u'admin_state_up': True,
-                      u'cisco': {u'site_conn_id': u'Tunnel2'}}
-        service_data = {u'id': u'123',
-                        u'status': constants.ACTIVE,
-                        u'admin_state_up': False,
-                        u'ipsec_conns': [conn1_data, conn2_data]}
-        service_data.update(self.router_info)
-        vpn_service = self.driver.update_service(self.context, service_data)
-        vpn_service.csr = self.csr  # Mocked REST client
-        # Since the service is admin down, connections will have been deleted
-        self.csr.read_tunnel_statuses.return_value = []
-
-        report = self.driver.build_report_for_service(vpn_service)
-        expected_report = {
-            u'id': u'123',
-            u'updated_pending_status': False,
-            u'status': constants.DOWN,
-            u'ipsec_site_connections': {
-                u'1': {u'status': constants.DOWN,
-                       u'updated_pending_status': False},
-                u'2': {u'status': constants.DOWN,
-                       u'updated_pending_status': False}}
-        }
-        self.assertEqual(expected_report, report)
-        # Check that service and connection statuses are updated
-        self.assertEqual(constants.DOWN, vpn_service.last_status)
-        self.assertEqual(constants.DOWN,
-                         vpn_service.get_connection(u'1').last_status)
-        self.assertEqual(constants.DOWN,
-                         vpn_service.get_connection(u'2').last_status)
-
-    def test_report_multiple_services(self):
-        """Status changes for several services - report."""
-        # Simulate creation of the service and connection
-        (service1_data,
-         service2_data) = self.notification_for_two_services_with_two_conns()
-        vpn_service1 = self.driver.update_service(self.context, service1_data)
-        vpn_service2 = self.driver.update_service(self.context, service2_data)
-        # Simulate that the CSR has created the connections
-        vpn_service1.csr = vpn_service2.csr = self.csr  # Mocked REST client
-        self.csr.read_tunnel_statuses.return_value = [
-            (u'Tunnel1', u'UP-ACTIVE'), (u'Tunnel2', u'DOWN'),
-            (u'Tunnel3', u'DOWN-NEGOTIATING'), (u'Tunnel4', u'UP-IDLE')]
-
-        report = self.driver.report_status(self.context)
-        expected_report = [{u'id': u'123',
-                            u'updated_pending_status': True,
-                            u'status': constants.ACTIVE,
-                            u'ipsec_site_connections': {
-                                u'1': {u'status': constants.ACTIVE,
-                                       u'updated_pending_status': True},
-                                u'2': {u'status': constants.DOWN,
-                                       u'updated_pending_status': True}}
-                            },
-                           {u'id': u'456',
-                            u'updated_pending_status': True,
-                            u'status': constants.ACTIVE,
-                            u'ipsec_site_connections': {
-                                u'3': {u'status': constants.DOWN,
-                                       u'updated_pending_status': True},
-                                u'4': {u'status': constants.ACTIVE,
-                                       u'updated_pending_status': True}}
-                            }]
-        self.assertEqual(expected_report,
-                         sorted(report, key=operator.itemgetter('id')))
-        # Check that service and connection statuses are updated
-        self.assertEqual(constants.ACTIVE, vpn_service1.last_status)
-        self.assertEqual(constants.ACTIVE,
-                         vpn_service1.get_connection(u'1').last_status)
-        self.assertEqual(constants.DOWN,
-                         vpn_service1.get_connection(u'2').last_status)
-        self.assertEqual(constants.ACTIVE, vpn_service2.last_status)
-        self.assertEqual(constants.DOWN,
-                         vpn_service2.get_connection(u'3').last_status)
-        self.assertEqual(constants.ACTIVE,
-                         vpn_service2.get_connection(u'4').last_status)
-
-    # TODO(pcm) FUTURE - UTs for update action, when supported.
-
-    def test_vpnservice_updated(self):
-        with mock.patch.object(self.driver, 'sync') as sync:
-            context = mock.Mock()
-            self.driver.vpnservice_updated(context)
-            sync.assert_called_once_with(context, [])
diff --git a/neutron/tests/unit/services/vpn/device_drivers/test_ipsec.py b/neutron/tests/unit/services/vpn/device_drivers/test_ipsec.py
deleted file mode 100644 (file)
index bec8b78..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT I3, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import copy
-import mock
-
-from neutron.openstack.common import uuidutils
-from neutron.plugins.common import constants
-from neutron.services.vpn.device_drivers import ipsec as ipsec_driver
-from neutron.tests import base
-
-_uuid = uuidutils.generate_uuid
-FAKE_HOST = 'fake_host'
-FAKE_ROUTER_ID = _uuid()
-FAKE_VPN_SERVICE = {
-    'id': _uuid(),
-    'router_id': FAKE_ROUTER_ID,
-    'admin_state_up': True,
-    'status': constants.PENDING_CREATE,
-    'subnet': {'cidr': '10.0.0.0/24'},
-    'ipsec_site_connections': [
-        {'peer_cidrs': ['20.0.0.0/24',
-                        '30.0.0.0/24']},
-        {'peer_cidrs': ['40.0.0.0/24',
-                        '50.0.0.0/24']}]
-}
-
-
-class TestIPsecDeviceDriver(base.BaseTestCase):
-    def setUp(self, driver=ipsec_driver.OpenSwanDriver):
-        super(TestIPsecDeviceDriver, self).setUp()
-
-        for klass in [
-            'os.makedirs',
-            'os.path.isdir',
-            'neutron.agent.linux.utils.replace_file',
-            'neutron.common.rpc.create_connection',
-            'neutron.services.vpn.device_drivers.ipsec.'
-                'OpenSwanProcess._gen_config_content',
-            'shutil.rmtree',
-        ]:
-            mock.patch(klass).start()
-        self.execute = mock.patch(
-            'neutron.agent.linux.utils.execute').start()
-        self.agent = mock.Mock()
-        self.driver = driver(
-            self.agent,
-            FAKE_HOST)
-        self.driver.agent_rpc = mock.Mock()
-
-    def test_vpnservice_updated(self):
-        with mock.patch.object(self.driver, 'sync') as sync:
-            context = mock.Mock()
-            self.driver.vpnservice_updated(context)
-            sync.assert_called_once_with(context, [])
-
-    def test_create_router(self):
-        process_id = _uuid()
-        process = mock.Mock()
-        process.vpnservice = FAKE_VPN_SERVICE
-        self.driver.processes = {
-            process_id: process}
-        self.driver.create_router(process_id)
-        process.enable.assert_called_once_with()
-
-    def test_destroy_router(self):
-        process_id = _uuid()
-        process = mock.Mock()
-        process.vpnservice = FAKE_VPN_SERVICE
-        self.driver.processes = {
-            process_id: process}
-        self.driver.destroy_router(process_id)
-        process.disable.assert_called_once_with()
-        self.assertNotIn(process_id, self.driver.processes)
-
-    def test_sync_added(self):
-        self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
-            FAKE_VPN_SERVICE]
-        context = mock.Mock()
-        process = mock.Mock()
-        process.vpnservice = FAKE_VPN_SERVICE
-        process.connection_status = {}
-        process.status = constants.ACTIVE
-        process.updated_pending_status = True
-        self.driver.process_status_cache = {}
-        self.driver.processes = {
-            FAKE_ROUTER_ID: process}
-        self.driver.sync(context, [])
-        self.agent.assert_has_calls([
-            mock.call.add_nat_rule(
-                FAKE_ROUTER_ID,
-                'POSTROUTING',
-                '-s 10.0.0.0/24 -d 20.0.0.0/24 -m policy '
-                '--dir out --pol ipsec -j ACCEPT ',
-                top=True),
-            mock.call.add_nat_rule(
-                FAKE_ROUTER_ID,
-                'POSTROUTING',
-                '-s 10.0.0.0/24 -d 30.0.0.0/24 -m policy '
-                '--dir out --pol ipsec -j ACCEPT ',
-                top=True),
-            mock.call.add_nat_rule(
-                FAKE_ROUTER_ID,
-                'POSTROUTING',
-                '-s 10.0.0.0/24 -d 40.0.0.0/24 -m policy '
-                '--dir out --pol ipsec -j ACCEPT ',
-                top=True),
-            mock.call.add_nat_rule(
-                FAKE_ROUTER_ID,
-                'POSTROUTING',
-                '-s 10.0.0.0/24 -d 50.0.0.0/24 -m policy '
-                '--dir out --pol ipsec -j ACCEPT ',
-                top=True),
-            mock.call.iptables_apply(FAKE_ROUTER_ID)
-        ])
-        process.update.assert_called_once_with()
-        self.driver.agent_rpc.update_status.assert_called_once_with(
-            context,
-            [{'status': 'ACTIVE',
-             'ipsec_site_connections': {},
-             'updated_pending_status': True,
-             'id': FAKE_VPN_SERVICE['id']}])
-
-    def fake_ensure_process(self, process_id, vpnservice=None):
-        process = self.driver.processes.get(process_id)
-        if not process:
-            process = mock.Mock()
-            process.vpnservice = FAKE_VPN_SERVICE
-            process.connection_status = {}
-            process.status = constants.ACTIVE
-            process.updated_pending_status = True
-            self.driver.processes[process_id] = process
-        elif vpnservice:
-            process.vpnservice = vpnservice
-            process.update_vpnservice(vpnservice)
-        return process
-
-    def test_sync_update_vpnservice(self):
-        with mock.patch.object(self.driver,
-                               'ensure_process') as ensure_process:
-            ensure_process.side_effect = self.fake_ensure_process
-            new_vpn_service = FAKE_VPN_SERVICE
-            updated_vpn_service = copy.deepcopy(new_vpn_service)
-            updated_vpn_service['ipsec_site_connections'].append(
-                {'peer_cidrs': ['60.0.0.0/24',
-                                '70.0.0.0/24']})
-            context = mock.Mock()
-            self.driver.process_status_cache = {}
-            self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
-                new_vpn_service]
-            self.driver.sync(context, [])
-            process = self.driver.processes[FAKE_ROUTER_ID]
-            self.assertEqual(process.vpnservice, new_vpn_service)
-            self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
-                updated_vpn_service]
-            self.driver.sync(context, [])
-            process = self.driver.processes[FAKE_ROUTER_ID]
-            process.update_vpnservice.assert_called_once_with(
-                updated_vpn_service)
-            self.assertEqual(process.vpnservice, updated_vpn_service)
-
-    def test_sync_removed(self):
-        self.driver.agent_rpc.get_vpn_services_on_host.return_value = []
-        context = mock.Mock()
-        process_id = _uuid()
-        process = mock.Mock()
-        process.vpnservice = FAKE_VPN_SERVICE
-        self.driver.processes = {
-            process_id: process}
-        self.driver.sync(context, [])
-        process.disable.assert_called_once_with()
-        self.assertNotIn(process_id, self.driver.processes)
-
-    def test_sync_removed_router(self):
-        self.driver.agent_rpc.get_vpn_services_on_host.return_value = []
-        context = mock.Mock()
-        process_id = _uuid()
-        self.driver.sync(context, [{'id': process_id}])
-        self.assertNotIn(process_id, self.driver.processes)
-
-    def test_status_updated_on_connection_admin_down(self):
-        self.driver.process_status_cache = {
-            '1': {
-                'status': constants.ACTIVE,
-                'id': 123,
-                'updated_pending_status': False,
-                'ipsec_site_connections': {
-                    '10': {
-                        'status': constants.ACTIVE,
-                        'updated_pending_status': False,
-                    },
-                    '20': {
-                        'status': constants.ACTIVE,
-                        'updated_pending_status': False,
-                    }
-                }
-            }
-        }
-        # Simulate that there is no longer a status for connection '20'
-        # e.g. connection admin down
-        new_status = {
-            'ipsec_site_connections': {
-                '10': {
-                    'status': constants.ACTIVE,
-                    'updated_pending_status': False
-                }
-            }
-        }
-        self.driver.update_downed_connections('1', new_status)
-        existing_conn = new_status['ipsec_site_connections'].get('10')
-        self.assertIsNotNone(existing_conn)
-        self.assertEqual(constants.ACTIVE, existing_conn['status'])
-        missing_conn = new_status['ipsec_site_connections'].get('20')
-        self.assertIsNotNone(missing_conn)
-        self.assertEqual(constants.DOWN, missing_conn['status'])
-
-    def test_status_updated_on_service_admin_down(self):
-        self.driver.process_status_cache = {
-            '1': {
-                'status': constants.ACTIVE,
-                'id': 123,
-                'updated_pending_status': False,
-                'ipsec_site_connections': {
-                    '10': {
-                        'status': constants.ACTIVE,
-                        'updated_pending_status': False,
-                    },
-                    '20': {
-                        'status': constants.ACTIVE,
-                        'updated_pending_status': False,
-                    }
-                }
-            }
-        }
-        # Simulate that there are no connections now
-        new_status = {
-            'ipsec_site_connections': {}
-        }
-        self.driver.update_downed_connections('1', new_status)
-        missing_conn = new_status['ipsec_site_connections'].get('10')
-        self.assertIsNotNone(missing_conn)
-        self.assertEqual(constants.DOWN, missing_conn['status'])
-        missing_conn = new_status['ipsec_site_connections'].get('20')
-        self.assertIsNotNone(missing_conn)
-        self.assertEqual(constants.DOWN, missing_conn['status'])
diff --git a/neutron/tests/unit/services/vpn/service_drivers/__init__.py b/neutron/tests/unit/services/vpn/service_drivers/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/services/vpn/service_drivers/test_cisco_ipsec.py b/neutron/tests/unit/services/vpn/service_drivers/test_cisco_ipsec.py
deleted file mode 100644 (file)
index 436a458..0000000
+++ /dev/null
@@ -1,450 +0,0 @@
-# Copyright 2014 Cisco Systems, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-import mock
-from oslo.config import cfg
-
-from neutron import context as n_ctx
-from neutron.db import servicetype_db as st_db
-from neutron.openstack.common import uuidutils
-from neutron.plugins.common import constants
-from neutron.services.vpn import plugin as vpn_plugin
-from neutron.services.vpn.service_drivers import cisco_csr_db as csr_db
-from neutron.services.vpn.service_drivers import cisco_ipsec as ipsec_driver
-from neutron.services.vpn.service_drivers import cisco_validator as validator
-from neutron.tests import base
-from neutron.tests.unit import testlib_api
-
-_uuid = uuidutils.generate_uuid
-
-FAKE_VPN_CONN_ID = _uuid()
-FAKE_SERVICE_ID = _uuid()
-FAKE_VPN_CONNECTION = {
-    'vpnservice_id': FAKE_SERVICE_ID,
-    'id': FAKE_VPN_CONN_ID,
-    'ikepolicy_id': _uuid(),
-    'ipsecpolicy_id': _uuid(),
-    'tenant_id': _uuid()
-}
-
-FAKE_ROUTER_ID = _uuid()
-FAKE_VPN_SERVICE = {
-    'router_id': FAKE_ROUTER_ID
-}
-
-FAKE_HOST = 'fake_host'
-IPV4 = 4
-
-CISCO_IPSEC_SERVICE_DRIVER = ('neutron.services.vpn.service_drivers.'
-                              'cisco_ipsec.CiscoCsrIPsecVPNDriver')
-
-
-class TestCiscoValidatorSelection(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestCiscoValidatorSelection, self).setUp()
-        vpnaas_provider = (constants.VPN + ':vpnaas:' +
-                           CISCO_IPSEC_SERVICE_DRIVER + ':default')
-        cfg.CONF.set_override('service_provider',
-                              [vpnaas_provider],
-                              'service_providers')
-        stm = st_db.ServiceTypeManager()
-        mock.patch('neutron.db.servicetype_db.ServiceTypeManager.get_instance',
-                   return_value=stm).start()
-        mock.patch('neutron.common.rpc.create_connection').start()
-        self.vpn_plugin = vpn_plugin.VPNDriverPlugin()
-
-    def test_reference_driver_used(self):
-        self.assertIsInstance(self.vpn_plugin._get_validator(),
-                              validator.CiscoCsrVpnValidator)
-
-
-class TestCiscoIPsecDriverValidation(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestCiscoIPsecDriverValidation, self).setUp()
-        self.l3_plugin = mock.Mock()
-        mock.patch(
-            'neutron.manager.NeutronManager.get_service_plugins',
-            return_value={constants.L3_ROUTER_NAT: self.l3_plugin}).start()
-        self.context = n_ctx.Context('some_user', 'some_tenant')
-        self.vpn_service = {'router_id': '123'}
-        self.router = mock.Mock()
-        self.service_plugin = mock.Mock()
-        self.validator = validator.CiscoCsrVpnValidator(self.service_plugin)
-
-    def test_ike_version_unsupported(self):
-        """Failure test that Cisco CSR REST API does not support IKE v2."""
-        policy_info = {'ike_version': 'v2',
-                       'lifetime': {'units': 'seconds', 'value': 60}}
-        self.assertRaises(validator.CsrValidationFailure,
-                          self.validator.validate_ike_version,
-                          policy_info)
-
-    def test_ike_lifetime_not_in_seconds(self):
-        """Failure test of unsupported lifetime units for IKE policy."""
-        policy_info = {'lifetime': {'units': 'kilobytes', 'value': 1000}}
-        self.assertRaises(validator.CsrValidationFailure,
-                          self.validator.validate_lifetime,
-                          "IKE Policy", policy_info)
-
-    def test_ipsec_lifetime_not_in_seconds(self):
-        """Failure test of unsupported lifetime units for IPSec policy."""
-        policy_info = {'lifetime': {'units': 'kilobytes', 'value': 1000}}
-        self.assertRaises(validator.CsrValidationFailure,
-                          self.validator.validate_lifetime,
-                          "IPSec Policy", policy_info)
-
-    def test_ike_lifetime_seconds_values_at_limits(self):
-        """Test valid lifetime values for IKE policy."""
-        policy_info = {'lifetime': {'units': 'seconds', 'value': 60}}
-        self.validator.validate_lifetime('IKE Policy', policy_info)
-        policy_info = {'lifetime': {'units': 'seconds', 'value': 86400}}
-        self.validator.validate_lifetime('IKE Policy', policy_info)
-
-    def test_ipsec_lifetime_seconds_values_at_limits(self):
-        """Test valid lifetime values for IPSec policy."""
-        policy_info = {'lifetime': {'units': 'seconds', 'value': 120}}
-        self.validator.validate_lifetime('IPSec Policy', policy_info)
-        policy_info = {'lifetime': {'units': 'seconds', 'value': 2592000}}
-        self.validator.validate_lifetime('IPSec Policy', policy_info)
-
-    def test_ike_lifetime_values_invalid(self):
-        """Failure test of unsupported lifetime values for IKE policy."""
-        which = "IKE Policy"
-        policy_info = {'lifetime': {'units': 'seconds', 'value': 59}}
-        self.assertRaises(validator.CsrValidationFailure,
-                          self.validator.validate_lifetime,
-                          which, policy_info)
-        policy_info = {'lifetime': {'units': 'seconds', 'value': 86401}}
-        self.assertRaises(validator.CsrValidationFailure,
-                          self.validator.validate_lifetime,
-                          which, policy_info)
-
-    def test_ipsec_lifetime_values_invalid(self):
-        """Failure test of unsupported lifetime values for IPSec policy."""
-        which = "IPSec Policy"
-        policy_info = {'lifetime': {'units': 'seconds', 'value': 119}}
-        self.assertRaises(validator.CsrValidationFailure,
-                          self.validator.validate_lifetime,
-                          which, policy_info)
-        policy_info = {'lifetime': {'units': 'seconds', 'value': 2592001}}
-        self.assertRaises(validator.CsrValidationFailure,
-                          self.validator.validate_lifetime,
-                          which, policy_info)
-
-    def test_ipsec_connection_with_mtu_at_limits(self):
-        """Test IPSec site-to-site connection with MTU at limits."""
-        conn_info = {'mtu': 1500}
-        self.validator.validate_mtu(conn_info)
-        conn_info = {'mtu': 9192}
-        self.validator.validate_mtu(conn_info)
-
-    def test_ipsec_connection_with_invalid_mtu(self):
-        """Failure test of IPSec site connection with unsupported MTUs."""
-        conn_info = {'mtu': 1499}
-        self.assertRaises(validator.CsrValidationFailure,
-                          self.validator.validate_mtu, conn_info)
-        conn_info = {'mtu': 9193}
-        self.assertRaises(validator.CsrValidationFailure,
-                          self.validator.validate_mtu, conn_info)
-
-    def simulate_gw_ip_available(self):
-        """Helper function indicating that tunnel has a gateway IP."""
-        def have_one():
-            return 1
-        self.router.gw_port.fixed_ips.__len__ = have_one
-        ip_addr_mock = mock.Mock()
-        self.router.gw_port.fixed_ips = [ip_addr_mock]
-
-    def test_have_public_ip_for_router(self):
-        """Ensure that router for IPSec connection has gateway IP."""
-        self.simulate_gw_ip_available()
-        try:
-            self.validator.validate_public_ip_present(self.router)
-        except Exception:
-            self.fail("Unexpected exception on validation")
-
-    def test_router_with_missing_gateway_ip(self):
-        """Failure test of IPSec connection with missing gateway IP."""
-        self.simulate_gw_ip_available()
-        self.router.gw_port = None
-        self.assertRaises(validator.CsrValidationFailure,
-                          self.validator.validate_public_ip_present,
-                          self.router)
-
-    def test_peer_id_is_an_ip_address(self):
-        """Ensure peer ID is an IP address for IPsec connection create."""
-        ipsec_sitecon = {'peer_id': '10.10.10.10'}
-        self.validator.validate_peer_id(ipsec_sitecon)
-
-    def test_peer_id_is_not_ip_address(self):
-        """Failure test of peer_id that is not an IP address."""
-        ipsec_sitecon = {'peer_id': 'some-site.com'}
-        self.assertRaises(validator.CsrValidationFailure,
-                          self.validator.validate_peer_id, ipsec_sitecon)
-
-    def test_validation_for_create_ipsec_connection(self):
-        """Ensure all validation passes for IPSec site connection create."""
-        self.simulate_gw_ip_available()
-        self.service_plugin.get_ikepolicy = mock.Mock(
-            return_value={'ike_version': 'v1',
-                          'lifetime': {'units': 'seconds', 'value': 60}})
-        self.service_plugin.get_ipsecpolicy = mock.Mock(
-            return_value={'lifetime': {'units': 'seconds', 'value': 120}})
-        self.service_plugin.get_vpnservice = mock.Mock(
-            return_value=self.vpn_service)
-        self.l3_plugin._get_router = mock.Mock(return_value=self.router)
-        # Provide the minimum needed items to validate
-        ipsec_sitecon = {'id': '1',
-                         'vpnservice_id': FAKE_SERVICE_ID,
-                         'ikepolicy_id': '123',
-                         'ipsecpolicy_id': '2',
-                         'mtu': 1500,
-                         'peer_id': '10.10.10.10'}
-        # Using defaults for DPD info
-        expected = {'dpd_action': 'hold',
-                    'dpd_interval': 30,
-                    'dpd_timeout': 120}
-        expected.update(ipsec_sitecon)
-        self.validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon)
-        self.validator.validate_ipsec_site_connection(self.context,
-                                                      ipsec_sitecon, IPV4)
-        self.assertEqual(expected, ipsec_sitecon)
-
-
-class TestCiscoIPsecDriverMapping(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestCiscoIPsecDriverMapping, self).setUp()
-        self.context = mock.patch.object(n_ctx, 'Context').start()
-        self.session = self.context.session
-        self.query_mock = self.session.query.return_value.order_by
-
-    def test_identifying_first_mapping_id(self):
-        """Make sure first available ID is obtained for each ID type."""
-        # Simulate mapping table is empty - get first one
-        self.query_mock.return_value = []
-        next_id = csr_db.get_next_available_tunnel_id(self.session)
-        self.assertEqual(0, next_id)
-
-        next_id = csr_db.get_next_available_ike_policy_id(self.session)
-        self.assertEqual(1, next_id)
-
-        next_id = csr_db.get_next_available_ipsec_policy_id(self.session)
-        self.assertEqual(1, next_id)
-
-    def test_last_mapping_id_available(self):
-        """Make sure can get the last ID for each of the table types."""
-        # Simulate query indicates table is full
-        self.query_mock.return_value = [
-            (x, ) for x in xrange(csr_db.MAX_CSR_TUNNELS - 1)]
-        next_id = csr_db.get_next_available_tunnel_id(self.session)
-        self.assertEqual(csr_db.MAX_CSR_TUNNELS - 1, next_id)
-
-        self.query_mock.return_value = [
-            (x, ) for x in xrange(1, csr_db.MAX_CSR_IKE_POLICIES)]
-        next_id = csr_db.get_next_available_ike_policy_id(self.session)
-        self.assertEqual(csr_db.MAX_CSR_IKE_POLICIES, next_id)
-
-        self.query_mock.return_value = [
-            (x, ) for x in xrange(1, csr_db.MAX_CSR_IPSEC_POLICIES)]
-        next_id = csr_db.get_next_available_ipsec_policy_id(self.session)
-        self.assertEqual(csr_db.MAX_CSR_IPSEC_POLICIES, next_id)
-
-    def test_reusing_first_available_mapping_id(self):
-        """Ensure that we reuse the first available ID.
-
-        Make sure that the next lowest ID is obtained from the mapping
-        table when there are "holes" from deletions. Database query sorts
-        the entries, so will return them in order. Using tunnel ID, as the
-        logic is the same for each ID type.
-        """
-        self.query_mock.return_value = [(0, ), (1, ), (2, ), (5, ), (6, )]
-        next_id = csr_db.get_next_available_tunnel_id(self.session)
-        self.assertEqual(3, next_id)
-
-    def test_no_more_mapping_ids_available(self):
-        """Failure test of trying to reserve ID, when none available."""
-        self.query_mock.return_value = [
-            (x, ) for x in xrange(csr_db.MAX_CSR_TUNNELS)]
-        self.assertRaises(IndexError, csr_db.get_next_available_tunnel_id,
-                          self.session)
-
-        self.query_mock.return_value = [
-            (x, ) for x in xrange(1, csr_db.MAX_CSR_IKE_POLICIES + 1)]
-        self.assertRaises(IndexError, csr_db.get_next_available_ike_policy_id,
-                          self.session)
-
-        self.query_mock.return_value = [
-            (x, ) for x in xrange(1, csr_db.MAX_CSR_IPSEC_POLICIES + 1)]
-        self.assertRaises(IndexError,
-                          csr_db.get_next_available_ipsec_policy_id,
-                          self.session)
-
-    def test_create_tunnel_mappings(self):
-        """Ensure successfully create new tunnel mappings."""
-        # Simulate that first IDs are obtained
-        self.query_mock.return_value = []
-        map_db_mock = mock.patch.object(csr_db, 'IdentifierMap').start()
-        conn_info = {'ikepolicy_id': '10',
-                     'ipsecpolicy_id': '50',
-                     'id': '100',
-                     'tenant_id': '1000'}
-        csr_db.create_tunnel_mapping(self.context, conn_info)
-        map_db_mock.assert_called_once_with(csr_tunnel_id=0,
-                                            csr_ike_policy_id=1,
-                                            csr_ipsec_policy_id=1,
-                                            ipsec_site_conn_id='100',
-                                            tenant_id='1000')
-        # Create another, with next ID of 2 for all IDs (not mocking each
-        # ID separately, so will not have different IDs).
-        self.query_mock.return_value = [(0, ), (1, )]
-        map_db_mock.reset_mock()
-        conn_info = {'ikepolicy_id': '20',
-                     'ipsecpolicy_id': '60',
-                     'id': '101',
-                     'tenant_id': '1000'}
-        csr_db.create_tunnel_mapping(self.context, conn_info)
-        map_db_mock.assert_called_once_with(csr_tunnel_id=2,
-                                            csr_ike_policy_id=2,
-                                            csr_ipsec_policy_id=2,
-                                            ipsec_site_conn_id='101',
-                                            tenant_id='1000')
-
-
-class TestCiscoIPsecDriver(testlib_api.SqlTestCase):
-
-    """Test that various incoming requests are sent to device driver."""
-
-    def setUp(self):
-        super(TestCiscoIPsecDriver, self).setUp()
-        mock.patch('neutron.common.rpc.create_connection').start()
-
-        service_plugin = mock.Mock()
-        service_plugin._get_vpnservice.return_value = {
-            'router_id': _uuid()
-        }
-
-        l3_plugin = mock.Mock()
-        mock.patch(
-            'neutron.manager.NeutronManager.get_service_plugins',
-            return_value={constants.L3_ROUTER_NAT: l3_plugin}).start()
-
-        l3_plugin.get_host_for_router.return_value = FAKE_HOST
-        l3_agent = mock.Mock()
-        l3_agent.host = 'some-host'
-        l3_plugin.get_l3_agents_hosting_routers.return_value = [l3_agent]
-
-        self.driver = ipsec_driver.CiscoCsrIPsecVPNDriver(service_plugin)
-        mock.patch.object(csr_db, 'create_tunnel_mapping').start()
-        self.context = n_ctx.Context('some_user', 'some_tenant')
-
-    def _test_update(self, func, args, additional_info=None):
-        with contextlib.nested(
-            mock.patch.object(self.driver.agent_rpc.client, 'cast'),
-            mock.patch.object(self.driver.agent_rpc.client, 'prepare'),
-        ) as (
-            rpc_mock, prepare_mock
-        ):
-            prepare_mock.return_value = self.driver.agent_rpc.client
-            func(self.context, *args)
-
-        prepare_args = {'server': 'fake_host', 'version': '1.0'}
-        prepare_mock.assert_called_once_with(**prepare_args)
-
-        rpc_mock.assert_called_once_with(self.context, 'vpnservice_updated',
-                                         reason=mock.ANY)
-
-    def test_create_ipsec_site_connection(self):
-        self._test_update(self.driver.create_ipsec_site_connection,
-                          [FAKE_VPN_CONNECTION],
-                          {'reason': 'ipsec-conn-create'})
-
-    def test_update_ipsec_site_connection(self):
-        self._test_update(self.driver.update_ipsec_site_connection,
-                          [FAKE_VPN_CONNECTION, FAKE_VPN_CONNECTION],
-                          {'reason': 'ipsec-conn-update'})
-
-    def test_delete_ipsec_site_connection(self):
-        self._test_update(self.driver.delete_ipsec_site_connection,
-                          [FAKE_VPN_CONNECTION],
-                          {'reason': 'ipsec-conn-delete'})
-
-    def test_update_vpnservice(self):
-        self._test_update(self.driver.update_vpnservice,
-                          [FAKE_VPN_SERVICE, FAKE_VPN_SERVICE],
-                          {'reason': 'vpn-service-update'})
-
-    def test_delete_vpnservice(self):
-        self._test_update(self.driver.delete_vpnservice,
-                          [FAKE_VPN_SERVICE],
-                          {'reason': 'vpn-service-delete'})
-
-
-class TestCiscoIPsecDriverRequests(base.BaseTestCase):
-
-    """Test handling device driver requests for service info."""
-
-    def setUp(self):
-        super(TestCiscoIPsecDriverRequests, self).setUp()
-        mock.patch('neutron.common.rpc.create_connection').start()
-
-        service_plugin = mock.Mock()
-        self.driver = ipsec_driver.CiscoCsrIPsecVPNDriver(service_plugin)
-
-    def test_build_router_tunnel_interface_name(self):
-        """Check formation of inner/outer interface name for CSR router."""
-        router_info = {
-            '_interfaces': [
-                {'hosting_info': {'segmentation_id': 100,
-                                  'hosting_port_name': 't1_p:1'}}
-            ],
-            'gw_port':
-                {'hosting_info': {'segmentation_id': 200,
-                                  'hosting_port_name': 't2_p:1'}}
-        }
-        self.assertEqual(
-            'GigabitEthernet2.100',
-            self.driver._create_interface(router_info['_interfaces'][0]))
-        self.assertEqual(
-            'GigabitEthernet3.200',
-            self.driver._create_interface(router_info['gw_port']))
-
-    def test_build_router_info(self):
-        """Check creation of CSR info to send to device driver."""
-        router_info = {
-            'hosting_device': {
-                'management_ip_address': '1.1.1.1',
-                'credentials': {'username': 'me', 'password': 'secret'}
-            },
-            'gw_port':
-                {'hosting_info': {'segmentation_id': 101,
-                                 'hosting_port_name': 't2_p:1'}},
-            'id': u'c607b58e-f150-4289-b83f-45623578d122',
-            '_interfaces': [
-                {'hosting_info': {'segmentation_id': 100,
-                                  'hosting_port_name': 't1_p:1'}}
-            ]
-        }
-        expected = {'rest_mgmt_ip': '1.1.1.1',
-                    'username': 'me',
-                    'password': 'secret',
-                    'inner_if_name': 'GigabitEthernet2.100',
-                    'outer_if_name': 'GigabitEthernet3.101',
-                    'vrf': 'nrouter-c607b5',
-                    'timeout': 30}
-        self.assertEqual(expected, self.driver._get_router_info(router_info))
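The TestCiscoIPsecDriverMapping cases above pin down a "lowest free ID" allocator: the DB query returns the used IDs in sorted order, the next ID is the first gap (or the value after the highest used ID), and an IndexError signals a full table. A minimal standalone sketch of that allocation logic follows; the names find_first_free_id, min_id and max_id are illustrative and not taken from the removed csr_db module.

def find_first_free_id(used_ids, min_id, max_id):
    """Return the lowest unused ID in [min_id, max_id].

    used_ids must be sorted ascending, mirroring the ORDER BY in the
    removed csr_db queries.  Raises IndexError when the range is full,
    which is the behaviour the failure tests above assert.
    """
    expected = min_id
    for used in used_ids:
        if used != expected:
            return expected          # reuse a hole left by a deletion
        expected += 1
    if expected > max_id:
        raise IndexError("no IDs left in range %d..%d" % (min_id, max_id))
    return expected                  # table not full: next ID after the highest

# Mirrors test_reusing_first_available_mapping_id: IDs 3 and 4 were freed.
assert find_first_free_id([0, 1, 2, 5, 6], 0, 99) == 3
# Mirrors test_identifying_first_mapping_id: an empty table starts at min_id.
assert find_first_free_id([], 1, 31) == 1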
diff --git a/neutron/tests/unit/services/vpn/service_drivers/test_ipsec.py b/neutron/tests/unit/services/vpn/service_drivers/test_ipsec.py
deleted file mode 100644 (file)
index 9f100fd..0000000
--- a/neutron/tests/unit/services/vpn/service_drivers/test_ipsec.py
+++ /dev/null
@@ -1,277 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT I3, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-import mock
-from oslo.config import cfg
-
-from neutron import context as n_ctx
-from neutron.db import l3_db
-from neutron.db import servicetype_db as st_db
-from neutron.db.vpn import vpn_validator
-from neutron.extensions import vpnaas
-from neutron.openstack.common import uuidutils
-from neutron.plugins.common import constants
-from neutron.services.vpn import plugin as vpn_plugin
-from neutron.services.vpn.service_drivers import ipsec as ipsec_driver
-from neutron.tests import base
-
-_uuid = uuidutils.generate_uuid
-
-FAKE_SERVICE_ID = _uuid()
-FAKE_VPN_CONNECTION = {
-    'vpnservice_id': FAKE_SERVICE_ID
-}
-FAKE_ROUTER_ID = _uuid()
-FAKE_VPN_SERVICE = {
-    'router_id': FAKE_ROUTER_ID
-}
-FAKE_HOST = 'fake_host'
-FAKE_ROUTER = {l3_db.EXTERNAL_GW_INFO: FAKE_ROUTER_ID}
-FAKE_SUBNET_ID = _uuid()
-IPV4 = 4
-IPV6 = 6
-
-IPSEC_SERVICE_DRIVER = ('neutron.services.vpn.service_drivers.'
-                        'ipsec.IPsecVPNDriver')
-
-
-class TestValidatorSelection(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestValidatorSelection, self).setUp()
-        vpnaas_provider = (constants.VPN + ':vpnaas:' +
-                           IPSEC_SERVICE_DRIVER + ':default')
-        cfg.CONF.set_override('service_provider',
-                              [vpnaas_provider],
-                              'service_providers')
-        mock.patch('neutron.common.rpc.create_connection').start()
-        stm = st_db.ServiceTypeManager()
-        mock.patch('neutron.db.servicetype_db.ServiceTypeManager.get_instance',
-                   return_value=stm).start()
-        self.vpn_plugin = vpn_plugin.VPNDriverPlugin()
-
-    def test_reference_driver_used(self):
-        self.assertIsInstance(self.vpn_plugin._get_validator(),
-                              vpn_validator.VpnReferenceValidator)
-
-
-class TestIPsecDriverValidation(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestIPsecDriverValidation, self).setUp()
-        self.l3_plugin = mock.Mock()
-        mock.patch(
-            'neutron.manager.NeutronManager.get_service_plugins',
-            return_value={constants.L3_ROUTER_NAT: self.l3_plugin}).start()
-        self.core_plugin = mock.Mock()
-        mock.patch('neutron.manager.NeutronManager.get_plugin',
-                   return_value=self.core_plugin).start()
-        self.context = n_ctx.Context('some_user', 'some_tenant')
-        self.validator = vpn_validator.VpnReferenceValidator()
-
-    def test_non_public_router_for_vpn_service(self):
-        """Failure test of service validate, when router missing ext. I/F."""
-        self.l3_plugin.get_router.return_value = {}  # No external gateway
-        vpnservice = {'router_id': 123, 'subnet_id': 456}
-        self.assertRaises(vpnaas.RouterIsNotExternal,
-                          self.validator.validate_vpnservice,
-                          self.context, vpnservice)
-
-    def test_subnet_not_connected_for_vpn_service(self):
-        """Failure test of service validate, when subnet not on router."""
-        self.l3_plugin.get_router.return_value = FAKE_ROUTER
-        self.core_plugin.get_ports.return_value = None
-        vpnservice = {'router_id': FAKE_ROUTER_ID, 'subnet_id': FAKE_SUBNET_ID}
-        self.assertRaises(vpnaas.SubnetIsNotConnectedToRouter,
-                          self.validator.validate_vpnservice,
-                          self.context, vpnservice)
-
-    def test_defaults_for_ipsec_site_connections_on_create(self):
-        """Check that defaults are applied correctly.
-
-        MTU has a default and will always be present on create.
-        However, the DPD settings do not have a default, so
-        database create method will assign default values for any
-        missing. In addition, the DPD dict will be flattened
-        for storage into the database, so we'll do it as part of
-        assigning defaults.
-        """
-        ipsec_sitecon = {}
-        self.validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon)
-        expected = {
-            'dpd_action': 'hold',
-            'dpd_timeout': 120,
-            'dpd_interval': 30
-        }
-        self.assertEqual(expected, ipsec_sitecon)
-
-        ipsec_sitecon = {'dpd': {'interval': 50}}
-        self.validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon)
-        expected = {
-            'dpd': {'interval': 50},
-            'dpd_action': 'hold',
-            'dpd_timeout': 120,
-            'dpd_interval': 50
-        }
-        self.assertEqual(expected, ipsec_sitecon)
-
-    def test_defaults_for_ipsec_site_connections_on_update(self):
-        """Check that defaults are used for any values not specified."""
-        ipsec_sitecon = {}
-        prev_connection = {'dpd_action': 'clear',
-                           'dpd_timeout': 500,
-                           'dpd_interval': 250}
-        self.validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon,
-                                                              prev_connection)
-        expected = {
-            'dpd_action': 'clear',
-            'dpd_timeout': 500,
-            'dpd_interval': 250
-        }
-        self.assertEqual(expected, ipsec_sitecon)
-
-        ipsec_sitecon = {'dpd': {'timeout': 200}}
-        prev_connection = {'dpd_action': 'clear',
-                           'dpd_timeout': 500,
-                           'dpd_interval': 100}
-        self.validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon,
-                                                              prev_connection)
-        expected = {
-            'dpd': {'timeout': 200},
-            'dpd_action': 'clear',
-            'dpd_timeout': 200,
-            'dpd_interval': 100
-        }
-        self.assertEqual(expected, ipsec_sitecon)
-
-    def test_bad_dpd_settings_on_create(self):
-        """Failure tests of DPD settings for IPSec conn during create."""
-        ipsec_sitecon = {'mtu': 1500, 'dpd_action': 'hold',
-                         'dpd_interval': 100, 'dpd_timeout': 100}
-        self.assertRaises(vpnaas.IPsecSiteConnectionDpdIntervalValueError,
-                          self.validator.validate_ipsec_site_connection,
-                          self.context, ipsec_sitecon, IPV4)
-        ipsec_sitecon = {'mtu': 1500, 'dpd_action': 'hold',
-                         'dpd_interval': 100, 'dpd_timeout': 99}
-        self.assertRaises(vpnaas.IPsecSiteConnectionDpdIntervalValueError,
-                          self.validator.validate_ipsec_site_connection,
-                          self.context, ipsec_sitecon, IPV4)
-
-    def test_bad_dpd_settings_on_update(self):
-        """Failure tests of DPD settings for IPSec conn. during update.
-
-        Note: On an update, the user may specify only some of the DPD settings.
-        Previous values will be assigned for any missing items, so by the
-        time the validation occurs, all items will be available for checking.
-        The MTU may not be provided, during validation and will be ignored,
-        if that is the case.
-        """
-        prev_connection = {'mtu': 2000,
-                           'dpd_action': 'hold',
-                           'dpd_interval': 100,
-                           'dpd_timeout': 120}
-        ipsec_sitecon = {'dpd': {'interval': 120}}
-        self.validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon,
-                                                              prev_connection)
-        self.assertRaises(vpnaas.IPsecSiteConnectionDpdIntervalValueError,
-                          self.validator.validate_ipsec_site_connection,
-                          self.context, ipsec_sitecon, IPV4)
-
-        prev_connection = {'mtu': 2000,
-                           'dpd_action': 'hold',
-                           'dpd_interval': 100,
-                           'dpd_timeout': 120}
-        ipsec_sitecon = {'dpd': {'timeout': 99}}
-        self.validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon,
-                                                              prev_connection)
-        self.assertRaises(vpnaas.IPsecSiteConnectionDpdIntervalValueError,
-                          self.validator.validate_ipsec_site_connection,
-                          self.context, ipsec_sitecon, IPV4)
-
-    def test_bad_mtu_for_ipsec_connection(self):
-        """Failure test of invalid MTU values for IPSec conn create/update."""
-        ip_version_limits = vpn_validator.VpnReferenceValidator.IP_MIN_MTU
-        for version, limit in ip_version_limits.items():
-            ipsec_sitecon = {'mtu': limit - 1,
-                             'dpd_action': 'hold',
-                             'dpd_interval': 100,
-                             'dpd_timeout': 120}
-            self.assertRaises(
-                vpnaas.IPsecSiteConnectionMtuError,
-                self.validator.validate_ipsec_site_connection,
-                self.context, ipsec_sitecon, version)
-
-
-class TestIPsecDriver(base.BaseTestCase):
-    def setUp(self):
-        super(TestIPsecDriver, self).setUp()
-        mock.patch('neutron.common.rpc.create_connection').start()
-
-        l3_agent = mock.Mock()
-        l3_agent.host = FAKE_HOST
-        plugin = mock.Mock()
-        plugin.get_l3_agents_hosting_routers.return_value = [l3_agent]
-        plugin_p = mock.patch('neutron.manager.NeutronManager.get_plugin')
-        get_plugin = plugin_p.start()
-        get_plugin.return_value = plugin
-        service_plugin_p = mock.patch(
-            'neutron.manager.NeutronManager.get_service_plugins')
-        get_service_plugin = service_plugin_p.start()
-        get_service_plugin.return_value = {constants.L3_ROUTER_NAT: plugin}
-
-        service_plugin = mock.Mock()
-        service_plugin.get_l3_agents_hosting_routers.return_value = [l3_agent]
-        service_plugin._get_vpnservice.return_value = {
-            'router_id': _uuid()
-        }
-        self.driver = ipsec_driver.IPsecVPNDriver(service_plugin)
-
-    def _test_update(self, func, args):
-        ctxt = n_ctx.Context('', 'somebody')
-        with contextlib.nested(
-            mock.patch.object(self.driver.agent_rpc.client, 'cast'),
-            mock.patch.object(self.driver.agent_rpc.client, 'prepare'),
-        ) as (
-            rpc_mock, prepare_mock
-        ):
-            prepare_mock.return_value = self.driver.agent_rpc.client
-            func(ctxt, *args)
-
-        prepare_args = {'server': 'fake_host', 'version': '1.0'}
-        prepare_mock.assert_called_once_with(**prepare_args)
-
-        rpc_mock.assert_called_once_with(ctxt, 'vpnservice_updated')
-
-    def test_create_ipsec_site_connection(self):
-        self._test_update(self.driver.create_ipsec_site_connection,
-                          [FAKE_VPN_CONNECTION])
-
-    def test_update_ipsec_site_connection(self):
-        self._test_update(self.driver.update_ipsec_site_connection,
-                          [FAKE_VPN_CONNECTION, FAKE_VPN_CONNECTION])
-
-    def test_delete_ipsec_site_connection(self):
-        self._test_update(self.driver.delete_ipsec_site_connection,
-                          [FAKE_VPN_CONNECTION])
-
-    def test_update_vpnservice(self):
-        self._test_update(self.driver.update_vpnservice,
-                          [FAKE_VPN_SERVICE, FAKE_VPN_SERVICE])
-
-    def test_delete_vpnservice(self):
-        self._test_update(self.driver.delete_vpnservice,
-                          [FAKE_VPN_SERVICE])
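The validation tests above fix the behaviour of assign_sensible_ipsec_sitecon_defaults: the nested 'dpd' dict is flattened into dpd_* keys, and anything missing falls back to the previous connection on update or to the hold/30/120 defaults on create. A small sketch of that merge, assuming only action, interval and timeout are involved; flatten_dpd_defaults is an illustrative name, not the removed validator's API.

DPD_DEFAULTS = {'dpd_action': 'hold', 'dpd_interval': 30, 'dpd_timeout': 120}


def flatten_dpd_defaults(ipsec_sitecon, prev_conn=None):
    """Flatten ipsec_sitecon['dpd'] into dpd_* keys, filling any gaps.

    Missing values come from prev_conn on update, otherwise from the
    defaults, matching the 'expected' dicts asserted in the tests above.
    """
    baseline = prev_conn if prev_conn else DPD_DEFAULTS
    dpd = ipsec_sitecon.get('dpd', {})
    for field in ('action', 'interval', 'timeout'):
        key = 'dpd_' + field
        ipsec_sitecon.setdefault(key, dpd.get(field, baseline[key]))
    return ipsec_sitecon


# Create with a partial dpd dict: interval comes from the request,
# action and timeout fall back to the defaults.
conn = flatten_dpd_defaults({'dpd': {'interval': 50}})
assert conn['dpd_interval'] == 50 and conn['dpd_timeout'] == 120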
diff --git a/neutron/tests/unit/services/vpn/test_vpn_agent.py b/neutron/tests/unit/services/vpn/test_vpn_agent.py
deleted file mode 100644 (file)
index d7175bb..0000000
--- a/neutron/tests/unit/services/vpn/test_vpn_agent.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT I3, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo.config import cfg
-
-from neutron.agent.common import config as agent_config
-from neutron.agent import l3_agent
-from neutron.agent import l3_ha_agent
-from neutron.agent.linux import interface
-from neutron.common import config as base_config
-from neutron.openstack.common import uuidutils
-from neutron.services.vpn import agent
-from neutron.services.vpn import device_drivers
-from neutron.tests import base
-
-_uuid = uuidutils.generate_uuid
-NOOP_DEVICE_CLASS = 'NoopDeviceDriver'
-NOOP_DEVICE = ('neutron.tests.unit.services.'
-               'vpn.test_vpn_agent.%s' % NOOP_DEVICE_CLASS)
-
-
-class NoopDeviceDriver(device_drivers.DeviceDriver):
-    def sync(self, context, processes):
-        pass
-
-    def create_router(self, process_id):
-        pass
-
-    def destroy_router(self, process_id):
-        pass
-
-
-class TestVPNAgent(base.BaseTestCase):
-    def setUp(self):
-        super(TestVPNAgent, self).setUp()
-        self.conf = cfg.CONF
-        self.conf.register_opts(base_config.core_opts)
-        self.conf.register_opts(l3_agent.L3NATAgent.OPTS)
-        self.conf.register_opts(l3_ha_agent.OPTS)
-        self.conf.register_opts(interface.OPTS)
-        agent_config.register_interface_driver_opts_helper(self.conf)
-        agent_config.register_use_namespaces_opts_helper(self.conf)
-        agent_config.register_agent_state_opts_helper(self.conf)
-        agent_config.register_root_helper(self.conf)
-
-        self.conf.set_override('interface_driver',
-                               'neutron.agent.linux.interface.NullDriver')
-        self.conf.set_override(
-            'vpn_device_driver',
-            [NOOP_DEVICE],
-            'vpnagent')
-
-        for clazz in [
-            'neutron.agent.linux.ip_lib.device_exists',
-            'neutron.agent.linux.ip_lib.IPWrapper',
-            'neutron.agent.linux.interface.NullDriver',
-            'neutron.agent.linux.utils.execute'
-        ]:
-            mock.patch(clazz).start()
-
-        l3pluginApi_cls = mock.patch(
-            'neutron.agent.l3_agent.L3PluginApi').start()
-        self.plugin_api = mock.MagicMock()
-        l3pluginApi_cls.return_value = self.plugin_api
-
-        looping_call_p = mock.patch(
-            'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
-        looping_call_p.start()
-
-        self.fake_host = 'fake_host'
-        self.agent = agent.VPNAgent(self.fake_host)
-
-    def test_setup_drivers(self):
-        self.assertEqual(1, len(self.agent.devices))
-        device = self.agent.devices[0]
-        self.assertEqual(
-            NOOP_DEVICE_CLASS,
-            device.__class__.__name__
-        )
-
-    def test_get_namespace(self):
-        router_id = _uuid()
-        ns = "ns-" + router_id
-        ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
-                                 {}, ns_name=ns)
-        self.agent.router_info = {router_id: ri}
-        namespace = self.agent.get_namespace(router_id)
-        self.assertTrue(namespace.endswith(router_id))
-        self.assertFalse(self.agent.get_namespace('fake_id'))
-
-    def test_add_nat_rule(self):
-        router_id = _uuid()
-        ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, {})
-        iptables = mock.Mock()
-        ri.iptables_manager.ipv4['nat'] = iptables
-        self.agent.router_info = {router_id: ri}
-        self.agent.add_nat_rule(router_id, 'fake_chain', 'fake_rule', True)
-        iptables.add_rule.assert_called_once_with(
-            'fake_chain', 'fake_rule', top=True)
-
-    def test_add_nat_rule_with_no_router(self):
-        self.agent.router_info = {}
-        #Should do nothing
-        self.agent.add_nat_rule(
-            'fake_router_id',
-            'fake_chain',
-            'fake_rule',
-            True)
-
-    def test_remove_rule(self):
-        router_id = _uuid()
-        ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, {})
-        iptables = mock.Mock()
-        ri.iptables_manager.ipv4['nat'] = iptables
-        self.agent.router_info = {router_id: ri}
-        self.agent.remove_nat_rule(router_id, 'fake_chain', 'fake_rule', True)
-        iptables.remove_rule.assert_called_once_with(
-            'fake_chain', 'fake_rule', top=True)
-
-    def test_remove_rule_with_no_router(self):
-        self.agent.router_info = {}
-        #Should do nothing
-        self.agent.remove_nat_rule(
-            'fake_router_id',
-            'fake_chain',
-            'fake_rule')
-
-    def test_iptables_apply(self):
-        router_id = _uuid()
-        ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, {})
-        iptables = mock.Mock()
-        ri.iptables_manager = iptables
-        self.agent.router_info = {router_id: ri}
-        self.agent.iptables_apply(router_id)
-        iptables.apply.assert_called_once_with()
-
-    def test_iptables_apply_with_no_router(self):
-        #Should do nothing
-        self.agent.router_info = {}
-        self.agent.iptables_apply('fake_router_id')
-
-    def test_router_added(self):
-        mock.patch(
-            'neutron.agent.linux.iptables_manager.IptablesManager').start()
-        router_id = _uuid()
-        router = {'id': router_id}
-        device = mock.Mock()
-        self.agent.devices = [device]
-        self.agent._router_added(router_id, router)
-        device.create_router.assert_called_once_with(router_id)
-
-    def test_router_removed(self):
-        self.plugin_api.get_external_network_id.return_value = None
-        mock.patch(
-            'neutron.agent.linux.iptables_manager.IptablesManager').start()
-        router_id = _uuid()
-        ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, {},
-                                 ns_name="qrouter-%s" % router_id)
-        ri.router = {
-            'id': router_id,
-            'admin_state_up': True,
-            'routes': [],
-            'external_gateway_info': {},
-            'distributed': False}
-        device = mock.Mock()
-        self.agent.router_info = {router_id: ri}
-        self.agent.devices = [device]
-        self.agent._router_removed(router_id)
-        device.destroy_router.assert_called_once_with(router_id)
-
-    def test_process_router_if_compatible(self):
-        self.plugin_api.get_external_network_id.return_value = None
-        router = {'id': _uuid(),
-                  'admin_state_up': True,
-                  'routes': [],
-                  'external_gateway_info': {}}
-
-        device = mock.Mock()
-        self.agent.devices = [device]
-        self.agent._process_router_if_compatible(router)
-        device.sync.assert_called_once_with(mock.ANY, [router])
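The agent tests above check one pattern repeatedly: router lifecycle events fan out to every loaded VPN device driver (create_router, destroy_router, sync). A stripped-down sketch of that dispatch, with a hypothetical MiniVPNAgent standing in for the removed agent.VPNAgent:

import mock


class MiniVPNAgent(object):
    """Toy stand-in showing the fan-out the removed tests assert on."""

    def __init__(self, device_drivers):
        self.devices = list(device_drivers)

    def router_added(self, router_id):
        for device in self.devices:
            device.create_router(router_id)

    def router_removed(self, router_id):
        for device in self.devices:
            device.destroy_router(router_id)

    def process_routers(self, context, routers):
        for device in self.devices:
            device.sync(context, routers)


# Same shape of check as test_router_added / test_process_router_if_compatible.
driver = mock.Mock()
agent = MiniVPNAgent([driver])
agent.router_added('router-1')
driver.create_router.assert_called_once_with('router-1')
agent.process_routers('ctx', [{'id': 'router-1'}])
driver.sync.assert_called_once_with('ctx', [{'id': 'router-1'}])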
diff --git a/neutron/tests/unit/services/vpn/test_vpnaas_driver_plugin.py b/neutron/tests/unit/services/vpn/test_vpnaas_driver_plugin.py
deleted file mode 100644 (file)
index dbdc999..0000000
--- a/neutron/tests/unit/services/vpn/test_vpnaas_driver_plugin.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright 2013, Nachi Ueno, NTT I3, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import contextlib
-
-import mock
-
-from neutron.common import constants
-from neutron import context
-from neutron.db.vpn import vpn_validator
-from neutron import manager
-from neutron.plugins.common import constants as p_constants
-from neutron.services.vpn.service_drivers import ipsec as ipsec_driver
-from neutron.tests.unit.db.vpn import test_db_vpnaas
-from neutron.tests.unit.openvswitch import test_agent_scheduler
-from neutron.tests.unit import test_agent_ext_plugin
-
-FAKE_HOST = test_agent_ext_plugin.L3_HOSTA
-VPN_DRIVER_CLASS = 'neutron.services.vpn.plugin.VPNDriverPlugin'
-
-
-class TestVPNDriverPlugin(test_db_vpnaas.TestVpnaas,
-                          test_agent_scheduler.AgentSchedulerTestMixIn,
-                          test_agent_ext_plugin.AgentDBTestMixIn):
-
-    def setUp(self):
-        self.adminContext = context.get_admin_context()
-        driver_cls_p = mock.patch(
-            'neutron.services.vpn.'
-            'service_drivers.ipsec.IPsecVPNDriver')
-        driver_cls = driver_cls_p.start()
-        self.driver = mock.Mock()
-        self.driver.service_type = ipsec_driver.IPSEC
-        self.driver.validator = vpn_validator.VpnReferenceValidator()
-        driver_cls.return_value = self.driver
-        super(TestVPNDriverPlugin, self).setUp(
-            vpnaas_plugin=VPN_DRIVER_CLASS)
-
-    def test_create_ipsec_site_connection(self, **extras):
-        super(TestVPNDriverPlugin, self).test_create_ipsec_site_connection()
-        self.driver.create_ipsec_site_connection.assert_called_once_with(
-            mock.ANY, mock.ANY)
-        self.driver.delete_ipsec_site_connection.assert_called_once_with(
-            mock.ANY, mock.ANY)
-
-    def test_delete_vpnservice(self, **extras):
-        super(TestVPNDriverPlugin, self).test_delete_vpnservice()
-        self.driver.delete_vpnservice.assert_called_once_with(
-            mock.ANY, mock.ANY)
-
-    def test_update_vpnservice(self, **extras):
-        super(TestVPNDriverPlugin, self).test_update_vpnservice()
-        self.driver.update_vpnservice.assert_called_once_with(
-            mock.ANY, mock.ANY, mock.ANY)
-
-    @contextlib.contextmanager
-    def vpnservice_set(self):
-        """Test case to create a ipsec_site_connection."""
-        vpnservice_name = "vpn1"
-        ipsec_site_connection_name = "ipsec_site_connection"
-        ikename = "ikepolicy1"
-        ipsecname = "ipsecpolicy1"
-        description = "my-vpn-connection"
-        keys = {'name': vpnservice_name,
-                'description': "my-vpn-connection",
-                'peer_address': '192.168.1.10',
-                'peer_id': '192.168.1.10',
-                'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
-                'initiator': 'bi-directional',
-                'mtu': 1500,
-                'dpd_action': 'hold',
-                'dpd_interval': 40,
-                'dpd_timeout': 120,
-                'tenant_id': self._tenant_id,
-                'psk': 'abcd',
-                'status': 'PENDING_CREATE',
-                'admin_state_up': True}
-        with self.ikepolicy(name=ikename) as ikepolicy:
-            with self.ipsecpolicy(name=ipsecname) as ipsecpolicy:
-                with self.subnet() as subnet:
-                    with self.router() as router:
-                        plugin = manager.NeutronManager.get_plugin()
-                        agent = {'host': FAKE_HOST,
-                                 'agent_type': constants.AGENT_TYPE_L3,
-                                 'binary': 'fake-binary',
-                                 'topic': 'fake-topic'}
-                        plugin.create_or_update_agent(self.adminContext, agent)
-                        plugin.schedule_router(
-                            self.adminContext, router['router']['id'])
-                        with self.vpnservice(name=vpnservice_name,
-                                             subnet=subnet,
-                                             router=router) as vpnservice1:
-                            keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
-                            keys['ipsecpolicy_id'] = (
-                                ipsecpolicy['ipsecpolicy']['id']
-                            )
-                            keys['vpnservice_id'] = (
-                                vpnservice1['vpnservice']['id']
-                            )
-                            with self.ipsec_site_connection(
-                                self.fmt,
-                                ipsec_site_connection_name,
-                                keys['peer_address'],
-                                keys['peer_id'],
-                                keys['peer_cidrs'],
-                                keys['mtu'],
-                                keys['psk'],
-                                keys['initiator'],
-                                keys['dpd_action'],
-                                keys['dpd_interval'],
-                                keys['dpd_timeout'],
-                                vpnservice1,
-                                ikepolicy,
-                                ipsecpolicy,
-                                keys['admin_state_up'],
-                                description=description,
-                            ):
-                                yield vpnservice1['vpnservice']
-
-    def test_get_agent_hosting_vpn_services(self):
-        with self.vpnservice_set():
-            service_plugin = manager.NeutronManager.get_service_plugins()[
-                p_constants.VPN]
-            vpnservices = service_plugin._get_agent_hosting_vpn_services(
-                self.adminContext, FAKE_HOST)
-            vpnservices = vpnservices.all()
-            self.assertEqual(1, len(vpnservices))
-            vpnservice_db = vpnservices[0]
-            self.assertEqual(1, len(vpnservice_db.ipsec_site_connections))
-            ipsec_site_connection = vpnservice_db.ipsec_site_connections[0]
-            self.assertIsNotNone(
-                ipsec_site_connection['ikepolicy'])
-            self.assertIsNotNone(
-                ipsec_site_connection['ipsecpolicy'])
-
-    def test_update_status(self):
-        with self.vpnservice_set() as vpnservice:
-            self._register_agent_states()
-            service_plugin = manager.NeutronManager.get_service_plugins()[
-                p_constants.VPN]
-            service_plugin.update_status_by_agent(
-                self.adminContext,
-                [{'status': 'ACTIVE',
-                  'ipsec_site_connections': {},
-                  'updated_pending_status': True,
-                  'id': vpnservice['id']}])
-            vpnservices = service_plugin._get_agent_hosting_vpn_services(
-                self.adminContext, FAKE_HOST)
-            vpnservice_db = vpnservices[0]
-            self.assertEqual(p_constants.ACTIVE, vpnservice_db['status'])
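TestVPNDriverPlugin above verifies the delegation contract of the driver-backed plugin: each API call does the database work and then notifies the configured service driver exactly once. A compact, self-contained sketch of that contract; FakeVpnDb and DriverBackedVpnPlugin are hypothetical stand-ins, not the removed plugin's real base classes.

import mock


class FakeVpnDb(object):
    """Stand-in for the DB layer: pretends to read and persist records."""

    def _db_get_vpnservice(self, context, service_id):
        return {'id': service_id, 'admin_state_up': True}

    def _db_update_vpnservice(self, context, service_id, vpnservice):
        return dict(vpnservice, id=service_id)


class DriverBackedVpnPlugin(FakeVpnDb):
    """Does the database work first, then notifies the service driver once."""

    def __init__(self, driver):
        self.driver = driver

    def update_vpnservice(self, context, service_id, vpnservice):
        old = self._db_get_vpnservice(context, service_id)
        new = self._db_update_vpnservice(context, service_id, vpnservice)
        self.driver.update_vpnservice(context, old, new)
        return new


driver = mock.Mock()
plugin = DriverBackedVpnPlugin(driver)
plugin.update_vpnservice(None, 'svc-1', {'admin_state_up': False})
# Same shape of assertion as test_update_vpnservice above.
driver.update_vpnservice.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)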
diff --git a/neutron/tests/unit/services/vpn/test_vpnaas_extension.py b/neutron/tests/unit/services/vpn/test_vpnaas_extension.py
deleted file mode 100644 (file)
index f4f2591..0000000
--- a/neutron/tests/unit/services/vpn/test_vpnaas_extension.py
+++ /dev/null
@@ -1,522 +0,0 @@
-#    (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
-#    All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-import mock
-from webob import exc
-
-from neutron.extensions import vpnaas
-from neutron.openstack.common import uuidutils
-from neutron.plugins.common import constants
-from neutron.tests.unit import test_api_v2
-from neutron.tests.unit import test_api_v2_extension
-
-
-_uuid = uuidutils.generate_uuid
-_get_path = test_api_v2._get_path
-
-
-class VpnaasExtensionTestCase(test_api_v2_extension.ExtensionTestCase):
-    fmt = 'json'
-
-    def setUp(self):
-        super(VpnaasExtensionTestCase, self).setUp()
-        plural_mappings = {'ipsecpolicy': 'ipsecpolicies',
-                           'ikepolicy': 'ikepolicies',
-                           'ipsec_site_connection': 'ipsec-site-connections'}
-        self._setUpExtension(
-            'neutron.extensions.vpnaas.VPNPluginBase', constants.VPN,
-            vpnaas.RESOURCE_ATTRIBUTE_MAP, vpnaas.Vpnaas,
-            'vpn', plural_mappings=plural_mappings,
-            use_quota=True)
-
-    def test_ikepolicy_create(self):
-        """Test case to create an ikepolicy."""
-        ikepolicy_id = _uuid()
-        data = {'ikepolicy': {'name': 'ikepolicy1',
-                              'description': 'myikepolicy1',
-                              'auth_algorithm': 'sha1',
-                              'encryption_algorithm': 'aes-128',
-                              'phase1_negotiation_mode': 'main',
-                              'lifetime': {
-                                  'units': 'seconds',
-                                  'value': 3600},
-                              'ike_version': 'v1',
-                              'pfs': 'group5',
-                              'tenant_id': _uuid()}}
-
-        return_value = copy.copy(data['ikepolicy'])
-        return_value.update({'id': ikepolicy_id})
-
-        instance = self.plugin.return_value
-        instance.create_ikepolicy.return_value = return_value
-        res = self.api.post(_get_path('vpn/ikepolicies', fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/%s' % self.fmt)
-        instance.create_ikepolicy.assert_called_with(mock.ANY,
-                                                     ikepolicy=data)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('ikepolicy', res)
-        self.assertEqual(res['ikepolicy'], return_value)
-
-    def test_ikepolicy_list(self):
-        """Test case to list all ikepolicies."""
-        ikepolicy_id = _uuid()
-        return_value = [{'name': 'ikepolicy1',
-                         'auth_algorithm': 'sha1',
-                         'encryption_algorithm': 'aes-128',
-                         'pfs': 'group5',
-                         'ike_version': 'v1',
-                         'id': ikepolicy_id}]
-
-        instance = self.plugin.return_value
-        instance.get_ikepolicies.return_value = return_value
-
-        res = self.api.get(_get_path('vpn/ikepolicies', fmt=self.fmt))
-
-        instance.get_ikepolicies.assert_called_with(mock.ANY,
-                                                    fields=mock.ANY,
-                                                    filters=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_ikepolicy_update(self):
-        """Test case to update an ikepolicy."""
-        ikepolicy_id = _uuid()
-        update_data = {'ikepolicy': {'name': 'ikepolicy1',
-                                     'encryption_algorithm': 'aes-256'}}
-        return_value = {'name': 'ikepolicy1',
-                        'auth_algorithm': 'sha1',
-                        'encryption_algorithm': 'aes-256',
-                        'phase1_negotiation_mode': 'main',
-                        'lifetime': {
-                            'units': 'seconds',
-                            'value': 3600},
-                        'ike_version': 'v1',
-                        'pfs': 'group5',
-                        'tenant_id': _uuid(),
-                        'id': ikepolicy_id}
-
-        instance = self.plugin.return_value
-        instance.update_ikepolicy.return_value = return_value
-
-        res = self.api.put(_get_path('vpn/ikepolicies', id=ikepolicy_id,
-                                     fmt=self.fmt),
-                           self.serialize(update_data))
-
-        instance.update_ikepolicy.assert_called_with(mock.ANY, ikepolicy_id,
-                                                     ikepolicy=update_data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('ikepolicy', res)
-        self.assertEqual(res['ikepolicy'], return_value)
-
-    def test_ikepolicy_get(self):
-        """Test case to get or show an ikepolicy."""
-        ikepolicy_id = _uuid()
-        return_value = {'name': 'ikepolicy1',
-                        'auth_algorithm': 'sha1',
-                        'encryption_algorithm': 'aes-128',
-                        'phase1_negotiation_mode': 'main',
-                        'lifetime': {
-                            'units': 'seconds',
-                            'value': 3600},
-                        'ike_version': 'v1',
-                        'pfs': 'group5',
-                        'tenant_id': _uuid(),
-                        'id': ikepolicy_id}
-
-        instance = self.plugin.return_value
-        instance.get_ikepolicy.return_value = return_value
-
-        res = self.api.get(_get_path('vpn/ikepolicies', id=ikepolicy_id,
-                                     fmt=self.fmt))
-
-        instance.get_ikepolicy.assert_called_with(mock.ANY,
-                                                  ikepolicy_id,
-                                                  fields=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('ikepolicy', res)
-        self.assertEqual(res['ikepolicy'], return_value)
-
-    def test_ikepolicy_delete(self):
-        """Test case to delete an ikepolicy."""
-        self._test_entity_delete('ikepolicy')
-
-    def test_ipsecpolicy_create(self):
-        """Test case to create an ipsecpolicy."""
-        ipsecpolicy_id = _uuid()
-        data = {'ipsecpolicy': {'name': 'ipsecpolicy1',
-                                'description': 'myipsecpolicy1',
-                                'auth_algorithm': 'sha1',
-                                'encryption_algorithm': 'aes-128',
-                                'encapsulation_mode': 'tunnel',
-                                'lifetime': {
-                                    'units': 'seconds',
-                                    'value': 3600},
-                                'transform_protocol': 'esp',
-                                'pfs': 'group5',
-                                'tenant_id': _uuid()}}
-        return_value = copy.copy(data['ipsecpolicy'])
-        return_value.update({'id': ipsecpolicy_id})
-
-        instance = self.plugin.return_value
-        instance.create_ipsecpolicy.return_value = return_value
-        res = self.api.post(_get_path('vpn/ipsecpolicies', fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/%s' % self.fmt)
-        instance.create_ipsecpolicy.assert_called_with(mock.ANY,
-                                                       ipsecpolicy=data)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('ipsecpolicy', res)
-        self.assertEqual(res['ipsecpolicy'], return_value)
-
-    def test_ipsecpolicy_list(self):
-        """Test case to list an ipsecpolicy."""
-        ipsecpolicy_id = _uuid()
-        return_value = [{'name': 'ipsecpolicy1',
-                         'auth_algorithm': 'sha1',
-                         'encryption_algorithm': 'aes-128',
-                         'pfs': 'group5',
-                         'id': ipsecpolicy_id}]
-
-        instance = self.plugin.return_value
-        instance.get_ipsecpolicies.return_value = return_value
-
-        res = self.api.get(_get_path('vpn/ipsecpolicies', fmt=self.fmt))
-
-        instance.get_ipsecpolicies.assert_called_with(mock.ANY,
-                                                      fields=mock.ANY,
-                                                      filters=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_ipsecpolicy_update(self):
-        """Test case to update an ipsecpolicy."""
-        ipsecpolicy_id = _uuid()
-        update_data = {'ipsecpolicy': {'name': 'ipsecpolicy1',
-                                       'encryption_algorithm': 'aes-256'}}
-        return_value = {'name': 'ipsecpolicy1',
-                        'auth_algorithm': 'sha1',
-                        'encryption_algorithm': 'aes-128',
-                        'encapsulation_mode': 'tunnel',
-                        'lifetime': {
-                            'units': 'seconds',
-                            'value': 3600},
-                        'transform_protocol': 'esp',
-                        'pfs': 'group5',
-                        'tenant_id': _uuid(),
-                        'id': ipsecpolicy_id}
-
-        instance = self.plugin.return_value
-        instance.update_ipsecpolicy.return_value = return_value
-
-        res = self.api.put(_get_path('vpn/ipsecpolicies',
-                                     id=ipsecpolicy_id,
-                                     fmt=self.fmt),
-                           self.serialize(update_data))
-
-        instance.update_ipsecpolicy.assert_called_with(mock.ANY,
-                                                       ipsecpolicy_id,
-                                                       ipsecpolicy=update_data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('ipsecpolicy', res)
-        self.assertEqual(res['ipsecpolicy'], return_value)
-
-    def test_ipsecpolicy_get(self):
-        """Test case to get or show an ipsecpolicy."""
-        ipsecpolicy_id = _uuid()
-        return_value = {'name': 'ipsecpolicy1',
-                        'auth_algorithm': 'sha1',
-                        'encryption_algorithm': 'aes-128',
-                        'encapsulation_mode': 'tunnel',
-                        'lifetime': {
-                            'units': 'seconds',
-                            'value': 3600},
-                        'transform_protocol': 'esp',
-                        'pfs': 'group5',
-                        'tenant_id': _uuid(),
-                        'id': ipsecpolicy_id}
-
-        instance = self.plugin.return_value
-        instance.get_ipsecpolicy.return_value = return_value
-
-        res = self.api.get(_get_path('vpn/ipsecpolicies',
-                                     id=ipsecpolicy_id,
-                                     fmt=self.fmt))
-
-        instance.get_ipsecpolicy.assert_called_with(mock.ANY,
-                                                    ipsecpolicy_id,
-                                                    fields=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('ipsecpolicy', res)
-        self.assertEqual(res['ipsecpolicy'], return_value)
-
-    def test_ipsecpolicy_delete(self):
-        """Test case to delete an ipsecpolicy."""
-        self._test_entity_delete('ipsecpolicy')
-
-    def test_vpnservice_create(self):
-        """Test case to create a vpnservice."""
-        vpnservice_id = _uuid()
-        data = {'vpnservice': {'name': 'vpnservice1',
-                               'description': 'descr_vpn1',
-                               'subnet_id': _uuid(),
-                               'router_id': _uuid(),
-                               'admin_state_up': True,
-                               'tenant_id': _uuid()}}
-        return_value = copy.copy(data['vpnservice'])
-        return_value.update({'status': "ACTIVE", 'id': vpnservice_id})
-
-        instance = self.plugin.return_value
-        instance.create_vpnservice.return_value = return_value
-        res = self.api.post(_get_path('vpn/vpnservices', fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/%s' % self.fmt)
-        instance.create_vpnservice.assert_called_with(mock.ANY,
-                                                      vpnservice=data)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('vpnservice', res)
-        self.assertEqual(res['vpnservice'], return_value)
-
-    def test_vpnservice_list(self):
-        """Test case to list all vpnservices."""
-        vpnservice_id = _uuid()
-        return_value = [{'name': 'vpnservice1',
-                         'tenant_id': _uuid(),
-                         'status': 'ACTIVE',
-                         'id': vpnservice_id}]
-
-        instance = self.plugin.return_value
-        instance.get_vpnservice.return_value = return_value
-
-        res = self.api.get(_get_path('vpn/vpnservices', fmt=self.fmt))
-
-        instance.get_vpnservices.assert_called_with(mock.ANY,
-                                                    fields=mock.ANY,
-                                                    filters=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_vpnservice_update(self):
-        """Test case to update a vpnservice."""
-        vpnservice_id = _uuid()
-        update_data = {'vpnservice': {'admin_state_up': False}}
-        return_value = {'name': 'vpnservice1',
-                        'admin_state_up': False,
-                        'subnet_id': _uuid(),
-                        'router_id': _uuid(),
-                        'tenant_id': _uuid(),
-                        'status': "ACTIVE",
-                        'id': vpnservice_id}
-
-        instance = self.plugin.return_value
-        instance.update_vpnservice.return_value = return_value
-
-        res = self.api.put(_get_path('vpn/vpnservices',
-                                     id=vpnservice_id,
-                                     fmt=self.fmt),
-                           self.serialize(update_data))
-
-        instance.update_vpnservice.assert_called_with(mock.ANY,
-                                                      vpnservice_id,
-                                                      vpnservice=update_data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('vpnservice', res)
-        self.assertEqual(res['vpnservice'], return_value)
-
-    def test_vpnservice_get(self):
-        """Test case to get or show a vpnservice."""
-        vpnservice_id = _uuid()
-        return_value = {'name': 'vpnservice1',
-                        'admin_state_up': True,
-                        'subnet_id': _uuid(),
-                        'router_id': _uuid(),
-                        'tenant_id': _uuid(),
-                        'status': "ACTIVE",
-                        'id': vpnservice_id}
-
-        instance = self.plugin.return_value
-        instance.get_vpnservice.return_value = return_value
-
-        res = self.api.get(_get_path('vpn/vpnservices',
-                                     id=vpnservice_id,
-                                     fmt=self.fmt))
-
-        instance.get_vpnservice.assert_called_with(mock.ANY,
-                                                   vpnservice_id,
-                                                   fields=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('vpnservice', res)
-        self.assertEqual(res['vpnservice'], return_value)
-
-    def test_vpnservice_delete(self):
-        """Test case to delete a vpnservice."""
-        self._test_entity_delete('vpnservice')
-
-    def test_ipsec_site_connection_create(self):
-        """Test case to create a ipsec_site_connection."""
-        ipsecsite_con_id = _uuid()
-        ikepolicy_id = _uuid()
-        ipsecpolicy_id = _uuid()
-        data = {
-            'ipsec_site_connection': {'name': 'connection1',
-                                      'description': 'Remote-connection1',
-                                      'peer_address': '192.168.1.10',
-                                      'peer_id': '192.168.1.10',
-                                      'peer_cidrs': ['192.168.2.0/24',
-                                                     '192.168.3.0/24'],
-                                      'mtu': 1500,
-                                      'psk': 'abcd',
-                                      'initiator': 'bi-directional',
-                                      'dpd': {
-                                          'action': 'hold',
-                                          'interval': 30,
-                                          'timeout': 120},
-                                      'ikepolicy_id': ikepolicy_id,
-                                      'ipsecpolicy_id': ipsecpolicy_id,
-                                      'vpnservice_id': _uuid(),
-                                      'admin_state_up': True,
-                                      'tenant_id': _uuid()}
-        }
-        return_value = copy.copy(data['ipsec_site_connection'])
-        return_value.update({'status': "ACTIVE", 'id': ipsecsite_con_id})
-
-        instance = self.plugin.return_value
-        instance.create_ipsec_site_connection.return_value = return_value
-        res = self.api.post(_get_path('vpn/ipsec-site-connections',
-                                      fmt=self.fmt),
-                            self.serialize(data),
-                            content_type='application/%s' % self.fmt)
-        instance.create_ipsec_site_connection.assert_called_with(
-            mock.ANY, ipsec_site_connection=data
-        )
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        res = self.deserialize(res)
-        self.assertIn('ipsec_site_connection', res)
-        self.assertEqual(res['ipsec_site_connection'], return_value)
-
-    def test_ipsec_site_connection_list(self):
-        """Test case to list all ipsec_site_connections."""
-        ipsecsite_con_id = _uuid()
-        return_value = [{'name': 'connection1',
-                         'peer_address': '192.168.1.10',
-                         'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
-                         'route_mode': 'static',
-                         'auth_mode': 'psk',
-                         'tenant_id': _uuid(),
-                         'status': 'ACTIVE',
-                         'id': ipsecsite_con_id}]
-
-        instance = self.plugin.return_value
-        instance.get_ipsec_site_connections.return_value = return_value
-
-        res = self.api.get(
-            _get_path('vpn/ipsec-site-connections', fmt=self.fmt))
-
-        instance.get_ipsec_site_connections.assert_called_with(
-            mock.ANY, fields=mock.ANY, filters=mock.ANY
-        )
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_ipsec_site_connection_update(self):
-        """Test case to update a ipsec_site_connection."""
-        ipsecsite_con_id = _uuid()
-        update_data = {'ipsec_site_connection': {'admin_state_up': False}}
-        return_value = {'name': 'connection1',
-                        'description': 'Remote-connection1',
-                        'peer_address': '192.168.1.10',
-                        'peer_id': '192.168.1.10',
-                        'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
-                        'mtu': 1500,
-                        'psk': 'abcd',
-                        'initiator': 'bi-directional',
-                        'dpd': {
-                            'action': 'hold',
-                            'interval': 30,
-                            'timeout': 120},
-                        'ikepolicy_id': _uuid(),
-                        'ipsecpolicy_id': _uuid(),
-                        'vpnservice_id': _uuid(),
-                        'admin_state_up': False,
-                        'tenant_id': _uuid(),
-                        'status': 'ACTIVE',
-                        'id': ipsecsite_con_id}
-
-        instance = self.plugin.return_value
-        instance.update_ipsec_site_connection.return_value = return_value
-
-        res = self.api.put(_get_path('vpn/ipsec-site-connections',
-                                     id=ipsecsite_con_id,
-                                     fmt=self.fmt),
-                           self.serialize(update_data))
-
-        instance.update_ipsec_site_connection.assert_called_with(
-            mock.ANY, ipsecsite_con_id, ipsec_site_connection=update_data
-        )
-
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('ipsec_site_connection', res)
-        self.assertEqual(res['ipsec_site_connection'], return_value)
-
-    def test_ipsec_site_connection_get(self):
-        """Test case to get or show a ipsec_site_connection."""
-        ipsecsite_con_id = _uuid()
-        return_value = {'name': 'connection1',
-                        'description': 'Remote-connection1',
-                        'peer_address': '192.168.1.10',
-                        'peer_id': '192.168.1.10',
-                        'peer_cidrs': ['192.168.2.0/24',
-                                       '192.168.3.0/24'],
-                        'mtu': 1500,
-                        'psk': 'abcd',
-                        'initiator': 'bi-directional',
-                        'dpd': {
-                            'action': 'hold',
-                            'interval': 30,
-                            'timeout': 120},
-                        'ikepolicy_id': _uuid(),
-                        'ipsecpolicy_id': _uuid(),
-                        'vpnservice_id': _uuid(),
-                        'admin_state_up': True,
-                        'tenant_id': _uuid(),
-                        'status': 'ACTIVE',
-                        'id': ipsecsite_con_id}
-
-        instance = self.plugin.return_value
-        instance.get_ipsec_site_connection.return_value = return_value
-
-        res = self.api.get(_get_path('vpn/ipsec-site-connections',
-                                     id=ipsecsite_con_id,
-                                     fmt=self.fmt))
-
-        instance.get_ipsec_site_connection.assert_called_with(
-            mock.ANY, ipsecsite_con_id, fields=mock.ANY
-        )
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        res = self.deserialize(res)
-        self.assertIn('ipsec_site_connection', res)
-        self.assertEqual(res['ipsec_site_connection'], return_value)
-
-    def test_ipsec_site_connection_delete(self):
-        """Test case to delete a ipsec_site_connection."""
-        self._test_entity_delete('ipsec_site_connection')