]> review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
Decompose the VMware plugin
authorSalvatore Orlando <salv.orlando@gmail.com>
Mon, 2 Mar 2015 18:14:38 +0000 (10:14 -0800)
committerSalvatore Orlando <salv.orlando@gmail.com>
Thu, 5 Mar 2015 15:28:15 +0000 (07:28 -0800)
This patch removes all the business logic pertaining to the VMware
plugin. The following modules are left in openstack/neutron:
- plugin-specific API extension declarations
- database models (and a module with constants they use)
- integration module pointing to the external repository

Change-Id: I8a01a977889b36015a9cfa900173c05bfd516457
Partially-Implements: blueprint core-vendor-decomposition

118 files changed:
neutron/plugins/vmware/api_client/__init__.py [deleted file]
neutron/plugins/vmware/api_client/base.py [deleted file]
neutron/plugins/vmware/api_client/client.py [deleted file]
neutron/plugins/vmware/api_client/eventlet_client.py [deleted file]
neutron/plugins/vmware/api_client/eventlet_request.py [deleted file]
neutron/plugins/vmware/api_client/exception.py [deleted file]
neutron/plugins/vmware/api_client/request.py [deleted file]
neutron/plugins/vmware/api_client/version.py [deleted file]
neutron/plugins/vmware/check_nsx_config.py [deleted file]
neutron/plugins/vmware/common/config.py [deleted file]
neutron/plugins/vmware/common/exceptions.py [deleted file]
neutron/plugins/vmware/common/nsx_utils.py [deleted file]
neutron/plugins/vmware/common/securitygroups.py [deleted file]
neutron/plugins/vmware/common/sync.py [deleted file]
neutron/plugins/vmware/common/utils.py [deleted file]
neutron/plugins/vmware/dbexts/db.py [deleted file]
neutron/plugins/vmware/dbexts/lsn_db.py [deleted file]
neutron/plugins/vmware/dbexts/maclearning.py [deleted file]
neutron/plugins/vmware/dbexts/networkgw_db.py [deleted file]
neutron/plugins/vmware/dbexts/qos_db.py [deleted file]
neutron/plugins/vmware/dbexts/vcns_db.py [deleted file]
neutron/plugins/vmware/dhcp_meta/__init__.py [deleted file]
neutron/plugins/vmware/dhcp_meta/combined.py [deleted file]
neutron/plugins/vmware/dhcp_meta/constants.py [deleted file]
neutron/plugins/vmware/dhcp_meta/lsnmanager.py [deleted file]
neutron/plugins/vmware/dhcp_meta/migration.py [deleted file]
neutron/plugins/vmware/dhcp_meta/nsx.py [deleted file]
neutron/plugins/vmware/dhcp_meta/rpc.py [deleted file]
neutron/plugins/vmware/dhcpmeta_modes.py [deleted file]
neutron/plugins/vmware/extensions/networkgw.py
neutron/plugins/vmware/nsx_cluster.py [deleted file]
neutron/plugins/vmware/nsxlib/__init__.py [deleted file]
neutron/plugins/vmware/nsxlib/l2gateway.py [deleted file]
neutron/plugins/vmware/nsxlib/lsn.py [deleted file]
neutron/plugins/vmware/nsxlib/queue.py [deleted file]
neutron/plugins/vmware/nsxlib/router.py [deleted file]
neutron/plugins/vmware/nsxlib/secgroup.py [deleted file]
neutron/plugins/vmware/nsxlib/switch.py [deleted file]
neutron/plugins/vmware/nsxlib/versioning.py [deleted file]
neutron/plugins/vmware/plugin.py
neutron/plugins/vmware/plugins/__init__.py [deleted file]
neutron/plugins/vmware/plugins/base.py [deleted file]
neutron/plugins/vmware/shell/__init__.py [deleted file]
neutron/plugins/vmware/shell/commands.py [deleted file]
neutron/plugins/vmware/vshield/__init__.py [deleted file]
neutron/plugins/vmware/vshield/common/VcnsApiClient.py [deleted file]
neutron/plugins/vmware/vshield/common/__init__.py [deleted file]
neutron/plugins/vmware/vshield/common/constants.py [deleted file]
neutron/plugins/vmware/vshield/common/exceptions.py [deleted file]
neutron/plugins/vmware/vshield/edge_appliance_driver.py [deleted file]
neutron/plugins/vmware/vshield/tasks/__init__.py [deleted file]
neutron/plugins/vmware/vshield/tasks/constants.py [deleted file]
neutron/plugins/vmware/vshield/tasks/tasks.py [deleted file]
neutron/plugins/vmware/vshield/vcns.py [deleted file]
neutron/plugins/vmware/vshield/vcns_driver.py [deleted file]
neutron/tests/unit/vmware/__init__.py [deleted file]
neutron/tests/unit/vmware/apiclient/__init__.py [deleted file]
neutron/tests/unit/vmware/apiclient/fake.py [deleted file]
neutron/tests/unit/vmware/apiclient/test_api_common.py [deleted file]
neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py [deleted file]
neutron/tests/unit/vmware/db/__init__.py [deleted file]
neutron/tests/unit/vmware/db/test_lsn_db.py [deleted file]
neutron/tests/unit/vmware/db/test_nsx_db.py [deleted file]
neutron/tests/unit/vmware/etc/fake_get_gwservice.json [deleted file]
neutron/tests/unit/vmware/etc/fake_get_lqueue.json [deleted file]
neutron/tests/unit/vmware/etc/fake_get_lrouter.json [deleted file]
neutron/tests/unit/vmware/etc/fake_get_lrouter_lport.json [deleted file]
neutron/tests/unit/vmware/etc/fake_get_lrouter_lport_att.json [deleted file]
neutron/tests/unit/vmware/etc/fake_get_lrouter_nat.json [deleted file]
neutron/tests/unit/vmware/etc/fake_get_lswitch.json [deleted file]
neutron/tests/unit/vmware/etc/fake_get_lswitch_lport.json [deleted file]
neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_att.json [deleted file]
neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_status.json [deleted file]
neutron/tests/unit/vmware/etc/fake_get_security_profile.json [deleted file]
neutron/tests/unit/vmware/etc/fake_post_gwservice.json [deleted file]
neutron/tests/unit/vmware/etc/fake_post_lqueue.json [deleted file]
neutron/tests/unit/vmware/etc/fake_post_lrouter.json [deleted file]
neutron/tests/unit/vmware/etc/fake_post_lrouter_lport.json [deleted file]
neutron/tests/unit/vmware/etc/fake_post_lrouter_nat.json [deleted file]
neutron/tests/unit/vmware/etc/fake_post_lswitch.json [deleted file]
neutron/tests/unit/vmware/etc/fake_post_lswitch_lport.json [deleted file]
neutron/tests/unit/vmware/etc/fake_post_security_profile.json [deleted file]
neutron/tests/unit/vmware/etc/fake_put_lrouter_lport_att.json [deleted file]
neutron/tests/unit/vmware/etc/fake_put_lswitch_lport_att.json [deleted file]
neutron/tests/unit/vmware/etc/neutron.conf.test [deleted file]
neutron/tests/unit/vmware/etc/nsx.ini.agentless.test [deleted file]
neutron/tests/unit/vmware/etc/nsx.ini.basic.test [deleted file]
neutron/tests/unit/vmware/etc/nsx.ini.combined.test [deleted file]
neutron/tests/unit/vmware/etc/nsx.ini.full.test [deleted file]
neutron/tests/unit/vmware/etc/nsx.ini.test [deleted file]
neutron/tests/unit/vmware/etc/nvp.ini.full.test [deleted file]
neutron/tests/unit/vmware/etc/vcns.ini.test [deleted file]
neutron/tests/unit/vmware/extensions/__init__.py [deleted file]
neutron/tests/unit/vmware/extensions/test_addresspairs.py [deleted file]
neutron/tests/unit/vmware/extensions/test_maclearning.py [deleted file]
neutron/tests/unit/vmware/extensions/test_networkgw.py [deleted file]
neutron/tests/unit/vmware/extensions/test_portsecurity.py [deleted file]
neutron/tests/unit/vmware/extensions/test_providernet.py [deleted file]
neutron/tests/unit/vmware/extensions/test_qosqueues.py [deleted file]
neutron/tests/unit/vmware/nsxlib/__init__.py [deleted file]
neutron/tests/unit/vmware/nsxlib/base.py [deleted file]
neutron/tests/unit/vmware/nsxlib/test_l2gateway.py [deleted file]
neutron/tests/unit/vmware/nsxlib/test_lsn.py [deleted file]
neutron/tests/unit/vmware/nsxlib/test_queue.py [deleted file]
neutron/tests/unit/vmware/nsxlib/test_router.py [deleted file]
neutron/tests/unit/vmware/nsxlib/test_secgroup.py [deleted file]
neutron/tests/unit/vmware/nsxlib/test_switch.py [deleted file]
neutron/tests/unit/vmware/nsxlib/test_versioning.py [deleted file]
neutron/tests/unit/vmware/test_agent_scheduler.py [deleted file]
neutron/tests/unit/vmware/test_dhcpmeta.py [deleted file]
neutron/tests/unit/vmware/test_nsx_opts.py [deleted file]
neutron/tests/unit/vmware/test_nsx_plugin.py [deleted file]
neutron/tests/unit/vmware/test_nsx_sync.py [deleted file]
neutron/tests/unit/vmware/test_nsx_utils.py [deleted file]
neutron/tests/unit/vmware/vshield/__init__.py [deleted file]
neutron/tests/unit/vmware/vshield/fake_vcns.py [deleted file]
neutron/tests/unit/vmware/vshield/test_vcns_driver.py [deleted file]
setup.cfg

diff --git a/neutron/plugins/vmware/api_client/__init__.py b/neutron/plugins/vmware/api_client/__init__.py
deleted file mode 100644 (file)
index 6b7126b..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2012 VMware, Inc.
-#
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import httplib
-
-
-def ctrl_conn_to_str(conn):
-    """Returns a string representing a connection URL to the controller."""
-    if isinstance(conn, httplib.HTTPSConnection):
-        proto = "https://"
-    elif isinstance(conn, httplib.HTTPConnection):
-        proto = "http://"
-    else:
-        raise TypeError(_('Invalid connection type: %s') % type(conn))
-    return "%s%s:%s" % (proto, conn.host, conn.port)
diff --git a/neutron/plugins/vmware/api_client/base.py b/neutron/plugins/vmware/api_client/base.py
deleted file mode 100644 (file)
index 4178355..0000000
+++ /dev/null
@@ -1,247 +0,0 @@
-# Copyright 2012 VMware, Inc.
-#
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-import httplib
-import six
-import time
-
-from oslo_config import cfg
-
-from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware import api_client
-
-LOG = logging.getLogger(__name__)
-
-GENERATION_ID_TIMEOUT = -1
-DEFAULT_CONCURRENT_CONNECTIONS = 3
-DEFAULT_CONNECT_TIMEOUT = 5
-
-
-@six.add_metaclass(abc.ABCMeta)
-class ApiClientBase(object):
-    """An abstract baseclass for all API client implementations."""
-
-    def _create_connection(self, host, port, is_ssl):
-        if is_ssl:
-            return httplib.HTTPSConnection(host, port,
-                                           timeout=self._connect_timeout)
-        return httplib.HTTPConnection(host, port,
-                                      timeout=self._connect_timeout)
-
-    @staticmethod
-    def _conn_params(http_conn):
-        is_ssl = isinstance(http_conn, httplib.HTTPSConnection)
-        return (http_conn.host, http_conn.port, is_ssl)
-
-    @property
-    def user(self):
-        return self._user
-
-    @property
-    def password(self):
-        return self._password
-
-    @property
-    def config_gen(self):
-        # If NSX_gen_timeout is not -1 then:
-        # Maintain a timestamp along with the generation ID.  Hold onto the
-        # ID long enough to be useful and block on sequential requests but
-        # not long enough to persist when Onix db is cleared, which resets
-        # the generation ID, causing the DAL to block indefinitely with some
-        # number that's higher than the cluster's value.
-        if self._gen_timeout != -1:
-            ts = self._config_gen_ts
-            if ts is not None:
-                if (time.time() - ts) > self._gen_timeout:
-                    return None
-        return self._config_gen
-
-    @config_gen.setter
-    def config_gen(self, value):
-        if self._config_gen != value:
-            if self._gen_timeout != -1:
-                self._config_gen_ts = time.time()
-        self._config_gen = value
-
-    def auth_cookie(self, conn):
-        cookie = None
-        data = self._get_provider_data(conn)
-        if data:
-            cookie = data[1]
-        return cookie
-
-    def set_auth_cookie(self, conn, cookie):
-        data = self._get_provider_data(conn)
-        if data:
-            self._set_provider_data(conn, (data[0], cookie))
-
-    def acquire_connection(self, auto_login=True, headers=None, rid=-1):
-        '''Check out an available HTTPConnection instance.
-
-        Blocks until a connection is available.
-        :auto_login: automatically logins before returning conn
-        :headers: header to pass on to login attempt
-        :param rid: request id passed in from request eventlet.
-        :returns: An available HTTPConnection instance or None if no
-                 api_providers are configured.
-        '''
-        if not self._api_providers:
-            LOG.warn(_LW("[%d] no API providers currently available."), rid)
-            return None
-        if self._conn_pool.empty():
-            LOG.debug("[%d] Waiting to acquire API client connection.", rid)
-        priority, conn = self._conn_pool.get()
-        now = time.time()
-        if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout:
-            LOG.info(_LI("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
-                         "seconds; reconnecting."),
-                     {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
-                      'sec': now - conn.last_used})
-            conn = self._create_connection(*self._conn_params(conn))
-
-        conn.last_used = now
-        conn.priority = priority  # stash current priority for release
-        qsize = self._conn_pool.qsize()
-        LOG.debug("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
-                  "connection(s) available.",
-                  {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
-                   'qsize': qsize})
-        if auto_login and self.auth_cookie(conn) is None:
-            self._wait_for_login(conn, headers)
-        return conn
-
-    def release_connection(self, http_conn, bad_state=False,
-                           service_unavail=False, rid=-1):
-        '''Mark HTTPConnection instance as available for check-out.
-
-        :param http_conn: An HTTPConnection instance obtained from this
-            instance.
-        :param bad_state: True if http_conn is known to be in a bad state
-                (e.g. connection fault.)
-        :service_unavail: True if http_conn returned 503 response.
-        :param rid: request id passed in from request eventlet.
-        '''
-        conn_params = self._conn_params(http_conn)
-        if self._conn_params(http_conn) not in self._api_providers:
-            LOG.debug("[%(rid)d] Released connection %(conn)s is not an "
-                      "API provider for the cluster",
-                      {'rid': rid,
-                       'conn': api_client.ctrl_conn_to_str(http_conn)})
-            return
-        elif hasattr(http_conn, "no_release"):
-            return
-
-        priority = http_conn.priority
-        if bad_state:
-            # Reconnect to provider.
-            LOG.warn(_LW("[%(rid)d] Connection returned in bad state, "
-                         "reconnecting to %(conn)s"),
-                     {'rid': rid,
-                      'conn': api_client.ctrl_conn_to_str(http_conn)})
-            http_conn = self._create_connection(*self._conn_params(http_conn))
-        elif service_unavail:
-            # http_conn returned a service unaviable response, put other
-            # connections to the same controller at end of priority queue,
-            conns = []
-            while not self._conn_pool.empty():
-                priority, conn = self._conn_pool.get()
-                if self._conn_params(conn) == conn_params:
-                    priority = self._next_conn_priority
-                    self._next_conn_priority += 1
-                conns.append((priority, conn))
-            for priority, conn in conns:
-                self._conn_pool.put((priority, conn))
-            # put http_conn at end of queue also
-            priority = self._next_conn_priority
-            self._next_conn_priority += 1
-
-        self._conn_pool.put((priority, http_conn))
-        LOG.debug("[%(rid)d] Released connection %(conn)s. %(qsize)d "
-                  "connection(s) available.",
-                  {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn),
-                   'qsize': self._conn_pool.qsize()})
-
-    def _wait_for_login(self, conn, headers=None):
-        '''Block until a login has occurred for the current API provider.'''
-
-        data = self._get_provider_data(conn)
-        if data is None:
-            LOG.error(_LE("Login request for an invalid connection: '%s'"),
-                      api_client.ctrl_conn_to_str(conn))
-            return
-        provider_sem = data[0]
-        if provider_sem.acquire(blocking=False):
-            try:
-                cookie = self._login(conn, headers)
-                self.set_auth_cookie(conn, cookie)
-            finally:
-                provider_sem.release()
-        else:
-            LOG.debug("Waiting for auth to complete")
-            # Wait until we can acquire then release
-            provider_sem.acquire(blocking=True)
-            provider_sem.release()
-
-    def _get_provider_data(self, conn_or_conn_params, default=None):
-        """Get data for specified API provider.
-
-        Args:
-            conn_or_conn_params: either a HTTP(S)Connection object or the
-                resolved conn_params tuple returned by self._conn_params().
-            default: conn_params if ones passed aren't known
-        Returns: Data associated with specified provider
-        """
-        conn_params = self._normalize_conn_params(conn_or_conn_params)
-        return self._api_provider_data.get(conn_params, default)
-
-    def _set_provider_data(self, conn_or_conn_params, data):
-        """Set data for specified API provider.
-
-        Args:
-            conn_or_conn_params: either a HTTP(S)Connection object or the
-                resolved conn_params tuple returned by self._conn_params().
-            data: data to associate with API provider
-        """
-        conn_params = self._normalize_conn_params(conn_or_conn_params)
-        if data is None:
-            del self._api_provider_data[conn_params]
-        else:
-            self._api_provider_data[conn_params] = data
-
-    def _normalize_conn_params(self, conn_or_conn_params):
-        """Normalize conn_param tuple.
-
-        Args:
-            conn_or_conn_params: either a HTTP(S)Connection object or the
-                resolved conn_params tuple returned by self._conn_params().
-
-        Returns: Normalized conn_param tuple
-        """
-        if (not isinstance(conn_or_conn_params, tuple) and
-            not isinstance(conn_or_conn_params, httplib.HTTPConnection)):
-            LOG.debug("Invalid conn_params value: '%s'",
-                      str(conn_or_conn_params))
-            return conn_or_conn_params
-        if isinstance(conn_or_conn_params, httplib.HTTPConnection):
-            conn_params = self._conn_params(conn_or_conn_params)
-        else:
-            conn_params = conn_or_conn_params
-        host, port, is_ssl = conn_params
-        if port is None:
-            port = 443 if is_ssl else 80
-        return (host, port, is_ssl)
diff --git a/neutron/plugins/vmware/api_client/client.py b/neutron/plugins/vmware/api_client/client.py
deleted file mode 100644 (file)
index 67ea8d2..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright 2012 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import httplib
-
-from neutron.i18n import _LE
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.api_client import base
-from neutron.plugins.vmware.api_client import eventlet_client
-from neutron.plugins.vmware.api_client import eventlet_request
-from neutron.plugins.vmware.api_client import exception
-from neutron.plugins.vmware.api_client import version
-
-LOG = logging.getLogger(__name__)
-
-
-class NsxApiClient(eventlet_client.EventletApiClient):
-    """The Nsx API Client."""
-
-    def __init__(self, api_providers, user, password,
-                 concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS,
-                 gen_timeout=base.GENERATION_ID_TIMEOUT,
-                 use_https=True,
-                 connect_timeout=base.DEFAULT_CONNECT_TIMEOUT,
-                 http_timeout=75, retries=2, redirects=2):
-        '''Constructor. Adds the following:
-
-        :param http_timeout: how long to wait before aborting an
-            unresponsive controller (and allow for retries to another
-            controller in the cluster)
-        :param retries: the number of concurrent connections.
-        :param redirects: the number of concurrent connections.
-        '''
-        super(NsxApiClient, self).__init__(
-            api_providers, user, password,
-            concurrent_connections=concurrent_connections,
-            gen_timeout=gen_timeout, use_https=use_https,
-            connect_timeout=connect_timeout)
-
-        self._request_timeout = http_timeout * retries
-        self._http_timeout = http_timeout
-        self._retries = retries
-        self._redirects = redirects
-        self._version = None
-
-    # NOTE(salvatore-orlando): This method is not used anymore. Login is now
-    # performed automatically inside the request eventlet if necessary.
-    def login(self, user=None, password=None):
-        '''Login to NSX controller.
-
-        Assumes same password is used for all controllers.
-
-        :param user: controller user (usually admin). Provided for
-                backwards compatibility. In the  normal mode of operation
-                this should be None.
-        :param password: controller password. Provided for backwards
-                compatibility. In the normal mode of operation this should
-                be None.
-        '''
-        if user:
-            self._user = user
-        if password:
-            self._password = password
-
-        return self._login()
-
-    def request(self, method, url, body="", content_type="application/json"):
-        '''Issues request to controller.'''
-
-        g = eventlet_request.GenericRequestEventlet(
-            self, method, url, body, content_type, auto_login=True,
-            http_timeout=self._http_timeout,
-            retries=self._retries, redirects=self._redirects)
-        g.start()
-        response = g.join()
-        LOG.debug('Request returns "%s"', response)
-
-        # response is a modified HTTPResponse object or None.
-        # response.read() will not work on response as the underlying library
-        # request_eventlet.ApiRequestEventlet has already called this
-        # method in order to extract the body and headers for processing.
-        # ApiRequestEventlet derived classes call .read() and
-        # .getheaders() on the HTTPResponse objects and store the results in
-        # the response object's .body and .headers data members for future
-        # access.
-
-        if response is None:
-            # Timeout.
-            LOG.error(_LE('Request timed out: %(method)s to %(url)s'),
-                      {'method': method, 'url': url})
-            raise exception.RequestTimeout()
-
-        status = response.status
-        if status == httplib.UNAUTHORIZED:
-            raise exception.UnAuthorizedRequest()
-
-        # Fail-fast: Check for exception conditions and raise the
-        # appropriate exceptions for known error codes.
-        if status in exception.ERROR_MAPPINGS:
-            LOG.error(_LE("Received error code: %s"), status)
-            LOG.error(_LE("Server Error Message: %s"), response.body)
-            exception.ERROR_MAPPINGS[status](response)
-
-        # Continue processing for non-error condition.
-        if (status != httplib.OK and status != httplib.CREATED
-                and status != httplib.NO_CONTENT):
-            LOG.error(_LE("%(method)s to %(url)s, unexpected response code: "
-                          "%(status)d (content = '%(body)s')"),
-                      {'method': method, 'url': url,
-                       'status': response.status, 'body': response.body})
-            return None
-
-        if not self._version:
-            self._version = version.find_version(response.headers)
-        return response.body
-
-    def get_version(self):
-        if not self._version:
-            # Determine the controller version by querying the
-            # cluster nodes. Currently, the version will be the
-            # one of the server that responds.
-            self.request('GET', '/ws.v1/control-cluster/node')
-            if not self._version:
-                LOG.error(_LE('Unable to determine NSX version. '
-                              'Plugin might not work as expected.'))
-        return self._version
diff --git a/neutron/plugins/vmware/api_client/eventlet_client.py b/neutron/plugins/vmware/api_client/eventlet_client.py
deleted file mode 100644 (file)
index 0eba2fd..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright 2012 VMware, Inc.
-#
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import time
-
-import eventlet
-eventlet.monkey_patch()
-
-from neutron.i18n import _LE
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.api_client import base
-from neutron.plugins.vmware.api_client import eventlet_request
-
-LOG = logging.getLogger(__name__)
-
-
-class EventletApiClient(base.ApiClientBase):
-    """Eventlet-based implementation of NSX ApiClient ABC."""
-
-    def __init__(self, api_providers, user, password,
-                 concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS,
-                 gen_timeout=base.GENERATION_ID_TIMEOUT,
-                 use_https=True,
-                 connect_timeout=base.DEFAULT_CONNECT_TIMEOUT):
-        '''Constructor
-
-        :param api_providers: a list of tuples of the form: (host, port,
-            is_ssl).
-        :param user: login username.
-        :param password: login password.
-        :param concurrent_connections: total number of concurrent connections.
-        :param use_https: whether or not to use https for requests.
-        :param connect_timeout: connection timeout in seconds.
-        :param gen_timeout controls how long the generation id is kept
-            if set to -1 the generation id is never timed out
-        '''
-        if not api_providers:
-            api_providers = []
-        self._api_providers = set([tuple(p) for p in api_providers])
-        self._api_provider_data = {}  # tuple(semaphore, session_cookie)
-        for p in self._api_providers:
-            self._set_provider_data(p, (eventlet.semaphore.Semaphore(1), None))
-        self._user = user
-        self._password = password
-        self._concurrent_connections = concurrent_connections
-        self._use_https = use_https
-        self._connect_timeout = connect_timeout
-        self._config_gen = None
-        self._config_gen_ts = None
-        self._gen_timeout = gen_timeout
-
-        # Connection pool is a list of queues.
-        self._conn_pool = eventlet.queue.PriorityQueue()
-        self._next_conn_priority = 1
-        for host, port, is_ssl in api_providers:
-            for _ in range(concurrent_connections):
-                conn = self._create_connection(host, port, is_ssl)
-                self._conn_pool.put((self._next_conn_priority, conn))
-                self._next_conn_priority += 1
-
-    def acquire_redirect_connection(self, conn_params, auto_login=True,
-                                    headers=None):
-        """Check out or create connection to redirected NSX API server.
-
-        Args:
-            conn_params: tuple specifying target of redirect, see
-                self._conn_params()
-            auto_login: returned connection should have valid session cookie
-            headers: headers to pass on if auto_login
-
-        Returns: An available HTTPConnection instance corresponding to the
-                 specified conn_params. If a connection did not previously
-                 exist, new connections are created with the highest prioity
-                 in the connection pool and one of these new connections
-                 returned.
-        """
-        result_conn = None
-        data = self._get_provider_data(conn_params)
-        if data:
-            # redirect target already exists in provider data and connections
-            # to the provider have been added to the connection pool. Try to
-            # obtain a connection from the pool, note that it's possible that
-            # all connection to the provider are currently in use.
-            conns = []
-            while not self._conn_pool.empty():
-                priority, conn = self._conn_pool.get_nowait()
-                if not result_conn and self._conn_params(conn) == conn_params:
-                    conn.priority = priority
-                    result_conn = conn
-                else:
-                    conns.append((priority, conn))
-            for priority, conn in conns:
-                self._conn_pool.put((priority, conn))
-            # hack: if no free connections available, create new connection
-            # and stash "no_release" attribute (so that we only exceed
-            # self._concurrent_connections temporarily)
-            if not result_conn:
-                conn = self._create_connection(*conn_params)
-                conn.priority = 0  # redirect connections have highest priority
-                conn.no_release = True
-                result_conn = conn
-        else:
-            #redirect target not already known, setup provider lists
-            self._api_providers.update([conn_params])
-            self._set_provider_data(conn_params,
-                                    (eventlet.semaphore.Semaphore(1), None))
-            # redirects occur during cluster upgrades, i.e. results to old
-            # redirects to new, so give redirect targets highest priority
-            priority = 0
-            for i in range(self._concurrent_connections):
-                conn = self._create_connection(*conn_params)
-                conn.priority = priority
-                if i == self._concurrent_connections - 1:
-                    break
-                self._conn_pool.put((priority, conn))
-            result_conn = conn
-        if result_conn:
-            result_conn.last_used = time.time()
-            if auto_login and self.auth_cookie(conn) is None:
-                self._wait_for_login(result_conn, headers)
-        return result_conn
-
-    def _login(self, conn=None, headers=None):
-        '''Issue login request and update authentication cookie.'''
-        cookie = None
-        g = eventlet_request.LoginRequestEventlet(
-            self, self._user, self._password, conn, headers)
-        g.start()
-        ret = g.join()
-        if ret:
-            if isinstance(ret, Exception):
-                LOG.error(_LE('Login error "%s"'), ret)
-                raise ret
-
-            cookie = ret.getheader("Set-Cookie")
-            if cookie:
-                LOG.debug("Saving new authentication cookie '%s'", cookie)
-
-        return cookie
-
-# Register as subclass.
-base.ApiClientBase.register(EventletApiClient)
diff --git a/neutron/plugins/vmware/api_client/eventlet_request.py b/neutron/plugins/vmware/api_client/eventlet_request.py
deleted file mode 100644 (file)
index dfa9348..0000000
+++ /dev/null
@@ -1,241 +0,0 @@
-# Copyright 2012 VMware, Inc.
-#
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import httplib
-import urllib
-
-import eventlet
-from oslo_serialization import jsonutils
-
-from neutron.i18n import _LI, _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.api_client import request
-
-LOG = logging.getLogger(__name__)
-USER_AGENT = "Neutron eventlet client/2.0"
-
-
-class EventletApiRequest(request.ApiRequest):
-    '''Eventlet-based ApiRequest class.
-
-    This class will form the basis for eventlet-based ApiRequest classes
-    '''
-
-    # Maximum number of green threads present in the system at one time.
-    API_REQUEST_POOL_SIZE = request.DEFAULT_API_REQUEST_POOL_SIZE
-
-    # Pool of green threads. One green thread is allocated per incoming
-    # request. Incoming requests will block when the pool is empty.
-    API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE)
-
-    # A unique id is assigned to each incoming request. When the current
-    # request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0.
-    MAXIMUM_REQUEST_ID = request.DEFAULT_MAXIMUM_REQUEST_ID
-
-    # The request id for the next incoming request.
-    CURRENT_REQUEST_ID = 0
-
-    def __init__(self, client_obj, url, method="GET", body=None,
-                 headers=None,
-                 retries=request.DEFAULT_RETRIES,
-                 auto_login=True,
-                 redirects=request.DEFAULT_REDIRECTS,
-                 http_timeout=request.DEFAULT_HTTP_TIMEOUT, client_conn=None):
-        '''Constructor.'''
-        self._api_client = client_obj
-        self._url = url
-        self._method = method
-        self._body = body
-        self._headers = headers or {}
-        self._request_timeout = http_timeout * retries
-        self._retries = retries
-        self._auto_login = auto_login
-        self._redirects = redirects
-        self._http_timeout = http_timeout
-        self._client_conn = client_conn
-        self._abort = False
-
-        self._request_error = None
-
-        if "User-Agent" not in self._headers:
-            self._headers["User-Agent"] = USER_AGENT
-
-        self._green_thread = None
-        # Retrieve and store this instance's unique request id.
-        self._request_id = EventletApiRequest.CURRENT_REQUEST_ID
-        # Update the class variable that tracks request id.
-        # Request IDs wrap around at MAXIMUM_REQUEST_ID
-        next_request_id = self._request_id + 1
-        next_request_id %= self.MAXIMUM_REQUEST_ID
-        EventletApiRequest.CURRENT_REQUEST_ID = next_request_id
-
-    @classmethod
-    def _spawn(cls, func, *args, **kwargs):
-        '''Allocate a green thread from the class pool.'''
-        return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs)
-
-    def spawn(self, func, *args, **kwargs):
-        '''Spawn a new green thread with the supplied function and args.'''
-        return self.__class__._spawn(func, *args, **kwargs)
-
-    @classmethod
-    def joinall(cls):
-        '''Wait for all outstanding requests to complete.'''
-        return cls.API_REQUEST_POOL.waitall()
-
-    def join(self):
-        '''Wait for instance green thread to complete.'''
-        if self._green_thread is not None:
-            return self._green_thread.wait()
-        return Exception(_('Joining an invalid green thread'))
-
-    def start(self):
-        '''Start request processing.'''
-        self._green_thread = self.spawn(self._run)
-
-    def copy(self):
-        '''Return a copy of this request instance.'''
-        return EventletApiRequest(
-            self._api_client, self._url, self._method, self._body,
-            self._headers, self._retries,
-            self._auto_login, self._redirects, self._http_timeout)
-
-    def _run(self):
-        '''Method executed within green thread.'''
-        if self._request_timeout:
-            # No timeout exception escapes the with block.
-            with eventlet.timeout.Timeout(self._request_timeout, False):
-                return self._handle_request()
-
-            LOG.info(_LI('[%d] Request timeout.'), self._rid())
-            self._request_error = Exception(_('Request timeout'))
-            return None
-        else:
-            return self._handle_request()
-
-    def _handle_request(self):
-        '''First level request handling.'''
-        attempt = 0
-        timeout = 0
-        response = None
-        while response is None and attempt <= self._retries:
-            eventlet.greenthread.sleep(timeout)
-            attempt += 1
-
-            req = self._issue_request()
-            # automatically raises any exceptions returned.
-            if isinstance(req, httplib.HTTPResponse):
-                timeout = 0
-                if attempt <= self._retries and not self._abort:
-                    if req.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN):
-                        continue
-                    elif req.status == httplib.SERVICE_UNAVAILABLE:
-                        timeout = 0.5
-                        continue
-                    # else fall through to return the error code
-
-                LOG.debug("[%(rid)d] Completed request '%(method)s %(url)s'"
-                          ": %(status)s",
-                          {'rid': self._rid(), 'method': self._method,
-                           'url': self._url, 'status': req.status})
-                self._request_error = None
-                response = req
-            else:
-                LOG.info(_LI('[%(rid)d] Error while handling request: '
-                             '%(req)s'),
-                         {'rid': self._rid(), 'req': req})
-                self._request_error = req
-                response = None
-        return response
-
-
-class LoginRequestEventlet(EventletApiRequest):
-    '''Process a login request.'''
-
-    def __init__(self, client_obj, user, password, client_conn=None,
-                 headers=None):
-        if headers is None:
-            headers = {}
-        headers.update({"Content-Type": "application/x-www-form-urlencoded"})
-        body = urllib.urlencode({"username": user, "password": password})
-        super(LoginRequestEventlet, self).__init__(
-            client_obj, "/ws.v1/login", "POST", body, headers,
-            auto_login=False, client_conn=client_conn)
-
-    def session_cookie(self):
-        if self.successful():
-            return self.value.getheader("Set-Cookie")
-        return None
-
-
-class GetApiProvidersRequestEventlet(EventletApiRequest):
-    '''Get a list of API providers.'''
-
-    def __init__(self, client_obj):
-        url = "/ws.v1/control-cluster/node?fields=roles"
-        super(GetApiProvidersRequestEventlet, self).__init__(
-            client_obj, url, "GET", auto_login=True)
-
-    def api_providers(self):
-        """Parse api_providers from response.
-
-        Returns: api_providers in [(host, port, is_ssl), ...] format
-        """
-        def _provider_from_listen_addr(addr):
-            # (pssl|ptcp):<ip>:<port> => (host, port, is_ssl)
-            parts = addr.split(':')
-            return (parts[1], int(parts[2]), parts[0] == 'pssl')
-
-        try:
-            if self.successful():
-                ret = []
-                body = jsonutils.loads(self.value.body)
-                for node in body.get('results', []):
-                    for role in node.get('roles', []):
-                        if role.get('role') == 'api_provider':
-                            addr = role.get('listen_addr')
-                            if addr:
-                                ret.append(_provider_from_listen_addr(addr))
-                return ret
-        except Exception as e:
-            LOG.warn(_LW("[%(rid)d] Failed to parse API provider: %(e)s"),
-                     {'rid': self._rid(), 'e': e})
-            # intentionally fall through
-        return None
-
-
-class GenericRequestEventlet(EventletApiRequest):
-    '''Handle a generic request.'''
-
-    def __init__(self, client_obj, method, url, body, content_type,
-                 auto_login=False,
-                 http_timeout=request.DEFAULT_HTTP_TIMEOUT,
-                 retries=request.DEFAULT_RETRIES,
-                 redirects=request.DEFAULT_REDIRECTS):
-        headers = {"Content-Type": content_type}
-        super(GenericRequestEventlet, self).__init__(
-            client_obj, url, method, body, headers,
-            retries=retries,
-            auto_login=auto_login, redirects=redirects,
-            http_timeout=http_timeout)
-
-    def session_cookie(self):
-        if self.successful():
-            return self.value.getheader("Set-Cookie")
-        return None
-
-
-request.ApiRequest.register(EventletApiRequest)
diff --git a/neutron/plugins/vmware/api_client/exception.py b/neutron/plugins/vmware/api_client/exception.py
deleted file mode 100644 (file)
index b3facfc..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright 2014 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-
-class NsxApiException(Exception):
-    """Base NSX API Client Exception.
-
-    To correctly use this class, inherit from it and define
-    a 'message' property. That message will get printf'd
-    with the keyword arguments provided to the constructor.
-
-    """
-    message = _("An unknown exception occurred.")
-
-    def __init__(self, **kwargs):
-        try:
-            self._error_string = self.message % kwargs
-        except Exception:
-            # at least get the core message out if something happened
-            self._error_string = self.message
-
-    def __str__(self):
-        return self._error_string
-
-
-class UnAuthorizedRequest(NsxApiException):
-    message = _("Server denied session's authentication credentials.")
-
-
-class ResourceNotFound(NsxApiException):
-    message = _("An entity referenced in the request was not found.")
-
-
-class Conflict(NsxApiException):
-    message = _("Request conflicts with configuration on a different "
-                "entity.")
-
-
-class ServiceUnavailable(NsxApiException):
-    message = _("Request could not be completed because the associated "
-                "resource could not be reached.")
-
-
-class Forbidden(NsxApiException):
-    message = _("The request is forbidden from accessing the "
-                "referenced resource.")
-
-
-class ReadOnlyMode(Forbidden):
-    message = _("Create/Update actions are forbidden when in read-only mode.")
-
-
-class RequestTimeout(NsxApiException):
-    message = _("The request has timed out.")
-
-
-class BadRequest(NsxApiException):
-    message = _("The server is unable to fulfill the request due "
                "to bad syntax.")
-
-
-class InvalidSecurityCertificate(BadRequest):
-    message = _("The backend received an invalid security certificate.")
-
-
-def fourZeroZero(response=None):
-    if response and "Invalid SecurityCertificate" in response.body:
-        raise InvalidSecurityCertificate()
-    raise BadRequest()
-
-
-def fourZeroFour(response=None):
-    raise ResourceNotFound()
-
-
-def fourZeroNine(response=None):
-    raise Conflict()
-
-
-def fiveZeroThree(response=None):
-    raise ServiceUnavailable()
-
-
-def fourZeroThree(response=None):
-    if 'read-only' in response.body:
-        raise ReadOnlyMode()
-    else:
-        raise Forbidden()
-
-
-def zero(self, response=None):
-    raise NsxApiException()
-
-
-ERROR_MAPPINGS = {
-    400: fourZeroZero,
-    404: fourZeroFour,
-    405: zero,
-    409: fourZeroNine,
-    503: fiveZeroThree,
-    403: fourZeroThree,
-    301: zero,
-    307: zero,
-    500: zero,
-    501: zero,
-    503: zero
-}
diff --git a/neutron/plugins/vmware/api_client/request.py b/neutron/plugins/vmware/api_client/request.py
deleted file mode 100644 (file)
index 2575a7a..0000000
+++ /dev/null
@@ -1,288 +0,0 @@
-# Copyright 2012 VMware, Inc.
-#
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import abc
-import copy
-import httplib
-import time
-
-import eventlet
-from oslo_utils import excutils
-import six
-import six.moves.urllib.parse as urlparse
-
-from neutron.i18n import _LI, _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware import api_client
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_HTTP_TIMEOUT = 30
-DEFAULT_RETRIES = 2
-DEFAULT_REDIRECTS = 2
-DEFAULT_API_REQUEST_POOL_SIZE = 1000
-DEFAULT_MAXIMUM_REQUEST_ID = 4294967295
-DOWNLOAD_TIMEOUT = 180
-
-
-@six.add_metaclass(abc.ABCMeta)
-class ApiRequest(object):
-    '''An abstract baseclass for all ApiRequest implementations.
-
-    This defines the interface and property structure for both eventlet and
-    gevent-based ApiRequest classes.
-    '''
-
-    # List of allowed status codes.
-    ALLOWED_STATUS_CODES = [
-        httplib.OK,
-        httplib.CREATED,
-        httplib.NO_CONTENT,
-        httplib.MOVED_PERMANENTLY,
-        httplib.TEMPORARY_REDIRECT,
-        httplib.BAD_REQUEST,
-        httplib.UNAUTHORIZED,
-        httplib.FORBIDDEN,
-        httplib.NOT_FOUND,
-        httplib.CONFLICT,
-        httplib.INTERNAL_SERVER_ERROR,
-        httplib.SERVICE_UNAVAILABLE
-    ]
-
-    @abc.abstractmethod
-    def start(self):
-        pass
-
-    @abc.abstractmethod
-    def join(self):
-        pass
-
-    @abc.abstractmethod
-    def copy(self):
-        pass
-
-    def _issue_request(self):
-        '''Issue a request to a provider.'''
-        conn = (self._client_conn or
-                self._api_client.acquire_connection(True,
-                                                    copy.copy(self._headers),
-                                                    rid=self._rid()))
-        if conn is None:
-            error = Exception(_("No API connections available"))
-            self._request_error = error
-            return error
-
-        url = self._url
-        LOG.debug("[%(rid)d] Issuing - request url: %(conn)s "
-                  "body: %(body)s",
-                  {'rid': self._rid(), 'conn': self._request_str(conn, url),
-                   'body': self._body})
-        issued_time = time.time()
-        is_conn_error = False
-        is_conn_service_unavail = False
-        response = None
-        try:
-            redirects = 0
-            while (redirects <= self._redirects):
-                # Update connection with user specified request timeout,
-                # the connect timeout is usually smaller so we only set
-                # the request timeout after a connection is established
-                if conn.sock is None:
-                    conn.connect()
-                    conn.sock.settimeout(self._http_timeout)
-                elif conn.sock.gettimeout() != self._http_timeout:
-                    conn.sock.settimeout(self._http_timeout)
-
-                headers = copy.copy(self._headers)
-                cookie = self._api_client.auth_cookie(conn)
-                if cookie:
-                    headers["Cookie"] = cookie
-
-                gen = self._api_client.config_gen
-                if gen:
-                    headers["X-Nvp-Wait-For-Config-Generation"] = gen
-                    LOG.debug("Setting X-Nvp-Wait-For-Config-Generation "
-                              "request header: '%s'", gen)
-                try:
-                    conn.request(self._method, url, self._body, headers)
-                except Exception as e:
-                    with excutils.save_and_reraise_exception():
-                        LOG.warn(_LW("[%(rid)d] Exception issuing request: "
-                                     "%(e)s"),
-                                 {'rid': self._rid(), 'e': e})
-
-                response = conn.getresponse()
-                response.body = response.read()
-                response.headers = response.getheaders()
-                elapsed_time = time.time() - issued_time
-                LOG.debug("[%(rid)d] Completed request '%(conn)s': "
-                          "%(status)s (%(elapsed)s seconds)",
-                          {'rid': self._rid(),
-                           'conn': self._request_str(conn, url),
-                           'status': response.status,
-                           'elapsed': elapsed_time})
-
-                new_gen = response.getheader('X-Nvp-Config-Generation', None)
-                if new_gen:
-                    LOG.debug("Reading X-Nvp-config-Generation response "
-                              "header: '%s'", new_gen)
-                    if (self._api_client.config_gen is None or
-                        self._api_client.config_gen < int(new_gen)):
-                        self._api_client.config_gen = int(new_gen)
-
-                if response.status == httplib.UNAUTHORIZED:
-
-                    if cookie is None and self._url != "/ws.v1/login":
-                        # The connection still has no valid cookie despite
-                        # attempts to authenticate and the request has failed
-                        # with unauthorized status code. If this isn't a
-                        # a request to authenticate, we should abort the
-                        # request since there is no point in retrying.
-                        self._abort = True
-
-                    # If request is unauthorized, clear the session cookie
-                    # for the current provider so that subsequent requests
-                    # to the same provider triggers re-authentication.
-                    self._api_client.set_auth_cookie(conn, None)
-                elif response.status == httplib.SERVICE_UNAVAILABLE:
-                    is_conn_service_unavail = True
-
-                if response.status not in [httplib.MOVED_PERMANENTLY,
-                                           httplib.TEMPORARY_REDIRECT]:
-                    break
-                elif redirects >= self._redirects:
-                    LOG.info(_LI("[%d] Maximum redirects exceeded, aborting "
-                                 "request"), self._rid())
-                    break
-                redirects += 1
-
-                conn, url = self._redirect_params(conn, response.headers,
-                                                  self._client_conn is None)
-                if url is None:
-                    response.status = httplib.INTERNAL_SERVER_ERROR
-                    break
-                LOG.info(_LI("[%(rid)d] Redirecting request to: %(conn)s"),
-                         {'rid': self._rid(),
-                          'conn': self._request_str(conn, url)})
-                # yield here, just in case we are not out of the loop yet
-                eventlet.greenthread.sleep(0)
-            # If we receive any of these responses, then
-            # our server did not process our request and may be in an
-            # errored state. Raise an exception, which will cause the
-            # conn to be released with is_conn_error == True
-            # which puts the conn on the back of the client's priority
-            # queue.
-            if (response.status == httplib.INTERNAL_SERVER_ERROR and
-                response.status > httplib.NOT_IMPLEMENTED):
-                LOG.warn(_LW("[%(rid)d] Request '%(method)s %(url)s' "
-                             "received: %(status)s"),
-                         {'rid': self._rid(), 'method': self._method,
-                          'url': self._url, 'status': response.status})
-                raise Exception(_('Server error return: %s'), response.status)
-            return response
-        except Exception as e:
-            if isinstance(e, httplib.BadStatusLine):
-                msg = (_("Invalid server response"))
-            else:
-                msg = unicode(e)
-            if response is None:
-                elapsed_time = time.time() - issued_time
-            LOG.warn(_LW("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
-                         "(%(elapsed)s seconds)"),
-                     {'rid': self._rid(), 'conn': self._request_str(conn, url),
-                      'msg': msg, 'elapsed': elapsed_time})
-            self._request_error = e
-            is_conn_error = True
-            return e
-        finally:
-            # Make sure we release the original connection provided by the
-            # acquire_connection() call above.
-            if self._client_conn is None:
-                self._api_client.release_connection(conn, is_conn_error,
-                                                    is_conn_service_unavail,
-                                                    rid=self._rid())
-
-    def _redirect_params(self, conn, headers, allow_release_conn=False):
-        """Process redirect response, create new connection if necessary.
-
-        Args:
-            conn: connection that returned the redirect response
-            headers: response headers of the redirect response
-            allow_release_conn: if redirecting to a different server,
-                release existing connection back to connection pool.
-
-        Returns: Return tuple(conn, url) where conn is a connection object
-            to the redirect target and url is the path of the API request
-        """
-
-        url = None
-        for name, value in headers:
-            if name.lower() == "location":
-                url = value
-                break
-        if not url:
-            LOG.warn(_LW("[%d] Received redirect status without location "
-                         "header field"), self._rid())
-            return (conn, None)
-        # Accept location with the following format:
-        # 1. /path, redirect to same node
-        # 2. scheme://hostname:[port]/path where scheme is https or http
-        # Reject others
-        # 3. e.g. relative paths, unsupported scheme, unspecified host
-        result = urlparse.urlparse(url)
-        if not result.scheme and not result.hostname and result.path:
-            if result.path[0] == "/":
-                if result.query:
-                    url = "%s?%s" % (result.path, result.query)
-                else:
-                    url = result.path
-                return (conn, url)      # case 1
-            else:
-                LOG.warn(_LW("[%(rid)d] Received invalid redirect location: "
-                             "'%(url)s'"), {'rid': self._rid(), 'url': url})
-                return (conn, None)     # case 3
-        elif result.scheme not in ["http", "https"] or not result.hostname:
-            LOG.warn(_LW("[%(rid)d] Received malformed redirect "
-                         "location: %(url)s"),
-                     {'rid': self._rid(), 'url': url})
-            return (conn, None)         # case 3
-        # case 2, redirect location includes a scheme
-        # so setup a new connection and authenticate
-        if allow_release_conn:
-            self._api_client.release_connection(conn)
-        conn_params = (result.hostname, result.port, result.scheme == "https")
-        conn = self._api_client.acquire_redirect_connection(conn_params, True,
-                                                            self._headers)
-        if result.query:
-            url = "%s?%s" % (result.path, result.query)
-        else:
-            url = result.path
-        return (conn, url)
-
-    def _rid(self):
-        '''Return current request id.'''
-        return self._request_id
-
-    @property
-    def request_error(self):
-        '''Return any errors associated with this instance.'''
-        return self._request_error
-
-    def _request_str(self, conn, url):
-        '''Return string representation of connection.'''
-        return "%s %s/%s" % (self._method, api_client.ctrl_conn_to_str(conn),
-                             url)
diff --git a/neutron/plugins/vmware/api_client/version.py b/neutron/plugins/vmware/api_client/version.py
deleted file mode 100644 (file)
index 189f318..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2012 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from neutron.i18n import _LW
-from neutron.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def find_version(headers):
-    """Retrieve NSX controller version from response headers."""
-    for (header_name, header_value) in (headers or ()):
-        try:
-            if header_name == 'server':
-                return Version(header_value.split('/')[1])
-        except IndexError:
-            LOG.warning(_LW("Unable to fetch NSX version from response "
-                            "headers :%s"), headers)
-
-
-class Version(object):
-    """Abstracts NSX version by exposing major and minor."""
-
-    def __init__(self, version):
-        self.full_version = version.split('.')
-        self.major = int(self.full_version[0])
-        self.minor = int(self.full_version[1])
-
-    def __str__(self):
-        return '.'.join(self.full_version)
diff --git a/neutron/plugins/vmware/check_nsx_config.py b/neutron/plugins/vmware/check_nsx_config.py
deleted file mode 100644 (file)
index 8d9d3d0..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from __future__ import print_function
-
-import sys
-
-from oslo_config import cfg
-
-from neutron.common import config
-from neutron.plugins.vmware.common import config as nsx_config  # noqa
-from neutron.plugins.vmware.common import nsx_utils
-from neutron.plugins.vmware import nsxlib
-
-config.setup_logging()
-
-
-def help(name):
-    print("Usage: %s path/to/neutron/plugin/ini/config/file" % name)
-    sys.exit(1)
-
-
-def get_nsx_controllers(cluster):
-    return cluster.nsx_controllers
-
-
-def config_helper(config_entity, cluster):
-    try:
-        return nsxlib.do_request('GET',
-                                 "/ws.v1/%s?fields=uuid" % config_entity,
-                                 cluster=cluster).get('results', [])
-    except Exception as e:
-        msg = (_("Error '%(err)s' when connecting to controller(s): %(ctl)s.")
-               % {'err': e,
-                  'ctl': ', '.join(get_nsx_controllers(cluster))})
-        raise Exception(msg)
-
-
-def get_control_cluster_nodes(cluster):
-    return config_helper("control-cluster/node", cluster)
-
-
-def get_gateway_services(cluster):
-    ret_gw_services = {"L2GatewayServiceConfig": [],
-                       "L3GatewayServiceConfig": []}
-    gw_services = config_helper("gateway-service", cluster)
-    for gw_service in gw_services:
-        ret_gw_services[gw_service['type']].append(gw_service['uuid'])
-    return ret_gw_services
-
-
-def get_transport_zones(cluster):
-    transport_zones = config_helper("transport-zone", cluster)
-    return [transport_zone['uuid'] for transport_zone in transport_zones]
-
-
-def get_transport_nodes(cluster):
-    transport_nodes = config_helper("transport-node", cluster)
-    return [transport_node['uuid'] for transport_node in transport_nodes]
-
-
-def is_transport_node_connected(cluster, node_uuid):
-    try:
-        return nsxlib.do_request('GET',
-                                 "/ws.v1/transport-node/%s/status" % node_uuid,
-                                 cluster=cluster)['connection']['connected']
-    except Exception as e:
-        msg = (_("Error '%(err)s' when connecting to controller(s): %(ctl)s.")
-               % {'err': e,
-                  'ctl': ', '.join(get_nsx_controllers(cluster))})
-        raise Exception(msg)
-
-
-def main():
-    if len(sys.argv) != 2:
-        help(sys.argv[0])
-    args = ['--config-file']
-    args.append(sys.argv[1])
-    config.init(args)
-    print("----------------------- Database Options -----------------------")
-    print("\tconnection: %s" % cfg.CONF.database.connection)
-    print("\tretry_interval: %d" % cfg.CONF.database.retry_interval)
-    print("\tmax_retries: %d" % cfg.CONF.database.max_retries)
-    print("-----------------------    NSX Options   -----------------------")
-    print("\tNSX Generation Timeout %d" % cfg.CONF.NSX.nsx_gen_timeout)
-    print("\tNumber of concurrent connections to each controller %d" %
-          cfg.CONF.NSX.concurrent_connections)
-    print("\tmax_lp_per_bridged_ls: %s" % cfg.CONF.NSX.max_lp_per_bridged_ls)
-    print("\tmax_lp_per_overlay_ls: %s" % cfg.CONF.NSX.max_lp_per_overlay_ls)
-    print("-----------------------  Cluster Options -----------------------")
-    print("\tretries: %s" % cfg.CONF.retries)
-    print("\tredirects: %s" % cfg.CONF.redirects)
-    print("\thttp_timeout: %s" % cfg.CONF.http_timeout)
-    cluster = nsx_utils.create_nsx_cluster(
-        cfg.CONF,
-        cfg.CONF.NSX.concurrent_connections,
-        cfg.CONF.NSX.nsx_gen_timeout)
-    nsx_controllers = get_nsx_controllers(cluster)
-    num_controllers = len(nsx_controllers)
-    print("Number of controllers found: %s" % num_controllers)
-    if num_controllers == 0:
-        print("You must specify at least one controller!")
-        sys.exit(1)
-
-    get_control_cluster_nodes(cluster)
-    for controller in nsx_controllers:
-        print("\tController endpoint: %s" % controller)
-        gateway_services = get_gateway_services(cluster)
-        default_gateways = {
-            "L2GatewayServiceConfig": cfg.CONF.default_l2_gw_service_uuid,
-            "L3GatewayServiceConfig": cfg.CONF.default_l3_gw_service_uuid}
-        errors = 0
-        for svc_type in default_gateways.keys():
-            for uuid in gateway_services[svc_type]:
-                print("\t\tGateway(%s) uuid: %s" % (svc_type, uuid))
-            if (default_gateways[svc_type] and
-                default_gateways[svc_type] not in gateway_services[svc_type]):
-                print("\t\t\tError: specified default %s gateway (%s) is "
-                      "missing from NSX Gateway Services!" % (
-                          svc_type,
-                          default_gateways[svc_type]))
-                errors += 1
-        transport_zones = get_transport_zones(cluster)
-        print("\tTransport zones: %s" % transport_zones)
-        if cfg.CONF.default_tz_uuid not in transport_zones:
-            print("\t\tError: specified default transport zone "
-                  "(%s) is missing from NSX transport zones!"
-                  % cfg.CONF.default_tz_uuid)
-            errors += 1
-        transport_nodes = get_transport_nodes(cluster)
-        print("\tTransport nodes: %s" % transport_nodes)
-        node_errors = []
-        for node in transport_nodes:
-            if not is_transport_node_connected(cluster, node):
-                node_errors.append(node)
-
-    # Use different exit codes, so that we can distinguish
-    # between config and runtime errors
-    if len(node_errors):
-        print("\nThere are one or more transport nodes that are "
-              "not connected: %s. Please, revise!" % node_errors)
-        sys.exit(10)
-    elif errors:
-        print("\nThere are %d errors with your configuration. "
-              "Please, revise!" % errors)
-        sys.exit(12)
-    else:
-        print("Done.")
diff --git a/neutron/plugins/vmware/common/config.py b/neutron/plugins/vmware/common/config.py
deleted file mode 100644 (file)
index 89b5eeb..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
-# Copyright 2012 VMware, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-
-
-class AgentModes(object):
-    AGENT = 'agent'
-    AGENTLESS = 'agentless'
-    COMBINED = 'combined'
-
-
-class MetadataModes(object):
-    DIRECT = 'access_network'
-    INDIRECT = 'dhcp_host_route'
-
-
-class ReplicationModes(object):
-    SERVICE = 'service'
-    SOURCE = 'source'
-
-
-base_opts = [
-    cfg.IntOpt('max_lp_per_bridged_ls', default=5000,
-               deprecated_group='NVP',
-               help=_("Maximum number of ports of a logical switch on a "
-                      "bridged transport zone (default 5000)")),
-    cfg.IntOpt('max_lp_per_overlay_ls', default=256,
-               deprecated_group='NVP',
-               help=_("Maximum number of ports of a logical switch on an "
-                      "overlay transport zone (default 256)")),
-    cfg.IntOpt('concurrent_connections', default=10,
-               deprecated_group='NVP',
-               help=_("Maximum concurrent connections to each NSX "
-                      "controller.")),
-    cfg.IntOpt('nsx_gen_timeout', default=-1,
-               deprecated_name='nvp_gen_timeout',
-               deprecated_group='NVP',
-               help=_("Number of seconds a generation id should be valid for "
-                      "(default -1 meaning do not time out)")),
-    cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT,
-               deprecated_group='NVP',
-               help=_("If set to access_network this enables a dedicated "
-                      "connection to the metadata proxy for metadata server "
-                      "access via Neutron router. If set to dhcp_host_route "
-                      "this enables host route injection via the dhcp agent. "
-                      "This option is only useful if running on a host that "
-                      "does not support namespaces otherwise access_network "
-                      "should be used.")),
-    cfg.StrOpt('default_transport_type', default='stt',
-               choices=('stt', 'gre', 'bridge', 'ipsec_gre', 'ipsec_stt'),
-               deprecated_group='NVP',
-               help=_("The default network transport type to use")),
-    cfg.StrOpt('agent_mode', default=AgentModes.AGENT,
-               deprecated_group='NVP',
-               help=_("The mode used to implement DHCP/metadata services.")),
-    cfg.StrOpt('replication_mode', default=ReplicationModes.SERVICE,
-               help=_("The default option leverages service nodes to perform"
-                      " packet replication though one could set to this to "
-                      "'source' to perform replication locally. This is useful"
-                      " if one does not want to deploy a service node(s). "
-                      "It must be set to 'service' for leveraging distributed "
-                      "routers."))
-]
-
-sync_opts = [
-    cfg.IntOpt('state_sync_interval', default=10,
-               deprecated_group='NVP_SYNC',
-               help=_("Interval in seconds between runs of the state "
-                      "synchronization task. Set it to 0 to disable it")),
-    cfg.IntOpt('max_random_sync_delay', default=0,
-               deprecated_group='NVP_SYNC',
-               help=_("Maximum value for the additional random "
-                      "delay in seconds between runs of the state "
-                      "synchronization task")),
-    cfg.IntOpt('min_sync_req_delay', default=1,
-               deprecated_group='NVP_SYNC',
-               help=_('Minimum delay, in seconds, between two state '
-                      'synchronization queries to NSX. It must not '
-                      'exceed state_sync_interval')),
-    cfg.IntOpt('min_chunk_size', default=500,
-               deprecated_group='NVP_SYNC',
-               help=_('Minimum number of resources to be retrieved from NSX '
-                      'during state synchronization')),
-    cfg.BoolOpt('always_read_status', default=False,
-                deprecated_group='NVP_SYNC',
-                help=_('Always read operational status from backend on show '
-                       'operations. Enabling this option might slow down '
-                       'the system.'))
-]
-
-connection_opts = [
-    cfg.StrOpt('nsx_user',
-               default='admin',
-               deprecated_name='nvp_user',
-               help=_('User name for NSX controllers in this cluster')),
-    cfg.StrOpt('nsx_password',
-               default='admin',
-               deprecated_name='nvp_password',
-               secret=True,
-               help=_('Password for NSX controllers in this cluster')),
-    cfg.IntOpt('http_timeout',
-               default=75,
-               help=_('Time before aborting a request')),
-    cfg.IntOpt('retries',
-               default=2,
-               help=_('Number of time a request should be retried')),
-    cfg.IntOpt('redirects',
-               default=2,
-               help=_('Number of times a redirect should be followed')),
-    cfg.ListOpt('nsx_controllers',
-                deprecated_name='nvp_controllers',
-                help=_("Lists the NSX controllers in this cluster")),
-    cfg.IntOpt('conn_idle_timeout',
-               default=900,
-               help=_('Reconnect connection to nsx if not used within this '
-                      'amount of time.')),
-]
-
-cluster_opts = [
-    cfg.StrOpt('default_tz_uuid',
-               help=_("This is uuid of the default NSX Transport zone that "
-                      "will be used for creating tunneled isolated "
-                      "\"Neutron\" networks. It needs to be created in NSX "
-                      "before starting Neutron with the nsx plugin.")),
-    cfg.StrOpt('default_l3_gw_service_uuid',
-               help=_("Unique identifier of the NSX L3 Gateway service "
-                      "which will be used for implementing routers and "
-                      "floating IPs")),
-    cfg.StrOpt('default_l2_gw_service_uuid',
-               help=_("Unique identifier of the NSX L2 Gateway service "
-                      "which will be used by default for network gateways")),
-    cfg.StrOpt('default_service_cluster_uuid',
-               help=_("Unique identifier of the Service Cluster which will "
-                      "be used by logical services like dhcp and metadata")),
-    cfg.StrOpt('default_interface_name', default='breth0',
-               help=_("Name of the interface on a L2 Gateway transport node"
-                      "which should be used by default when setting up a "
-                      "network connection")),
-]
-
-DEFAULT_STATUS_CHECK_INTERVAL = 2000
-
-vcns_opts = [
-    cfg.StrOpt('user',
-               default='admin',
-               help=_('User name for vsm')),
-    cfg.StrOpt('password',
-               default='default',
-               secret=True,
-               help=_('Password for vsm')),
-    cfg.StrOpt('manager_uri',
-               help=_('uri for vsm')),
-    cfg.StrOpt('datacenter_moid',
-               help=_('Optional parameter identifying the ID of datacenter '
-                      'to deploy NSX Edges')),
-    cfg.StrOpt('deployment_container_id',
-               help=_('Optional parameter identifying the ID of datastore to '
-                      'deploy NSX Edges')),
-    cfg.StrOpt('resource_pool_id',
-               help=_('Optional parameter identifying the ID of resource to '
-                      'deploy NSX Edges')),
-    cfg.StrOpt('datastore_id',
-               help=_('Optional parameter identifying the ID of datastore to '
-                      'deploy NSX Edges')),
-    cfg.StrOpt('external_network',
-               help=_('Network ID for physical network connectivity')),
-    cfg.IntOpt('task_status_check_interval',
-               default=DEFAULT_STATUS_CHECK_INTERVAL,
-               help=_("Task status check interval"))
-]
-
-# Register the configuration options
-cfg.CONF.register_opts(connection_opts)
-cfg.CONF.register_opts(cluster_opts)
-cfg.CONF.register_opts(vcns_opts, group="vcns")
-cfg.CONF.register_opts(base_opts, group="NSX")
-cfg.CONF.register_opts(sync_opts, group="NSX_SYNC")
-
-
-def validate_config_options():
-    if cfg.CONF.NSX.replication_mode not in (ReplicationModes.SERVICE,
-                                             ReplicationModes.SOURCE):
-        error = (_("Invalid replication_mode: %s") %
-                 cfg.CONF.NSX.replication_mode)
-        raise nsx_exc.NsxPluginException(err_msg=error)
diff --git a/neutron/plugins/vmware/common/exceptions.py b/neutron/plugins/vmware/common/exceptions.py
deleted file mode 100644 (file)
index 94cfdb2..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright 2012 VMware, Inc
-#
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import exceptions as n_exc
-
-
-class NsxPluginException(n_exc.NeutronException):
-    message = _("An unexpected error occurred in the NSX Plugin: %(err_msg)s")
-
-
-class InvalidVersion(NsxPluginException):
-    message = _("Unable to fulfill request with version %(version)s.")
-
-
-class InvalidConnection(NsxPluginException):
-    message = _("Invalid NSX connection parameters: %(conn_params)s")
-
-
-class InvalidClusterConfiguration(NsxPluginException):
-    message = _("Invalid cluster values: %(invalid_attrs)s. Please ensure "
-                "that these values are specified in the [DEFAULT] "
-                "section of the NSX plugin ini file.")
-
-
-class InvalidNovaZone(NsxPluginException):
-    message = _("Unable to find cluster config entry "
-                "for nova zone: %(nova_zone)s")
-
-
-class NoMorePortsException(NsxPluginException):
-    message = _("Unable to create port on network %(network)s. "
-                "Maximum number of ports reached")
-
-
-class NatRuleMismatch(NsxPluginException):
-    message = _("While retrieving NAT rules, %(actual_rules)s were found "
-                "whereas rules in the (%(min_rules)s,%(max_rules)s) interval "
-                "were expected")
-
-
-class InvalidAttachmentType(NsxPluginException):
-    message = _("Invalid NSX attachment type '%(attachment_type)s'")
-
-
-class MaintenanceInProgress(NsxPluginException):
-    message = _("The networking backend is currently in maintenance mode and "
-                "therefore unable to accept requests which modify its state. "
-                "Please try later.")
-
-
-class L2GatewayAlreadyInUse(n_exc.Conflict):
-    message = _("Gateway Service %(gateway)s is already in use")
-
-
-class InvalidSecurityCertificate(NsxPluginException):
-    message = _("An invalid security certificate was specified for the "
-                "gateway device. Certificates must be enclosed between "
-                "'-----BEGIN CERTIFICATE-----' and "
-                "'-----END CERTIFICATE-----'")
-
-
-class ServiceOverQuota(n_exc.Conflict):
-    message = _("Quota exceeded for Vcns resource: %(overs)s: %(err_msg)s")
-
-
-class ServiceClusterUnavailable(NsxPluginException):
-    message = _("Service cluster: '%(cluster_id)s' is unavailable. Please, "
-                "check NSX setup and/or configuration")
-
-
-class PortConfigurationError(NsxPluginException):
-    message = _("An error occurred while connecting LSN %(lsn_id)s "
-                "and network %(net_id)s via port %(port_id)s")
-
-    def __init__(self, **kwargs):
-        super(PortConfigurationError, self).__init__(**kwargs)
-        self.port_id = kwargs.get('port_id')
-
-
-class LsnNotFound(n_exc.NotFound):
-    message = _('Unable to find LSN for %(entity)s %(entity_id)s')
-
-
-class LsnPortNotFound(n_exc.NotFound):
-    message = (_('Unable to find port for LSN %(lsn_id)s '
-                 'and %(entity)s %(entity_id)s'))
-
-
-class LsnMigrationConflict(n_exc.Conflict):
-    message = _("Unable to migrate network '%(net_id)s' to LSN: %(reason)s")
-
-
-class LsnConfigurationConflict(NsxPluginException):
-    message = _("Configuration conflict on Logical Service Node %(lsn_id)s")
diff --git a/neutron/plugins/vmware/common/nsx_utils.py b/neutron/plugins/vmware/common/nsx_utils.py
deleted file mode 100644 (file)
index 754e75d..0000000
+++ /dev/null
@@ -1,318 +0,0 @@
-# Copyright 2013 VMware Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.api.v2 import attributes as attr
-from neutron.common import exceptions as n_exc
-from neutron.extensions import multiprovidernet as mpnet
-from neutron.extensions import providernet as pnet
-from neutron.i18n import _LW
-from neutron.openstack.common import log
-from neutron.plugins.vmware.api_client import client
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import utils as vmw_utils
-from neutron.plugins.vmware.dbexts import db as nsx_db
-from neutron.plugins.vmware.dbexts import networkgw_db
-from neutron.plugins.vmware import nsx_cluster
-from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
-from neutron.plugins.vmware.nsxlib import router as routerlib
-from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
-from neutron.plugins.vmware.nsxlib import switch as switchlib
-
-LOG = log.getLogger(__name__)
-
-
-def fetch_nsx_switches(session, cluster, neutron_net_id):
-    """Retrieve logical switches for a neutron network.
-
-    This function is optimized for fetching all the lswitches always
-    with a single NSX query.
-    If there is more than 1 logical switch (chained switches use case)
-    NSX lswitches are queried by 'quantum_net_id' tag. Otherwise the NSX
-    lswitch is directly retrieved by id (more efficient).
-    """
-    nsx_switch_ids = get_nsx_switch_ids(session, cluster, neutron_net_id)
-    if len(nsx_switch_ids) > 1:
-        lswitches = switchlib.get_lswitches(cluster, neutron_net_id)
-    else:
-        lswitches = [switchlib.get_lswitch_by_id(
-            cluster, nsx_switch_ids[0])]
-    return lswitches
-
-
-def get_nsx_switch_ids(session, cluster, neutron_network_id):
-    """Return the NSX switch id for a given neutron network.
-
-    First lookup for mappings in Neutron database. If no mapping is
-    found, query the NSX backend and add the mappings.
-    """
-    nsx_switch_ids = nsx_db.get_nsx_switch_ids(
-        session, neutron_network_id)
-    if not nsx_switch_ids:
-        # Find logical switches from backend.
-        # This is a rather expensive query, but it won't be executed
-        # more than once for each network in Neutron's lifetime
-        nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
-        if not nsx_switches:
-            LOG.warn(_LW("Unable to find NSX switches for Neutron network %s"),
-                     neutron_network_id)
-            return
-        nsx_switch_ids = []
-        with session.begin(subtransactions=True):
-            for nsx_switch in nsx_switches:
-                nsx_switch_id = nsx_switch['uuid']
-                nsx_switch_ids.append(nsx_switch_id)
-                # Create DB mapping
-                nsx_db.add_neutron_nsx_network_mapping(
-                    session,
-                    neutron_network_id,
-                    nsx_switch_id)
-    return nsx_switch_ids
-
-
-def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
-    """Return the NSX switch and port uuids for a given neutron port.
-
-    First, look up the Neutron database. If not found, execute
-    a query on NSX platform as the mapping might be missing because
-    the port was created before upgrading to grizzly.
-
-    This routine also retrieves the identifier of the logical switch in
-    the backend where the port is plugged. Prior to Icehouse this
-    information was not available in the Neutron Database. For dealing
-    with pre-existing records, this routine will query the backend
-    for retrieving the correct switch identifier.
-
-    As of Icehouse release it is not indeed anymore possible to assume
-    the backend logical switch identifier is equal to the neutron
-    network identifier.
-    """
-    nsx_switch_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
-        session, neutron_port_id)
-    if not nsx_switch_id:
-        # Find logical switch for port from backend
-        # This is a rather expensive query, but it won't be executed
-        # more than once for each port in Neutron's lifetime
-        nsx_ports = switchlib.query_lswitch_lports(
-            cluster, '*', relations='LogicalSwitchConfig',
-            filters={'tag': neutron_port_id,
-                     'tag_scope': 'q_port_id'})
-        # Only one result expected
-        # NOTE(salv-orlando): Not handling the case where more than one
-        # port is found with the same neutron port tag
-        if not nsx_ports:
-            LOG.warn(_LW("Unable to find NSX port for Neutron port %s"),
-                     neutron_port_id)
-            # This method is supposed to return a tuple
-            return None, None
-        nsx_port = nsx_ports[0]
-        nsx_switch_id = (nsx_port['_relations']
-                         ['LogicalSwitchConfig']['uuid'])
-        if nsx_port_id:
-            # Mapping already exists. Delete before recreating
-            nsx_db.delete_neutron_nsx_port_mapping(
-                session, neutron_port_id)
-        else:
-            nsx_port_id = nsx_port['uuid']
-        # (re)Create DB mapping
-        nsx_db.add_neutron_nsx_port_mapping(
-            session, neutron_port_id,
-            nsx_switch_id, nsx_port_id)
-    return nsx_switch_id, nsx_port_id
-
-
-def get_nsx_security_group_id(session, cluster, neutron_id):
-    """Return the NSX sec profile uuid for a given neutron sec group.
-
-    First, look up the Neutron database. If not found, execute
-    a query on NSX platform as the mapping might be missing.
-    NOTE: Security groups are called 'security profiles' on the NSX backend.
-    """
-    nsx_id = nsx_db.get_nsx_security_group_id(session, neutron_id)
-    if not nsx_id:
-        # Find security profile on backend.
-        # This is a rather expensive query, but it won't be executed
-        # more than once for each security group in Neutron's lifetime
-        nsx_sec_profiles = secgrouplib.query_security_profiles(
-            cluster, '*',
-            filters={'tag': neutron_id,
-                     'tag_scope': 'q_sec_group_id'})
-        # Only one result expected
-        # NOTE(salv-orlando): Not handling the case where more than one
-        # security profile is found with the same neutron port tag
-        if not nsx_sec_profiles:
-            LOG.warn(_LW("Unable to find NSX security profile for Neutron "
-                         "security group %s"), neutron_id)
-            return
-        elif len(nsx_sec_profiles) > 1:
-            LOG.warn(_LW("Multiple NSX security profiles found for Neutron "
-                         "security group %s"), neutron_id)
-        nsx_sec_profile = nsx_sec_profiles[0]
-        nsx_id = nsx_sec_profile['uuid']
-        with session.begin(subtransactions=True):
-            # Create DB mapping
-            nsx_db.add_neutron_nsx_security_group_mapping(
-                session, neutron_id, nsx_id)
-    return nsx_id
-
-
-def get_nsx_router_id(session, cluster, neutron_router_id):
-    """Return the NSX router uuid for a given neutron router.
-
-    First, look up the Neutron database. If not found, execute
-    a query on NSX platform as the mapping might be missing.
-    """
-    nsx_router_id = nsx_db.get_nsx_router_id(
-        session, neutron_router_id)
-    if not nsx_router_id:
-        # Find logical router from backend.
-        # This is a rather expensive query, but it won't be executed
-        # more than once for each router in Neutron's lifetime
-        nsx_routers = routerlib.query_lrouters(
-            cluster, '*',
-            filters={'tag': neutron_router_id,
-                     'tag_scope': 'q_router_id'})
-        # Only one result expected
-        # NOTE(salv-orlando): Not handling the case where more than one
-        # port is found with the same neutron port tag
-        if not nsx_routers:
-            LOG.warn(_LW("Unable to find NSX router for Neutron router %s"),
-                     neutron_router_id)
-            return
-        nsx_router = nsx_routers[0]
-        nsx_router_id = nsx_router['uuid']
-        with session.begin(subtransactions=True):
-            # Create DB mapping
-            nsx_db.add_neutron_nsx_router_mapping(
-                session,
-                neutron_router_id,
-                nsx_router_id)
-    return nsx_router_id
-
-
-def create_nsx_cluster(cluster_opts, concurrent_connections, gen_timeout):
-    cluster = nsx_cluster.NSXCluster(**cluster_opts)
-
-    def _ctrl_split(x, y):
-        return (x, int(y), True)
-
-    api_providers = [_ctrl_split(*ctrl.split(':'))
-                     for ctrl in cluster.nsx_controllers]
-    cluster.api_client = client.NsxApiClient(
-        api_providers, cluster.nsx_user, cluster.nsx_password,
-        http_timeout=cluster.http_timeout,
-        retries=cluster.retries,
-        redirects=cluster.redirects,
-        concurrent_connections=concurrent_connections,
-        gen_timeout=gen_timeout)
-    return cluster
-
-
-def get_nsx_device_status(cluster, nsx_uuid):
-    try:
-        status_up = l2gwlib.get_gateway_device_status(
-            cluster, nsx_uuid)
-        if status_up:
-            return networkgw_db.STATUS_ACTIVE
-        else:
-            return networkgw_db.STATUS_DOWN
-    except api_exc.NsxApiException:
-        return networkgw_db.STATUS_UNKNOWN
-    except n_exc.NotFound:
-        return networkgw_db.ERROR
-
-
-def get_nsx_device_statuses(cluster, tenant_id):
-    try:
-        status_dict = l2gwlib.get_gateway_devices_status(
-            cluster, tenant_id)
-        return dict((nsx_device_id,
-                     networkgw_db.STATUS_ACTIVE if connected
-                     else networkgw_db.STATUS_DOWN) for
-                    (nsx_device_id, connected) in status_dict.iteritems())
-    except api_exc.NsxApiException:
-        # Do not make a NSX API exception fatal
-        if tenant_id:
-            LOG.warn(_LW("Unable to retrieve operational status for gateway "
-                         "devices belonging to tenant: %s"), tenant_id)
-        else:
-            LOG.warn(_LW("Unable to retrieve operational status for "
-                         "gateway devices"))
-
-
-def _convert_bindings_to_nsx_transport_zones(bindings):
-    nsx_transport_zones_config = []
-    for binding in bindings:
-        transport_entry = {}
-        if binding.binding_type in [vmw_utils.NetworkTypes.FLAT,
-                                    vmw_utils.NetworkTypes.VLAN]:
-            transport_entry['transport_type'] = (
-                vmw_utils.NetworkTypes.BRIDGE)
-            transport_entry['binding_config'] = {}
-            vlan_id = binding.vlan_id
-            if vlan_id:
-                transport_entry['binding_config'] = (
-                    {'vlan_translation': [{'transport': vlan_id}]})
-        else:
-            transport_entry['transport_type'] = binding.binding_type
-        transport_entry['zone_uuid'] = binding.phy_uuid
-        nsx_transport_zones_config.append(transport_entry)
-    return nsx_transport_zones_config
-
-
-def _convert_segments_to_nsx_transport_zones(segments, default_tz_uuid):
-    nsx_transport_zones_config = []
-    for transport_zone in segments:
-        for value in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
-                      pnet.SEGMENTATION_ID]:
-            if transport_zone.get(value) == attr.ATTR_NOT_SPECIFIED:
-                transport_zone[value] = None
-
-        transport_entry = {}
-        transport_type = transport_zone.get(pnet.NETWORK_TYPE)
-        if transport_type in [vmw_utils.NetworkTypes.FLAT,
-                              vmw_utils.NetworkTypes.VLAN]:
-            transport_entry['transport_type'] = (
-                vmw_utils.NetworkTypes.BRIDGE)
-            transport_entry['binding_config'] = {}
-            vlan_id = transport_zone.get(pnet.SEGMENTATION_ID)
-            if vlan_id:
-                transport_entry['binding_config'] = (
-                    {'vlan_translation': [{'transport': vlan_id}]})
-        else:
-            transport_entry['transport_type'] = transport_type
-        transport_entry['zone_uuid'] = (
-            transport_zone[pnet.PHYSICAL_NETWORK] or default_tz_uuid)
-        nsx_transport_zones_config.append(transport_entry)
-    return nsx_transport_zones_config
-
-
-def convert_to_nsx_transport_zones(
-    default_tz_uuid, network=None, bindings=None,
-    default_transport_type=None):
-
-    # Convert fields from provider request to nsx format
-    if (network and not attr.is_attr_set(
-        network.get(mpnet.SEGMENTS))):
-        return [{"zone_uuid": default_tz_uuid,
-                 "transport_type": default_transport_type}]
-
-    # Convert fields from db to nsx format
-    if bindings:
-        return _convert_bindings_to_nsx_transport_zones(bindings)
-
-    # If we end up here we need to convert multiprovider segments into nsx
-    # transport zone configurations
-    return _convert_segments_to_nsx_transport_zones(
-        network.get(mpnet.SEGMENTS), default_tz_uuid)
diff --git a/neutron/plugins/vmware/common/securitygroups.py b/neutron/plugins/vmware/common/securitygroups.py
deleted file mode 100644 (file)
index db61b72..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.openstack.common import log
-from neutron.plugins.vmware.common import nsx_utils
-
-LOG = log.getLogger(__name__)
-# Protocol number look up for supported protocols
-protocol_num_look_up = {'tcp': 6, 'icmp': 1, 'udp': 17}
-
-
-def _convert_to_nsx_rule(session, cluster, rule, with_id=False):
-    """Converts a Neutron security group rule to the NSX format.
-
-    This routine also replaces Neutron IDs with NSX UUIDs.
-    """
-    nsx_rule = {}
-    params = ['remote_ip_prefix', 'protocol',
-              'remote_group_id', 'port_range_min',
-              'port_range_max', 'ethertype']
-    if with_id:
-        params.append('id')
-
-    for param in params:
-        value = rule.get(param)
-        if param not in rule:
-            nsx_rule[param] = value
-        elif not value:
-            pass
-        elif param == 'remote_ip_prefix':
-            nsx_rule['ip_prefix'] = rule['remote_ip_prefix']
-        elif param == 'remote_group_id':
-            nsx_rule['profile_uuid'] = nsx_utils.get_nsx_security_group_id(
-                session, cluster, rule['remote_group_id'])
-
-        elif param == 'protocol':
-            try:
-                nsx_rule['protocol'] = int(rule['protocol'])
-            except (ValueError, TypeError):
-                nsx_rule['protocol'] = (
-                    protocol_num_look_up[rule['protocol']])
-        else:
-            nsx_rule[param] = value
-    return nsx_rule
-
-
-def _convert_to_nsx_rules(session, cluster, rules, with_id=False):
-    """Converts a list of Neutron security group rules to the NSX format."""
-    nsx_rules = {'logical_port_ingress_rules': [],
-                 'logical_port_egress_rules': []}
-    for direction in ['logical_port_ingress_rules',
-                      'logical_port_egress_rules']:
-        for rule in rules[direction]:
-            nsx_rules[direction].append(
-                _convert_to_nsx_rule(session, cluster, rule, with_id))
-    return nsx_rules
-
-
-def get_security_group_rules_nsx_format(session, cluster,
-                                        security_group_rules, with_id=False):
-    """Convert neutron security group rules into NSX format.
-
-    This routine splits Neutron security group rules into two lists, one
-    for ingress rules and the other for egress rules.
-    """
-
-    def fields(rule):
-        _fields = ['remote_ip_prefix', 'remote_group_id', 'protocol',
-                   'port_range_min', 'port_range_max', 'protocol', 'ethertype']
-        if with_id:
-            _fields.append('id')
-        return dict((k, v) for k, v in rule.iteritems() if k in _fields)
-
-    ingress_rules = []
-    egress_rules = []
-    for rule in security_group_rules:
-        if rule.get('souce_group_id'):
-            rule['remote_group_id'] = nsx_utils.get_nsx_security_group_id(
-                session, cluster, rule['remote_group_id'])
-
-        if rule['direction'] == 'ingress':
-            ingress_rules.append(fields(rule))
-        elif rule['direction'] == 'egress':
-            egress_rules.append(fields(rule))
-    rules = {'logical_port_ingress_rules': egress_rules,
-             'logical_port_egress_rules': ingress_rules}
-    return _convert_to_nsx_rules(session, cluster, rules, with_id)
-
-
-def merge_security_group_rules_with_current(session, cluster,
-                                            new_rules, current_rules):
-    merged_rules = get_security_group_rules_nsx_format(
-        session, cluster, current_rules)
-    for new_rule in new_rules:
-        rule = new_rule['security_group_rule']
-        if rule['direction'] == 'ingress':
-            merged_rules['logical_port_egress_rules'].append(
-                _convert_to_nsx_rule(session, cluster, rule))
-        elif rule['direction'] == 'egress':
-            merged_rules['logical_port_ingress_rules'].append(
-                _convert_to_nsx_rule(session, cluster, rule))
-    return merged_rules
-
-
-def remove_security_group_with_id_and_id_field(rules, rule_id):
-    """Remove rule by rule_id.
-
-    This function receives all of the current rule associated with a
-    security group and then removes the rule that matches the rule_id. In
-    addition it removes the id field in the dict with each rule since that
-    should not be passed to nsx.
-    """
-    for rule_direction in rules.values():
-        item_to_remove = None
-        for port_rule in rule_direction:
-            if port_rule['id'] == rule_id:
-                item_to_remove = port_rule
-            else:
-                # remove key from dictionary for NSX
-                del port_rule['id']
-        if item_to_remove:
-            rule_direction.remove(item_to_remove)
diff --git a/neutron/plugins/vmware/common/sync.py b/neutron/plugins/vmware/common/sync.py
deleted file mode 100644 (file)
index e60ddd4..0000000
+++ /dev/null
@@ -1,676 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import random
-
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
-
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron import context
-from neutron.db import external_net_db
-from neutron.db import l3_db
-from neutron.db import models_v2
-from neutron.extensions import l3
-from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import log
-from neutron.openstack.common import loopingcall
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import nsx_utils
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import router as routerlib
-from neutron.plugins.vmware.nsxlib import switch as switchlib
-
-# Maximum page size for a single request
-# NOTE(salv-orlando): This might become a version-dependent map should the
-# limit be raised in future versions
-MAX_PAGE_SIZE = 5000
-
-LOG = log.getLogger(__name__)
-
-
class NsxCache(object):
    """A simple Cache for NSX resources.

    Associates resource id with resource hash to rapidly identify
    updated resources.
    Each entry in the cache also stores the following information:
    - changed: the resource in the cache has been altered following
      an update or a delete
    - hit: the resource has been visited during an update (and possibly
      left unchanged)
    - data: current resource data
    - data_bk: backup of resource data prior to its removal
    """

    def __init__(self):
        # Maps a uuid to the dict containing it
        self._uuid_dict_mappings = {}
        # Dicts for NSX cached resources
        self._lswitches = {}
        self._lswitchports = {}
        self._lrouters = {}

    def __getitem__(self, key):
        """Return the cache entry for *key*; raises KeyError if absent."""
        # uuids are unique across the various types of resources
        # TODO(salv-orlando): Avoid lookups over all dictionaries
        # when retrieving items
        # Fetch lswitches, lports, or lrouters
        resources = self._uuid_dict_mappings[key]
        return resources[key]

    def _clear_changed_flag_and_remove_from_cache(self, resources):
        """Reset 'changed' flags and evict entries removed from NSX."""
        # Clear the 'changed' attribute for all items.
        # Iterate over a materialized copy: entries may be deleted during
        # the loop, which raises RuntimeError on Python 3 dict views.
        for uuid, item in list(resources.items()):
            if item.pop('changed', None) and not item.get('data'):
                # The item is not anymore in NSX, so delete it
                del resources[uuid]
                del self._uuid_dict_mappings[uuid]
                LOG.debug("Removed item %s from NSX object cache", uuid)

    def _update_resources(self, resources, new_resources, clear_changed=True):
        """Merge *new_resources* into *resources*, flagging changes.

        Every incoming item is marked 'hit'; items whose payload hash
        differs from the cached one are flagged 'changed' and the prior
        payload is preserved in 'data_bk'.
        """
        if clear_changed:
            self._clear_changed_flag_and_remove_from_cache(resources)

        def do_hash(item):
            return hash(jsonutils.dumps(item))

        # Parse new data and identify new, deleted, and updated resources
        for item in new_resources:
            item_id = item['uuid']
            if resources.get(item_id):
                new_hash = do_hash(item)
                if new_hash != resources[item_id]['hash']:
                    resources[item_id]['hash'] = new_hash
                    resources[item_id]['changed'] = True
                    resources[item_id]['data_bk'] = (
                        resources[item_id]['data'])
                    resources[item_id]['data'] = item
                # Mark the item as hit in any case
                resources[item_id]['hit'] = True
                LOG.debug("Updating item %s in NSX object cache", item_id)
            else:
                resources[item_id] = {'hash': do_hash(item),
                                      'hit': True,
                                      'changed': True,
                                      'data': item}
                # add a uuid to dict mapping for easy retrieval
                # with __getitem__
                self._uuid_dict_mappings[item_id] = resources
                LOG.debug("Added item %s to NSX object cache", item_id)

    def _delete_resources(self, resources):
        """Flag as changed every entry not visited in the last update."""
        # Mark for removal all the elements which have not been visited,
        # clearing the 'hit' attribute as a side effect.
        # (items() replaces Python-2-only iteritems())
        for to_delete in [k for (k, v) in resources.items()
                          if not v.pop('hit', False)]:
            resources[to_delete]['changed'] = True
            resources[to_delete]['data_bk'] = (
                resources[to_delete].pop('data', None))

    def _get_resource_ids(self, resources, changed_only):
        """Return cached uuids, optionally only for changed entries."""
        if changed_only:
            return [k for (k, v) in resources.items()
                    if v.get('changed')]
        # Always return a real list; dict.keys() is a lazy view on Python 3
        return list(resources)

    def get_lswitches(self, changed_only=False):
        """Return cached logical switch uuids."""
        return self._get_resource_ids(self._lswitches, changed_only)

    def get_lrouters(self, changed_only=False):
        """Return cached logical router uuids."""
        return self._get_resource_ids(self._lrouters, changed_only)

    def get_lswitchports(self, changed_only=False):
        """Return cached logical switch port uuids."""
        return self._get_resource_ids(self._lswitchports, changed_only)

    def update_lswitch(self, lswitch):
        """Insert/refresh a single logical switch without clearing flags."""
        self._update_resources(self._lswitches, [lswitch], clear_changed=False)

    def update_lrouter(self, lrouter):
        """Insert/refresh a single logical router without clearing flags."""
        self._update_resources(self._lrouters, [lrouter], clear_changed=False)

    def update_lswitchport(self, lswitchport):
        """Insert/refresh a single switch port without clearing flags."""
        self._update_resources(self._lswitchports, [lswitchport],
                               clear_changed=False)

    def process_updates(self, lswitches=None,
                        lrouters=None, lswitchports=None):
        """Merge fetched resources into the cache; return changed uuids.

        The 'or []' guards make the documented None defaults usable
        (previously iterating None raised TypeError).
        """
        self._update_resources(self._lswitches, lswitches or [])
        self._update_resources(self._lrouters, lrouters or [])
        self._update_resources(self._lswitchports, lswitchports or [])
        return (self._get_resource_ids(self._lswitches, changed_only=True),
                self._get_resource_ids(self._lrouters, changed_only=True),
                self._get_resource_ids(self._lswitchports, changed_only=True))

    def process_deletes(self):
        """Flag entries missed by the last sweep; return changed uuids."""
        self._delete_resources(self._lswitches)
        self._delete_resources(self._lrouters)
        self._delete_resources(self._lswitchports)
        return (self._get_resource_ids(self._lswitches, changed_only=True),
                self._get_resource_ids(self._lrouters, changed_only=True),
                self._get_resource_ids(self._lswitchports, changed_only=True))
-
-
class SyncParameters(object):
    """Defines attributes used by the synchronization procedure.

    chunk_size: Actual chunk size
    extra_chunk_size: Additional data to fetch because of chunk size
                      adjustment
    current_chunk: Counter of the current data chunk being synchronized
    Page cursors: markers for the next resource to fetch.
                 'start' means page cursor unset for fetching 1st page
    init_sync_performed: True if the initial synchronization concluded
    """

    def __init__(self, min_chunk_size):
        # Start from the configured minimum; tuned at runtime
        self.chunk_size = min_chunk_size
        self.extra_chunk_size = 0
        self.current_chunk = 0
        # One page cursor per resource type, all unset initially
        self.ls_cursor = self.lr_cursor = self.lp_cursor = 'start'
        self.init_sync_performed = False
        self.total_size = 0
-
-
-def _start_loopingcall(min_chunk_size, state_sync_interval, func):
-    """Start a loopingcall for the synchronization task."""
-    # Start a looping call to synchronize operational status
-    # for neutron resources
-    if not state_sync_interval:
-        # do not start the looping call if specified
-        # sync interval is 0
-        return
-    state_synchronizer = loopingcall.DynamicLoopingCall(
-        func, sp=SyncParameters(min_chunk_size))
-    state_synchronizer.start(
-        periodic_interval_max=state_sync_interval)
-    return state_synchronizer
-
-
class NsxSynchronizer(object):
    """Synchronize operational status of Neutron resources with NSX.

    A periodic task fetches logical switches, logical routers and
    logical switch ports from the NSX backend in chunks, tracks them in
    an NsxCache, and propagates status changes to the 'status' field of
    the corresponding Neutron networks, routers and ports.
    """

    # Paged status-query URIs; the '*' parent resource id fetches ports
    # across all logical switches.
    LS_URI = nsxlib._build_uri_path(
        switchlib.LSWITCH_RESOURCE, fields='uuid,tags,fabric_status',
        relations='LogicalSwitchStatus')
    LR_URI = nsxlib._build_uri_path(
        routerlib.LROUTER_RESOURCE, fields='uuid,tags,fabric_status',
        relations='LogicalRouterStatus')
    LP_URI = nsxlib._build_uri_path(
        switchlib.LSWITCHPORT_RESOURCE,
        parent_resource_id='*',
        fields='uuid,tags,fabric_status_up',
        relations='LogicalPortStatus')

    def __init__(self, plugin, cluster, state_sync_interval,
                 req_delay, min_chunk_size, max_rand_delay=0):
        """Validate parameters and start the synchronization looping call.

        :param plugin: plugin instance used for Neutron DB access.
        :param cluster: NSX cluster handle used for backend queries.
        :param state_sync_interval: seconds between full sync passes;
            0 disables synchronization entirely.
        :param req_delay: minimum delay between backend requests.
        :param min_chunk_size: initial/minimum chunk size for fetches.
        :param max_rand_delay: upper bound of the random delay added at
            the end of each complete pass.
        :raises nsx_exc.NsxPluginException: if req_delay exceeds
            state_sync_interval.
        """
        random.seed()
        self._nsx_cache = NsxCache()
        # Store parameters as instance members
        # NOTE(salv-orlando): apologies if it looks java-ish
        self._plugin = plugin
        self._cluster = cluster
        self._req_delay = req_delay
        self._sync_interval = state_sync_interval
        self._max_rand_delay = max_rand_delay
        # Validate parameters
        if self._sync_interval < self._req_delay:
            err_msg = (_("Minimum request delay:%(req_delay)s must not "
                         "exceed synchronization interval:%(sync_interval)s") %
                       {'req_delay': self._req_delay,
                        'sync_interval': self._sync_interval})
            LOG.error(err_msg)
            raise nsx_exc.NsxPluginException(err_msg=err_msg)
        # Backoff time in case of failures while fetching sync data
        self._sync_backoff = 1
        # Store the looping call in an instance variable to allow unit tests
        # for controlling its lifecycle
        self._sync_looping_call = _start_loopingcall(
            min_chunk_size, state_sync_interval, self._synchronize_state)

    def _get_tag_dict(self, tags):
        """Flatten an NSX tag list into a {scope: tag} dict."""
        return dict((tag.get('scope'), tag['tag']) for tag in tags)

    def synchronize_network(self, context, neutron_network_data,
                            lswitches=None):
        """Synchronize a Neutron network with its NSX counterpart.

        This routine synchronizes a set of switches when a Neutron
        network is mapped to multiple lswitches.
        """
        if not lswitches:
            # Try to get logical switches from nsx
            try:
                lswitches = nsx_utils.fetch_nsx_switches(
                    context.session, self._cluster,
                    neutron_network_data['id'])
            except exceptions.NetworkNotFound:
                # TODO(salv-orlando): We should be catching
                # api_exc.ResourceNotFound here
                # The logical switch was not found
                LOG.warning(_LW("Logical switch for neutron network %s not "
                                "found on NSX."), neutron_network_data['id'])
                lswitches = []
            else:
                for lswitch in lswitches:
                    self._nsx_cache.update_lswitch(lswitch)
        # By default assume things go wrong
        status = constants.NET_STATUS_ERROR
        # In most cases lswitches will contain a single element
        for ls in lswitches:
            if not ls:
                # Logical switch was deleted
                break
            ls_status = ls['_relations']['LogicalSwitchStatus']
            if not ls_status['fabric_status']:
                status = constants.NET_STATUS_DOWN
                break
        else:
            # No switch was down or missing. Set status to ACTIVE unless
            # there were no switches in the first place!
            if lswitches:
                status = constants.NET_STATUS_ACTIVE
        # Update db object
        if status == neutron_network_data['status']:
            # do nothing
            return

        with context.session.begin(subtransactions=True):
            try:
                network = self._plugin._get_network(context,
                                                    neutron_network_data['id'])
            except exceptions.NetworkNotFound:
                pass
            else:
                network.status = status
                LOG.debug("Updating status for neutron resource %(q_id)s to:"
                          " %(status)s",
                          {'q_id': neutron_network_data['id'],
                           'status': status})

    def _synchronize_lswitches(self, ctx, ls_uuids, scan_missing=False):
        """Sync the networks mapped to the given logical switch uuids.

        When scan_missing is True, all non-external networks are checked
        (used at the end of a full pass to catch deleted switches).
        """
        if not ls_uuids and not scan_missing:
            return
        neutron_net_ids = set()
        neutron_nsx_mappings = {}
        # TODO(salvatore-orlando): Deal with the case the tag
        # has been tampered with
        for ls_uuid in ls_uuids:
            # If the lswitch has been deleted, get backup copy of data
            lswitch = (self._nsx_cache[ls_uuid].get('data') or
                       self._nsx_cache[ls_uuid].get('data_bk'))
            tags = self._get_tag_dict(lswitch['tags'])
            neutron_id = tags.get('quantum_net_id')
            neutron_net_ids.add(neutron_id)
            neutron_nsx_mappings[neutron_id] = (
                neutron_nsx_mappings.get(neutron_id, []) +
                [self._nsx_cache[ls_uuid]])
        # Fetch neutron networks from database
        filters = {'router:external': [False]}
        if not scan_missing:
            filters['id'] = neutron_net_ids

        networks = self._plugin._get_collection(
            ctx, models_v2.Network, self._plugin._make_network_dict,
            filters=filters)

        for network in networks:
            lswitches = neutron_nsx_mappings.get(network['id'], [])
            lswitches = [lsw.get('data') for lsw in lswitches]
            self.synchronize_network(ctx, network, lswitches)

    def synchronize_router(self, context, neutron_router_data,
                           lrouter=None):
        """Synchronize a neutron router with its NSX counterpart."""
        if not lrouter:
            # Try to get router from nsx
            try:
                # This query will return the logical router status too
                nsx_router_id = nsx_utils.get_nsx_router_id(
                    context.session, self._cluster, neutron_router_data['id'])
                if nsx_router_id:
                    lrouter = routerlib.get_lrouter(
                        self._cluster, nsx_router_id)
            except exceptions.NotFound:
                # NOTE(salv-orlando): We should be catching
                # api_exc.ResourceNotFound here
                # The logical router was not found
                LOG.warning(_LW("Logical router for neutron router %s not "
                                "found on NSX."), neutron_router_data['id'])
            if lrouter:
                # Update the cache
                self._nsx_cache.update_lrouter(lrouter)

        # Note(salv-orlando): It might be worth adding a check to verify
        # the neutron resource tag in the nsx entity matches a Neutron id.
        # By default assume things go wrong
        status = constants.NET_STATUS_ERROR
        if lrouter:
            lr_status = (lrouter['_relations']
                         ['LogicalRouterStatus']
                         ['fabric_status'])
            status = (lr_status and
                      constants.NET_STATUS_ACTIVE
                      or constants.NET_STATUS_DOWN)
        # Update db object
        if status == neutron_router_data['status']:
            # do nothing
            return

        with context.session.begin(subtransactions=True):
            try:
                router = self._plugin._get_router(context,
                                                  neutron_router_data['id'])
            except l3.RouterNotFound:
                pass
            else:
                router.status = status
                LOG.debug("Updating status for neutron resource %(q_id)s to:"
                          " %(status)s",
                          {'q_id': neutron_router_data['id'],
                           'status': status})

    def _synchronize_lrouters(self, ctx, lr_uuids, scan_missing=False):
        """Sync the routers mapped to the given logical router uuids."""
        if not lr_uuids and not scan_missing:
            return
        # TODO(salvatore-orlando): Deal with the case the tag
        # has been tampered with
        neutron_router_mappings = {}
        for lr_uuid in lr_uuids:
            lrouter = (self._nsx_cache[lr_uuid].get('data') or
                       self._nsx_cache[lr_uuid].get('data_bk'))
            tags = self._get_tag_dict(lrouter['tags'])
            neutron_router_id = tags.get('q_router_id')
            if neutron_router_id:
                neutron_router_mappings[neutron_router_id] = (
                    self._nsx_cache[lr_uuid])
            else:
                LOG.warn(_LW("Unable to find Neutron router id for "
                             "NSX logical router: %s"), lr_uuid)
        # Fetch neutron routers from database
        filters = ({} if scan_missing else
                   {'id': neutron_router_mappings.keys()})
        routers = self._plugin._get_collection(
            ctx, l3_db.Router, self._plugin._make_router_dict,
            filters=filters)
        for router in routers:
            lrouter = neutron_router_mappings.get(router['id'])
            self.synchronize_router(
                ctx, router, lrouter and lrouter.get('data'))

    def synchronize_port(self, context, neutron_port_data,
                         lswitchport=None, ext_networks=None):
        """Synchronize a Neutron port with its NSX counterpart."""
        # Skip synchronization for ports on external networks
        if not ext_networks:
            ext_networks = [net['id'] for net in context.session.query(
                models_v2.Network).join(
                    external_net_db.ExternalNetwork,
                    (models_v2.Network.id ==
                     external_net_db.ExternalNetwork.network_id))]
        if neutron_port_data['network_id'] in ext_networks:
            with context.session.begin(subtransactions=True):
                neutron_port_data['status'] = constants.PORT_STATUS_ACTIVE
                return

        if not lswitchport:
            # Try to get port from nsx
            try:
                ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id(
                    context.session, self._cluster, neutron_port_data['id'])
                if lp_uuid:
                    lswitchport = switchlib.get_port(
                        self._cluster, ls_uuid, lp_uuid,
                        relations='LogicalPortStatus')
            except (exceptions.PortNotFoundOnNetwork):
                # NOTE(salv-orlando): We should be catching
                # api_exc.ResourceNotFound here instead
                # of PortNotFoundOnNetwork when the id exists but
                # the logical switch port was not found
                LOG.warning(_LW("Logical switch port for neutron port %s "
                                "not found on NSX."), neutron_port_data['id'])
                lswitchport = None
            else:
                # If lswitchport is not None, update the cache.
                # It could be none if the port was deleted from the backend
                if lswitchport:
                    self._nsx_cache.update_lswitchport(lswitchport)
        # Note(salv-orlando): It might be worth adding a check to verify
        # the neutron resource tag in the nsx entity matches the Neutron id.
        # By default assume things go wrong
        status = constants.PORT_STATUS_ERROR
        if lswitchport:
            lp_status = (lswitchport['_relations']
                         ['LogicalPortStatus']
                         ['fabric_status_up'])
            status = (lp_status and
                      constants.PORT_STATUS_ACTIVE
                      or constants.PORT_STATUS_DOWN)

        # Update db object
        if status == neutron_port_data['status']:
            # do nothing
            return

        with context.session.begin(subtransactions=True):
            try:
                port = self._plugin._get_port(context,
                                              neutron_port_data['id'])
            except exceptions.PortNotFound:
                pass
            else:
                port.status = status
                LOG.debug("Updating status for neutron resource %(q_id)s to:"
                          " %(status)s",
                          {'q_id': neutron_port_data['id'],
                           'status': status})

    def _synchronize_lswitchports(self, ctx, lp_uuids, scan_missing=False):
        """Sync the ports mapped to the given logical switch port uuids."""
        if not lp_uuids and not scan_missing:
            return
        # Find Neutron port id by tag - the tag is already
        # loaded in memory, no reason for doing a db query
        # TODO(salvatore-orlando): Deal with the case the tag
        # has been tampered with
        neutron_port_mappings = {}
        for lp_uuid in lp_uuids:
            lport = (self._nsx_cache[lp_uuid].get('data') or
                     self._nsx_cache[lp_uuid].get('data_bk'))
            tags = self._get_tag_dict(lport['tags'])
            neutron_port_id = tags.get('q_port_id')
            if neutron_port_id:
                neutron_port_mappings[neutron_port_id] = (
                    self._nsx_cache[lp_uuid])
        # Fetch neutron ports from database
        # At the first sync we need to fetch all ports
        filters = ({} if scan_missing else
                   {'id': neutron_port_mappings.keys()})
        # TODO(salv-orlando): Work out a solution for avoiding
        # this query
        ext_nets = [net['id'] for net in ctx.session.query(
            models_v2.Network).join(
                external_net_db.ExternalNetwork,
                (models_v2.Network.id ==
                 external_net_db.ExternalNetwork.network_id))]
        ports = self._plugin._get_collection(
            ctx, models_v2.Port, self._plugin._make_port_dict,
            filters=filters)
        for port in ports:
            lswitchport = neutron_port_mappings.get(port['id'])
            self.synchronize_port(
                ctx, port, lswitchport and lswitchport.get('data'),
                ext_networks=ext_nets)

    def _get_chunk_size(self, sp):
        """Return the next chunk size, scaled so a full pass fits within
        the sync interval given the per-request delay."""
        # NOTE(salv-orlando): Try to use __future__ for this routine only?
        ratio = ((float(sp.total_size) / float(sp.chunk_size)) /
                 (float(self._sync_interval) / float(self._req_delay)))
        new_size = max(1.0, ratio) * float(sp.chunk_size)
        # int(x) + (fractional part > 0) implements ceil() without math
        return int(new_size) + (new_size - int(new_size) > 0)

    def _fetch_data(self, uri, cursor, page_size):
        """Fetch one page of resources (possibly split across several
        backend requests) from NSX.

        Returns (results, next_cursor, total_size); total_size is only
        reported by the first request of a query.
        """
        # If not cursor there is nothing to retrieve
        if cursor:
            if cursor == 'start':
                cursor = None
            # Chunk size tuning might, in some conditions, make it larger
            # than 5,000, which is the maximum page size allowed by the NSX
            # API. In this case the request should be split in multiple
            # requests. This is not ideal, and therefore a log warning will
            # be emitted.
            # NOTE(review): relies on Python 2 integer division; under
            # Python 3 this expression would yield a float — confirm before
            # porting.
            num_requests = page_size / (MAX_PAGE_SIZE + 1) + 1
            if num_requests > 1:
                LOG.warn(_LW("Requested page size is %(cur_chunk_size)d. "
                             "It might be necessary to do %(num_requests)d "
                             "round-trips to NSX for fetching data. Please "
                             "tune sync parameters to ensure chunk size "
                             "is less than %(max_page_size)d"),
                         {'cur_chunk_size': page_size,
                          'num_requests': num_requests,
                          'max_page_size': MAX_PAGE_SIZE})
            # Only the first request might return the total size,
            # subsequent requests will definitely not
            results, cursor, total_size = nsxlib.get_single_query_page(
                uri, self._cluster, cursor,
                min(page_size, MAX_PAGE_SIZE))
            for _req in range(num_requests - 1):
                # If no cursor is returned break the cycle as there is no
                # actual need to perform multiple requests (all fetched)
                # This happens when the overall size of resources exceeds
                # the maximum page size, but the number for each single
                # resource type is below this threshold
                if not cursor:
                    break
                req_results, cursor = nsxlib.get_single_query_page(
                    uri, self._cluster, cursor,
                    min(page_size, MAX_PAGE_SIZE))[:2]
                results.extend(req_results)
            # reset cursor before returning if we queried just to
            # know the number of entities
            # NOTE(review): the conditional only applies to the middle
            # element: (results, cursor if page_size else 'start', total_size)
            return results, cursor if page_size else 'start', total_size
        return [], cursor, None

    def _fetch_nsx_data_chunk(self, sp):
        """Fetch the next chunk of switches, routers and ports from NSX,
        advancing the page cursors and retuning the chunk size in *sp*."""
        base_chunk_size = sp.chunk_size
        chunk_size = base_chunk_size + sp.extra_chunk_size
        LOG.info(_LI("Fetching up to %s resources "
                     "from NSX backend"), chunk_size)
        fetched = ls_count = lr_count = lp_count = 0
        lswitches = lrouters = lswitchports = []
        if sp.ls_cursor or sp.ls_cursor == 'start':
            (lswitches, sp.ls_cursor, ls_count) = self._fetch_data(
                self.LS_URI, sp.ls_cursor, chunk_size)
            fetched = len(lswitches)
        # NOTE(review): 'and' binds tighter than 'or', so this reads
        # (fetched < chunk_size and sp.lr_cursor) or (sp.lr_cursor == 'start')
        if fetched < chunk_size and sp.lr_cursor or sp.lr_cursor == 'start':
            (lrouters, sp.lr_cursor, lr_count) = self._fetch_data(
                self.LR_URI, sp.lr_cursor, max(chunk_size - fetched, 0))
        fetched += len(lrouters)
        if fetched < chunk_size and sp.lp_cursor or sp.lp_cursor == 'start':
            (lswitchports, sp.lp_cursor, lp_count) = self._fetch_data(
                self.LP_URI, sp.lp_cursor, max(chunk_size - fetched, 0))
        fetched += len(lswitchports)
        if sp.current_chunk == 0:
            # No cursors were provided. Then it must be possible to
            # calculate the total amount of data to fetch
            sp.total_size = ls_count + lr_count + lp_count
        LOG.debug("Total data size: %d", sp.total_size)
        sp.chunk_size = self._get_chunk_size(sp)
        # Calculate chunk size adjustment
        sp.extra_chunk_size = sp.chunk_size - base_chunk_size
        LOG.debug("Fetched %(num_lswitches)d logical switches, "
                  "%(num_lswitchports)d logical switch ports,"
                  "%(num_lrouters)d logical routers",
                  {'num_lswitches': len(lswitches),
                   'num_lswitchports': len(lswitchports),
                   'num_lrouters': len(lrouters)})
        return (lswitches, lrouters, lswitchports)

    def _synchronize_state(self, sp):
        """Run one chunk of the synchronization task.

        Invoked by the DynamicLoopingCall; returns the number of seconds
        to wait before the next invocation (longer on backend errors,
        using exponential backoff capped at 64 seconds).
        """
        # If the plugin has been destroyed, stop the LoopingCall
        if not self._plugin:
            raise loopingcall.LoopingCallDone()
        start = timeutils.utcnow()
        # Reset page cursor variables if necessary
        if sp.current_chunk == 0:
            sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start'
        LOG.info(_LI("Running state synchronization task. Chunk: %s"),
                 sp.current_chunk)
        # Fetch chunk_size data from NSX
        try:
            (lswitches, lrouters, lswitchports) = (
                self._fetch_nsx_data_chunk(sp))
        except (api_exc.RequestTimeout, api_exc.NsxApiException):
            sleep_interval = self._sync_backoff
            # Cap max back off to 64 seconds
            self._sync_backoff = min(self._sync_backoff * 2, 64)
            LOG.exception(_LE("An error occurred while communicating with "
                              "NSX backend. Will retry synchronization "
                              "in %d seconds"), sleep_interval)
            return sleep_interval
        LOG.debug("Time elapsed querying NSX: %s",
                  timeutils.utcnow() - start)
        # NOTE(review): assumes Python 2 integer division for the chunk
        # count — confirm before porting to Python 3.
        if sp.total_size:
            num_chunks = ((sp.total_size / sp.chunk_size) +
                          (sp.total_size % sp.chunk_size != 0))
        else:
            num_chunks = 1
        LOG.debug("Number of chunks: %d", num_chunks)
        # Find objects which have changed on NSX side and need
        # to be synchronized
        LOG.debug("Processing NSX cache for updated objects")
        (ls_uuids, lr_uuids, lp_uuids) = self._nsx_cache.process_updates(
            lswitches, lrouters, lswitchports)
        # Process removed objects only at the last chunk
        scan_missing = (sp.current_chunk == num_chunks - 1 and
                        not sp.init_sync_performed)
        if sp.current_chunk == num_chunks - 1:
            LOG.debug("Processing NSX cache for deleted objects")
            self._nsx_cache.process_deletes()
            ls_uuids = self._nsx_cache.get_lswitches(
                changed_only=not scan_missing)
            lr_uuids = self._nsx_cache.get_lrouters(
                changed_only=not scan_missing)
            lp_uuids = self._nsx_cache.get_lswitchports(
                changed_only=not scan_missing)
        LOG.debug("Time elapsed hashing data: %s",
                  timeutils.utcnow() - start)
        # Get an admin context
        ctx = context.get_admin_context()
        # Synchronize with database
        self._synchronize_lswitches(ctx, ls_uuids,
                                    scan_missing=scan_missing)
        self._synchronize_lrouters(ctx, lr_uuids,
                                   scan_missing=scan_missing)
        self._synchronize_lswitchports(ctx, lp_uuids,
                                       scan_missing=scan_missing)
        # Increase chunk counter
        LOG.info(_LI("Synchronization for chunk %(chunk_num)d of "
                     "%(total_chunks)d performed"),
                 {'chunk_num': sp.current_chunk + 1,
                  'total_chunks': num_chunks})
        sp.current_chunk = (sp.current_chunk + 1) % num_chunks
        added_delay = 0
        if sp.current_chunk == 0:
            # Ensure init_sync_performed is True
            if not sp.init_sync_performed:
                sp.init_sync_performed = True
            # Add additional random delay
            added_delay = random.randint(0, self._max_rand_delay)
        LOG.debug("Time elapsed at end of sync: %s",
                  timeutils.utcnow() - start)
        return self._sync_interval / num_chunks + added_delay
diff --git a/neutron/plugins/vmware/common/utils.py b/neutron/plugins/vmware/common/utils.py
deleted file mode 100644 (file)
index 24fc717..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import hashlib
-
-from neutron.api.v2 import attributes
-from neutron.openstack.common import log
-from neutron import version
-
-
-# Module logger for the VMware/NSX common utilities.
-LOG = log.getLogger(__name__)
-# NSX display names are limited to 40 characters.
-MAX_DISPLAY_NAME_LEN = 40
-# Neutron release string; appended as an NSX tag by get_tags() below.
-NEUTRON_VERSION = version.version_info.release_string()
-
-
-# Allowed network types for the NSX Plugin
-class NetworkTypes(object):
-    """Allowed provider network types for the NSX Plugin."""
-    # NOTE(review): presumably compared against provider 'network_type'
-    # values in the plugin - confirm against the plugin's validation code.
-    L3_EXT = 'l3_ext'
-    STT = 'stt'
-    GRE = 'gre'
-    FLAT = 'flat'
-    VLAN = 'vlan'
-    BRIDGE = 'bridge'
-
-
-def get_tags(**kwargs):
-    """Return a sorted list of NSX tag dicts built from keyword pairs.
-
-    Each keyword argument becomes {'scope': <key>, 'tag': <value>}; a tag
-    carrying the Neutron release string under scope 'quantum' is always
-    appended.
-    """
-    tags = ([dict(tag=value, scope=key)
-            for key, value in kwargs.iteritems()])
-    # NOTE(review): iteritems() is Python 2 only.
-    tags.append({"tag": NEUTRON_VERSION, "scope": "quantum"})
-    return sorted(tags)
-
-
-def device_id_to_vm_id(device_id, obfuscate=False):
-    """Return an identifier for *device_id* that fits in an NSX tag.
-
-    Ids longer than MAX_DISPLAY_NAME_LEN (or any id when obfuscate=True)
-    are replaced by their SHA-1 hex digest; short ids pass through.
-    """
-    # device_id can be longer than 40 characters, for example
-    # a device_id for a dhcp port is like the following:
-    #
-    # dhcp83b5fdeb-e3b4-5e18-ac5f-55161...80747326-47d7-46c2-a87a-cf6d5194877c
-    #
-    # To fit it into an NSX tag we need to hash it, however device_id
-    # used for ports associated to VM's are small enough so let's skip the
-    # hashing
-    if len(device_id) > MAX_DISPLAY_NAME_LEN or obfuscate:
-        return hashlib.sha1(device_id).hexdigest()
-    else:
-        return device_id
-
-
-def check_and_truncate(display_name):
-    """Truncate *display_name* to the NSX maximum display-name length.
-
-    Returns the (possibly truncated) name, or '' when the name is falsy.
-    Truncation is logged at debug level.
-    """
-    if (attributes.is_attr_set(display_name) and
-            len(display_name) > MAX_DISPLAY_NAME_LEN):
-        LOG.debug("Specified name:'%s' exceeds maximum length. "
-                  "It will be truncated on NSX", display_name)
-        return display_name[:MAX_DISPLAY_NAME_LEN]
-    return display_name or ''
diff --git a/neutron/plugins/vmware/dbexts/db.py b/neutron/plugins/vmware/dbexts/db.py
deleted file mode 100644 (file)
index facc241..0000000
+++ /dev/null
@@ -1,198 +0,0 @@
-# Copyright 2012 VMware, Inc.
-#
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_db import exception as db_exc
-from oslo_utils import excutils
-from sqlalchemy.orm import exc
-
-import neutron.db.api as db
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.dbexts import nsx_models
-
-LOG = logging.getLogger(__name__)
-
-
-def get_network_bindings(session, network_id):
-    """Return all transport-zone bindings for *network_id*.
-
-    A None *session* falls back to a fresh database session.
-    """
-    session = session or db.get_session()
-    return (session.query(nsx_models.TzNetworkBinding).
-            filter_by(network_id=network_id).
-            all())
-
-
-def get_network_bindings_by_vlanid_and_physical_net(session, vlan_id,
-                                                    phy_uuid):
-    """Return bindings matching both *vlan_id* and *phy_uuid*.
-
-    A None *session* falls back to a fresh database session.
-    """
-    session = session or db.get_session()
-    return (session.query(nsx_models.TzNetworkBinding).
-            filter_by(vlan_id=vlan_id, phy_uuid=phy_uuid).
-            all())
-
-
-def delete_network_bindings(session, network_id):
-    """Delete all bindings for *network_id*; return the rows deleted."""
-    return (session.query(nsx_models.TzNetworkBinding).
-            filter_by(network_id=network_id).delete())
-
-
-def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id):
-    """Persist a transport-zone binding for a network and return it."""
-    with session.begin(subtransactions=True):
-        binding = nsx_models.TzNetworkBinding(network_id, binding_type,
-                                          phy_uuid, vlan_id)
-        session.add(binding)
-    return binding
-
-
-def add_neutron_nsx_network_mapping(session, neutron_id, nsx_switch_id):
-    """Map a Neutron network to an NSX logical switch; return the row."""
-    with session.begin(subtransactions=True):
-        mapping = nsx_models.NeutronNsxNetworkMapping(
-            neutron_id=neutron_id, nsx_id=nsx_switch_id)
-        session.add(mapping)
-        return mapping
-
-
-def add_neutron_nsx_port_mapping(session, neutron_id,
-                                 nsx_switch_id, nsx_port_id):
-    """Map a Neutron port to an NSX (switch, port) pair; return the row.
-
-    A duplicate insert of the exact same mapping is tolerated; any other
-    duplicate or DB error is re-raised after rolling back.
-    """
-    # NOTE(review): manual begin/commit (rather than a 'with' block) so
-    # the except clauses below can roll back explicitly.
-    session.begin(subtransactions=True)
-    try:
-        mapping = nsx_models.NeutronNsxPortMapping(
-            neutron_id, nsx_switch_id, nsx_port_id)
-        session.add(mapping)
-        session.commit()
-    except db_exc.DBDuplicateEntry:
-        with excutils.save_and_reraise_exception() as ctxt:
-            session.rollback()
-            # do not complain if the same exact mapping is being added,
-            # otherwise re-raise because even though it is possible for the
-            # same neutron port to map to different back-end ports over time,
-            # this should not occur whilst a mapping already exists
-            current = get_nsx_switch_and_port_id(session, neutron_id)
-            if current[1] == nsx_port_id:
-                LOG.debug("Port mapping for %s already available",
-                          neutron_id)
-                ctxt.reraise = False
-    except db_exc.DBError:
-        with excutils.save_and_reraise_exception():
-            # rollback for any other db error
-            session.rollback()
-    # On a tolerated duplicate this returns the uncommitted object.
-    return mapping
-
-
-def add_neutron_nsx_router_mapping(session, neutron_id, nsx_router_id):
-    """Map a Neutron router to an NSX logical router; return the row."""
-    with session.begin(subtransactions=True):
-        mapping = nsx_models.NeutronNsxRouterMapping(
-            neutron_id=neutron_id, nsx_id=nsx_router_id)
-        session.add(mapping)
-        return mapping
-
-
-def add_neutron_nsx_security_group_mapping(session, neutron_id, nsx_id):
-    """Map a Neutron security group to a NSX security profile.
-
-    :param session: a valid database session object
-    :param neutron_id: a neutron security group identifier
-    :param nsx_id: a nsx security profile identifier
-    :returns: the newly added mapping object
-    """
-    with session.begin(subtransactions=True):
-        mapping = nsx_models.NeutronNsxSecurityGroupMapping(
-            neutron_id=neutron_id, nsx_id=nsx_id)
-        session.add(mapping)
-        return mapping
-
-
-def get_nsx_switch_ids(session, neutron_id):
-    """Return the list of NSX switch ids mapped to a Neutron network."""
-    # This function returns a list of NSX switch identifiers because of
-    # the possibility of chained logical switches
-    return [mapping['nsx_id'] for mapping in
-            session.query(nsx_models.NeutronNsxNetworkMapping).filter_by(
-                neutron_id=neutron_id)]
-
-
-def get_nsx_switch_and_port_id(session, neutron_id):
-    """Return (nsx_switch_id, nsx_port_id) for a Neutron port.
-
-    Returns (None, None) when no mapping is stored yet.
-    """
-    try:
-        mapping = (session.query(nsx_models.NeutronNsxPortMapping).
-                   filter_by(neutron_id=neutron_id).
-                   one())
-        return mapping['nsx_switch_id'], mapping['nsx_port_id']
-    except exc.NoResultFound:
-        LOG.debug("NSX identifiers for neutron port %s not yet "
-                  "stored in Neutron DB", neutron_id)
-        return None, None
-
-
-def get_nsx_router_id(session, neutron_id):
-    """Return the NSX router id mapped to a Neutron router.
-
-    Implicitly returns None when no mapping is stored yet.
-    """
-    try:
-        mapping = (session.query(nsx_models.NeutronNsxRouterMapping).
-                   filter_by(neutron_id=neutron_id).one())
-        return mapping['nsx_id']
-    except exc.NoResultFound:
-        LOG.debug("NSX identifiers for neutron router %s not yet "
-                  "stored in Neutron DB", neutron_id)
-
-
-def get_nsx_security_group_id(session, neutron_id):
-    """Return the id of a security group in the NSX backend.
-
-    Note: security groups are called 'security profiles' in NSX.
-    Returns None when no mapping is stored yet.
-    """
-    try:
-        mapping = (session.query(nsx_models.NeutronNsxSecurityGroupMapping).
-                   filter_by(neutron_id=neutron_id).
-                   one())
-        return mapping['nsx_id']
-    except exc.NoResultFound:
-        LOG.debug("NSX identifiers for neutron security group %s not yet "
-                  "stored in Neutron DB", neutron_id)
-        return None
-
-
-def _delete_by_neutron_id(session, model, neutron_id):
-    """Delete all *model* rows for *neutron_id*; return the rows deleted."""
-    return session.query(model).filter_by(neutron_id=neutron_id).delete()
-
-
-def delete_neutron_nsx_port_mapping(session, neutron_id):
-    """Remove the port mapping for *neutron_id*; return the rows deleted."""
-    return _delete_by_neutron_id(
-        session, nsx_models.NeutronNsxPortMapping, neutron_id)
-
-
-def delete_neutron_nsx_router_mapping(session, neutron_id):
-    """Remove the router mapping for *neutron_id*; return the rows deleted."""
-    return _delete_by_neutron_id(
-        session, nsx_models.NeutronNsxRouterMapping, neutron_id)
-
-
-def unset_default_network_gateways(session):
-    """Clear the 'default' flag on every network gateway."""
-    with session.begin(subtransactions=True):
-        session.query(nsx_models.NetworkGateway).update(
-            {nsx_models.NetworkGateway.default: False})
-
-
-def set_default_network_gateway(session, gw_id):
-    """Mark gateway *gw_id* as the default.
-
-    Raises sqlalchemy NoResultFound if the gateway does not exist.
-    """
-    with session.begin(subtransactions=True):
-        gw = (session.query(nsx_models.NetworkGateway).
-              filter_by(id=gw_id).one())
-        gw['default'] = True
-
-
-def set_multiprovider_network(session, network_id):
-    """Record *network_id* as a multiprovider network; return the row."""
-    with session.begin(subtransactions=True):
-        multiprovider_network = nsx_models.MultiProviderNetworks(
-            network_id)
-        session.add(multiprovider_network)
-        return multiprovider_network
-
-
-def is_multiprovider_network(session, network_id):
-    """Return True if *network_id* is flagged as a multiprovider network."""
-    with session.begin(subtransactions=True):
-        return bool(
-            session.query(nsx_models.MultiProviderNetworks).filter_by(
-                network_id=network_id).first())
diff --git a/neutron/plugins/vmware/dbexts/lsn_db.py b/neutron/plugins/vmware/dbexts/lsn_db.py
deleted file mode 100644 (file)
index 82e93b3..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright 2014 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from oslo_db import exception as d_exc
-from sqlalchemy import orm
-
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.common import exceptions as p_exc
-from neutron.plugins.vmware.dbexts import nsx_models
-
-
-LOG = logging.getLogger(__name__)
-
-
-def lsn_add(context, network_id, lsn_id):
-    """Add Logical Service Node information to persistent datastore.
-
-    :param network_id: Neutron network the LSN serves
-    :param lsn_id: backend identifier of the LSN
-    """
-    with context.session.begin(subtransactions=True):
-        lsn = nsx_models.Lsn(network_id, lsn_id)
-        context.session.add(lsn)
-
-
-def lsn_remove(context, lsn_id):
-    """Remove Logical Service Node information from datastore given its id.
-
-    A missing row is not an error (bulk delete, no existence check).
-    """
-    with context.session.begin(subtransactions=True):
-        context.session.query(nsx_models.Lsn).filter_by(lsn_id=lsn_id).delete()
-
-
-def lsn_remove_for_network(context, network_id):
-    """Remove information about the Logical Service Node given its network.
-
-    A missing row is not an error (bulk delete, no existence check).
-    """
-    with context.session.begin(subtransactions=True):
-        context.session.query(nsx_models.Lsn).filter_by(
-            net_id=network_id).delete()
-
-
-def lsn_get_for_network(context, network_id, raise_on_err=True):
-    """Retrieve LSN information given its network id.
-
-    Raises LsnNotFound when missing and raise_on_err is True; otherwise
-    logs a warning and implicitly returns None.
-    """
-    query = context.session.query(nsx_models.Lsn)
-    try:
-        return query.filter_by(net_id=network_id).one()
-    except (orm.exc.NoResultFound, d_exc.DBError):
-        msg = _('Unable to find Logical Service Node for network %s')
-        if raise_on_err:
-            LOG.error(msg, network_id)
-            raise p_exc.LsnNotFound(entity='network',
-                                    entity_id=network_id)
-        else:
-            LOG.warn(msg, network_id)
-
-
-def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id):
-    """Add Logical Service Node Port information to persistent datastore.
-
-    :param lsn_port_id: backend identifier of the LSN port
-    :param subnet_id: Neutron subnet served by the port
-    :param mac: MAC address of the port
-    :param lsn_id: owning Logical Service Node
-    """
-    with context.session.begin(subtransactions=True):
-        lsn_port = nsx_models.LsnPort(lsn_port_id, subnet_id, mac, lsn_id)
-        context.session.add(lsn_port)
-
-
-def lsn_port_get_for_subnet(context, subnet_id, raise_on_err=True):
-    """Return Logical Service Node Port information given its subnet id.
-
-    Implicitly returns None when not found and raise_on_err is False.
-    """
-    with context.session.begin(subtransactions=True):
-        try:
-            return (context.session.query(nsx_models.LsnPort).
-                    filter_by(sub_id=subnet_id).one())
-        except (orm.exc.NoResultFound, d_exc.DBError):
-            if raise_on_err:
-                raise p_exc.LsnPortNotFound(lsn_id=None,
-                                            entity='subnet',
-                                            entity_id=subnet_id)
-
-
-def lsn_port_get_for_mac(context, mac_address, raise_on_err=True):
-    """Return Logical Service Node Port information given its mac address.
-
-    Implicitly returns None when not found and raise_on_err is False.
-    """
-    with context.session.begin(subtransactions=True):
-        try:
-            return (context.session.query(nsx_models.LsnPort).
-                    filter_by(mac_addr=mac_address).one())
-        except (orm.exc.NoResultFound, d_exc.DBError):
-            if raise_on_err:
-                raise p_exc.LsnPortNotFound(lsn_id=None,
-                                            entity='mac',
-                                            entity_id=mac_address)
-
-
-def lsn_port_remove(context, lsn_port_id):
-    """Remove Logical Service Node port from the given Logical Service Node.
-
-    A missing row is not an error (bulk delete, no existence check).
-    """
-    with context.session.begin(subtransactions=True):
-        (context.session.query(nsx_models.LsnPort).
-         filter_by(lsn_port_id=lsn_port_id).delete())
diff --git a/neutron/plugins/vmware/dbexts/maclearning.py b/neutron/plugins/vmware/dbexts/maclearning.py
deleted file mode 100644 (file)
index 6a85162..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from sqlalchemy.orm import exc
-
-from neutron.api.v2 import attributes
-from neutron.db import db_base_plugin_v2
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.dbexts import nsx_models
-from neutron.plugins.vmware.extensions import maclearning as mac
-
-LOG = logging.getLogger(__name__)
-
-
-class MacLearningDbMixin(object):
-    """Mixin class for mac learning."""
-
-    def _make_mac_learning_state_dict(self, port, fields=None):
-        """Build the API dict for a port's MAC learning state."""
-        res = {'port_id': port['port_id'],
-               mac.MAC_LEARNING: port[mac.MAC_LEARNING]}
-        return self._fields(res, fields)
-
-    def _extend_port_mac_learning_state(self, port_res, port_db):
-        """Add the mac_learning attribute to a port response dict."""
-        state = port_db.mac_learning_state
-        if state and state.mac_learning_enabled:
-            port_res[mac.MAC_LEARNING] = state.mac_learning_enabled
-
-    # Register dict extend functions for ports.
-    # NOTE: this executes once, at class-definition time.
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attributes.PORTS, ['_extend_port_mac_learning_state'])
-
-    def _update_mac_learning_state(self, context, port_id, enabled):
-        """Set the stored state for *port_id*, creating the row if absent."""
-        try:
-            query = self._model_query(context, nsx_models.MacLearningState)
-            state = query.filter(
-                nsx_models.MacLearningState.port_id == port_id).one()
-            state.update({mac.MAC_LEARNING: enabled})
-        except exc.NoResultFound:
-            # EAFP: fall back to creating the state on first update.
-            self._create_mac_learning_state(context,
-                                            {'id': port_id,
-                                             mac.MAC_LEARNING: enabled})
-
-    def _create_mac_learning_state(self, context, port):
-        """Persist a new MacLearningState row and return its API dict."""
-        with context.session.begin(subtransactions=True):
-            enabled = port[mac.MAC_LEARNING]
-            state = nsx_models.MacLearningState(
-                port_id=port['id'],
-                mac_learning_enabled=enabled)
-            context.session.add(state)
-        return self._make_mac_learning_state_dict(state)
diff --git a/neutron/plugins/vmware/dbexts/networkgw_db.py b/neutron/plugins/vmware/dbexts/networkgw_db.py
deleted file mode 100644 (file)
index 8f23a9d..0000000
+++ /dev/null
@@ -1,461 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from sqlalchemy.orm import exc as sa_orm_exc
-
-from neutron.api.v2 import attributes
-from neutron.common import exceptions
-from neutron.common import utils
-from neutron.openstack.common import log as logging
-from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.dbexts import nsx_models
-from neutron.plugins.vmware.extensions import networkgw
-
-
-LOG = logging.getLogger(__name__)
-# Device owner assigned to ports created for gateway interfaces.
-DEVICE_OWNER_NET_GW_INTF = 'network:gateway-interface'
-# Keys allowed in a network-to-gateway connection mapping.
-NETWORK_ID = 'network_id'
-SEGMENTATION_TYPE = 'segmentation_type'
-SEGMENTATION_ID = 'segmentation_id'
-ALLOWED_CONNECTION_ATTRIBUTES = set((NETWORK_ID,
-                                     SEGMENTATION_TYPE,
-                                     SEGMENTATION_ID))
-# Constants for gateway device operational status
-STATUS_UNKNOWN = "UNKNOWN"
-STATUS_ERROR = "ERROR"
-STATUS_ACTIVE = "ACTIVE"
-STATUS_DOWN = "DOWN"
-
-
-class GatewayInUse(exceptions.InUse):
-    """Raised when deleting a gateway that still has network connections."""
-    message = _("Network Gateway '%(gateway_id)s' still has active mappings "
-                "with one or more neutron networks.")
-
-
-class GatewayNotFound(exceptions.NotFound):
-    """Raised when a network gateway lookup by id fails."""
-    message = _("Network Gateway %(gateway_id)s could not be found")
-
-
-class GatewayDeviceInUse(exceptions.InUse):
-    """A gateway device is still referenced by one or more gateways."""
-    message = _("Network Gateway Device '%(device_id)s' is still used by "
-                "one or more network gateways.")
-
-
-class GatewayDeviceNotFound(exceptions.NotFound):
-    """Raised when a single gateway device cannot be found."""
-    message = _("Network Gateway Device %(device_id)s could not be found.")
-
-
-class GatewayDevicesNotFound(exceptions.NotFound):
-    """Raised when one or more gateway devices cannot be found."""
-    message = _("One or more Network Gateway Devices could not be found: "
-                "%(device_ids)s.")
-
-
-class NetworkGatewayPortInUse(exceptions.InUse):
-    """Raised when deleting a gateway-interface port via the port API."""
-    message = _("Port '%(port_id)s' is owned by '%(device_owner)s' and "
-                "therefore cannot be deleted directly via the port API.")
-
-
-class GatewayConnectionInUse(exceptions.InUse):
-    """The requested network mapping already exists on the gateway."""
-    message = _("The specified mapping '%(mapping)s' is already in use on "
-                "network gateway '%(gateway_id)s'.")
-
-
-class MultipleGatewayConnections(exceptions.Conflict):
-    """More than one connection matched the provided criteria."""
-    message = _("Multiple network connections found on '%(gateway_id)s' "
-                "with provided criteria.")
-
-
-class GatewayConnectionNotFound(exceptions.NotFound):
-    """No connection matching the mapping info exists on the gateway."""
-    message = _("The connection %(network_mapping_info)s was not found on the "
-                "network gateway '%(network_gateway_id)s'")
-
-
-class NetworkGatewayUnchangeable(exceptions.InUse):
-    """Raised on update/delete attempts against a default gateway."""
-    message = _("The network gateway %(gateway_id)s "
-                "cannot be updated or deleted")
-
-
-class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
-    """DB mixin implementing the network gateway extension API."""
-
-    # Resource names as declared by the networkgw extension.
-    gateway_resource = networkgw.GATEWAY_RESOURCE_NAME
-    device_resource = networkgw.DEVICE_RESOURCE_NAME
-
-    def _get_network_gateway(self, context, gw_id):
-        """Return the NetworkGateway DB object or raise GatewayNotFound."""
-        try:
-            gw = self._get_by_id(context, nsx_models.NetworkGateway, gw_id)
-        except sa_orm_exc.NoResultFound:
-            raise GatewayNotFound(gateway_id=gw_id)
-        return gw
-
-    def _make_gw_connection_dict(self, gw_conn):
-        """Return the API view of a gateway network connection record."""
-        return {'port_id': gw_conn['port_id'],
-                'segmentation_type': gw_conn['segmentation_type'],
-                'segmentation_id': gw_conn['segmentation_id']}
-
-    def _make_network_gateway_dict(self, network_gateway, fields=None):
-        """Return the API view of a gateway, filtered to *fields* if given."""
-        device_list = []
-        for d in network_gateway['devices']:
-            device_list.append({'id': d['id'],
-                                'interface_name': d['interface_name']})
-        res = {'id': network_gateway['id'],
-               'name': network_gateway['name'],
-               'default': network_gateway['default'],
-               'devices': device_list,
-               'tenant_id': network_gateway['tenant_id']}
-        # Query gateway connections only if needed
-        if not fields or 'ports' in fields:
-            res['ports'] = [self._make_gw_connection_dict(conn)
-                            for conn in network_gateway.network_connections]
-        return self._fields(res, fields)
-
-    def _set_mapping_info_defaults(self, mapping_info):
-        """Default missing segmentation info to 'flat' / id 0 (in place)."""
-        if not mapping_info.get('segmentation_type'):
-            mapping_info['segmentation_type'] = 'flat'
-        if not mapping_info.get('segmentation_id'):
-            mapping_info['segmentation_id'] = 0
-
-    def _validate_network_mapping_info(self, network_mapping_info):
-        """Validate a connection mapping and return its network id.
-
-        Applies segmentation defaults, then raises InvalidInput for a
-        missing network id, unknown keys, or inconsistent segmentation
-        type/id combinations.
-        """
-        self._set_mapping_info_defaults(network_mapping_info)
-        network_id = network_mapping_info.get(NETWORK_ID)
-        if not network_id:
-            raise exceptions.InvalidInput(
-                error_message=_("A network identifier must be specified "
-                                "when connecting a network to a network "
-                                "gateway. Unable to complete operation"))
-        connection_attrs = set(network_mapping_info.keys())
-        if not connection_attrs.issubset(ALLOWED_CONNECTION_ATTRIBUTES):
-            # NOTE(review): error_message receives a (msg, attrs) tuple
-            # here rather than a formatted string - likely a latent bug.
-            raise exceptions.InvalidInput(
-                error_message=(_("Invalid keys found among the ones provided "
-                                 "in request body: %(connection_attrs)s."),
-                               connection_attrs))
-        seg_type = network_mapping_info.get(SEGMENTATION_TYPE)
-        seg_id = network_mapping_info.get(SEGMENTATION_ID)
-        # The NSX plugin accepts 0 as a valid vlan tag
-        seg_id_valid = seg_id == 0 or utils.is_valid_vlan_tag(seg_id)
-        if seg_type.lower() == 'flat' and seg_id:
-            msg = _("Cannot specify a segmentation id when "
-                    "the segmentation type is flat")
-            raise exceptions.InvalidInput(error_message=msg)
-        elif (seg_type.lower() == 'vlan' and not seg_id_valid):
-            msg = _("Invalid segmentation id (%d) for "
-                    "vlan segmentation type") % seg_id
-            raise exceptions.InvalidInput(error_message=msg)
-        return network_id
-
-    def _retrieve_gateway_connections(self, context, gateway_id,
-                                      mapping_info={}, only_one=False):
-        """Return connections for *gateway_id* filtered by *mapping_info*.
-
-        Returns query.one() when only_one is True, else query.all().
-        NOTE(review): mutable default argument; harmless here since
-        mapping_info is only read, but an anti-pattern nonetheless.
-        """
-        filters = {'network_gateway_id': [gateway_id]}
-        for k, v in mapping_info.iteritems():
-            if v and k != NETWORK_ID:
-                filters[k] = [v]
-        query = self._get_collection_query(context,
-                                           nsx_models.NetworkConnection,
-                                           filters)
-        return query.one() if only_one else query.all()
-
-    def _unset_default_network_gateways(self, context):
-        """Clear the 'default' flag on every network gateway."""
-        with context.session.begin(subtransactions=True):
-            context.session.query(nsx_models.NetworkGateway).update(
-                {nsx_models.NetworkGateway.default: False})
-
-    def _set_default_network_gateway(self, context, gw_id):
-        """Mark gateway *gw_id* as the default.
-
-        Raises sqlalchemy NoResultFound if the gateway does not exist.
-        """
-        with context.session.begin(subtransactions=True):
-            gw = (context.session.query(nsx_models.NetworkGateway).
-                  filter_by(id=gw_id).one())
-            gw['default'] = True
-
-    def prevent_network_gateway_port_deletion(self, context, port):
-        """Pre-deletion check.
-
-        Ensures a port will not be deleted if is being used by a network
-        gateway. In that case an exception will be raised.
-
-        :raises NetworkGatewayPortInUse: when the port's device_owner is
-            the gateway-interface owner.
-        """
-        if port['device_owner'] == DEVICE_OWNER_NET_GW_INTF:
-            raise NetworkGatewayPortInUse(port_id=port['id'],
-                                          device_owner=port['device_owner'])
-
-    def _validate_device_list(self, context, tenant_id, gateway_data):
-        """Check every device in *gateway_data* exists and is tenant-owned.
-
-        A device owned by another tenant raises GatewayDeviceNotFound
-        (presumably to avoid leaking existence - confirm intent); devices
-        not in the DB at all raise GatewayDevicesNotFound.
-        """
-        device_query = self._query_gateway_devices(
-            context, filters={'id': [device['id']
-                                     for device in gateway_data['devices']]})
-        retrieved_device_ids = set()
-        for device in device_query:
-            retrieved_device_ids.add(device['id'])
-            if device['tenant_id'] != tenant_id:
-                raise GatewayDeviceNotFound(device_id=device['id'])
-        missing_device_ids = (
-            set(device['id'] for device in gateway_data['devices']) -
-            retrieved_device_ids)
-        if missing_device_ids:
-            raise GatewayDevicesNotFound(
-                device_ids=",".join(missing_device_ids))
-
-    def create_network_gateway(self, context, network_gateway,
-            validate_device_list=True):
-        """Create a network gateway row (plus device references).
-
-        Returns the API dict of the new gateway; the id is taken from the
-        request data when supplied, otherwise generated.
-        """
-        gw_data = network_gateway[self.gateway_resource]
-        tenant_id = self._get_tenant_id_for_create(context, gw_data)
-        with context.session.begin(subtransactions=True):
-            gw_db = nsx_models.NetworkGateway(
-                id=gw_data.get('id', uuidutils.generate_uuid()),
-                tenant_id=tenant_id,
-                name=gw_data.get('name'))
-            # Device list is guaranteed to be a valid list, but some devices
-            # might still either not exist or belong to a different tenant
-            if validate_device_list:
-                self._validate_device_list(context, tenant_id, gw_data)
-            gw_db.devices.extend(
-                [nsx_models.NetworkGatewayDeviceReference(**device)
-                 for device in gw_data['devices']])
-            context.session.add(gw_db)
-        LOG.debug("Created network gateway with id:%s", gw_db['id'])
-        return self._make_network_gateway_dict(gw_db)
-
-    def update_network_gateway(self, context, id, network_gateway):
-        """Update a gateway; raises NetworkGatewayUnchangeable if default."""
-        gw_data = network_gateway[self.gateway_resource]
-        with context.session.begin(subtransactions=True):
-            gw_db = self._get_network_gateway(context, id)
-            if gw_db.default:
-                raise NetworkGatewayUnchangeable(gateway_id=id)
-            # Ensure there is something to update before doing it
-            if any([gw_db[k] != gw_data[k] for k in gw_data]):
-                gw_db.update(gw_data)
-        LOG.debug("Updated network gateway with id:%s", id)
-        return self._make_network_gateway_dict(gw_db)
-
-    def get_network_gateway(self, context, id, fields=None):
-        """Return the API dict of gateway *id*, filtered to *fields*."""
-        gw_db = self._get_network_gateway(context, id)
-        return self._make_network_gateway_dict(gw_db, fields)
-
-    def delete_network_gateway(self, context, id):
-        """Delete gateway *id*.
-
-        :raises GatewayInUse: when the gateway still has connections
-        :raises NetworkGatewayUnchangeable: when it is the default gateway
-        """
-        with context.session.begin(subtransactions=True):
-            gw_db = self._get_network_gateway(context, id)
-            if gw_db.network_connections:
-                raise GatewayInUse(gateway_id=id)
-            if gw_db.default:
-                raise NetworkGatewayUnchangeable(gateway_id=id)
-            context.session.delete(gw_db)
-        LOG.debug("Network gateway '%s' was destroyed.", id)
-
-    def get_network_gateways(self, context, filters=None, fields=None,
-                             sorts=None, limit=None, marker=None,
-                             page_reverse=False):
-        """List network gateways with standard filter/sort/pagination."""
-        marker_obj = self._get_marker_obj(
-            context, 'network_gateway', limit, marker)
-        return self._get_collection(context, nsx_models.NetworkGateway,
-                                    self._make_network_gateway_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts, limit=limit,
-                                    marker_obj=marker_obj,
-                                    page_reverse=page_reverse)
-
-    def connect_network(self, context, network_gateway_id,
-                        network_mapping_info):
-        """Connect a network to a gateway.
-
-        Validates the mapping, creates a gateway-interface port on the
-        network, records the NetworkConnection, and strips any IPs that
-        were allocated to the port. Returns a 'connection_info' dict.
-        """
-        network_id = self._validate_network_mapping_info(network_mapping_info)
-        LOG.debug("Connecting network '%(network_id)s' to gateway "
-                  "'%(network_gateway_id)s'",
-                  {'network_id': network_id,
-                   'network_gateway_id': network_gateway_id})
-        with context.session.begin(subtransactions=True):
-            gw_db = self._get_network_gateway(context, network_gateway_id)
-            tenant_id = self._get_tenant_id_for_create(context, gw_db)
-            # TODO(salvatore-orlando): Leverage unique constraint instead
-            # of performing another query!
-            if self._retrieve_gateway_connections(context,
-                                                  network_gateway_id,
-                                                  network_mapping_info):
-                raise GatewayConnectionInUse(mapping=network_mapping_info,
-                                             gateway_id=network_gateway_id)
-            # TODO(salvatore-orlando): Creating a port will give it an IP,
-            # but we actually do not need any. Instead of wasting an IP we
-            # should have a way to say a port shall not be associated with
-            # any subnet
-            try:
-                # We pass the segmentation type and id too - the plugin
-                # might find them useful as the network connection object
-                # does not exist yet.
-                # NOTE: they're not extended attributes, rather extra data
-                # passed in the port structure to the plugin
-                # TODO(salvatore-orlando): Verify optimal solution for
-                # ownership of the gateway port
-                port = self.create_port(context, {
-                    'port':
-                    {'tenant_id': tenant_id,
-                     'network_id': network_id,
-                     'mac_address': attributes.ATTR_NOT_SPECIFIED,
-                     'admin_state_up': True,
-                     'fixed_ips': [],
-                     'device_id': network_gateway_id,
-                     'device_owner': DEVICE_OWNER_NET_GW_INTF,
-                     'name': '',
-                     'gw:segmentation_type':
-                     network_mapping_info.get('segmentation_type'),
-                     'gw:segmentation_id':
-                     network_mapping_info.get('segmentation_id')}})
-            except exceptions.NetworkNotFound:
-                err_msg = (_("Requested network '%(network_id)s' not found."
-                             "Unable to create network connection on "
-                             "gateway '%(network_gateway_id)s") %
-                           {'network_id': network_id,
-                            'network_gateway_id': network_gateway_id})
-                LOG.error(err_msg)
-                raise exceptions.InvalidInput(error_message=err_msg)
-            port_id = port['id']
-            LOG.debug("Gateway port for '%(network_gateway_id)s' "
-                      "created on network '%(network_id)s':%(port_id)s",
-                      {'network_gateway_id': network_gateway_id,
-                       'network_id': network_id,
-                       'port_id': port_id})
-            # Create NetworkConnection record
-            network_mapping_info['port_id'] = port_id
-            network_mapping_info['tenant_id'] = tenant_id
-            gw_db.network_connections.append(
-                nsx_models.NetworkConnection(**network_mapping_info))
-            # NOTE(review): port_id was already assigned above; this
-            # reassignment is redundant.
-            port_id = port['id']
-            # now deallocate and recycle ip from the port
-            for fixed_ip in port.get('fixed_ips', []):
-                self._delete_ip_allocation(context, network_id,
-                                           fixed_ip['subnet_id'],
-                                           fixed_ip['ip_address'])
-            LOG.debug("Ensured no Ip addresses are configured on port %s",
-                      port_id)
-            return {'connection_info':
-                    {'network_gateway_id': network_gateway_id,
-                     'network_id': network_id,
-                     'port_id': port_id}}
-
-    def disconnect_network(self, context, network_gateway_id,
-                           network_mapping_info):
-        network_id = self._validate_network_mapping_info(network_mapping_info)
-        LOG.debug("Disconnecting network '%(network_id)s' from gateway "
-                  "'%(network_gateway_id)s'",
-                  {'network_id': network_id,
-                   'network_gateway_id': network_gateway_id})
-        with context.session.begin(subtransactions=True):
-            # Uniquely identify connection, otherwise raise
-            try:
-                net_connection = self._retrieve_gateway_connections(
-                    context, network_gateway_id,
-                    network_mapping_info, only_one=True)
-            except sa_orm_exc.NoResultFound:
-                raise GatewayConnectionNotFound(
-                    network_mapping_info=network_mapping_info,
-                    network_gateway_id=network_gateway_id)
-            except sa_orm_exc.MultipleResultsFound:
-                raise MultipleGatewayConnections(
-                    gateway_id=network_gateway_id)
-            # Remove gateway port from network
-            # FIXME(salvatore-orlando): Ensure state of port in NSX is
-            # consistent with outcome of transaction
-            self.delete_port(context, net_connection['port_id'],
-                             nw_gw_port_check=False)
-            # Remove NetworkConnection record
-            context.session.delete(net_connection)
-
-    def _make_gateway_device_dict(self, gateway_device, fields=None,
-                                  include_nsx_id=False):
-        res = {'id': gateway_device['id'],
-               'name': gateway_device['name'],
-               'status': gateway_device['status'],
-               'connector_type': gateway_device['connector_type'],
-               'connector_ip': gateway_device['connector_ip'],
-               'tenant_id': gateway_device['tenant_id']}
-        if include_nsx_id:
-            # Return the NSX mapping as well. This attribute will not be
-            # returned in the API response anyway. Ensure it will not be
-            # filtered out in field selection.
-            if fields:
-                fields.append('nsx_id')
-            res['nsx_id'] = gateway_device['nsx_id']
-        return self._fields(res, fields)
-
-    def _get_gateway_device(self, context, device_id):
-        try:
-            return self._get_by_id(context,
-                                   nsx_models.NetworkGatewayDevice,
-                                   device_id)
-        except sa_orm_exc.NoResultFound:
-            raise GatewayDeviceNotFound(device_id=device_id)
-
-    def _is_device_in_use(self, context, device_id):
-        query = self._get_collection_query(
-            context, nsx_models.NetworkGatewayDeviceReference,
-            {'id': [device_id]})
-        return query.first()
-
-    def get_gateway_device(self, context, device_id, fields=None,
-                           include_nsx_id=False):
-        return self._make_gateway_device_dict(
-            self._get_gateway_device(context, device_id),
-            fields, include_nsx_id)
-
-    def _query_gateway_devices(self, context,
-                               filters=None, sorts=None,
-                               limit=None, marker=None,
-                               page_reverse=None):
-        marker_obj = self._get_marker_obj(
-            context, 'gateway_device', limit, marker)
-        return self._get_collection_query(context,
-                                          nsx_models.NetworkGatewayDevice,
-                                          filters=filters,
-                                          sorts=sorts,
-                                          limit=limit,
-                                          marker_obj=marker_obj,
-                                          page_reverse=page_reverse)
-
-    def get_gateway_devices(self, context, filters=None, fields=None,
-                            sorts=None, limit=None, marker=None,
-                            page_reverse=False, include_nsx_id=False):
-        query = self._query_gateway_devices(context, filters, sorts, limit,
-                                            marker, page_reverse)
-        return [self._make_gateway_device_dict(row, fields, include_nsx_id)
-                for row in query]
-
-    def create_gateway_device(self, context, gateway_device,
-                              initial_status=STATUS_UNKNOWN):
-        device_data = gateway_device[self.device_resource]
-        tenant_id = self._get_tenant_id_for_create(context, device_data)
-        with context.session.begin(subtransactions=True):
-            device_db = nsx_models.NetworkGatewayDevice(
-                id=device_data.get('id', uuidutils.generate_uuid()),
-                tenant_id=tenant_id,
-                name=device_data.get('name'),
-                connector_type=device_data['connector_type'],
-                connector_ip=device_data['connector_ip'],
-                status=initial_status)
-            context.session.add(device_db)
-        LOG.debug("Created network gateway device: %s", device_db['id'])
-        return self._make_gateway_device_dict(device_db)
-
-    def update_gateway_device(self, context, gateway_device_id,
-                              gateway_device, include_nsx_id=False):
-        device_data = gateway_device[self.device_resource]
-        with context.session.begin(subtransactions=True):
-            device_db = self._get_gateway_device(context, gateway_device_id)
-            # Ensure there is something to update before doing it
-            if any([device_db[k] != device_data[k] for k in device_data]):
-                device_db.update(device_data)
-        LOG.debug("Updated network gateway device: %s",
-                  gateway_device_id)
-        return self._make_gateway_device_dict(
-            device_db, include_nsx_id=include_nsx_id)
-
-    def delete_gateway_device(self, context, device_id):
-        with context.session.begin(subtransactions=True):
-            # A gateway device should not be deleted
-            # if it is used in any network gateway service
-            if self._is_device_in_use(context, device_id):
-                raise GatewayDeviceInUse(device_id=device_id)
-            device_db = self._get_gateway_device(context, device_id)
-            context.session.delete(device_db)
-        LOG.debug("Deleted network gateway device: %s.", device_id)
diff --git a/neutron/plugins/vmware/dbexts/qos_db.py b/neutron/plugins/vmware/dbexts/qos_db.py
deleted file mode 100644 (file)
index a20b6ab..0000000
+++ /dev/null
@@ -1,260 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from sqlalchemy.orm import exc
-
-from neutron.api.v2 import attributes as attr
-from neutron.db import db_base_plugin_v2
-from neutron.db import models_v2
-from neutron.i18n import _LI
-from neutron.openstack.common import log
-from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.dbexts import nsx_models
-from neutron.plugins.vmware.extensions import qos
-
-
-LOG = log.getLogger(__name__)
-
-
-class QoSDbMixin(qos.QueuePluginBase):
-    """Mixin class to add queues."""
-
-    def create_qos_queue(self, context, qos_queue):
-        q = qos_queue['qos_queue']
-        with context.session.begin(subtransactions=True):
-            qos_queue = nsx_models.QoSQueue(
-                id=q.get('id', uuidutils.generate_uuid()),
-                name=q.get('name'),
-                tenant_id=q['tenant_id'],
-                default=q.get('default'),
-                min=q.get('min'),
-                max=q.get('max'),
-                qos_marking=q.get('qos_marking'),
-                dscp=q.get('dscp'))
-            context.session.add(qos_queue)
-        return self._make_qos_queue_dict(qos_queue)
-
-    def get_qos_queue(self, context, queue_id, fields=None):
-        return self._make_qos_queue_dict(
-            self._get_qos_queue(context, queue_id), fields)
-
-    def _get_qos_queue(self, context, queue_id):
-        try:
-            return self._get_by_id(context, nsx_models.QoSQueue, queue_id)
-        except exc.NoResultFound:
-            raise qos.QueueNotFound(id=queue_id)
-
-    def get_qos_queues(self, context, filters=None, fields=None, sorts=None,
-                       limit=None, marker=None, page_reverse=False):
-        marker_obj = self._get_marker_obj(context, 'qos_queue', limit, marker)
-        return self._get_collection(context, nsx_models.QoSQueue,
-                                    self._make_qos_queue_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts, limit=limit,
-                                    marker_obj=marker_obj,
-                                    page_reverse=page_reverse)
-
-    def delete_qos_queue(self, context, queue_id):
-        qos_queue = self._get_qos_queue(context, queue_id)
-        with context.session.begin(subtransactions=True):
-            context.session.delete(qos_queue)
-
-    def _process_port_queue_mapping(self, context, port_data, queue_id):
-        port_data[qos.QUEUE] = queue_id
-        if not queue_id:
-            return
-        with context.session.begin(subtransactions=True):
-            context.session.add(nsx_models.PortQueueMapping(
-                port_id=port_data['id'],
-                                queue_id=queue_id))
-
-    def _get_port_queue_bindings(self, context, filters=None, fields=None):
-        return self._get_collection(context, nsx_models.PortQueueMapping,
-                                    self._make_port_queue_binding_dict,
-                                    filters=filters, fields=fields)
-
-    def _delete_port_queue_mapping(self, context, port_id):
-        query = self._model_query(context, nsx_models.PortQueueMapping)
-        try:
-            binding = query.filter(
-                nsx_models.PortQueueMapping.port_id == port_id).one()
-        except exc.NoResultFound:
-            # return since this can happen if we are updating a port that
-            # did not already have a queue on it. There is no need to check
-            # if there is one before deleting if we return here.
-            return
-        with context.session.begin(subtransactions=True):
-            context.session.delete(binding)
-
-    def _process_network_queue_mapping(self, context, net_data, queue_id):
-        net_data[qos.QUEUE] = queue_id
-        if not queue_id:
-            return
-        with context.session.begin(subtransactions=True):
-            context.session.add(
-                nsx_models.NetworkQueueMapping(network_id=net_data['id'],
-                                               queue_id=queue_id))
-
-    def _get_network_queue_bindings(self, context, filters=None, fields=None):
-        return self._get_collection(context, nsx_models.NetworkQueueMapping,
-                                    self._make_network_queue_binding_dict,
-                                    filters=filters, fields=fields)
-
-    def _delete_network_queue_mapping(self, context, network_id):
-        query = self._model_query(context, nsx_models.NetworkQueueMapping)
-        with context.session.begin(subtransactions=True):
-            binding = query.filter_by(network_id=network_id).first()
-            if binding:
-                context.session.delete(binding)
-
-    def _extend_dict_qos_queue(self, obj_res, obj_db):
-        queue_mapping = obj_db['qos_queue']
-        if queue_mapping:
-            obj_res[qos.QUEUE] = queue_mapping.get('queue_id')
-        return obj_res
-
-    def _extend_port_dict_qos_queue(self, port_res, port_db):
-        self._extend_dict_qos_queue(port_res, port_db)
-
-    def _extend_network_dict_qos_queue(self, network_res, network_db):
-        self._extend_dict_qos_queue(network_res, network_db)
-
-    # Register dict extend functions for networks and ports
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attr.NETWORKS, ['_extend_network_dict_qos_queue'])
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attr.PORTS, ['_extend_port_dict_qos_queue'])
-
-    def _make_qos_queue_dict(self, queue, fields=None):
-        res = {'id': queue['id'],
-               'name': queue.get('name'),
-               'default': queue.get('default'),
-               'tenant_id': queue['tenant_id'],
-               'min': queue.get('min'),
-               'max': queue.get('max'),
-               'qos_marking': queue.get('qos_marking'),
-               'dscp': queue.get('dscp')}
-        return self._fields(res, fields)
-
-    def _make_port_queue_binding_dict(self, queue, fields=None):
-        res = {'port_id': queue['port_id'],
-               'queue_id': queue['queue_id']}
-        return self._fields(res, fields)
-
-    def _make_network_queue_binding_dict(self, queue, fields=None):
-        res = {'network_id': queue['network_id'],
-               'queue_id': queue['queue_id']}
-        return self._fields(res, fields)
-
-    def _check_for_queue_and_create(self, context, port):
-        """Check for queue and create.
-
-        This function determines if a port should be associated with a
-        queue. It works by first querying NetworkQueueMapping to determine
-        if the network is associated with a queue. If so, then it queries
-        NetworkQueueMapping for all the networks that are associated with
-        this queue. Next, it queries against all the ports on these networks
-        with the port device_id. Finally it queries PortQueueMapping. If that
-        query returns a queue_id that is returned. Otherwise a queue is
-        created that is the size of the queue associated with the network and
-        that queue_id is returned.
-
-        If the network is not associated with a queue we then query to see
-        if there is a default queue in the system. If so, a copy of that is
-        created and the queue_id is returned.
-
-        Otherwise None is returned. None is also returned if the port does not
-        have a device_id or if the device_owner is network:
-        """
-
-        queue_to_create = None
-        # If there is no device_id don't create a queue. The queue will be
-        # created on update port when the device_id is present. Also don't
-        # apply QoS to network ports.
-        if (not port.get('device_id') or
-            port['device_owner'].startswith('network:')):
-            return
-
-        # Check if there is a queue associated with the network
-        filters = {'network_id': [port['network_id']]}
-        network_queue_id = self._get_network_queue_bindings(
-            context, filters, ['queue_id'])
-        if network_queue_id:
-            # get networks that queue is associated with
-            filters = {'queue_id': [network_queue_id[0]['queue_id']]}
-            networks_with_same_queue = self._get_network_queue_bindings(
-                context, filters)
-
-            # get the ports on these networks with the same_queue and device_id
-            filters = {'device_id': [port.get('device_id')],
-                       'network_id': [network['network_id'] for
-                                      network in networks_with_same_queue]}
-            query = self._model_query(context, models_v2.Port.id)
-            query = self._apply_filters_to_query(query, models_v2.Port,
-                                                 filters)
-            ports_ids = [p[0] for p in query]
-            if ports_ids:
-                # shared queue already exists find the queue id
-                queues = self._get_port_queue_bindings(context,
-                                                       {'port_id': ports_ids},
-                                                       ['queue_id'])
-                if queues:
-                    return queues[0]['queue_id']
-
-            # get the size of the queue we want to create
-            queue_to_create = self._get_qos_queue(
-                context, network_queue_id[0]['queue_id'])
-
-        else:
-            # check for default queue
-            filters = {'default': [True]}
-            # context is elevated since default queue is owned by admin
-            queue_to_create = self.get_qos_queues(context.elevated(), filters)
-            if not queue_to_create:
-                return
-            queue_to_create = queue_to_create[0]
-
-        # create the queue
-        tenant_id = self._get_tenant_id_for_create(context, port)
-        if port.get(qos.RXTX_FACTOR) and queue_to_create.get('max'):
-            queue_to_create['max'] *= int(port[qos.RXTX_FACTOR])
-        queue = {'qos_queue': {'name': queue_to_create.get('name'),
-                               'min': queue_to_create.get('min'),
-                               'max': queue_to_create.get('max'),
-                               'dscp': queue_to_create.get('dscp'),
-                               'qos_marking':
-                               queue_to_create.get('qos_marking'),
-                               'tenant_id': tenant_id}}
-        return self.create_qos_queue(context, queue, False)['id']
-
-    def _validate_qos_queue(self, context, qos_queue):
-        if qos_queue.get('default'):
-            if context.is_admin:
-                if self.get_qos_queues(context, filters={'default': [True]}):
-                    raise qos.DefaultQueueAlreadyExists()
-            else:
-                raise qos.DefaultQueueCreateNotAdmin()
-        if qos_queue.get('qos_marking') == 'trusted':
-            dscp = qos_queue.pop('dscp')
-            if dscp:
-                # must raise because a non-zero dscp was provided
-                raise qos.QueueInvalidMarking()
-            LOG.info(_LI("DSCP value (%s) will be ignored with 'trusted' "
-                         "marking"), dscp)
-        max = qos_queue.get('max')
-        min = qos_queue.get('min')
-        # Max can be None
-        if max and min > max:
-            raise qos.QueueMinGreaterMax()
diff --git a/neutron/plugins/vmware/dbexts/vcns_db.py b/neutron/plugins/vmware/dbexts/vcns_db.py
deleted file mode 100644 (file)
index 1d14984..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2013 VMware, Inc.
-#
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.dbexts import vcns_models
-
-LOG = logging.getLogger(__name__)
-
-
-def add_vcns_router_binding(session, router_id, vse_id, lswitch_id, status):
-    with session.begin(subtransactions=True):
-        binding = vcns_models.VcnsRouterBinding(
-            router_id=router_id,
-            edge_id=vse_id,
-            lswitch_id=lswitch_id,
-            status=status)
-        session.add(binding)
-        return binding
-
-
-def get_vcns_router_binding(session, router_id):
-    with session.begin(subtransactions=True):
-        return (session.query(vcns_models.VcnsRouterBinding).
-                filter_by(router_id=router_id).first())
-
-
-def update_vcns_router_binding(session, router_id, **kwargs):
-    with session.begin(subtransactions=True):
-        binding = (session.query(vcns_models.VcnsRouterBinding).
-                   filter_by(router_id=router_id).one())
-        for key, value in kwargs.iteritems():
-            binding[key] = value
-
-
-def delete_vcns_router_binding(session, router_id):
-    with session.begin(subtransactions=True):
-        binding = (session.query(vcns_models.VcnsRouterBinding).
-                   filter_by(router_id=router_id).one())
-        session.delete(binding)
diff --git a/neutron/plugins/vmware/dhcp_meta/__init__.py b/neutron/plugins/vmware/dhcp_meta/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/vmware/dhcp_meta/combined.py b/neutron/plugins/vmware/dhcp_meta/combined.py
deleted file mode 100644 (file)
index 36ba563..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2014 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.common import constants as const
-from neutron.common import topics
-from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc
-from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc
-
-
-class DhcpAgentNotifyAPI(dhcp_rpc_agent_api.DhcpAgentNotifyAPI):
-
-    def __init__(self, plugin, manager):
-        super(DhcpAgentNotifyAPI, self).__init__(topic=topics.DHCP_AGENT)
-        self.agentless_notifier = nsx_svc.DhcpAgentNotifyAPI(plugin, manager)
-
-    def notify(self, context, data, methodname):
-        [resource, action, _e] = methodname.split('.')
-        lsn_manager = self.agentless_notifier.plugin.lsn_manager
-        plugin = self.agentless_notifier.plugin
-        if resource == 'network':
-            net_id = data['network']['id']
-        elif resource in ['port', 'subnet']:
-            net_id = data[resource]['network_id']
-        else:
-            # no valid resource
-            return
-        lsn_exists = lsn_manager.lsn_exists(context, net_id)
-        treat_dhcp_owner_specially = False
-        if lsn_exists:
-            # if lsn exists, the network is one created with the new model
-            if (resource == 'subnet' and action == 'create' and
-                const.DEVICE_OWNER_DHCP not in plugin.port_special_owners):
-                # network/subnet provisioned in the new model have a plain
-                # nsx lswitch port, no vif attachment
-                    plugin.port_special_owners.append(const.DEVICE_OWNER_DHCP)
-                    treat_dhcp_owner_specially = True
-            if (resource == 'port' and action == 'update' or
-                resource == 'subnet'):
-                self.agentless_notifier.notify(context, data, methodname)
-        elif not lsn_exists and resource in ['port', 'subnet']:
-            # call notifier for the agent-based mode
-            super(DhcpAgentNotifyAPI, self).notify(context, data, methodname)
-        if treat_dhcp_owner_specially:
-            # if subnets belong to networks created with the old model
-            # dhcp port does not need to be special cased, so put things
-            # back, since they were modified
-            plugin.port_special_owners.remove(const.DEVICE_OWNER_DHCP)
-
-
-def handle_network_dhcp_access(plugin, context, network, action):
-    nsx_svc.handle_network_dhcp_access(plugin, context, network, action)
-
-
-def handle_port_dhcp_access(plugin, context, port, action):
-    if plugin.lsn_manager.lsn_exists(context, port['network_id']):
-        nsx_svc.handle_port_dhcp_access(plugin, context, port, action)
-    else:
-        nsx_rpc.handle_port_dhcp_access(plugin, context, port, action)
-
-
-def handle_port_metadata_access(plugin, context, port, is_delete=False):
-    if plugin.lsn_manager.lsn_exists(context, port['network_id']):
-        nsx_svc.handle_port_metadata_access(plugin, context, port, is_delete)
-    else:
-        nsx_rpc.handle_port_metadata_access(plugin, context, port, is_delete)
-
-
-def handle_router_metadata_access(plugin, context, router_id, interface=None):
-    if interface:
-        subnet = plugin.get_subnet(context, interface['subnet_id'])
-        network_id = subnet['network_id']
-        if plugin.lsn_manager.lsn_exists(context, network_id):
-            nsx_svc.handle_router_metadata_access(
-                plugin, context, router_id, interface)
-        else:
-            nsx_rpc.handle_router_metadata_access(
-                plugin, context, router_id, interface)
-    else:
-        nsx_rpc.handle_router_metadata_access(
-            plugin, context, router_id, interface)
diff --git a/neutron/plugins/vmware/dhcp_meta/constants.py b/neutron/plugins/vmware/dhcp_meta/constants.py
deleted file mode 100644 (file)
index 1e9476a..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2014 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-from neutron.common import constants as const
-from neutron.db import l3_db
-
-# A unique MAC to quickly identify the LSN port used for metadata services
-# when dhcp on the subnet is off. Inspired by leet-speak for 'metadata'.
-METADATA_MAC = "fa:15:73:74:d4:74"
-METADATA_PORT_ID = 'metadata:id'
-METADATA_PORT_NAME = 'metadata:name'
-METADATA_DEVICE_ID = 'metadata:device'
-SPECIAL_OWNERS = (const.DEVICE_OWNER_DHCP,
-                  const.DEVICE_OWNER_ROUTER_GW,
-                  l3_db.DEVICE_OWNER_ROUTER_INTF)
diff --git a/neutron/plugins/vmware/dhcp_meta/lsnmanager.py b/neutron/plugins/vmware/dhcp_meta/lsnmanager.py
deleted file mode 100644 (file)
index 65fdb3c..0000000
+++ /dev/null
@@ -1,477 +0,0 @@
-# Copyright 2014 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_utils import excutils
-
-from neutron.common import exceptions as n_exc
-from neutron.i18n import _LE, _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import exceptions as p_exc
-from neutron.plugins.vmware.common import nsx_utils
-from neutron.plugins.vmware.dbexts import lsn_db
-from neutron.plugins.vmware.dhcp_meta import constants as const
-from neutron.plugins.vmware.nsxlib import lsn as lsn_api
-from neutron.plugins.vmware.nsxlib import switch as switch_api
-
-LOG = logging.getLogger(__name__)
-
-META_CONF = 'metadata-proxy'
-DHCP_CONF = 'dhcp'
-
-
-lsn_opts = [
-    cfg.BoolOpt('sync_on_missing_data', default=False,
-                help=_('Pull LSN information from NSX in case it is missing '
-                       'from the local data store. This is useful to rebuild '
-                       'the local store in case of server recovery.'))
-]
-
-
-def register_lsn_opts(config):
-    config.CONF.register_opts(lsn_opts, "NSX_LSN")
-
-
-class LsnManager(object):
-    """Manage LSN entities associated with networks."""
-
-    def __init__(self, plugin):
-        self.plugin = plugin
-
-    @property
-    def cluster(self):
-        return self.plugin.cluster
-
-    def lsn_exists(self, context, network_id):
-        """Return True if a Logical Service Node exists for the network."""
-        return self.lsn_get(
-            context, network_id, raise_on_err=False) is not None
-
-    def lsn_get(self, context, network_id, raise_on_err=True):
-        """Retrieve the LSN id associated to the network."""
-        try:
-            return lsn_api.lsn_for_network_get(self.cluster, network_id)
-        except (n_exc.NotFound, api_exc.NsxApiException):
-            if raise_on_err:
-                LOG.error(_LE('Unable to find Logical Service Node for '
-                              'network %s.'),
-                          network_id)
-                raise p_exc.LsnNotFound(entity='network',
-                                        entity_id=network_id)
-            else:
-                LOG.warn(_LW('Unable to find Logical Service Node for '
-                             'the requested network %s.'),
-                         network_id)
-
-    def lsn_create(self, context, network_id):
-        """Create a LSN associated to the network."""
-        try:
-            return lsn_api.lsn_for_network_create(self.cluster, network_id)
-        except api_exc.NsxApiException:
-            err_msg = _('Unable to create LSN for network %s') % network_id
-            raise p_exc.NsxPluginException(err_msg=err_msg)
-
-    def lsn_delete(self, context, lsn_id):
-        """Delete a LSN given its id."""
-        try:
-            lsn_api.lsn_delete(self.cluster, lsn_id)
-        except (n_exc.NotFound, api_exc.NsxApiException):
-            LOG.warn(_LW('Unable to delete Logical Service Node %s'), lsn_id)
-
-    def lsn_delete_by_network(self, context, network_id):
-        """Delete a LSN associated to the network."""
-        lsn_id = self.lsn_get(context, network_id, raise_on_err=False)
-        if lsn_id:
-            self.lsn_delete(context, lsn_id)
-
-    def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True):
-        """Retrieve LSN and LSN port for the network and the subnet."""
-        lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err)
-        if lsn_id:
-            try:
-                lsn_port_id = lsn_api.lsn_port_by_subnet_get(
-                    self.cluster, lsn_id, subnet_id)
-            except (n_exc.NotFound, api_exc.NsxApiException):
-                if raise_on_err:
-                    LOG.error(_LE('Unable to find Logical Service Node Port '
-                                  'for LSN %(lsn_id)s and subnet '
-                                  '%(subnet_id)s'),
-                              {'lsn_id': lsn_id, 'subnet_id': subnet_id})
-                    raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
-                                                entity='subnet',
-                                                entity_id=subnet_id)
-                else:
-                    LOG.warn(_LW('Unable to find Logical Service Node Port '
-                                 'for LSN %(lsn_id)s and subnet '
-                                 '%(subnet_id)s'),
-                             {'lsn_id': lsn_id, 'subnet_id': subnet_id})
-                return (lsn_id, None)
-            else:
-                return (lsn_id, lsn_port_id)
-        else:
-            return (None, None)
-
-    def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True):
-        """Retrieve LSN and LSN port given network and mac address."""
-        lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err)
-        if lsn_id:
-            try:
-                lsn_port_id = lsn_api.lsn_port_by_mac_get(
-                    self.cluster, lsn_id, mac)
-            except (n_exc.NotFound, api_exc.NsxApiException):
-                if raise_on_err:
-                    LOG.error(_LE('Unable to find Logical Service Node Port '
-                                  'for LSN %(lsn_id)s and mac address '
-                                  '%(mac)s'),
-                              {'lsn_id': lsn_id, 'mac': mac})
-                    raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
-                                                entity='MAC',
-                                                entity_id=mac)
-                else:
-                    LOG.warn(_LW('Unable to find Logical Service Node '
-                                 'Port for LSN %(lsn_id)s and mac address '
-                                 '%(mac)s'),
-                             {'lsn_id': lsn_id, 'mac': mac})
-                return (lsn_id, None)
-            else:
-                return (lsn_id, lsn_port_id)
-        else:
-            return (None, None)
-
-    def lsn_port_create(self, context, lsn_id, subnet_info):
-        """Create and return LSN port for associated subnet."""
-        try:
-            return lsn_api.lsn_port_create(self.cluster, lsn_id, subnet_info)
-        except n_exc.NotFound:
-            raise p_exc.LsnNotFound(entity='', entity_id=lsn_id)
-        except api_exc.NsxApiException:
-            err_msg = _('Unable to create port for LSN  %s') % lsn_id
-            raise p_exc.NsxPluginException(err_msg=err_msg)
-
-    def lsn_port_delete(self, context, lsn_id, lsn_port_id):
-        """Delete a LSN port from the Logical Service Node."""
-        try:
-            lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
-        except (n_exc.NotFound, api_exc.NsxApiException):
-            LOG.warn(_LW('Unable to delete LSN Port %s'), lsn_port_id)
-
-    def lsn_port_dispose(self, context, network_id, mac_address):
-        """Delete a LSN port given the network and the mac address."""
-        lsn_id, lsn_port_id = self.lsn_port_get_by_mac(
-            context, network_id, mac_address, raise_on_err=False)
-        if lsn_port_id:
-            self.lsn_port_delete(context, lsn_id, lsn_port_id)
-            if mac_address == const.METADATA_MAC:
-                try:
-                    lswitch_port_id = switch_api.get_port_by_neutron_tag(
-                        self.cluster, network_id,
-                        const.METADATA_PORT_ID)['uuid']
-                    switch_api.delete_port(
-                        self.cluster, network_id, lswitch_port_id)
-                except (n_exc.PortNotFoundOnNetwork,
-                        api_exc.NsxApiException):
-                    LOG.warn(_LW("Metadata port not found while attempting "
-                                 "to delete it from network %s"), network_id)
-        else:
-            LOG.warn(_LW("Unable to find Logical Services Node "
-                         "Port with MAC %s"), mac_address)
-
-    def lsn_port_dhcp_setup(
-        self, context, network_id, port_id, port_data, subnet_config=None):
-        """Connect network to LSN via specified port and port_data."""
-        try:
-            lsn_id = None
-            switch_id = nsx_utils.get_nsx_switch_ids(
-                context.session, self.cluster, network_id)[0]
-            lswitch_port_id = switch_api.get_port_by_neutron_tag(
-                self.cluster, switch_id, port_id)['uuid']
-            lsn_id = self.lsn_get(context, network_id)
-            lsn_port_id = self.lsn_port_create(context, lsn_id, port_data)
-        except (n_exc.NotFound, p_exc.NsxPluginException):
-            raise p_exc.PortConfigurationError(
-                net_id=network_id, lsn_id=lsn_id, port_id=port_id)
-        else:
-            try:
-                lsn_api.lsn_port_plug_network(
-                    self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
-            except p_exc.LsnConfigurationConflict:
-                self.lsn_port_delete(context, lsn_id, lsn_port_id)
-                raise p_exc.PortConfigurationError(
-                    net_id=network_id, lsn_id=lsn_id, port_id=port_id)
-            if subnet_config:
-                self.lsn_port_dhcp_configure(
-                    context, lsn_id, lsn_port_id, subnet_config)
-            else:
-                return (lsn_id, lsn_port_id)
-
-    def lsn_port_metadata_setup(self, context, lsn_id, subnet):
-        """Connect subnet to specified LSN."""
-        data = {
-            "mac_address": const.METADATA_MAC,
-            "ip_address": subnet['cidr'],
-            "subnet_id": subnet['id']
-        }
-        network_id = subnet['network_id']
-        tenant_id = subnet['tenant_id']
-        lswitch_port_id = None
-        try:
-            switch_id = nsx_utils.get_nsx_switch_ids(
-                context.session, self.cluster, network_id)[0]
-            lswitch_port_id = switch_api.create_lport(
-                self.cluster, switch_id, tenant_id,
-                const.METADATA_PORT_ID, const.METADATA_PORT_NAME,
-                const.METADATA_DEVICE_ID, True)['uuid']
-            lsn_port_id = self.lsn_port_create(context, lsn_id, data)
-        except (n_exc.NotFound, p_exc.NsxPluginException,
-                api_exc.NsxApiException):
-            raise p_exc.PortConfigurationError(
-                net_id=network_id, lsn_id=lsn_id, port_id=lswitch_port_id)
-        else:
-            try:
-                lsn_api.lsn_port_plug_network(
-                    self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
-            except p_exc.LsnConfigurationConflict:
-                self.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
-                switch_api.delete_port(
-                    self.cluster, network_id, lswitch_port_id)
-                raise p_exc.PortConfigurationError(
-                    net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id)
-
-    def lsn_port_dhcp_configure(self, context, lsn_id, lsn_port_id, subnet):
-        """Enable/disable dhcp services with the given config options."""
-        is_enabled = subnet["enable_dhcp"]
-        dhcp_options = {
-            "domain_name": cfg.CONF.NSX_DHCP.domain_name,
-            "default_lease_time": cfg.CONF.NSX_DHCP.default_lease_time,
-        }
-        dns_servers = cfg.CONF.NSX_DHCP.extra_domain_name_servers or []
-        dns_servers.extend(subnet["dns_nameservers"])
-        if subnet['gateway_ip']:
-            dhcp_options["routers"] = subnet["gateway_ip"]
-        if dns_servers:
-            dhcp_options["domain_name_servers"] = ",".join(dns_servers)
-        if subnet["host_routes"]:
-            dhcp_options["classless_static_routes"] = (
-                ",".join(subnet["host_routes"])
-            )
-        try:
-            lsn_api.lsn_port_dhcp_configure(
-                self.cluster, lsn_id, lsn_port_id, is_enabled, dhcp_options)
-        except (n_exc.NotFound, api_exc.NsxApiException):
-            err_msg = (_('Unable to configure dhcp for Logical Service '
-                         'Node %(lsn_id)s and port %(lsn_port_id)s')
-                       % {'lsn_id': lsn_id, 'lsn_port_id': lsn_port_id})
-            LOG.error(err_msg)
-            raise p_exc.NsxPluginException(err_msg=err_msg)
-
-    def lsn_metadata_configure(self, context, subnet_id, is_enabled):
-        """Configure metadata service for the specified subnet."""
-        subnet = self.plugin.get_subnet(context, subnet_id)
-        network_id = subnet['network_id']
-        meta_conf = cfg.CONF.NSX_METADATA
-        metadata_options = {
-            'metadata_server_ip': meta_conf.metadata_server_address,
-            'metadata_server_port': meta_conf.metadata_server_port,
-            'metadata_proxy_shared_secret': meta_conf.metadata_shared_secret
-        }
-        try:
-            lsn_id = self.lsn_get(context, network_id)
-            lsn_api.lsn_metadata_configure(
-                self.cluster, lsn_id, is_enabled, metadata_options)
-        except (p_exc.LsnNotFound, api_exc.NsxApiException):
-            err_msg = (_('Unable to configure metadata '
-                         'for subnet %s') % subnet_id)
-            LOG.error(err_msg)
-            raise p_exc.NsxPluginException(err_msg=err_msg)
-        if is_enabled:
-            try:
-                # test that the lsn port exists
-                self.lsn_port_get(context, network_id, subnet_id)
-            except p_exc.LsnPortNotFound:
-                # this might happen if subnet had dhcp off when created
-                # so create one, and wire it
-                self.lsn_port_metadata_setup(context, lsn_id, subnet)
-        else:
-            self.lsn_port_dispose(context, network_id, const.METADATA_MAC)
-
-    def _lsn_port_host_conf(self, context, network_id, subnet_id, data, hdlr):
-        lsn_id, lsn_port_id = self.lsn_port_get(
-            context, network_id, subnet_id, raise_on_err=False)
-        try:
-            if lsn_id and lsn_port_id:
-                hdlr(self.cluster, lsn_id, lsn_port_id, data)
-        except (n_exc.NotFound, api_exc.NsxApiException):
-            LOG.error(_LE('Error while configuring LSN '
-                          'port %s'), lsn_port_id)
-            raise p_exc.PortConfigurationError(
-                net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id)
-
-    def lsn_port_dhcp_host_add(self, context, network_id, subnet_id, host):
-        """Add dhcp host entry to LSN port configuration."""
-        self._lsn_port_host_conf(context, network_id, subnet_id, host,
-                                 lsn_api.lsn_port_dhcp_host_add)
-
-    def lsn_port_dhcp_host_remove(self, context, network_id, subnet_id, host):
-        """Remove dhcp host entry from LSN port configuration."""
-        self._lsn_port_host_conf(context, network_id, subnet_id, host,
-                                 lsn_api.lsn_port_dhcp_host_remove)
-
-    def lsn_port_meta_host_add(self, context, network_id, subnet_id, host):
-        """Add dhcp host entry to LSN port configuration."""
-        self._lsn_port_host_conf(context, network_id, subnet_id, host,
-                                 lsn_api.lsn_port_metadata_host_add)
-
-    def lsn_port_meta_host_remove(self, context, network_id, subnet_id, host):
-        """Remove dhcp host entry from LSN port configuration."""
-        self._lsn_port_host_conf(context, network_id, subnet_id, host,
-                                 lsn_api.lsn_port_metadata_host_remove)
-
-    def lsn_port_update(
-        self, context, network_id, subnet_id, dhcp=None, meta=None):
-        """Update the specified configuration for the LSN port."""
-        if not dhcp and not meta:
-            return
-        try:
-            lsn_id, lsn_port_id = self.lsn_port_get(
-                context, network_id, subnet_id, raise_on_err=False)
-            if dhcp and lsn_id and lsn_port_id:
-                lsn_api.lsn_port_host_entries_update(
-                    self.cluster, lsn_id, lsn_port_id, DHCP_CONF, dhcp)
-            if meta and lsn_id and lsn_port_id:
-                lsn_api.lsn_port_host_entries_update(
-                    self.cluster, lsn_id, lsn_port_id, META_CONF, meta)
-        except api_exc.NsxApiException:
-            raise p_exc.PortConfigurationError(
-                net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id)
-
-
-class PersistentLsnManager(LsnManager):
-    """Add local persistent state to LSN Manager."""
-
-    def __init__(self, plugin):
-        super(PersistentLsnManager, self).__init__(plugin)
-        self.sync_on_missing = cfg.CONF.NSX_LSN.sync_on_missing_data
-
-    def lsn_get(self, context, network_id, raise_on_err=True):
-        try:
-            obj = lsn_db.lsn_get_for_network(
-                context, network_id, raise_on_err=raise_on_err)
-            return obj.lsn_id if obj else None
-        except p_exc.LsnNotFound:
-            with excutils.save_and_reraise_exception() as ctxt:
-                ctxt.reraise = False
-                if self.sync_on_missing:
-                    lsn_id = super(PersistentLsnManager, self).lsn_get(
-                        context, network_id, raise_on_err=raise_on_err)
-                    self.lsn_save(context, network_id, lsn_id)
-                    return lsn_id
-                if raise_on_err:
-                    ctxt.reraise = True
-
-    def lsn_save(self, context, network_id, lsn_id):
-        """Save LSN-Network mapping to the DB."""
-        try:
-            lsn_db.lsn_add(context, network_id, lsn_id)
-        except db_exc.DBError:
-            err_msg = _('Unable to save LSN for network %s') % network_id
-            LOG.exception(err_msg)
-            raise p_exc.NsxPluginException(err_msg=err_msg)
-
-    def lsn_create(self, context, network_id):
-        lsn_id = super(PersistentLsnManager,
-                       self).lsn_create(context, network_id)
-        try:
-            self.lsn_save(context, network_id, lsn_id)
-        except p_exc.NsxPluginException:
-            with excutils.save_and_reraise_exception():
-                super(PersistentLsnManager, self).lsn_delete(context, lsn_id)
-        return lsn_id
-
-    def lsn_delete(self, context, lsn_id):
-        lsn_db.lsn_remove(context, lsn_id)
-        super(PersistentLsnManager, self).lsn_delete(context, lsn_id)
-
-    def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True):
-        try:
-            obj = lsn_db.lsn_port_get_for_subnet(
-                context, subnet_id, raise_on_err=raise_on_err)
-            return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None)
-        except p_exc.LsnPortNotFound:
-            with excutils.save_and_reraise_exception() as ctxt:
-                ctxt.reraise = False
-                if self.sync_on_missing:
-                    lsn_id, lsn_port_id = (
-                        super(PersistentLsnManager, self).lsn_port_get(
-                            context, network_id, subnet_id,
-                            raise_on_err=raise_on_err))
-                    mac_addr = lsn_api.lsn_port_info_get(
-                        self.cluster, lsn_id, lsn_port_id)['mac_address']
-                    self.lsn_port_save(
-                        context, lsn_port_id, subnet_id, mac_addr, lsn_id)
-                    return (lsn_id, lsn_port_id)
-                if raise_on_err:
-                    ctxt.reraise = True
-
-    def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True):
-        try:
-            obj = lsn_db.lsn_port_get_for_mac(
-                context, mac, raise_on_err=raise_on_err)
-            return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None)
-        except p_exc.LsnPortNotFound:
-            with excutils.save_and_reraise_exception() as ctxt:
-                ctxt.reraise = False
-                if self.sync_on_missing:
-                    lsn_id, lsn_port_id = (
-                        super(PersistentLsnManager, self).lsn_port_get_by_mac(
-                            context, network_id, mac,
-                            raise_on_err=raise_on_err))
-                    subnet_id = lsn_api.lsn_port_info_get(
-                        self.cluster, lsn_id, lsn_port_id).get('subnet_id')
-                    self.lsn_port_save(
-                        context, lsn_port_id, subnet_id, mac, lsn_id)
-                    return (lsn_id, lsn_port_id)
-                if raise_on_err:
-                    ctxt.reraise = True
-
-    def lsn_port_save(self, context, lsn_port_id, subnet_id, mac_addr, lsn_id):
-        """Save LSN Port information to the DB."""
-        try:
-            lsn_db.lsn_port_add_for_lsn(
-                context, lsn_port_id, subnet_id, mac_addr, lsn_id)
-        except db_exc.DBError:
-            err_msg = _('Unable to save LSN port for subnet %s') % subnet_id
-            LOG.exception(err_msg)
-            raise p_exc.NsxPluginException(err_msg=err_msg)
-
-    def lsn_port_create(self, context, lsn_id, subnet_info):
-        lsn_port_id = super(PersistentLsnManager,
-                            self).lsn_port_create(context, lsn_id, subnet_info)
-        try:
-            self.lsn_port_save(context, lsn_port_id, subnet_info['subnet_id'],
-                               subnet_info['mac_address'], lsn_id)
-        except p_exc.NsxPluginException:
-            with excutils.save_and_reraise_exception():
-                super(PersistentLsnManager, self).lsn_port_delete(
-                    context, lsn_id, lsn_port_id)
-        return lsn_port_id
-
-    def lsn_port_delete(self, context, lsn_id, lsn_port_id):
-        lsn_db.lsn_port_remove(context, lsn_port_id)
-        super(PersistentLsnManager, self).lsn_port_delete(
-            context, lsn_id, lsn_port_id)
diff --git a/neutron/plugins/vmware/dhcp_meta/migration.py b/neutron/plugins/vmware/dhcp_meta/migration.py
deleted file mode 100644 (file)
index ea6fa7b..0000000
+++ /dev/null
@@ -1,181 +0,0 @@
-# Copyright 2014 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from neutron.common import constants as const
-from neutron.common import exceptions as n_exc
-from neutron.extensions import external_net
-from neutron.i18n import _LE
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.common import exceptions as p_exc
-from neutron.plugins.vmware.dhcp_meta import nsx
-from neutron.plugins.vmware.dhcp_meta import rpc
-
-LOG = logging.getLogger(__name__)
-
-
-class DhcpMetadataBuilder(object):
-
-    def __init__(self, plugin, agent_notifier):
-        self.plugin = plugin
-        self.notifier = agent_notifier
-
-    def dhcp_agent_get_all(self, context, network_id):
-        """Return the agents managing the network."""
-        return self.plugin.list_dhcp_agents_hosting_network(
-            context, network_id)['agents']
-
-    def dhcp_port_get_all(self, context, network_id):
-        """Return the dhcp ports allocated for the network."""
-        filters = {
-            'network_id': [network_id],
-            'device_owner': [const.DEVICE_OWNER_DHCP]
-        }
-        return self.plugin.get_ports(context, filters=filters)
-
-    def router_id_get(self, context, subnet=None):
-        """Return the router and interface used for the subnet."""
-        if not subnet:
-            return
-        network_id = subnet['network_id']
-        filters = {
-            'network_id': [network_id],
-            'device_owner': [const.DEVICE_OWNER_ROUTER_INTF]
-        }
-        ports = self.plugin.get_ports(context, filters=filters)
-        for port in ports:
-            if port['fixed_ips'][0]['subnet_id'] == subnet['id']:
-                return port['device_id']
-
-    def metadata_deallocate(self, context, router_id, subnet_id):
-        """Deallocate metadata services for the subnet."""
-        interface = {'subnet_id': subnet_id}
-        self.plugin.remove_router_interface(context, router_id, interface)
-
-    def metadata_allocate(self, context, router_id, subnet_id):
-        """Allocate metadata resources for the subnet via the router."""
-        interface = {'subnet_id': subnet_id}
-        self.plugin.add_router_interface(context, router_id, interface)
-
-    def dhcp_deallocate(self, context, network_id, agents, ports):
-        """Deallocate dhcp resources for the network."""
-        for agent in agents:
-            self.plugin.remove_network_from_dhcp_agent(
-                context, agent['id'], network_id)
-        for port in ports:
-            try:
-                self.plugin.delete_port(context, port['id'])
-            except n_exc.PortNotFound:
-                LOG.error(_LE('Port %s is already gone'), port['id'])
-
-    def dhcp_allocate(self, context, network_id, subnet):
-        """Allocate dhcp resources for the subnet."""
-        # Create LSN resources
-        network_data = {'id': network_id}
-        nsx.handle_network_dhcp_access(self.plugin, context,
-                                       network_data, 'create_network')
-        if subnet:
-            subnet_data = {'subnet': subnet}
-            self.notifier.notify(context, subnet_data, 'subnet.create.end')
-            # Get DHCP host and metadata entries created for the LSN
-            port = {
-                'network_id': network_id,
-                'fixed_ips': [{'subnet_id': subnet['id']}]
-            }
-            self.notifier.notify(context, {'port': port}, 'port.update.end')
-
-
-class MigrationManager(object):
-
-    def __init__(self, plugin, lsn_manager, agent_notifier):
-        self.plugin = plugin
-        self.manager = lsn_manager
-        self.builder = DhcpMetadataBuilder(plugin, agent_notifier)
-
-    def validate(self, context, network_id):
-        """Validate and return subnet's dhcp info for migration."""
-        network = self.plugin.get_network(context, network_id)
-
-        if self.manager.lsn_exists(context, network_id):
-            reason = _("LSN already exist")
-            raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason)
-
-        if network[external_net.EXTERNAL]:
-            reason = _("Cannot migrate an external network")
-            raise n_exc.BadRequest(resource='network', msg=reason)
-
-        filters = {'network_id': [network_id]}
-        subnets = self.plugin.get_subnets(context, filters=filters)
-        count = len(subnets)
-        if count == 0:
-            return None
-        elif count == 1 and subnets[0]['cidr'] == rpc.METADATA_SUBNET_CIDR:
-            reason = _("Cannot migrate a 'metadata' network")
-            raise n_exc.BadRequest(resource='network', msg=reason)
-        elif count > 1:
-            reason = _("Unable to support multiple subnets per network")
-            raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason)
-        else:
-            return subnets[0]
-
-    def migrate(self, context, network_id, subnet=None):
-        """Migrate subnet resources to LSN."""
-        router_id = self.builder.router_id_get(context, subnet)
-        if router_id and subnet:
-            # Deallocate resources taken for the router, if any
-            self.builder.metadata_deallocate(context, router_id, subnet['id'])
-        if subnet:
-            # Deallocate reources taken for the agent, if any
-            agents = self.builder.dhcp_agent_get_all(context, network_id)
-            ports = self.builder.dhcp_port_get_all(context, network_id)
-            self.builder.dhcp_deallocate(context, network_id, agents, ports)
-        # (re)create the configuration for LSN
-        self.builder.dhcp_allocate(context, network_id, subnet)
-        if router_id and subnet:
-            # Allocate resources taken for the router, if any
-            self.builder.metadata_allocate(context, router_id, subnet['id'])
-
-    def report(self, context, network_id, subnet_id=None):
-        """Return a report of the dhcp and metadata resources in use."""
-        if subnet_id:
-            lsn_id, lsn_port_id = self.manager.lsn_port_get(
-                context, network_id, subnet_id, raise_on_err=False)
-        else:
-            filters = {'network_id': [network_id]}
-            subnets = self.plugin.get_subnets(context, filters=filters)
-            if subnets:
-                lsn_id, lsn_port_id = self.manager.lsn_port_get(
-                    context, network_id, subnets[0]['id'], raise_on_err=False)
-            else:
-                lsn_id = self.manager.lsn_get(context, network_id,
-                                              raise_on_err=False)
-                lsn_port_id = None
-        if lsn_id:
-            ports = [lsn_port_id] if lsn_port_id else []
-            report = {
-                'type': 'lsn',
-                'services': [lsn_id],
-                'ports': ports
-            }
-        else:
-            agents = self.builder.dhcp_agent_get_all(context, network_id)
-            ports = self.builder.dhcp_port_get_all(context, network_id)
-            report = {
-                'type': 'agent',
-                'services': [a['id'] for a in agents],
-                'ports': [p['id'] for p in ports]
-            }
-        return report
diff --git a/neutron/plugins/vmware/dhcp_meta/nsx.py b/neutron/plugins/vmware/dhcp_meta/nsx.py
deleted file mode 100644 (file)
index a4d0857..0000000
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright 2013 VMware, Inc.
-
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from oslo_config import cfg
-from oslo_utils import excutils
-
-from neutron.api.v2 import attributes as attr
-from neutron.common import constants as const
-from neutron.common import exceptions as n_exc
-from neutron.db import db_base_plugin_v2
-from neutron.db import l3_db
-from neutron.extensions import external_net
-from neutron.i18n import _LE, _LI
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.common import exceptions as p_exc
-from neutron.plugins.vmware.dhcp_meta import constants as d_const
-from neutron.plugins.vmware.nsxlib import lsn as lsn_api
-
-LOG = logging.getLogger(__name__)
-
-
-dhcp_opts = [
-    cfg.ListOpt('extra_domain_name_servers',
-                deprecated_group='NVP_DHCP',
-                default=[],
-                help=_('Comma separated list of additional '
-                       'domain name servers')),
-    cfg.StrOpt('domain_name',
-               deprecated_group='NVP_DHCP',
-               default='openstacklocal',
-               help=_('Domain to use for building the hostnames')),
-    cfg.IntOpt('default_lease_time', default=43200,
-               deprecated_group='NVP_DHCP',
-               help=_("Default DHCP lease time")),
-]
-
-
-metadata_opts = [
-    cfg.StrOpt('metadata_server_address',
-               deprecated_group='NVP_METADATA',
-               default='127.0.0.1',
-               help=_("IP address used by Metadata server.")),
-    cfg.IntOpt('metadata_server_port',
-               deprecated_group='NVP_METADATA',
-               default=8775,
-               help=_("TCP Port used by Metadata server.")),
-    cfg.StrOpt('metadata_shared_secret',
-               deprecated_group='NVP_METADATA',
-               default='',
-               help=_('Shared secret to sign instance-id request'),
-               secret=True)
-]
-
-
-def register_dhcp_opts(config):
-    config.CONF.register_opts(dhcp_opts, group="NSX_DHCP")
-
-
-def register_metadata_opts(config):
-    config.CONF.register_opts(metadata_opts, group="NSX_METADATA")
-
-
-class DhcpAgentNotifyAPI(object):
-
-    def __init__(self, plugin, lsn_manager):
-        self.plugin = plugin
-        self.lsn_manager = lsn_manager
-        self._handle_subnet_dhcp_access = {'create': self._subnet_create,
-                                           'update': self._subnet_update,
-                                           'delete': self._subnet_delete}
-
-    def notify(self, context, data, methodname):
-        [resource, action, _e] = methodname.split('.')
-        if resource == 'subnet':
-            self._handle_subnet_dhcp_access[action](context, data['subnet'])
-        elif resource == 'port' and action == 'update':
-            self._port_update(context, data['port'])
-
-    def _port_update(self, context, port):
-        # With no fixed IP's there's nothing that can be updated
-        if not port["fixed_ips"]:
-            return
-        network_id = port['network_id']
-        subnet_id = port["fixed_ips"][0]['subnet_id']
-        filters = {'network_id': [network_id]}
-        # Because NSX does not support updating a single host entry we
-        # have to build the whole list from scratch and update in bulk
-        ports = self.plugin.get_ports(context, filters)
-        if not ports:
-            return
-        dhcp_conf = [
-            {'mac_address': p['mac_address'],
-             'ip_address': p["fixed_ips"][0]['ip_address']}
-            for p in ports if is_user_port(p)
-        ]
-        meta_conf = [
-            {'instance_id': p['device_id'],
-             'ip_address': p["fixed_ips"][0]['ip_address']}
-            for p in ports if is_user_port(p, check_dev_id=True)
-        ]
-        self.lsn_manager.lsn_port_update(
-            context, network_id, subnet_id, dhcp=dhcp_conf, meta=meta_conf)
-
-    def _subnet_create(self, context, subnet, clean_on_err=True):
-        if subnet['enable_dhcp']:
-            network_id = subnet['network_id']
-            # Create port for DHCP service
-            dhcp_port = {
-                "name": "",
-                "admin_state_up": True,
-                "device_id": "",
-                "device_owner": const.DEVICE_OWNER_DHCP,
-                "network_id": network_id,
-                "tenant_id": subnet["tenant_id"],
-                "mac_address": attr.ATTR_NOT_SPECIFIED,
-                "fixed_ips": [{"subnet_id": subnet['id']}]
-            }
-            try:
-                # This will end up calling handle_port_dhcp_access
-                # down below as well as handle_port_metadata_access
-                self.plugin.create_port(context, {'port': dhcp_port})
-            except p_exc.PortConfigurationError as e:
-                LOG.error(_LE("Error while creating subnet %(cidr)s for "
-                              "network %(network)s. Please, contact "
-                              "administrator"),
-                          {"cidr": subnet["cidr"],
-                           "network": network_id})
-                db_base_plugin_v2.NeutronDbPluginV2.delete_port(
-                    self.plugin, context, e.port_id)
-                if clean_on_err:
-                    self.plugin.delete_subnet(context, subnet['id'])
-                raise n_exc.Conflict()
-
-    def _subnet_update(self, context, subnet):
-        network_id = subnet['network_id']
-        try:
-            lsn_id, lsn_port_id = self.lsn_manager.lsn_port_get(
-                context, network_id, subnet['id'])
-            self.lsn_manager.lsn_port_dhcp_configure(
-                context, lsn_id, lsn_port_id, subnet)
-        except p_exc.LsnPortNotFound:
-            # It's possible that the subnet was created with dhcp off;
-            # check if the subnet was uplinked onto a router, and if so
-            # remove the patch attachment between the metadata port and
-            # the lsn port, in favor of the one we'll be creating during
-            # _subnet_create
-            self.lsn_manager.lsn_port_dispose(
-                context, network_id, d_const.METADATA_MAC)
-            # also, check that a dhcp port exists first and provision it
-            # accordingly
-            filters = dict(network_id=[network_id],
-                           device_owner=[const.DEVICE_OWNER_DHCP])
-            ports = self.plugin.get_ports(context, filters=filters)
-            if ports:
-                handle_port_dhcp_access(
-                    self.plugin, context, ports[0], 'create_port')
-            else:
-                self._subnet_create(context, subnet, clean_on_err=False)
-
-    def _subnet_delete(self, context, subnet):
-        # FIXME(armando-migliaccio): it looks like a subnet filter
-        # is ineffective; so filter by network for now.
-        network_id = subnet['network_id']
-        filters = dict(network_id=[network_id],
-                       device_owner=[const.DEVICE_OWNER_DHCP])
-        # FIXME(armando-migliaccio): this may be race-y
-        ports = self.plugin.get_ports(context, filters=filters)
-        if ports:
-            # This will end up calling handle_port_dhcp_access
-            # down below as well as handle_port_metadata_access
-            self.plugin.delete_port(context, ports[0]['id'])
-
-
-def is_user_port(p, check_dev_id=False):
-    usable = p['fixed_ips'] and p['device_owner'] not in d_const.SPECIAL_OWNERS
-    return usable if not check_dev_id else usable and p['device_id']
-
-
-def check_services_requirements(cluster):
-    ver = cluster.api_client.get_version()
-    # It sounds like 4.1 is the first one where DHCP in NSX
-    # will have the experimental feature
-    if ver.major >= 4 and ver.minor >= 1:
-        cluster_id = cfg.CONF.default_service_cluster_uuid
-        if not lsn_api.service_cluster_exists(cluster, cluster_id):
-            raise p_exc.ServiceClusterUnavailable(cluster_id=cluster_id)
-    else:
-        raise p_exc.InvalidVersion(version=ver)
-
-
-def handle_network_dhcp_access(plugin, context, network, action):
-    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
-             {"action": action, "resource": network})
-    if action == 'create_network':
-        network_id = network['id']
-        if network.get(external_net.EXTERNAL):
-            LOG.info(_LI("Network %s is external: no LSN to create"),
-                     network_id)
-            return
-        plugin.lsn_manager.lsn_create(context, network_id)
-    elif action == 'delete_network':
-        # NOTE(armando-migliaccio): on delete_network, network
-        # is just the network id
-        network_id = network
-        plugin.lsn_manager.lsn_delete_by_network(context, network_id)
-    LOG.info(_LI("Logical Services Node for network "
-                 "%s configured successfully"), network_id)
-
-
-def handle_port_dhcp_access(plugin, context, port, action):
-    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
-             {"action": action, "resource": port})
-    if port["device_owner"] == const.DEVICE_OWNER_DHCP:
-        network_id = port["network_id"]
-        if action == "create_port":
-            # at this point the port must have a subnet and a fixed ip
-            subnet_id = port["fixed_ips"][0]['subnet_id']
-            subnet = plugin.get_subnet(context, subnet_id)
-            subnet_data = {
-                "mac_address": port["mac_address"],
-                "ip_address": subnet['cidr'],
-                "subnet_id": subnet['id']
-            }
-            try:
-                plugin.lsn_manager.lsn_port_dhcp_setup(
-                    context, network_id, port['id'], subnet_data, subnet)
-            except p_exc.PortConfigurationError:
-                LOG.error(_LE("Error while configuring DHCP for "
-                              "port %s"), port['id'])
-                raise n_exc.NeutronException()
-        elif action == "delete_port":
-            plugin.lsn_manager.lsn_port_dispose(context, network_id,
-                                                port['mac_address'])
-    elif port["device_owner"] != const.DEVICE_OWNER_DHCP:
-        if port.get("fixed_ips"):
-            # do something only if there are IP's and dhcp is enabled
-            subnet_id = port["fixed_ips"][0]['subnet_id']
-            if not plugin.get_subnet(context, subnet_id)['enable_dhcp']:
-                LOG.info(_LI("DHCP is disabled for subnet %s: nothing "
-                             "to do"), subnet_id)
-                return
-            host_data = {
-                "mac_address": port["mac_address"],
-                "ip_address": port["fixed_ips"][0]['ip_address']
-            }
-            network_id = port["network_id"]
-            if action == "create_port":
-                handler = plugin.lsn_manager.lsn_port_dhcp_host_add
-            elif action == "delete_port":
-                handler = plugin.lsn_manager.lsn_port_dhcp_host_remove
-            try:
-                handler(context, network_id, subnet_id, host_data)
-            except p_exc.PortConfigurationError:
-                with excutils.save_and_reraise_exception():
-                    if action == 'create_port':
-                        db_base_plugin_v2.NeutronDbPluginV2.delete_port(
-                            plugin, context, port['id'])
-    LOG.info(_LI("DHCP for port %s configured successfully"), port['id'])
-
-
-def handle_port_metadata_access(plugin, context, port, is_delete=False):
-    if is_user_port(port, check_dev_id=True):
-        network_id = port["network_id"]
-        network = plugin.get_network(context, network_id)
-        if network[external_net.EXTERNAL]:
-            LOG.info(_LI("Network %s is external: nothing to do"),
-                     network_id)
-            return
-        subnet_id = port["fixed_ips"][0]['subnet_id']
-        host_data = {
-            "instance_id": port["device_id"],
-            "tenant_id": port["tenant_id"],
-            "ip_address": port["fixed_ips"][0]['ip_address']
-        }
-        LOG.info(_LI("Configuring metadata entry for port %s"), port)
-        if not is_delete:
-            handler = plugin.lsn_manager.lsn_port_meta_host_add
-        else:
-            handler = plugin.lsn_manager.lsn_port_meta_host_remove
-        try:
-            handler(context, network_id, subnet_id, host_data)
-        except p_exc.PortConfigurationError:
-            with excutils.save_and_reraise_exception():
-                if not is_delete:
-                    db_base_plugin_v2.NeutronDbPluginV2.delete_port(
-                        plugin, context, port['id'])
-        LOG.info(_LI("Metadata for port %s configured successfully"),
-                 port['id'])
-
-
-def handle_router_metadata_access(plugin, context, router_id, interface=None):
-    LOG.info(_LI("Handle metadata access via router: %(r)s and "
-                 "interface %(i)s"), {'r': router_id, 'i': interface})
-    if interface:
-        try:
-            plugin.get_port(context, interface['port_id'])
-            is_enabled = True
-        except n_exc.NotFound:
-            is_enabled = False
-        subnet_id = interface['subnet_id']
-        try:
-            plugin.lsn_manager.lsn_metadata_configure(
-                context, subnet_id, is_enabled)
-        except p_exc.NsxPluginException:
-            with excutils.save_and_reraise_exception():
-                if is_enabled:
-                    l3_db.L3_NAT_db_mixin.remove_router_interface(
-                        plugin, context, router_id, interface)
-    LOG.info(_LI("Metadata for router %s handled successfully"), router_id)
diff --git a/neutron/plugins/vmware/dhcp_meta/rpc.py b/neutron/plugins/vmware/dhcp_meta/rpc.py
deleted file mode 100644 (file)
index 2479bf4..0000000
+++ /dev/null
@@ -1,214 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from eventlet import greenthread
-import netaddr
-from oslo_config import cfg
-
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.api.v2 import attributes
-from neutron.common import constants as const
-from neutron.common import exceptions as ntn_exc
-from neutron.db import db_base_plugin_v2
-from neutron.db import l3_db
-from neutron.db import models_v2
-from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import config
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-
-LOG = logging.getLogger(__name__)
-
-METADATA_DEFAULT_PREFIX = 30
-METADATA_SUBNET_CIDR = '169.254.169.252/%d' % METADATA_DEFAULT_PREFIX
-METADATA_GATEWAY_IP = '169.254.169.253'
-METADATA_DHCP_ROUTE = '169.254.169.254/32'
-
-
-def handle_network_dhcp_access(plugin, context, network, action):
-    pass
-
-
-def handle_port_dhcp_access(plugin, context, port_data, action):
-    active_port = (cfg.CONF.NSX.metadata_mode == config.MetadataModes.INDIRECT
-                   and port_data.get('device_owner') == const.DEVICE_OWNER_DHCP
-                   and port_data.get('fixed_ips', []))
-    if active_port:
-        subnet_id = port_data['fixed_ips'][0]['subnet_id']
-        subnet = plugin.get_subnet(context, subnet_id)
-        _notify_rpc_agent(context, {'subnet': subnet}, 'subnet.update.end')
-
-
-def handle_port_metadata_access(plugin, context, port, is_delete=False):
-    if (cfg.CONF.NSX.metadata_mode == config.MetadataModes.INDIRECT and
-        port.get('device_owner') == const.DEVICE_OWNER_DHCP):
-        if port.get('fixed_ips', []) or is_delete:
-            fixed_ip = port['fixed_ips'][0]
-            query = context.session.query(models_v2.Subnet)
-            subnet = query.filter(
-                models_v2.Subnet.id == fixed_ip['subnet_id']).one()
-            # If subnet does not have a gateway do not create metadata
-            # route. This is done via the enable_isolated_metadata
-            # option if desired.
-            if not subnet.get('gateway_ip'):
-                LOG.info(_LI('Subnet %s does not have a gateway, the '
-                             'metadata route will not be created'),
-                         subnet['id'])
-                return
-            metadata_routes = [r for r in subnet.routes
-                               if r['destination'] == METADATA_DHCP_ROUTE]
-            if metadata_routes:
-                # We should have only a single metadata route at any time
-                # because the route logic forbids two routes with the same
-                # destination. Update next hop with the provided IP address
-                if not is_delete:
-                    metadata_routes[0].nexthop = fixed_ip['ip_address']
-                else:
-                    context.session.delete(metadata_routes[0])
-            else:
-                # add the metadata route
-                route = models_v2.SubnetRoute(
-                    subnet_id=subnet.id,
-                    destination=METADATA_DHCP_ROUTE,
-                    nexthop=fixed_ip['ip_address'])
-                context.session.add(route)
-
-
-def handle_router_metadata_access(plugin, context, router_id, interface=None):
-    if cfg.CONF.NSX.metadata_mode != config.MetadataModes.DIRECT:
-        LOG.debug("Metadata access network is disabled")
-        return
-    if not cfg.CONF.allow_overlapping_ips:
-        LOG.warn(_LW("Overlapping IPs must be enabled in order to setup "
-                     "the metadata access network"))
-        return
-    ctx_elevated = context.elevated()
-    device_filter = {'device_id': [router_id],
-                     'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
-    # Retrieve ports calling database plugin
-    ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports(
-        plugin, ctx_elevated, filters=device_filter)
-    try:
-        if ports:
-            if (interface and
-                not _find_metadata_port(plugin, ctx_elevated, ports)):
-                _create_metadata_access_network(
-                    plugin, ctx_elevated, router_id)
-            elif len(ports) == 1:
-                # The only port left might be the metadata port
-                _destroy_metadata_access_network(
-                    plugin, ctx_elevated, router_id, ports)
-        else:
-            LOG.debug("No router interface found for router '%s'. "
-                      "No metadata access network should be "
-                      "created or destroyed", router_id)
-    # TODO(salvatore-orlando): A better exception handling in the
-    # NSX plugin would allow us to improve error handling here
-    except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,
-            api_exc.NsxApiException):
-        # Any exception here should be regarded as non-fatal
-        LOG.exception(_LE("An error occurred while operating on the "
-                          "metadata access network for router:'%s'"),
-                      router_id)
-
-
-def _find_metadata_port(plugin, context, ports):
-    for port in ports:
-        for fixed_ip in port['fixed_ips']:
-            cidr = netaddr.IPNetwork(
-                plugin.get_subnet(context, fixed_ip['subnet_id'])['cidr'])
-            if cidr in netaddr.IPNetwork(METADATA_SUBNET_CIDR):
-                return port
-
-
-def _create_metadata_access_network(plugin, context, router_id):
-    # Add network
-    # Network name is likely to be truncated on NSX
-    net_data = {'name': 'meta-%s' % router_id,
-                'tenant_id': '',  # intentionally not set
-                'admin_state_up': True,
-                'port_security_enabled': False,
-                'shared': False,
-                'status': const.NET_STATUS_ACTIVE}
-    meta_net = plugin.create_network(context,
-                                     {'network': net_data})
-    greenthread.sleep(0)  # yield
-    plugin.schedule_network(context, meta_net)
-    greenthread.sleep(0)  # yield
-    # From this point on there will be resources to garbage-collect
-    # in case of failures
-    meta_sub = None
-    try:
-        # Add subnet
-        subnet_data = {'network_id': meta_net['id'],
-                       'tenant_id': '',  # intentionally not set
-                       'name': 'meta-%s' % router_id,
-                       'ip_version': 4,
-                       'shared': False,
-                       'cidr': METADATA_SUBNET_CIDR,
-                       'enable_dhcp': True,
-                       # Ensure default allocation pool is generated
-                       'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
-                       'gateway_ip': METADATA_GATEWAY_IP,
-                       'dns_nameservers': [],
-                       'host_routes': []}
-        meta_sub = plugin.create_subnet(context,
-                                        {'subnet': subnet_data})
-        greenthread.sleep(0)  # yield
-        plugin.add_router_interface(context, router_id,
-                                    {'subnet_id': meta_sub['id']})
-        greenthread.sleep(0)  # yield
-        # Tell to start the metadata agent proxy, only if we had success
-        _notify_rpc_agent(context, {'subnet': meta_sub}, 'subnet.create.end')
-    except (ntn_exc.NeutronException,
-            nsx_exc.NsxPluginException,
-            api_exc.NsxApiException):
-        # It is not necessary to explicitly delete the subnet
-        # as it will be removed with the network
-        plugin.delete_network(context, meta_net['id'])
-
-
-def _destroy_metadata_access_network(plugin, context, router_id, ports):
-    if not ports:
-        return
-    meta_port = _find_metadata_port(plugin, context, ports)
-    if not meta_port:
-        return
-    meta_net_id = meta_port['network_id']
-    meta_sub_id = meta_port['fixed_ips'][0]['subnet_id']
-    plugin.remove_router_interface(
-        context, router_id, {'port_id': meta_port['id']})
-    greenthread.sleep(0)  # yield
-    context.session.expunge_all()
-    try:
-        # Remove network (this will remove the subnet too)
-        plugin.delete_network(context, meta_net_id)
-        greenthread.sleep(0)  # yield
-    except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,
-            api_exc.NsxApiException):
-        # must re-add the router interface
-        plugin.add_router_interface(context, router_id,
-                                    {'subnet_id': meta_sub_id})
-    # Tell to stop the metadata agent proxy
-    _notify_rpc_agent(
-        context, {'network': {'id': meta_net_id}}, 'network.delete.end')
-
-
-def _notify_rpc_agent(context, payload, event):
-    if cfg.CONF.dhcp_agent_notification:
-        dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
-        dhcp_notifier.notify(context, payload, event)
diff --git a/neutron/plugins/vmware/dhcpmeta_modes.py b/neutron/plugins/vmware/dhcpmeta_modes.py
deleted file mode 100644 (file)
index e6c7b08..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2013 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from oslo_config import cfg
-from oslo_utils import importutils
-
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.api.rpc.handlers import dhcp_rpc
-from neutron.api.rpc.handlers import metadata_rpc
-from neutron.common import constants as const
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.db import agents_db
-from neutron.i18n import _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.common import config
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.dhcp_meta import combined
-from neutron.plugins.vmware.dhcp_meta import lsnmanager
-from neutron.plugins.vmware.dhcp_meta import migration
-from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc
-from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc
-from neutron.plugins.vmware.extensions import lsn
-
-LOG = logging.getLogger(__name__)
-
-
-class DhcpMetadataAccess(object):
-
-    def setup_dhcpmeta_access(self):
-        """Initialize support for DHCP and Metadata services."""
-        self._init_extensions()
-        if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENT:
-            self._setup_rpc_dhcp_metadata()
-            mod = nsx_rpc
-        elif cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS:
-            self._setup_nsx_dhcp_metadata()
-            mod = nsx_svc
-        elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED:
-            notifier = self._setup_nsx_dhcp_metadata()
-            self._setup_rpc_dhcp_metadata(notifier=notifier)
-            mod = combined
-        else:
-            error = _("Invalid agent_mode: %s") % cfg.CONF.NSX.agent_mode
-            LOG.error(error)
-            raise nsx_exc.NsxPluginException(err_msg=error)
-        self.handle_network_dhcp_access_delegate = (
-            mod.handle_network_dhcp_access
-        )
-        self.handle_port_dhcp_access_delegate = (
-            mod.handle_port_dhcp_access
-        )
-        self.handle_port_metadata_access_delegate = (
-            mod.handle_port_metadata_access
-        )
-        self.handle_metadata_access_delegate = (
-            mod.handle_router_metadata_access
-        )
-
-    def _setup_rpc_dhcp_metadata(self, notifier=None):
-        self.topic = topics.PLUGIN
-        self.conn = n_rpc.create_connection(new=True)
-        self.endpoints = [dhcp_rpc.DhcpRpcCallback(),
-                          agents_db.AgentExtRpcCallback(),
-                          metadata_rpc.MetadataRpcCallback()]
-        self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
-        self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
-            notifier or dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
-        self.conn.consume_in_threads()
-        self.network_scheduler = importutils.import_object(
-            cfg.CONF.network_scheduler_driver
-        )
-        self.supported_extension_aliases.extend(
-            ['agent', 'dhcp_agent_scheduler'])
-
-    def _setup_nsx_dhcp_metadata(self):
-        self._check_services_requirements()
-        nsx_svc.register_dhcp_opts(cfg)
-        nsx_svc.register_metadata_opts(cfg)
-        lsnmanager.register_lsn_opts(cfg)
-        lsn_manager = lsnmanager.PersistentLsnManager(self.safe_reference)
-        self.lsn_manager = lsn_manager
-        if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS:
-            notifier = nsx_svc.DhcpAgentNotifyAPI(self.safe_reference,
-                                                  lsn_manager)
-            self.agent_notifiers[const.AGENT_TYPE_DHCP] = notifier
-            # In agentless mode, ports whose owner is DHCP need to
-            # be special cased; so add it to the list of special
-            # owners list
-            if const.DEVICE_OWNER_DHCP not in self.port_special_owners:
-                self.port_special_owners.append(const.DEVICE_OWNER_DHCP)
-        elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED:
-            # This becomes ineffective, as all new networks creations
-            # are handled by Logical Services Nodes in NSX
-            cfg.CONF.set_override('network_auto_schedule', False)
-            LOG.warn(_LW('network_auto_schedule has been disabled'))
-            notifier = combined.DhcpAgentNotifyAPI(self.safe_reference,
-                                                   lsn_manager)
-            self.supported_extension_aliases.append(lsn.EXT_ALIAS)
-            # Add the capability to migrate dhcp and metadata services over
-            self.migration_manager = (
-                migration.MigrationManager(
-                    self.safe_reference, lsn_manager, notifier))
-        return notifier
-
-    def _init_extensions(self):
-        extensions = (lsn.EXT_ALIAS, 'agent', 'dhcp_agent_scheduler')
-        for ext in extensions:
-            if ext in self.supported_extension_aliases:
-                self.supported_extension_aliases.remove(ext)
-
-    def _check_services_requirements(self):
-        try:
-            error = None
-            nsx_svc.check_services_requirements(self.cluster)
-        except nsx_exc.InvalidVersion:
-            error = _("Unable to run Neutron with config option '%s', as NSX "
-                      "does not support it") % cfg.CONF.NSX.agent_mode
-        except nsx_exc.ServiceClusterUnavailable:
-            error = _("Unmet dependency for config option "
-                      "'%s'") % cfg.CONF.NSX.agent_mode
-        if error:
-            LOG.exception(error)
-            raise nsx_exc.NsxPluginException(err_msg=error)
-
-    def get_lsn(self, context, network_id, fields=None):
-        report = self.migration_manager.report(context, network_id)
-        return {'network': network_id, 'report': report}
-
-    def create_lsn(self, context, lsn):
-        network_id = lsn['lsn']['network']
-        subnet = self.migration_manager.validate(context, network_id)
-        subnet_id = None if not subnet else subnet['id']
-        self.migration_manager.migrate(context, network_id, subnet)
-        r = self.migration_manager.report(context, network_id, subnet_id)
-        return {'network': network_id, 'report': r}
-
-    def handle_network_dhcp_access(self, context, network, action):
-        self.handle_network_dhcp_access_delegate(self.safe_reference, context,
-                                                 network, action)
-
-    def handle_port_dhcp_access(self, context, port_data, action):
-        self.handle_port_dhcp_access_delegate(self.safe_reference, context,
-                                              port_data, action)
-
-    def handle_port_metadata_access(self, context, port, is_delete=False):
-        self.handle_port_metadata_access_delegate(self.safe_reference, context,
-                                                  port, is_delete)
-
-    def handle_router_metadata_access(self, context,
-                                      router_id, interface=None):
-        self.handle_metadata_access_delegate(self.safe_reference, context,
-                                             router_id, interface)
index 1be3809b5db4885b9aba789875932b2d89b73efb..83cd202803412a03e34f9ac335c253f46191aa6f 100644 (file)
@@ -19,7 +19,6 @@ from oslo_config import cfg
 
 from neutron.api.v2 import attributes
 from neutron.api.v2 import resource_helper
-from neutron.plugins.vmware.common import utils
 
 GATEWAY_RESOURCE_NAME = "network_gateway"
 DEVICE_RESOURCE_NAME = "gateway_device"
@@ -30,6 +29,20 @@ GATEWAY_DEVICES = "%ss" % DEVICE_RESOURCE_NAME.replace('_', '-')
 DEVICE_ID_ATTR = 'id'
 IFACE_NAME_ATTR = 'interface_name'
 
+
+# TODO(salv-orlando): This type definition is duplicated into
+# stackforge/vmware-nsx. This temporary duplication should be removed once the
+# plugin decomposition is finished.
+# Allowed network types for the NSX Plugin
+class NetworkTypes(object):
+    """Allowed provider network types for the NSX Plugin."""
+    L3_EXT = 'l3_ext'
+    STT = 'stt'
+    GRE = 'gre'
+    FLAT = 'flat'
+    VLAN = 'vlan'
+    BRIDGE = 'bridge'
+
 # Attribute Map for Network Gateway Resource
 # TODO(salvatore-orlando): add admin state as other neutron resources
 RESOURCE_ATTRIBUTE_MAP = {
@@ -111,11 +124,11 @@ def _validate_connector_type(data, valid_values=None):
         msg = _("A connector type is required to create a gateway device")
         return msg
     connector_types = (valid_values if valid_values else
-                       [utils.NetworkTypes.GRE,
-                        utils.NetworkTypes.STT,
-                        utils.NetworkTypes.BRIDGE,
-                        'ipsec%s' % utils.NetworkTypes.GRE,
-                        'ipsec%s' % utils.NetworkTypes.STT])
+                       [NetworkTypes.GRE,
+                        NetworkTypes.STT,
+                        NetworkTypes.BRIDGE,
+                        'ipsec%s' % NetworkTypes.GRE,
+                        'ipsec%s' % NetworkTypes.STT])
     if data not in connector_types:
         msg = _("Unknown connector type: %s") % data
         return msg
diff --git a/neutron/plugins/vmware/nsx_cluster.py b/neutron/plugins/vmware/nsx_cluster.py
deleted file mode 100644 (file)
index bf0cf37..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright 2012 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron.i18n import _LI
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.common import exceptions
-
-LOG = logging.getLogger(__name__)
-DEFAULT_PORT = 443
-# Raise if one of those attributes is not specified
-REQUIRED_ATTRIBUTES = ['default_tz_uuid', 'nsx_user',
-                       'nsx_password', 'nsx_controllers']
-# Emit a INFO log if one of those attributes is not specified
-IMPORTANT_ATTRIBUTES = ['default_l3_gw_service_uuid']
-# Deprecated attributes
-DEPRECATED_ATTRIBUTES = ['metadata_dhcp_host_route',
-                         'nvp_user', 'nvp_password', 'nvp_controllers']
-
-
-class NSXCluster(object):
-    """NSX cluster class.
-
-    Encapsulates controller connections and the API client for a NSX cluster.
-
-    Controller-specific parameters, such as timeouts are stored in the
-    elements of the controllers attribute, which are dicts.
-    """
-
-    def __init__(self, **kwargs):
-        self._required_attributes = REQUIRED_ATTRIBUTES[:]
-        self._important_attributes = IMPORTANT_ATTRIBUTES[:]
-        self._deprecated_attributes = {}
-        self._sanity_check(kwargs)
-
-        for opt, val in self._deprecated_attributes.iteritems():
-            LOG.deprecated(_("Attribute '%s' has been deprecated or moved "
-                             "to a new section. See new configuration file "
-                             "for details."), opt)
-            depr_func = getattr(self, '_process_%s' % opt, None)
-            if depr_func:
-                depr_func(val)
-
-        # If everything went according to plan these two lists should be empty
-        if self._required_attributes:
-            raise exceptions.InvalidClusterConfiguration(
-                invalid_attrs=self._required_attributes)
-        if self._important_attributes:
-            LOG.info(_LI("The following cluster attributes were "
-                         "not specified: %s'"), self._important_attributes)
-        # The API client will be explicitly created by users of this class
-        self.api_client = None
-
-    def _sanity_check(self, options):
-        # Iterating this way ensures the conf parameters also
-        # define the structure of this class
-        for arg in cfg.CONF:
-            if arg not in DEPRECATED_ATTRIBUTES:
-                setattr(self, arg, options.get(arg, cfg.CONF.get(arg)))
-                self._process_attribute(arg)
-            elif options.get(arg) is not None:
-                # Process deprecated attributes only if specified
-                self._deprecated_attributes[arg] = options.get(arg)
-
-    def _process_attribute(self, attribute):
-        # Process the attribute only if it's not empty!
-        if getattr(self, attribute, None):
-            if attribute in self._required_attributes:
-                self._required_attributes.remove(attribute)
-            if attribute in self._important_attributes:
-                self._important_attributes.remove(attribute)
-            handler_func = getattr(self, '_process_%s' % attribute, None)
-            if handler_func:
-                handler_func()
-
-    def _process_nsx_controllers(self):
-        # If this raises something is not right, so let it bubble up
-        # TODO(salvatore-orlando): Also validate attribute here
-        for i, ctrl in enumerate(self.nsx_controllers or []):
-            if len(ctrl.split(':')) == 1:
-                self.nsx_controllers[i] = '%s:%s' % (ctrl, DEFAULT_PORT)
-
-    def _process_nvp_controllers(self):
-        self.nsx_controllers = self.nvp_controllers
-        self._process_nsx_controllers()
diff --git a/neutron/plugins/vmware/nsxlib/__init__.py b/neutron/plugins/vmware/nsxlib/__init__.py
deleted file mode 100644 (file)
index 6e41c79..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-# Copyright 2014 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils
-
-from neutron.common import exceptions as exception
-from neutron.openstack.common import log
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron import version
-
-HTTP_GET = "GET"
-HTTP_POST = "POST"
-HTTP_DELETE = "DELETE"
-HTTP_PUT = "PUT"
-# Prefix to be used for all NSX API calls
-URI_PREFIX = "/ws.v1"
-NEUTRON_VERSION = version.version_info.release_string()
-
-LOG = log.getLogger(__name__)
-
-
-def _build_uri_path(resource,
-                    resource_id=None,
-                    parent_resource_id=None,
-                    fields=None,
-                    relations=None,
-                    filters=None,
-                    types=None,
-                    is_attachment=False,
-                    extra_action=None):
-    resources = resource.split('/')
-    res_path = resources[0]
-    if resource_id:
-        res_path += "/%s" % resource_id
-    if len(resources) > 1:
-        # There is also a parent resource to account for in the uri
-        res_path = "%s/%s/%s" % (resources[1],
-                                 parent_resource_id,
-                                 res_path)
-    if is_attachment:
-        res_path = "%s/attachment" % res_path
-    elif extra_action:
-        res_path = "%s/%s" % (res_path, extra_action)
-    params = []
-    params.append(fields and "fields=%s" % fields)
-    params.append(relations and "relations=%s" % relations)
-    params.append(types and "types=%s" % types)
-    if filters:
-        sorted_filters = [
-            '%s=%s' % (k, filters[k]) for k in sorted(filters.keys())
-        ]
-        params.extend(sorted_filters)
-    uri_path = "%s/%s" % (URI_PREFIX, res_path)
-    non_empty_params = [x for x in params if x is not None]
-    if non_empty_params:
-        query_string = '&'.join(non_empty_params)
-        if query_string:
-            uri_path += "?%s" % query_string
-    return uri_path
-
-
-def format_exception(etype, e, exception_locals):
-    """Consistent formatting for exceptions.
-
-    :param etype: a string describing the exception type.
-    :param e: the exception.
-    :param execption_locals: calling context local variable dict.
-    :returns: a formatted string.
-    """
-    msg = [_("Error. %(type)s exception: %(exc)s.") %
-           {'type': etype, 'exc': e}]
-    l = dict((k, v) for k, v in exception_locals.iteritems()
-             if k != 'request')
-    msg.append(_("locals=[%s]") % str(l))
-    return ' '.join(msg)
-
-
-def do_request(*args, **kwargs):
-    """Issue a request to the cluster specified in kwargs.
-
-    :param args: a list of positional arguments.
-    :param kwargs: a list of keyworkds arguments.
-    :returns: the result of the operation loaded into a python
-        object or None.
-    """
-    cluster = kwargs["cluster"]
-    try:
-        res = cluster.api_client.request(*args)
-        if res:
-            return jsonutils.loads(res)
-    except api_exc.ResourceNotFound:
-        raise exception.NotFound()
-    except api_exc.ReadOnlyMode:
-        raise nsx_exc.MaintenanceInProgress()
-
-
-def get_single_query_page(path, cluster, page_cursor=None,
-                          page_length=1000, neutron_only=True):
-    params = []
-    if page_cursor:
-        params.append("_page_cursor=%s" % page_cursor)
-    params.append("_page_length=%s" % page_length)
-    # NOTE(salv-orlando): On the NSX backend the 'Quantum' tag is still
-    # used for marking Neutron entities in order to preserve compatibility
-    if neutron_only:
-        params.append("tag_scope=quantum")
-    query_params = "&".join(params)
-    path = "%s%s%s" % (path, "&" if (path.find("?") != -1) else "?",
-                       query_params)
-    body = do_request(HTTP_GET, path, cluster=cluster)
-    # Result_count won't be returned if _page_cursor is supplied
-    return body['results'], body.get('page_cursor'), body.get('result_count')
-
-
-def get_all_query_pages(path, cluster):
-    need_more_results = True
-    result_list = []
-    page_cursor = None
-    while need_more_results:
-        results, page_cursor = get_single_query_page(
-            path, cluster, page_cursor)[:2]
-        if not page_cursor:
-            need_more_results = False
-        result_list.extend(results)
-    return result_list
-
-
-def mk_body(**kwargs):
-    """Convenience function creates and dumps dictionary to string.
-
-    :param kwargs: the key/value pirs to be dumped into a json string.
-    :returns: a json string.
-    """
-    return jsonutils.dumps(kwargs, ensure_ascii=False)
diff --git a/neutron/plugins/vmware/nsxlib/l2gateway.py b/neutron/plugins/vmware/nsxlib/l2gateway.py
deleted file mode 100644 (file)
index 5e20d2d..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright 2014 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from oslo_serialization import jsonutils
-
-from neutron.openstack.common import log
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import switch
-
-HTTP_GET = "GET"
-HTTP_POST = "POST"
-HTTP_DELETE = "DELETE"
-HTTP_PUT = "PUT"
-
-GWSERVICE_RESOURCE = "gateway-service"
-TRANSPORTNODE_RESOURCE = "transport-node"
-
-LOG = log.getLogger(__name__)
-
-
-def create_l2_gw_service(cluster, tenant_id, display_name, devices):
-    """Create a NSX Layer-2 Network Gateway Service.
-
-        :param cluster: The target NSX cluster
-        :param tenant_id: Identifier of the Openstack tenant for which
-        the gateway service.
-        :param display_name: Descriptive name of this gateway service
-        :param devices: List of transport node uuids (and network
-        interfaces on them) to use for the network gateway service
-        :raise NsxApiException: if there is a problem while communicating
-        with the NSX controller
-    """
-    # NOTE(salvatore-orlando): This is a little confusing, but device_id in
-    # NSX is actually the identifier a physical interface on the gateway
-    # device, which in the Neutron API is referred as interface_name
-    gateways = [{"transport_node_uuid": device['id'],
-                 "device_id": device['interface_name'],
-                 "type": "L2Gateway"} for device in devices]
-    gwservice_obj = {
-        "display_name": utils.check_and_truncate(display_name),
-        "tags": utils.get_tags(os_tid=tenant_id),
-        "gateways": gateways,
-        "type": "L2GatewayServiceConfig"
-    }
-    return nsxlib.do_request(
-        HTTP_POST, nsxlib._build_uri_path(GWSERVICE_RESOURCE),
-        jsonutils.dumps(gwservice_obj), cluster=cluster)
-
-
-def plug_l2_gw_service(cluster, lswitch_id, lport_id,
-                       gateway_id, vlan_id=None):
-    """Plug a Layer-2 Gateway Attachment object in a logical port."""
-    att_obj = {'type': 'L2GatewayAttachment',
-               'l2_gateway_service_uuid': gateway_id}
-    if vlan_id:
-        att_obj['vlan_id'] = vlan_id
-    return switch.plug_interface(cluster, lswitch_id, lport_id, att_obj)
-
-
-def get_l2_gw_service(cluster, gateway_id):
-    return nsxlib.do_request(
-        HTTP_GET, nsxlib._build_uri_path(GWSERVICE_RESOURCE,
-                                         resource_id=gateway_id),
-        cluster=cluster)
-
-
-def get_l2_gw_services(cluster, tenant_id=None,
-                       fields=None, filters=None):
-    actual_filters = dict(filters or {})
-    if tenant_id:
-        actual_filters['tag'] = tenant_id
-        actual_filters['tag_scope'] = 'os_tid'
-    return nsxlib.get_all_query_pages(
-        nsxlib._build_uri_path(GWSERVICE_RESOURCE,
-                               filters=actual_filters),
-        cluster)
-
-
-def update_l2_gw_service(cluster, gateway_id, display_name):
-    # TODO(salvatore-orlando): Allow updates for gateways too
-    gwservice_obj = get_l2_gw_service(cluster, gateway_id)
-    if not display_name:
-        # Nothing to update
-        return gwservice_obj
-    gwservice_obj["display_name"] = utils.check_and_truncate(display_name)
-    return nsxlib.do_request(HTTP_PUT,
-                             nsxlib._build_uri_path(GWSERVICE_RESOURCE,
-                                                    resource_id=gateway_id),
-                             jsonutils.dumps(gwservice_obj), cluster=cluster)
-
-
-def delete_l2_gw_service(cluster, gateway_id):
-    nsxlib.do_request(HTTP_DELETE,
-                      nsxlib._build_uri_path(GWSERVICE_RESOURCE,
-                                             resource_id=gateway_id),
-                      cluster=cluster)
-
-
-def _build_gateway_device_body(tenant_id, display_name, neutron_id,
-                               connector_type, connector_ip,
-                               client_certificate, tz_uuid):
-
-    connector_type_mappings = {
-        utils.NetworkTypes.STT: "STTConnector",
-        utils.NetworkTypes.GRE: "GREConnector",
-        utils.NetworkTypes.BRIDGE: "BridgeConnector",
-        'ipsec%s' % utils.NetworkTypes.STT: "IPsecSTT",
-        'ipsec%s' % utils.NetworkTypes.GRE: "IPsecGRE"}
-    nsx_connector_type = connector_type_mappings.get(connector_type)
-    body = {"display_name": utils.check_and_truncate(display_name),
-            "tags": utils.get_tags(os_tid=tenant_id,
-                                   q_gw_dev_id=neutron_id),
-            "admin_status_enabled": True}
-
-    if connector_ip and nsx_connector_type:
-        body["transport_connectors"] = [
-            {"transport_zone_uuid": tz_uuid,
-             "ip_address": connector_ip,
-             "type": nsx_connector_type}]
-
-    if client_certificate:
-        body["credential"] = {"client_certificate":
-                              {"pem_encoded": client_certificate},
-                              "type": "SecurityCertificateCredential"}
-    return body
-
-
-def create_gateway_device(cluster, tenant_id, display_name, neutron_id,
-                          tz_uuid, connector_type, connector_ip,
-                          client_certificate):
-    body = _build_gateway_device_body(tenant_id, display_name, neutron_id,
-                                      connector_type, connector_ip,
-                                      client_certificate, tz_uuid)
-    try:
-        return nsxlib.do_request(
-            HTTP_POST, nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE),
-            jsonutils.dumps(body, sort_keys=True), cluster=cluster)
-    except api_exc.InvalidSecurityCertificate:
-        raise nsx_exc.InvalidSecurityCertificate()
-
-
-def update_gateway_device(cluster, gateway_id, tenant_id,
-                          display_name, neutron_id,
-                          tz_uuid, connector_type, connector_ip,
-                          client_certificate):
-    body = _build_gateway_device_body(tenant_id, display_name, neutron_id,
-                                      connector_type, connector_ip,
-                                      client_certificate, tz_uuid)
-    try:
-        return nsxlib.do_request(
-            HTTP_PUT,
-            nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE,
-                                   resource_id=gateway_id),
-            jsonutils.dumps(body, sort_keys=True), cluster=cluster)
-    except api_exc.InvalidSecurityCertificate:
-        raise nsx_exc.InvalidSecurityCertificate()
-
-
-def delete_gateway_device(cluster, device_uuid):
-    return nsxlib.do_request(HTTP_DELETE,
-                             nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE,
-                                                    device_uuid),
-                             cluster=cluster)
-
-
-def get_gateway_device_status(cluster, device_uuid):
-    status_res = nsxlib.do_request(HTTP_GET,
-                                   nsxlib._build_uri_path(
-                                       TRANSPORTNODE_RESOURCE,
-                                       device_uuid,
-                                       extra_action='status'),
-                                   cluster=cluster)
-    # Returns the connection status
-    return status_res['connection']['connected']
-
-
-def get_gateway_devices_status(cluster, tenant_id=None):
-    if tenant_id:
-        gw_device_query_path = nsxlib._build_uri_path(
-            TRANSPORTNODE_RESOURCE,
-            fields="uuid,tags",
-            relations="TransportNodeStatus",
-            filters={'tag': tenant_id,
-                     'tag_scope': 'os_tid'})
-    else:
-        gw_device_query_path = nsxlib._build_uri_path(
-            TRANSPORTNODE_RESOURCE,
-            fields="uuid,tags",
-            relations="TransportNodeStatus")
-
-    response = nsxlib.get_all_query_pages(gw_device_query_path, cluster)
-    results = {}
-    for item in response:
-        results[item['uuid']] = (item['_relations']['TransportNodeStatus']
-                                 ['connection']['connected'])
-    return results
diff --git a/neutron/plugins/vmware/nsxlib/lsn.py b/neutron/plugins/vmware/nsxlib/lsn.py
deleted file mode 100644 (file)
index 2efd2e3..0000000
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils
-
-from neutron.common import exceptions as exception
-from neutron.openstack.common import log
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
-
-HTTP_GET = "GET"
-HTTP_POST = "POST"
-HTTP_DELETE = "DELETE"
-HTTP_PUT = "PUT"
-
-SERVICECLUSTER_RESOURCE = "edge-cluster"
-LSERVICESNODE_RESOURCE = "lservices-node"
-LSERVICESNODEPORT_RESOURCE = "lport/%s" % LSERVICESNODE_RESOURCE
-SUPPORTED_METADATA_OPTIONS = ['metadata_proxy_shared_secret']
-
-LOG = log.getLogger(__name__)
-
-
-def service_cluster_exists(cluster, svc_cluster_id):
-    exists = False
-    try:
-        exists = (
-            svc_cluster_id and
-            nsxlib.do_request(HTTP_GET,
-                              nsxlib._build_uri_path(
-                                  SERVICECLUSTER_RESOURCE,
-                                  resource_id=svc_cluster_id),
-                              cluster=cluster) is not None)
-    except exception.NotFound:
-        pass
-    return exists
-
-
-def lsn_for_network_create(cluster, network_id):
-    lsn_obj = {
-        "edge_cluster_uuid": cluster.default_service_cluster_uuid,
-        "tags": utils.get_tags(n_network_id=network_id)
-    }
-    return nsxlib.do_request(HTTP_POST,
-                             nsxlib._build_uri_path(LSERVICESNODE_RESOURCE),
-                             jsonutils.dumps(lsn_obj),
-                             cluster=cluster)["uuid"]
-
-
-def lsn_for_network_get(cluster, network_id):
-    filters = {"tag": network_id, "tag_scope": "n_network_id"}
-    results = nsxlib.do_request(HTTP_GET,
-                                nsxlib._build_uri_path(LSERVICESNODE_RESOURCE,
-                                                       fields="uuid",
-                                                       filters=filters),
-                                cluster=cluster)['results']
-    if not results:
-        raise exception.NotFound()
-    elif len(results) == 1:
-        return results[0]['uuid']
-
-
-def lsn_delete(cluster, lsn_id):
-    nsxlib.do_request(HTTP_DELETE,
-                      nsxlib._build_uri_path(LSERVICESNODE_RESOURCE,
-                                             resource_id=lsn_id),
-                      cluster=cluster)
-
-
-def lsn_port_host_entries_update(
-    cluster, lsn_id, lsn_port_id, conf, hosts_data):
-    hosts_obj = {'hosts': hosts_data}
-    nsxlib.do_request(HTTP_PUT,
-                      nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE,
-                                             parent_resource_id=lsn_id,
-                                             resource_id=lsn_port_id,
-                                             extra_action=conf),
-                      jsonutils.dumps(hosts_obj),
-                      cluster=cluster)
-
-
-def lsn_port_create(cluster, lsn_id, port_data):
-    port_obj = {
-        "ip_address": port_data["ip_address"],
-        "mac_address": port_data["mac_address"],
-        "tags": utils.get_tags(n_mac_address=port_data["mac_address"],
-                               n_subnet_id=port_data["subnet_id"]),
-        "type": "LogicalServicesNodePortConfig",
-    }
-    return nsxlib.do_request(HTTP_POST,
-                             nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE,
-                                                    parent_resource_id=lsn_id),
-                             jsonutils.dumps(port_obj),
-                             cluster=cluster)["uuid"]
-
-
-def lsn_port_delete(cluster, lsn_id, lsn_port_id):
-    return nsxlib.do_request(HTTP_DELETE,
-                             nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE,
-                                                    parent_resource_id=lsn_id,
-                                                    resource_id=lsn_port_id),
-                             cluster=cluster)
-
-
-def _lsn_port_get(cluster, lsn_id, filters):
-    results = nsxlib.do_request(HTTP_GET,
-                                nsxlib._build_uri_path(
-                                    LSERVICESNODEPORT_RESOURCE,
-                                    parent_resource_id=lsn_id,
-                                    fields="uuid",
-                                    filters=filters),
-                                cluster=cluster)['results']
-    if not results:
-        raise exception.NotFound()
-    elif len(results) == 1:
-        return results[0]['uuid']
-
-
-def lsn_port_by_mac_get(cluster, lsn_id, mac_address):
-    filters = {"tag": mac_address, "tag_scope": "n_mac_address"}
-    return _lsn_port_get(cluster, lsn_id, filters)
-
-
-def lsn_port_by_subnet_get(cluster, lsn_id, subnet_id):
-    filters = {"tag": subnet_id, "tag_scope": "n_subnet_id"}
-    return _lsn_port_get(cluster, lsn_id, filters)
-
-
-def lsn_port_info_get(cluster, lsn_id, lsn_port_id):
-    result = nsxlib.do_request(HTTP_GET,
-                               nsxlib._build_uri_path(
-                                   LSERVICESNODEPORT_RESOURCE,
-                                   parent_resource_id=lsn_id,
-                                   resource_id=lsn_port_id),
-                               cluster=cluster)
-    for tag in result['tags']:
-        if tag['scope'] == 'n_subnet_id':
-            result['subnet_id'] = tag['tag']
-            break
-    return result
-
-
-def lsn_port_plug_network(cluster, lsn_id, lsn_port_id, lswitch_port_id):
-    patch_obj = {
-        "type": "PatchAttachment",
-        "peer_port_uuid": lswitch_port_id
-    }
-    try:
-        nsxlib.do_request(HTTP_PUT,
-                          nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE,
-                                                 parent_resource_id=lsn_id,
-                                                 resource_id=lsn_port_id,
-                                                 is_attachment=True),
-                          jsonutils.dumps(patch_obj),
-                          cluster=cluster)
-    except api_exc.Conflict:
-        # This restriction might be lifted at some point
-        msg = (_("Attempt to plug Logical Services Node %(lsn)s into "
-                 "network with port %(port)s failed. PatchAttachment "
-                 "already exists with another port") %
-               {'lsn': lsn_id, 'port': lswitch_port_id})
-        LOG.exception(msg)
-        raise nsx_exc.LsnConfigurationConflict(lsn_id=lsn_id)
-
-
-def _lsn_configure_action(
-    cluster, lsn_id, action, is_enabled, obj):
-    lsn_obj = {"enabled": is_enabled}
-    lsn_obj.update(obj)
-    nsxlib.do_request(HTTP_PUT,
-                      nsxlib._build_uri_path(LSERVICESNODE_RESOURCE,
-                                             resource_id=lsn_id,
-                                             extra_action=action),
-                      jsonutils.dumps(lsn_obj),
-                      cluster=cluster)
-
-
-def _lsn_port_configure_action(
-    cluster, lsn_id, lsn_port_id, action, is_enabled, obj):
-    nsxlib.do_request(HTTP_PUT,
-                      nsxlib._build_uri_path(LSERVICESNODE_RESOURCE,
-                                             resource_id=lsn_id,
-                                             extra_action=action),
-                      jsonutils.dumps({"enabled": is_enabled}),
-                      cluster=cluster)
-    nsxlib.do_request(HTTP_PUT,
-                      nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE,
-                                             parent_resource_id=lsn_id,
-                                             resource_id=lsn_port_id,
-                                             extra_action=action),
-                      jsonutils.dumps(obj),
-                      cluster=cluster)
-
-
-def _get_opts(name, value):
-    return {"name": name, "value": str(value)}
-
-
-def lsn_port_dhcp_configure(
-        cluster, lsn_id, lsn_port_id, is_enabled=True, dhcp_options=None):
-    dhcp_options = dhcp_options or {}
-    opts = [_get_opts(key, val) for key, val in dhcp_options.iteritems()]
-    dhcp_obj = {'options': opts}
-    _lsn_port_configure_action(
-        cluster, lsn_id, lsn_port_id, 'dhcp', is_enabled, dhcp_obj)
-
-
-def lsn_metadata_configure(
-        cluster, lsn_id, is_enabled=True, metadata_info=None):
-    meta_obj = {
-        'metadata_server_ip': metadata_info['metadata_server_ip'],
-        'metadata_server_port': metadata_info['metadata_server_port'],
-    }
-    if metadata_info:
-        opts = [
-            _get_opts(opt, metadata_info[opt])
-            for opt in SUPPORTED_METADATA_OPTIONS
-            if metadata_info.get(opt)
-        ]
-        if opts:
-            meta_obj["options"] = opts
-    _lsn_configure_action(
-        cluster, lsn_id, 'metadata-proxy', is_enabled, meta_obj)
-
-
-def _lsn_port_host_action(
-    cluster, lsn_id, lsn_port_id, host_obj, extra_action, action):
-    nsxlib.do_request(HTTP_POST,
-                      nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE,
-                                             parent_resource_id=lsn_id,
-                                             resource_id=lsn_port_id,
-                                             extra_action=extra_action,
-                                             filters={"action": action}),
-                      jsonutils.dumps(host_obj),
-                      cluster=cluster)
-
-
-def lsn_port_dhcp_host_add(cluster, lsn_id, lsn_port_id, host_data):
-    _lsn_port_host_action(
-        cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'add_host')
-
-
-def lsn_port_dhcp_host_remove(cluster, lsn_id, lsn_port_id, host_data):
-    _lsn_port_host_action(
-        cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'remove_host')
-
-
-def lsn_port_metadata_host_add(cluster, lsn_id, lsn_port_id, host_data):
-    _lsn_port_host_action(
-        cluster, lsn_id, lsn_port_id, host_data, 'metadata-proxy', 'add_host')
-
-
-def lsn_port_metadata_host_remove(cluster, lsn_id, lsn_port_id, host_data):
-    _lsn_port_host_action(cluster, lsn_id, lsn_port_id,
-                          host_data, 'metadata-proxy', 'remove_host')
diff --git a/neutron/plugins/vmware/nsxlib/queue.py b/neutron/plugins/vmware/nsxlib/queue.py
deleted file mode 100644 (file)
index 0a291e4..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2014 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-
-from neutron.api.v2 import attributes as attr
-from neutron.common import exceptions as exception
-from neutron.openstack.common import log
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
-
-HTTP_POST = "POST"
-HTTP_DELETE = "DELETE"
-
-LQUEUE_RESOURCE = "lqueue"
-
-LOG = log.getLogger(__name__)
-
-
def create_lqueue(cluster, queue_data):
    """Create a logical queue on the NSX backend.

    :param cluster: the target NSX cluster
    :param queue_data: dict of queue attributes from the Neutron API
        (name, qos_marking, min, max, dscp); unset attributes are skipped
    :returns: the uuid assigned to the queue by the NSX backend
    :raises exception.NeutronException: if the NSX request fails
    """
    # Map Neutron API attribute names to their NSX counterparts.
    params = {
        'name': 'display_name',
        'qos_marking': 'qos_marking',
        'min': 'min_bandwidth_rate',
        'max': 'max_bandwidth_rate',
        'dscp': 'dscp'
    }
    # NOTE: dict.items() instead of the Python 2-only iteritems() keeps
    # this module importable on both Python 2 and Python 3.
    queue_obj = dict(
        (nsx_name, queue_data.get(api_name))
        for api_name, nsx_name in params.items()
        if attr.is_attr_set(queue_data.get(api_name))
    )
    if 'display_name' in queue_obj:
        # NSX display names have a maximum length; truncate if needed
        queue_obj['display_name'] = utils.check_and_truncate(
            queue_obj['display_name'])

    queue_obj['tags'] = utils.get_tags()
    try:
        return nsxlib.do_request(HTTP_POST,
                                 nsxlib._build_uri_path(LQUEUE_RESOURCE),
                                 jsonutils.dumps(queue_obj),
                                 cluster=cluster)['uuid']
    except api_exc.NsxApiException:
        # FIXME(salv-orlando): This should not raise NeutronException
        with excutils.save_and_reraise_exception():
            raise exception.NeutronException()
-
-
def delete_lqueue(cluster, queue_id):
    """Delete the logical queue identified by queue_id from the backend."""
    try:
        uri = nsxlib._build_uri_path(LQUEUE_RESOURCE,
                                     resource_id=queue_id)
        nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
    except Exception:
        # FIXME(salv-orlando): This should not raise NeutronException
        with excutils.save_and_reraise_exception():
            raise exception.NeutronException()
diff --git a/neutron/plugins/vmware/nsxlib/router.py b/neutron/plugins/vmware/nsxlib/router.py
deleted file mode 100644 (file)
index 0a39e7a..0000000
+++ /dev/null
@@ -1,707 +0,0 @@
-# Copyright 2014 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-
-from neutron.common import exceptions as exception
-from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import log
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import switch
-from neutron.plugins.vmware.nsxlib import versioning
-
-# @versioning.versioned decorator makes the apparent function body
-# totally unrelated to the real function.  This confuses pylint :(
-# pylint: disable=assignment-from-no-return
-
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"

# NSX resource paths for logical routers and their sub-resources.
# (The original assigned LROUTER_RESOURCE twice; the duplicate is removed.)
LROUTER_RESOURCE = "lrouter"
LROUTERPORT_RESOURCE = "lport/%s" % LROUTER_RESOURCE
LROUTERRIB_RESOURCE = "rib/%s" % LROUTER_RESOURCE
LROUTERNAT_RESOURCE = "nat/lrouter"
# Constants for NAT rules
MATCH_KEYS = ["destination_ip_addresses", "destination_port_max",
              "destination_port_min", "source_ip_addresses",
              "source_port_max", "source_port_min", "protocol"]
-
-LOG = log.getLogger(__name__)
-
-
def _prepare_lrouter_body(name, neutron_router_id, tenant_id,
                          router_type, distributed=None, **kwargs):
    """Assemble the request body for creating an NSX logical router.

    Extra keyword arguments are folded into the routing configuration.
    """
    routing_config = {"type": router_type}
    routing_config.update(kwargs)
    body = {
        "display_name": utils.check_and_truncate(name),
        "tags": utils.get_tags(os_tid=tenant_id,
                               q_router_id=neutron_router_id),
        "routing_config": routing_config,
        "type": "LogicalRouterConfig",
        "replication_mode": cfg.CONF.NSX.replication_mode,
    }
    # add the distributed key only if not None (ie: True or False)
    if distributed is not None:
        body['distributed'] = distributed
    return body
-
-
def _create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id,
                                     display_name, nexthop, distributed=None):
    """Create a logical router whose default route is handled implicitly."""
    routing_config = {
        "default_route_next_hop": {
            "gateway_ip_address": nexthop,
            "type": "RouterNextHop"
        },
    }
    body = _prepare_lrouter_body(
        display_name, neutron_router_id, tenant_id,
        "SingleDefaultRouteImplicitRoutingConfig",
        distributed=distributed,
        **routing_config)
    uri = nsxlib._build_uri_path(LROUTER_RESOURCE)
    return nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(body),
                             cluster=cluster)
-
-
def create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id,
                                    display_name, nexthop):
    """Create a NSX logical router on the specified cluster.

    :param cluster: the target NSX cluster
    :param neutron_router_id: identifier of the Neutron router
    :param tenant_id: identifier of the Openstack tenant owning the router
    :param display_name: descriptive name of this logical router
    :param nexthop: external gateway IP address for the logical router
    :raise NsxApiException: if there is a problem while communicating
        with the NSX controller
    """
    return _create_implicit_routing_lrouter(
        cluster, neutron_router_id, tenant_id, display_name, nexthop)
-
-
def create_implicit_routing_lrouter_with_distribution(
    cluster, neutron_router_id, tenant_id, display_name,
    nexthop, distributed=None):
    """Create a NSX logical router, optionally distributed.

    :param cluster: the target NSX cluster
    :param neutron_router_id: identifier of the Neutron router
    :param tenant_id: identifier of the Openstack tenant owning the router
    :param display_name: descriptive name of this logical router
    :param nexthop: external gateway IP address for the logical router
    :param distributed: True to create a distributed logical router
    :raise NsxApiException: if there is a problem while communicating
        with the NSX controller
    """
    return _create_implicit_routing_lrouter(
        cluster, neutron_router_id, tenant_id,
        display_name, nexthop, distributed)
-
-
def create_explicit_routing_lrouter(cluster, neutron_router_id, tenant_id,
                                    display_name, nexthop, distributed=None):
    """Create an NSX logical router with an explicit routing table.

    A static default route (0.0.0.0/0) pointing at nexthop is installed
    immediately after the router is created.
    """
    lrouter_obj = _prepare_lrouter_body(
        display_name, neutron_router_id, tenant_id,
        "RoutingTableRoutingConfig", distributed=distributed)
    router = nsxlib.do_request(HTTP_POST,
                               nsxlib._build_uri_path(LROUTER_RESOURCE),
                               jsonutils.dumps(lrouter_obj), cluster=cluster)
    create_explicit_route_lrouter(
        cluster, router['uuid'],
        {'prefix': '0.0.0.0/0', 'next_hop_ip': nexthop})
    return router
-
-
def delete_lrouter(cluster, lrouter_id):
    """Delete the logical router identified by lrouter_id."""
    uri = nsxlib._build_uri_path(LROUTER_RESOURCE,
                                 resource_id=lrouter_id)
    nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
-
-
def get_lrouter(cluster, lrouter_id):
    """Fetch one logical router, including its status relation."""
    uri = nsxlib._build_uri_path(LROUTER_RESOURCE,
                                 resource_id=lrouter_id,
                                 relations='LogicalRouterStatus')
    return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
-
-
def query_lrouters(cluster, fields=None, filters=None):
    """Fetch all logical routers (with status) matching the filters."""
    uri = nsxlib._build_uri_path(LROUTER_RESOURCE,
                                 fields=fields,
                                 relations='LogicalRouterStatus',
                                 filters=filters)
    return nsxlib.get_all_query_pages(uri, cluster)
-
-
def get_lrouters(cluster, tenant_id, fields=None, filters=None):
    """Return logical routers, optionally restricted to a single tenant."""
    # FIXME(salv-orlando): Fields parameter is ignored in this routine
    actual_filters = dict(filters) if filters else {}
    if tenant_id:
        # Tenant ownership is tracked through the os_tid tag on the router
        actual_filters['tag'] = tenant_id
        actual_filters['tag_scope'] = 'os_tid'
    return query_lrouters(cluster,
                          "uuid,display_name,fabric_status,tags",
                          actual_filters)
-
-
def update_implicit_routing_lrouter(cluster, r_id, display_name, nexthop):
    """Update name and/or default next hop of an implicit-routing router."""
    lrouter_obj = get_lrouter(cluster, r_id)
    if not display_name and not nexthop:
        # Nothing to update
        return lrouter_obj
    # It seems that this is faster than the doing an if on display_name
    lrouter_obj["display_name"] = (utils.check_and_truncate(display_name) or
                                   lrouter_obj["display_name"])
    if nexthop:
        nh_element = lrouter_obj["routing_config"].get(
            "default_route_next_hop")
        if nh_element:
            nh_element["gateway_ip_address"] = nexthop
    uri = nsxlib._build_uri_path(LROUTER_RESOURCE, resource_id=r_id)
    return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(lrouter_obj),
                             cluster=cluster)
-
-
def get_explicit_routes_lrouter(cluster, router_id, protocol_type='static'):
    """Return all routes of the given protocol type for a logical router."""
    uri = nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
                                 filters={'protocol': protocol_type},
                                 fields="*",
                                 parent_resource_id=router_id)
    return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results']
-
-
def delete_explicit_route_lrouter(cluster, router_id, route_id):
    """Remove a single route from a logical router's routing table."""
    uri = nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
                                 resource_id=route_id,
                                 parent_resource_id=router_id)
    nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
-
-
def create_explicit_route_lrouter(cluster, router_id, route):
    """Add a static route to a logical router and return its uuid.

    Accepts both the Neutron ("nexthop"/"destination") and the NSX
    ("next_hop_ip"/"prefix") key names for the route attributes.
    """
    route_obj = {
        "action": "accept",
        "next_hop_ip": route.get("nexthop") or route.get("next_hop_ip"),
        "prefix": route.get("destination") or route.get("prefix"),
        "protocol": "static",
    }
    return nsxlib.do_request(
        HTTP_POST,
        nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
                               parent_resource_id=router_id),
        jsonutils.dumps(route_obj),
        cluster=cluster)['uuid']
-
-
def update_explicit_routes_lrouter(cluster, router_id, routes):
    """Replace the static routes of a logical router with ``routes``.

    The update is not atomic on the backend: every existing non-default
    static route is deleted first, then the new routes are added one by
    one.  Routes deleted and added so far are tracked so that, if any
    backend call fails, the previous routing table can be restored
    before the exception is re-raised.

    :returns: the list of routes configured on the router before
        the update.
    """
    # Update in bulk: delete them all, and add the ones specified
    # but keep track of what is been modified to allow roll-backs
    # in case of failures
    nsx_routes = get_explicit_routes_lrouter(cluster, router_id)
    try:
        deleted_routes = []
        added_routes = []
        # omit the default route (0.0.0.0/0) from the processing;
        # this must be handled through the nexthop for the router
        for route in nsx_routes:
            prefix = route.get("destination") or route.get("prefix")
            if prefix != '0.0.0.0/0':
                delete_explicit_route_lrouter(cluster,
                                              router_id,
                                              route['uuid'])
                deleted_routes.append(route)
        for route in routes:
            prefix = route.get("destination") or route.get("prefix")
            if prefix != '0.0.0.0/0':
                uuid = create_explicit_route_lrouter(cluster,
                                                     router_id, route)
                added_routes.append(uuid)
    except api_exc.NsxApiException:
        LOG.exception(_LE('Cannot update NSX routes %(routes)s for '
                          'router %(router_id)s'),
                      {'routes': routes, 'router_id': router_id})
        # Roll back to keep NSX in consistent state
        with excutils.save_and_reraise_exception():
            if nsx_routes:
                if deleted_routes:
                    # Re-create the routes removed earlier in this call
                    for route in deleted_routes:
                        create_explicit_route_lrouter(cluster,
                                                      router_id, route)
                if added_routes:
                    # Drop the routes added before the failure
                    for route_id in added_routes:
                        delete_explicit_route_lrouter(cluster,
                                                      router_id, route_id)
    return nsx_routes
-
-
def get_default_route_explicit_routing_lrouter_v33(cluster, router_id):
    """Return the default (0.0.0.0/0) static route of a logical router.

    NSX 3.3 supports filtering the routing table by prefix, so the
    default route can be fetched with a single query.

    :returns: the default route dict, or None when the router has no
        default route (mirrors the v3.2 variant instead of raising
        IndexError on an empty result set).
    """
    static_filter = {"protocol": "static",
                     "prefix": "0.0.0.0/0"}
    results = nsxlib.do_request(
        HTTP_GET,
        nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
                               filters=static_filter,
                               fields="*",
                               parent_resource_id=router_id),
        cluster=cluster)["results"]
    # Guard against an empty result set instead of raising IndexError
    return results[0] if results else None
-
-
def get_default_route_explicit_routing_lrouter_v32(cluster, router_id):
    """Return the default (0.0.0.0/0) route of a router on NSX 3.2.

    Returns None when the router has no default route.
    """
    # Scan all routes because 3.2 does not support query by prefix
    for route in get_explicit_routes_lrouter(cluster, router_id):
        if route['prefix'] == '0.0.0.0/0':
            return route
-
-
def update_default_gw_explicit_routing_lrouter(cluster, router_id, next_hop):
    """Point the router's default (0.0.0.0/0) route at next_hop."""
    default_route = get_default_route_explicit_routing_lrouter(cluster,
                                                               router_id)
    if next_hop == default_route["next_hop_ip"]:
        # Already pointing at the requested next hop; nothing to do.
        return
    new_default_route = {"action": "accept",
                         "next_hop_ip": next_hop,
                         "prefix": "0.0.0.0/0",
                         "protocol": "static"}
    uri = nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
                                 resource_id=default_route['uuid'],
                                 parent_resource_id=router_id)
    nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(new_default_route),
                      cluster=cluster)
-
-
def update_explicit_routing_lrouter(cluster, router_id,
                                    display_name, next_hop, routes=None):
    """Update name, default gateway, and optionally the static routes.

    :returns: the previous route list when routes are updated, else None.
    """
    update_implicit_routing_lrouter(
        cluster, router_id, display_name, next_hop)
    if next_hop:
        update_default_gw_explicit_routing_lrouter(
            cluster, router_id, next_hop)
    if routes is not None:
        return update_explicit_routes_lrouter(cluster, router_id, routes)
-
-
def query_lrouter_lports(cluster, lr_uuid, fields="*",
                         filters=None, relations=None):
    """List the logical ports of the given logical router."""
    return nsxlib.do_request(
        HTTP_GET,
        nsxlib._build_uri_path(LROUTERPORT_RESOURCE,
                               parent_resource_id=lr_uuid,
                               fields=fields, filters=filters,
                               relations=relations),
        cluster=cluster)['results']
-
-
def create_router_lport(cluster, lrouter_uuid, tenant_id, neutron_port_id,
                        display_name, admin_status_enabled, ip_addresses,
                        mac_address=None):
    """Creates a logical port on the assigned logical router."""
    lport_obj = {
        'admin_status_enabled': admin_status_enabled,
        'display_name': display_name,
        'tags': utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id),
        'ip_addresses': ip_addresses,
        'type': "LogicalRouterPortConfig",
    }
    # Only add the mac_address to lport_obj if present. This is because
    # when creating the fake_ext_gw there is no mac_address present.
    if mac_address:
        lport_obj['mac_address'] = mac_address
    result = nsxlib.do_request(
        HTTP_POST,
        nsxlib._build_uri_path(LROUTERPORT_RESOURCE,
                               parent_resource_id=lrouter_uuid),
        jsonutils.dumps(lport_obj),
        cluster=cluster)
    LOG.debug("Created logical port %(lport_uuid)s on "
              "logical router %(lrouter_uuid)s",
              {'lport_uuid': result['uuid'],
               'lrouter_uuid': lrouter_uuid})
    return result
-
-
def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid,
                        tenant_id, neutron_port_id, display_name,
                        admin_status_enabled, ip_addresses):
    """Updates a logical port on the assigned logical router.

    :param cluster: the target NSX cluster
    :param lrouter_uuid: uuid of the logical router owning the port
    :param lrouter_port_uuid: uuid of the port to update
    :returns: the updated port as returned by the NSX backend
    """
    lport_obj = dict(
        admin_status_enabled=admin_status_enabled,
        display_name=display_name,
        tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id),
        ip_addresses=ip_addresses,
        type="LogicalRouterPortConfig"
    )
    # Do not pass null items to NSX. Filtering into a new dict avoids
    # the Python 3 RuntimeError raised by deleting keys while iterating
    # over the live .keys() view (the original did exactly that).
    lport_obj = dict((k, v) for k, v in lport_obj.items() if v is not None)
    path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE,
                                  lrouter_port_uuid,
                                  parent_resource_id=lrouter_uuid)
    result = nsxlib.do_request(HTTP_PUT, path,
                               jsonutils.dumps(lport_obj),
                               cluster=cluster)
    LOG.debug("Updated logical port %(lport_uuid)s on "
              "logical router %(lrouter_uuid)s",
              {'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid})
    return result
-
-
def delete_router_lport(cluster, lrouter_uuid, lport_uuid):
    """Deletes a logical port on the assigned logical router."""
    # NOTE: the original docstring incorrectly said "Creates".
    path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_uuid,
                                  lrouter_uuid)
    nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
    LOG.debug("Delete logical router port %(lport_uuid)s on "
              "logical router %(lrouter_uuid)s",
              {'lport_uuid': lport_uuid,
               'lrouter_uuid': lrouter_uuid})
-
-
def delete_peer_router_lport(cluster, lr_uuid, ls_uuid, lp_uuid):
    """Delete the router port patched to the given switch port, if any."""
    nsx_port = switch.get_port(cluster, ls_uuid, lp_uuid,
                               relations="LogicalPortAttachment")
    relations = nsx_port.get('_relations')
    if not relations:
        return
    att_data = relations.get('LogicalPortAttachment')
    if not att_data:
        return
    lrp_uuid = att_data.get('peer_port_uuid')
    if lrp_uuid:
        delete_router_lport(cluster, lr_uuid, lrp_uuid)
-
-
def find_router_gw_port(context, cluster, router_id):
    """Retrieves the external gateway port for a NSX logical router.

    Returns None if the router has no L3 gateway attachment.
    """
    # Find the uuid of nsx ext gw logical router port
    # TODO(salvatore-orlando): Consider storing it in Neutron DB
    lports = query_lrouter_lports(
        cluster, router_id, relations="LogicalPortAttachment")
    for lport in lports:
        if '_relations' not in lport:
            continue
        attachment = lport['_relations'].get('LogicalPortAttachment')
        if attachment and attachment.get('type') == 'L3GatewayAttachment':
            return lport
-
-
def plug_router_port_attachment(cluster, router_id, port_id,
                                attachment_uuid, nsx_attachment_type,
                                attachment_vlan=None):
    """Attach a router port to the given attachment.

    Current attachment types:
       - PatchAttachment [-> logical switch port uuid]
       - L3GatewayAttachment [-> L3GatewayService uuid]
    For the latter attachment type a VLAN ID can be specified as well.

    :raises nsx_exc.InvalidAttachmentType: for any other attachment type
    """
    uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, port_id, router_id,
                                 is_attachment=True)
    attach_obj = {"type": nsx_attachment_type}
    if nsx_attachment_type == "PatchAttachment":
        attach_obj["peer_port_uuid"] = attachment_uuid
    elif nsx_attachment_type == "L3GatewayAttachment":
        attach_obj["l3_gateway_service_uuid"] = attachment_uuid
        if attachment_vlan:
            attach_obj['vlan_id'] = attachment_vlan
    else:
        raise nsx_exc.InvalidAttachmentType(
            attachment_type=nsx_attachment_type)
    return nsxlib.do_request(
        HTTP_PUT, uri, jsonutils.dumps(attach_obj), cluster=cluster)
-
-
def _create_nat_match_obj(**kwargs):
    """Build the 'match' dictionary for an NSX NAT rule.

    :param kwargs: match criteria; each key must be listed in MATCH_KEYS
    :returns: dict with the IPv4 ethertype plus the given criteria
    :raises Exception: if any unsupported match key is passed
    """
    nat_match_obj = {'ethertype': 'IPv4'}
    delta = set(kwargs.keys()) - set(MATCH_KEYS)
    if delta:
        # The original passed `delta` as a second positional argument to
        # Exception instead of interpolating it into the message, so the
        # offending keys never appeared in the rendered error.
        raise Exception(_("Invalid keys for NAT match: %s") % delta)
    nat_match_obj.update(kwargs)
    return nat_match_obj
-
-
def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj):
    """POST a new NAT rule to the given logical router."""
    LOG.debug("Creating NAT rule: %s", nat_rule_obj)
    return nsxlib.do_request(
        HTTP_POST,
        nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
                               parent_resource_id=router_id),
        jsonutils.dumps(nat_rule_obj),
        cluster=cluster)
-
-
-def _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj):
-    return {"to_source_ip_address_min": min_src_ip,
-            "to_source_ip_address_max": max_src_ip,
-            "type": "SourceNatRule",
-            "match": nat_match_obj}
-
-
def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None):
    """No-op: NSX 2.x does not support no-SNAT rules; just log it."""
    LOG.info(_LI("No SNAT rules cannot be applied as they are not available "
                 "in this version of the NSX platform"))
-
-
def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None):
    """No-op: NSX 2.x does not support no-DNAT rules; just log it."""
    LOG.info(_LI("No DNAT rules cannot be applied as they are not available "
                 "in this version of the NSX platform"))
-
-
def create_lrouter_snat_rule_v2(cluster, router_id,
                                min_src_ip, max_src_ip, match_criteria=None):
    """Create a SourceNatRule on an NSX 2.x logical router."""
    rule = _build_snat_rule_obj(min_src_ip, max_src_ip,
                                _create_nat_match_obj(**match_criteria))
    return _create_lrouter_nat_rule(cluster, router_id, rule)
-
-
def create_lrouter_dnat_rule_v2(cluster, router_id, dst_ip,
                                to_dst_port=None, match_criteria=None):
    """Create a DestinationNatRule on an NSX 2.x logical router."""
    rule = {
        "to_destination_ip_address_min": dst_ip,
        "to_destination_ip_address_max": dst_ip,
        "type": "DestinationNatRule",
        "match": _create_nat_match_obj(**match_criteria),
    }
    if to_dst_port:
        rule['to_destination_port'] = to_dst_port
    return _create_lrouter_nat_rule(cluster, router_id, rule)
-
-
def create_lrouter_nosnat_rule_v3(cluster, router_id, order=None,
                                  match_criteria=None):
    """Create a NoSourceNatRule on an NSX 3.x logical router."""
    rule = {
        "type": "NoSourceNatRule",
        "match": _create_nat_match_obj(**match_criteria),
    }
    if order:
        rule['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, rule)
-
-
def create_lrouter_nodnat_rule_v3(cluster, router_id, order=None,
                                  match_criteria=None):
    """Create a NoDestinationNatRule on an NSX 3.x logical router."""
    rule = {
        "type": "NoDestinationNatRule",
        "match": _create_nat_match_obj(**match_criteria),
    }
    if order:
        rule['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, rule)
-
-
def create_lrouter_snat_rule_v3(cluster, router_id, min_src_ip, max_src_ip,
                                order=None, match_criteria=None):
    """Create a SourceNatRule on an NSX 3.x logical router."""
    rule = _build_snat_rule_obj(min_src_ip, max_src_ip,
                                _create_nat_match_obj(**match_criteria))
    if order:
        rule['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, rule)
-
-
def create_lrouter_dnat_rule_v3(cluster, router_id, dst_ip, to_dst_port=None,
                                order=None, match_criteria=None):
    """Create a DestinationNatRule on an NSX 3.x logical router."""
    rule = {
        "to_destination_ip_address": dst_ip,
        "type": "DestinationNatRule",
        "match": _create_nat_match_obj(**match_criteria),
    }
    if to_dst_port:
        rule['to_destination_port'] = to_dst_port
    if order:
        rule['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, rule)
-
-
def delete_nat_rules_by_match(cluster, router_id, rule_type,
                              max_num_expected,
                              min_num_expected=0,
                              raise_on_len_mismatch=True,
                              **kwargs):
    """Delete the NAT rules of a router matching a type and criteria.

    :param cluster: the target NSX cluster
    :param router_id: identifier of the logical router
    :param rule_type: the NSX NAT rule type (e.g. "SourceNatRule")
    :param max_num_expected: maximum number of rules expected to match
    :param min_num_expected: minimum number of rules expected to match
    :param raise_on_len_mismatch: when the matched count is outside the
        expected range, raise NatRuleMismatch if True, else only warn
    :param kwargs: key/value pairs that must all appear in a rule's
        match dictionary for the rule to be deleted
    :returns: the number of rules deleted
    """
    # remove nat rules
    nat_rules = query_nat_rules(cluster, router_id)
    to_delete_ids = []
    for r in nat_rules:
        if (r['type'] != rule_type):
            continue

        # dict.items() instead of the Python 2-only iteritems() keeps
        # this module importable on Python 3 as well.
        for key, value in kwargs.items():
            if not (key in r['match'] and r['match'][key] == value):
                break
        else:
            # all criteria matched (loop finished without break)
            to_delete_ids.append(r['uuid'])
    num_rules_to_delete = len(to_delete_ids)
    if (num_rules_to_delete < min_num_expected or
        num_rules_to_delete > max_num_expected):
        if raise_on_len_mismatch:
            raise nsx_exc.NatRuleMismatch(actual_rules=num_rules_to_delete,
                                          min_rules=min_num_expected,
                                          max_rules=max_num_expected)
        else:
            LOG.warn(_LW("Found %(actual_rule_num)d matching NAT rules, which "
                         "is not in the expected range (%(min_exp_rule_num)d,"
                         "%(max_exp_rule_num)d)"),
                     {'actual_rule_num': num_rules_to_delete,
                      'min_exp_rule_num': min_num_expected,
                      'max_exp_rule_num': max_num_expected})

    for rule_id in to_delete_ids:
        delete_router_nat_rule(cluster, router_id, rule_id)
    # Return number of deleted rules - useful at least for
    # testing purposes
    return num_rules_to_delete
-
-
def delete_router_nat_rule(cluster, router_id, rule_id):
    """Delete a single NAT rule from the given logical router."""
    nsxlib.do_request(
        HTTP_DELETE,
        nsxlib._build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id),
        cluster=cluster)
-
-
def query_nat_rules(cluster, router_id, fields="*", filters=None):
    """Return all NAT rules configured on the given logical router."""
    uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
                                 parent_resource_id=router_id,
                                 fields=fields, filters=filters)
    return nsxlib.get_all_query_pages(uri, cluster)
-
-
# NOTE(salvatore-orlando): The following FIXME applies in general to
# each operation on list attributes.
# FIXME(salvatore-orlando): need a lock around the list of IPs on an iface
def update_lrouter_port_ips(cluster, lrouter_id, lport_id,
                            ips_to_add, ips_to_remove):
    """Add and remove IP addresses on a logical router port."""
    uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_id, lrouter_id)
    try:
        port = nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
        # TODO(salvatore-orlando): Enforce ips_to_add intersection with
        # ips_to_remove is empty
        addresses = ((set(port['ip_addresses']) - set(ips_to_remove))
                     | set(ips_to_add))
        # Set is not JSON serializable - convert to list
        port['ip_addresses'] = list(addresses)
        nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(port),
                          cluster=cluster)
    except exception.NotFound:
        # FIXME(salv-orlando):avoid raising different exception
        data = {'lport_id': lport_id, 'lrouter_id': lrouter_id}
        msg = (_("Router Port %(lport_id)s not found on router "
                 "%(lrouter_id)s") % data)
        LOG.exception(msg)
        raise nsx_exc.NsxPluginException(err_msg=msg)
    except api_exc.NsxApiException as e:
        msg = _("An exception occurred while updating IP addresses on a "
                "router logical port:%s") % e
        LOG.exception(msg)
        raise nsx_exc.NsxPluginException(err_msg=msg)
-
-
# Dispatch table for the versioning.versioned decorator: maps an API name
# to {major_version: {minor_version_or_DEFAULT_VERSION: implementation}}.
ROUTER_FUNC_DICT = {
    'create_lrouter': {
        2: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter, },
        3: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter,
            1: create_implicit_routing_lrouter_with_distribution,
            2: create_explicit_routing_lrouter, }, },
    'update_lrouter': {
        2: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter, },
        3: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter,
            2: update_explicit_routing_lrouter, }, },
    'create_lrouter_dnat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v3, }, },
    'create_lrouter_snat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v3, }, },
    'create_lrouter_nosnat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v3, }, },
    'create_lrouter_nodnat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v3, }, },
    # NOTE(review): get_default_route_explicit_routing_lrouter_v33 is
    # defined above but never referenced here; both entries point at the
    # v32 implementation. Presumably the v32 scan-based fallback is safe
    # for all 3.x versions, but confirm whether DEFAULT_VERSION should
    # map to the v33 (prefix-filtered) variant instead.
    'get_default_route_explicit_routing_lrouter': {
        3: {versioning.DEFAULT_VERSION:
            get_default_route_explicit_routing_lrouter_v32,
            2: get_default_route_explicit_routing_lrouter_v32, }, },
}
-
-
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter(cluster, *args, **kwargs):
    """Create a logical router; dispatched to a per-version implementation.

    Distributed routers require NSX 3.1 or later.
    """
    if not kwargs.get('distributed', None):
        return
    version = cluster.api_client.get_version()
    if (version.major, version.minor) < (3, 1):
        raise nsx_exc.InvalidVersion(version=version)
    return version
-
-
@versioning.versioned(ROUTER_FUNC_DICT)
def get_default_route_explicit_routing_lrouter(cluster, *args, **kwargs):
    """Version-dispatched facade; see ROUTER_FUNC_DICT for the targets."""
-
-
@versioning.versioned(ROUTER_FUNC_DICT)
def update_lrouter(cluster, *args, **kwargs):
    """Update a logical router; dispatched to a per-version implementation.

    Updating explicit routes requires NSX 3.2 or later.
    """
    if not kwargs.get('routes', None):
        return
    version = cluster.api_client.get_version()
    if (version.major, version.minor) < (3, 2):
        raise nsx_exc.InvalidVersion(version=version)
    return version
-
-
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_dnat_rule(cluster, *args, **kwargs):
    """Version-dispatched facade for DNAT rule creation."""
-
-
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_snat_rule(cluster, *args, **kwargs):
    """Version-dispatched facade for SNAT rule creation."""
-
-
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_nosnat_rule(cluster, *args, **kwargs):
    """Version-dispatched facade for no-SNAT rule creation."""
-
-
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_nodnat_rule(cluster, *args, **kwargs):
    """Version-dispatched facade for no-DNAT rule creation."""
diff --git a/neutron/plugins/vmware/nsxlib/secgroup.py b/neutron/plugins/vmware/nsxlib/secgroup.py
deleted file mode 100644 (file)
index 7c3c3ba..0000000
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright 2014 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.i18n import _LW
-from neutron.openstack.common import log
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
-
-HTTP_GET = "GET"
-HTTP_POST = "POST"
-HTTP_DELETE = "DELETE"
-HTTP_PUT = "PUT"
-
-SECPROF_RESOURCE = "security-profile"
-
-LOG = log.getLogger(__name__)
-
-
-def mk_body(**kwargs):
-    """Convenience function creates and dumps dictionary to string.
-
-    :param kwargs: the key/value pirs to be dumped into a json string.
-    :returns: a json string.
-    """
-    return jsonutils.dumps(kwargs, ensure_ascii=False)
-
-
-def query_security_profiles(cluster, fields=None, filters=None):
-    return nsxlib.get_all_query_pages(
-        nsxlib._build_uri_path(SECPROF_RESOURCE,
-                               fields=fields,
-                               filters=filters),
-        cluster)
-
-
-def create_security_profile(cluster, tenant_id, neutron_id, security_profile):
-    """Create a security profile on the NSX backend.
-
-    :param cluster: a NSX cluster object reference
-    :param tenant_id: identifier of the Neutron tenant
-    :param neutron_id: neutron security group identifier
-    :param security_profile: dictionary with data for
-    configuring the NSX security profile.
-    """
-    path = "/ws.v1/security-profile"
-    # Allow all dhcp responses and all ingress traffic
-    hidden_rules = {'logical_port_egress_rules':
-                    [{'ethertype': 'IPv4',
-                      'protocol': constants.PROTO_NUM_UDP,
-                      'port_range_min': constants.DHCP_RESPONSE_PORT,
-                      'port_range_max': constants.DHCP_RESPONSE_PORT,
-                      'ip_prefix': '0.0.0.0/0'}],
-                    'logical_port_ingress_rules':
-                    [{'ethertype': 'IPv4'},
-                     {'ethertype': 'IPv6'}]}
-    display_name = utils.check_and_truncate(security_profile.get('name'))
-    # NOTE(salv-orlando): neutron-id tags are prepended with 'q' for
-    # historical reasons
-    body = mk_body(
-        tags=utils.get_tags(os_tid=tenant_id, q_sec_group_id=neutron_id),
-        display_name=display_name,
-        logical_port_ingress_rules=(
-            hidden_rules['logical_port_ingress_rules']),
-        logical_port_egress_rules=hidden_rules['logical_port_egress_rules']
-    )
-    rsp = nsxlib.do_request(HTTP_POST, path, body, cluster=cluster)
-    if security_profile.get('name') == 'default':
-        # If security group is default allow ip traffic between
-        # members of the same security profile is allowed and ingress traffic
-        # from the switch
-        rules = {'logical_port_egress_rules': [{'ethertype': 'IPv4',
-                                                'profile_uuid': rsp['uuid']},
-                                               {'ethertype': 'IPv6',
-                                                'profile_uuid': rsp['uuid']}],
-                 'logical_port_ingress_rules': [{'ethertype': 'IPv4'},
-                                                {'ethertype': 'IPv6'}]}
-
-        update_security_group_rules(cluster, rsp['uuid'], rules)
-    LOG.debug("Created Security Profile: %s", rsp)
-    return rsp
-
-
-def update_security_group_rules(cluster, spid, rules):
-    path = "/ws.v1/security-profile/%s" % spid
-
-    # Allow all dhcp responses in
-    rules['logical_port_egress_rules'].append(
-        {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP,
-         'port_range_min': constants.DHCP_RESPONSE_PORT,
-         'port_range_max': constants.DHCP_RESPONSE_PORT,
-         'ip_prefix': '0.0.0.0/0'})
-    # If there are no ingress rules add bunk rule to drop all ingress traffic
-    if not rules['logical_port_ingress_rules']:
-        rules['logical_port_ingress_rules'].append(
-            {'ethertype': 'IPv4', 'ip_prefix': '127.0.0.1/32'})
-    try:
-        body = mk_body(
-            logical_port_ingress_rules=rules['logical_port_ingress_rules'],
-            logical_port_egress_rules=rules['logical_port_egress_rules'])
-        rsp = nsxlib.do_request(HTTP_PUT, path, body, cluster=cluster)
-    except exceptions.NotFound as e:
-        LOG.error(nsxlib.format_exception("Unknown", e, locals()))
-        #FIXME(salvatore-orlando): This should not raise NeutronException
-        raise exceptions.NeutronException()
-    LOG.debug("Updated Security Profile: %s", rsp)
-    return rsp
-
-
-def update_security_profile(cluster, spid, name):
-    return nsxlib.do_request(
-        HTTP_PUT,
-        nsxlib._build_uri_path(SECPROF_RESOURCE, resource_id=spid),
-        jsonutils.dumps({"display_name": utils.check_and_truncate(name)}),
-        cluster=cluster)
-
-
-def delete_security_profile(cluster, spid):
-    path = "/ws.v1/security-profile/%s" % spid
-
-    try:
-        nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
-    except exceptions.NotFound:
-        with excutils.save_and_reraise_exception():
-            # This is not necessarily an error condition
-            LOG.warn(_LW("Unable to find security profile %s on NSX backend"),
-                     spid)
diff --git a/neutron/plugins/vmware/nsxlib/switch.py b/neutron/plugins/vmware/nsxlib/switch.py
deleted file mode 100644 (file)
index e2eb758..0000000
+++ /dev/null
@@ -1,398 +0,0 @@
-# Copyright 2014 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from oslo_config import cfg
-from oslo_serialization import jsonutils
-
-from neutron.common import constants
-from neutron.common import exceptions as exception
-from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import log
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
-
-HTTP_GET = "GET"
-HTTP_POST = "POST"
-HTTP_DELETE = "DELETE"
-HTTP_PUT = "PUT"
-
-LSWITCH_RESOURCE = "lswitch"
-LSWITCHPORT_RESOURCE = "lport/%s" % LSWITCH_RESOURCE
-
-LOG = log.getLogger(__name__)
-
-
-def _configure_extensions(lport_obj, mac_address, fixed_ips,
-                          port_security_enabled, security_profiles,
-                          queue_id, mac_learning_enabled,
-                          allowed_address_pairs):
-    lport_obj['allowed_address_pairs'] = []
-    if port_security_enabled:
-        for fixed_ip in fixed_ips:
-            ip_address = fixed_ip.get('ip_address')
-            if ip_address:
-                lport_obj['allowed_address_pairs'].append(
-                    {'mac_address': mac_address, 'ip_address': ip_address})
-        # add address pair allowing src_ip 0.0.0.0 to leave
-        # this is required for outgoing dhcp request
-        lport_obj["allowed_address_pairs"].append(
-            {"mac_address": mac_address,
-             "ip_address": "0.0.0.0"})
-    lport_obj['security_profiles'] = list(security_profiles or [])
-    lport_obj['queue_uuid'] = queue_id
-    if mac_learning_enabled is not None:
-        lport_obj["mac_learning"] = mac_learning_enabled
-        lport_obj["type"] = "LogicalSwitchPortConfig"
-    for address_pair in list(allowed_address_pairs or []):
-        lport_obj['allowed_address_pairs'].append(
-            {'mac_address': address_pair['mac_address'],
-             'ip_address': address_pair['ip_address']})
-
-
-def get_lswitch_by_id(cluster, lswitch_id):
-    try:
-        lswitch_uri_path = nsxlib._build_uri_path(
-            LSWITCH_RESOURCE, lswitch_id,
-            relations="LogicalSwitchStatus")
-        return nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
-    except exception.NotFound:
-        # FIXME(salv-orlando): this should not raise a neutron exception
-        raise exception.NetworkNotFound(net_id=lswitch_id)
-
-
-def get_lswitches(cluster, neutron_net_id):
-
-    def lookup_switches_by_tag():
-        # Fetch extra logical switches
-        lswitch_query_path = nsxlib._build_uri_path(
-            LSWITCH_RESOURCE,
-            fields="uuid,display_name,tags,lport_count",
-            relations="LogicalSwitchStatus",
-            filters={'tag': neutron_net_id,
-                     'tag_scope': 'quantum_net_id'})
-        return nsxlib.get_all_query_pages(lswitch_query_path, cluster)
-
-    lswitch_uri_path = nsxlib._build_uri_path(LSWITCH_RESOURCE, neutron_net_id,
-                                              relations="LogicalSwitchStatus")
-    results = []
-    try:
-        ls = nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
-        results.append(ls)
-        for tag in ls['tags']:
-            if (tag['scope'] == "multi_lswitch" and
-                tag['tag'] == "True"):
-                results.extend(lookup_switches_by_tag())
-    except exception.NotFound:
-        # This is legit if the neutron network was created using
-        # a post-Havana version of the plugin
-        results.extend(lookup_switches_by_tag())
-    if results:
-        return results
-    else:
-        raise exception.NetworkNotFound(net_id=neutron_net_id)
-
-
-def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
-                   transport_zones_config,
-                   shared=None,
-                   **kwargs):
-    # The tag scope adopts a slightly different naming convention for
-    # historical reasons
-    lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
-                   "transport_zones": transport_zones_config,
-                   "replication_mode": cfg.CONF.NSX.replication_mode,
-                   "tags": utils.get_tags(os_tid=tenant_id,
-                                          quantum_net_id=neutron_net_id)}
-    # TODO(salv-orlando): Now that we have async status synchronization
-    # this tag is perhaps not needed anymore
-    if shared:
-        lswitch_obj["tags"].append({"tag": "true",
-                                    "scope": "shared"})
-    if "tags" in kwargs:
-        lswitch_obj["tags"].extend(kwargs["tags"])
-    uri = nsxlib._build_uri_path(LSWITCH_RESOURCE)
-    lswitch = nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(lswitch_obj),
-                                cluster=cluster)
-    LOG.debug("Created logical switch: %s", lswitch['uuid'])
-    return lswitch
-
-
-def update_lswitch(cluster, lswitch_id, display_name,
-                   tenant_id=None, **kwargs):
-    uri = nsxlib._build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
-    lswitch_obj = {"display_name": utils.check_and_truncate(display_name)}
-    # NOTE: tag update will not 'merge' existing tags with new ones.
-    tags = []
-    if tenant_id:
-        tags = utils.get_tags(os_tid=tenant_id)
-    # The 'tags' kwarg might existing and be None
-    tags.extend(kwargs.get('tags') or [])
-    if tags:
-        lswitch_obj['tags'] = tags
-    try:
-        return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(lswitch_obj),
-                                 cluster=cluster)
-    except exception.NotFound:
-        LOG.exception(_LE("Network not found."))
-        raise exception.NetworkNotFound(net_id=lswitch_id)
-
-
-def delete_network(cluster, net_id, lswitch_id):
-    delete_networks(cluster, net_id, [lswitch_id])
-
-
-#TODO(salvatore-orlando): Simplify and harmonize
-def delete_networks(cluster, net_id, lswitch_ids):
-    for ls_id in lswitch_ids:
-        path = "/ws.v1/lswitch/%s" % ls_id
-        try:
-            nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
-        except exception.NotFound:
-            LOG.exception(_LE("Network not found."))
-            raise exception.NetworkNotFound(net_id=ls_id)
-
-
-def query_lswitch_lports(cluster, ls_uuid, fields="*",
-                         filters=None, relations=None):
-    # Fix filter for attachments
-    if filters and "attachment" in filters:
-        filters['attachment_vif_uuid'] = filters["attachment"]
-        del filters['attachment']
-    uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
-                                 parent_resource_id=ls_uuid,
-                                 fields=fields,
-                                 filters=filters,
-                                 relations=relations)
-    return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results']
-
-
-def delete_port(cluster, switch, port):
-    uri = "/ws.v1/lswitch/" + switch + "/lport/" + port
-    try:
-        nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
-    except exception.NotFound:
-        LOG.exception(_LE("Port or Network not found"))
-        raise exception.PortNotFoundOnNetwork(
-            net_id=switch, port_id=port)
-    except api_exc.NsxApiException:
-        raise exception.NeutronException()
-
-
-def get_ports(cluster, networks=None, devices=None, tenants=None):
-    vm_filter_obsolete = ""
-    vm_filter = ""
-    tenant_filter = ""
-    # This is used when calling delete_network. Neutron checks to see if
-    # the network has any ports.
-    if networks:
-        # FIXME (Aaron) If we get more than one network_id this won't work
-        lswitch = networks[0]
-    else:
-        lswitch = "*"
-    if devices:
-        for device_id in devices:
-            vm_filter_obsolete = '&'.join(
-                ["tag_scope=vm_id",
-                 "tag=%s" % utils.device_id_to_vm_id(device_id,
-                                                     obfuscate=True),
-                 vm_filter_obsolete])
-            vm_filter = '&'.join(
-                ["tag_scope=vm_id",
-                 "tag=%s" % utils.device_id_to_vm_id(device_id),
-                 vm_filter])
-    if tenants:
-        for tenant in tenants:
-            tenant_filter = '&'.join(
-                ["tag_scope=os_tid",
-                 "tag=%s" % tenant,
-                 tenant_filter])
-
-    nsx_lports = {}
-    lport_fields_str = ("tags,admin_status_enabled,display_name,"
-                        "fabric_status_up")
-    try:
-        lport_query_path_obsolete = (
-            "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
-            "&relations=LogicalPortStatus" %
-            (lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter))
-        lport_query_path = (
-            "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
-            "&relations=LogicalPortStatus" %
-            (lswitch, lport_fields_str, vm_filter, tenant_filter))
-        try:
-            # NOTE(armando-migliaccio): by querying with obsolete tag first
-            # current deployments won't take the performance hit of a double
-            # call. In release L-** or M-**, we might want to swap the calls
-            # as it's likely that ports with the new tag would outnumber the
-            # ones with the old tag
-            ports = nsxlib.get_all_query_pages(lport_query_path_obsolete,
-                                               cluster)
-            if not ports:
-                ports = nsxlib.get_all_query_pages(lport_query_path, cluster)
-        except exception.NotFound:
-            LOG.warn(_LW("Lswitch %s not found in NSX"), lswitch)
-            ports = None
-
-        if ports:
-            for port in ports:
-                for tag in port["tags"]:
-                    if tag["scope"] == "q_port_id":
-                        nsx_lports[tag["tag"]] = port
-    except Exception:
-        err_msg = _("Unable to get ports")
-        LOG.exception(err_msg)
-        raise nsx_exc.NsxPluginException(err_msg=err_msg)
-    return nsx_lports
-
-
-def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
-    """Get port by neutron tag.
-
-    Returns the NSX UUID of the logical port with tag q_port_id equal to
-    neutron_port_id or None if the port is not Found.
-    """
-    uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
-                                 parent_resource_id=lswitch_uuid,
-                                 fields='uuid',
-                                 filters={'tag': neutron_port_id,
-                                          'tag_scope': 'q_port_id'})
-    LOG.debug("Looking for port with q_port_id tag '%(neutron_port_id)s' "
-              "on: '%(lswitch_uuid)s'",
-              {'neutron_port_id': neutron_port_id,
-               'lswitch_uuid': lswitch_uuid})
-    res = nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
-    num_results = len(res["results"])
-    if num_results >= 1:
-        if num_results > 1:
-            LOG.warn(_LW("Found '%(num_ports)d' ports with "
-                         "q_port_id tag: '%(neutron_port_id)s'. "
-                         "Only 1 was expected."),
-                     {'num_ports': num_results,
-                      'neutron_port_id': neutron_port_id})
-        return res["results"][0]
-
-
-def get_port(cluster, network, port, relations=None):
-    LOG.info(_LI("get_port() %(network)s %(port)s"),
-             {'network': network, 'port': port})
-    uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
-    if relations:
-        uri += "relations=%s" % relations
-    try:
-        return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
-    except exception.NotFound:
-        LOG.exception(_LE("Port or Network not found."))
-        raise exception.PortNotFoundOnNetwork(
-            port_id=port, net_id=network)
-
-
-def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
-                display_name, device_id, admin_status_enabled,
-                mac_address=None, fixed_ips=None, port_security_enabled=None,
-                security_profiles=None, queue_id=None,
-                mac_learning_enabled=None, allowed_address_pairs=None):
-    lport_obj = dict(
-        admin_status_enabled=admin_status_enabled,
-        display_name=utils.check_and_truncate(display_name),
-        tags=utils.get_tags(os_tid=tenant_id,
-                            q_port_id=neutron_port_id,
-                            vm_id=utils.device_id_to_vm_id(device_id)))
-
-    _configure_extensions(lport_obj, mac_address, fixed_ips,
-                          port_security_enabled, security_profiles,
-                          queue_id, mac_learning_enabled,
-                          allowed_address_pairs)
-
-    path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid
-    try:
-        result = nsxlib.do_request(HTTP_PUT, path, jsonutils.dumps(lport_obj),
-                                   cluster=cluster)
-        LOG.debug("Updated logical port %(result)s "
-                  "on logical switch %(uuid)s",
-                  {'result': result['uuid'], 'uuid': lswitch_uuid})
-        return result
-    except exception.NotFound:
-        LOG.exception(_LE("Port or Network not found."))
-        raise exception.PortNotFoundOnNetwork(
-            port_id=lport_uuid, net_id=lswitch_uuid)
-
-
-def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
-                 display_name, device_id, admin_status_enabled,
-                 mac_address=None, fixed_ips=None, port_security_enabled=None,
-                 security_profiles=None, queue_id=None,
-                 mac_learning_enabled=None, allowed_address_pairs=None):
-    """Creates a logical port on the assigned logical switch."""
-    display_name = utils.check_and_truncate(display_name)
-    lport_obj = dict(
-        admin_status_enabled=admin_status_enabled,
-        display_name=display_name,
-        tags=utils.get_tags(os_tid=tenant_id,
-                            q_port_id=neutron_port_id,
-                            vm_id=utils.device_id_to_vm_id(device_id))
-    )
-
-    _configure_extensions(lport_obj, mac_address, fixed_ips,
-                          port_security_enabled, security_profiles,
-                          queue_id, mac_learning_enabled,
-                          allowed_address_pairs)
-
-    path = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
-                                  parent_resource_id=lswitch_uuid)
-    result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
-                               cluster=cluster)
-
-    LOG.debug("Created logical port %(result)s on logical switch %(uuid)s",
-              {'result': result['uuid'], 'uuid': lswitch_uuid})
-    return result
-
-
-def get_port_status(cluster, lswitch_id, port_id):
-    """Retrieve the operational status of the port."""
-    try:
-        r = nsxlib.do_request(HTTP_GET,
-                              "/ws.v1/lswitch/%s/lport/%s/status" %
-                              (lswitch_id, port_id), cluster=cluster)
-    except exception.NotFound:
-        LOG.exception(_LE("Port not found."))
-        raise exception.PortNotFoundOnNetwork(
-            port_id=port_id, net_id=lswitch_id)
-    if r['link_status_up'] is True:
-        return constants.PORT_STATUS_ACTIVE
-    else:
-        return constants.PORT_STATUS_DOWN
-
-
-def plug_interface(cluster, lswitch_id, lport_id, att_obj):
-    return nsxlib.do_request(HTTP_PUT,
-                             nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
-                                                    lport_id, lswitch_id,
-                                                    is_attachment=True),
-                             jsonutils.dumps(att_obj),
-                             cluster=cluster)
-
-
-def plug_vif_interface(
-    cluster, lswitch_id, port_id, port_type, attachment=None):
-    """Plug a VIF Attachment object in a logical port."""
-    lport_obj = {}
-    if attachment:
-        lport_obj["vif_uuid"] = attachment
-
-    lport_obj["type"] = port_type
-    return plug_interface(cluster, lswitch_id, port_id, lport_obj)
diff --git a/neutron/plugins/vmware/nsxlib/versioning.py b/neutron/plugins/vmware/nsxlib/versioning.py
deleted file mode 100644 (file)
index 0845a7d..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2014 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import inspect
-
-from neutron.plugins.vmware.api_client import exception
-
-DEFAULT_VERSION = -1
-
-
-def versioned(func_table):
-
-    def versioned_function(wrapped_func):
-        func_name = wrapped_func.__name__
-
-        def dispatch_versioned_function(cluster, *args, **kwargs):
-            # Call the wrapper function, in case we need to
-            # run validation checks regarding versions. It
-            # should return the NSX version
-            v = (wrapped_func(cluster, *args, **kwargs) or
-                 cluster.api_client.get_version())
-            func = get_function_by_version(func_table, func_name, v)
-            func_kwargs = kwargs
-            arg_spec = inspect.getargspec(func)
-            if not arg_spec.keywords and not arg_spec.varargs:
-                # drop args unknown to function from func_args
-                arg_set = set(func_kwargs.keys())
-                for arg in arg_set - set(arg_spec.args):
-                    del func_kwargs[arg]
-            # NOTE(salvatore-orlando): shall we fail here if a required
-            # argument is not passed, or let the called function raise?
-            return func(cluster, *args, **func_kwargs)
-
-        return dispatch_versioned_function
-    return versioned_function
-
-
-def get_function_by_version(func_table, func_name, ver):
-    if ver:
-        if ver.major not in func_table[func_name]:
-            major = max(func_table[func_name].keys())
-            minor = max(func_table[func_name][major].keys())
-            if major > ver.major:
-                raise NotImplementedError(_("Operation may not be supported"))
-        else:
-            major = ver.major
-            minor = ver.minor
-            if ver.minor not in func_table[func_name][major]:
-                minor = DEFAULT_VERSION
-        return func_table[func_name][major][minor]
-    else:
-        msg = _('NSX version is not set. Unable to complete request '
-                'correctly. Check log for NSX communication errors.')
-        raise exception.ServiceUnavailable(message=msg)
index abe346876cc0baa42efa28d1ae5f1bc0b3015387..c4efeb22001987c5f1a4b5cc5ffe329595e9e3ba 100644 (file)
@@ -15,6 +15,9 @@
 #    under the License.
 #
 
-from neutron.plugins.vmware.plugins import base
+from vmware_nsx.neutron.plugins.vmware.plugins import base as nsx_mh
 
-NsxPlugin = base.NsxPluginV2
+NsxMhPlugin = nsx_mh.NsxPluginV2
+# The 'NsxPlugin' name will be deprecated in Liberty
+# and replaced by the 'NsxMhPlugin' name
+NsxPlugin = NsxMhPlugin
diff --git a/neutron/plugins/vmware/plugins/__init__.py b/neutron/plugins/vmware/plugins/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/vmware/plugins/base.py b/neutron/plugins/vmware/plugins/base.py
deleted file mode 100644 (file)
index 853f1bb..0000000
+++ /dev/null
@@ -1,2500 +0,0 @@
-# Copyright 2012 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import uuid
-
-from oslo_concurrency import lockutils
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_utils import excutils
-from sqlalchemy import exc as sql_exc
-from sqlalchemy.orm import exc as sa_exc
-import webob.exc
-
-from neutron.api import extensions as neutron_extensions
-from neutron.api.v2 import attributes as attr
-from neutron.api.v2 import base
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron.common import utils
-from neutron import context as q_context
-from neutron.db import agentschedulers_db
-from neutron.db import allowedaddresspairs_db as addr_pair_db
-from neutron.db import db_base_plugin_v2
-from neutron.db import external_net_db
-from neutron.db import extraroute_db
-from neutron.db import l3_db
-from neutron.db import l3_dvr_db
-from neutron.db import l3_gwmode_db
-from neutron.db import models_v2
-from neutron.db import portbindings_db
-from neutron.db import portsecurity_db
-from neutron.db import quota_db  # noqa
-from neutron.db import securitygroups_db
-from neutron.extensions import allowedaddresspairs as addr_pair
-from neutron.extensions import external_net as ext_net_extn
-from neutron.extensions import extraroute
-from neutron.extensions import l3
-from neutron.extensions import multiprovidernet as mpnet
-from neutron.extensions import portbindings as pbin
-from neutron.extensions import portsecurity as psec
-from neutron.extensions import providernet as pnet
-from neutron.extensions import securitygroup as ext_sg
-from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.common import constants as plugin_const
-from neutron.plugins import vmware
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import config  # noqa
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import nsx_utils
-from neutron.plugins.vmware.common import securitygroups as sg_utils
-from neutron.plugins.vmware.common import sync
-from neutron.plugins.vmware.common import utils as c_utils
-from neutron.plugins.vmware.dbexts import db as nsx_db
-from neutron.plugins.vmware.dbexts import maclearning as mac_db
-from neutron.plugins.vmware.dbexts import networkgw_db
-from neutron.plugins.vmware.dbexts import nsx_models
-from neutron.plugins.vmware.dbexts import qos_db
-from neutron.plugins.vmware import dhcpmeta_modes
-from neutron.plugins.vmware.extensions import maclearning as mac_ext
-from neutron.plugins.vmware.extensions import networkgw
-from neutron.plugins.vmware.extensions import qos
-from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
-from neutron.plugins.vmware.nsxlib import queue as queuelib
-from neutron.plugins.vmware.nsxlib import router as routerlib
-from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
-from neutron.plugins.vmware.nsxlib import switch as switchlib
-
-LOG = logging.getLogger(__name__)
-
-NSX_NOSNAT_RULES_ORDER = 10
-NSX_FLOATINGIP_NAT_RULES_ORDER = 224
-NSX_EXTGW_NAT_RULES_ORDER = 255
-NSX_DEFAULT_NEXTHOP = '1.1.1.1'
-
-
-class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
-                  agentschedulers_db.DhcpAgentSchedulerDbMixin,
-                  db_base_plugin_v2.NeutronDbPluginV2,
-                  dhcpmeta_modes.DhcpMetadataAccess,
-                  l3_dvr_db.L3_NAT_with_dvr_db_mixin,
-                  external_net_db.External_net_db_mixin,
-                  extraroute_db.ExtraRoute_db_mixin,
-                  l3_gwmode_db.L3_NAT_db_mixin,
-                  mac_db.MacLearningDbMixin,
-                  networkgw_db.NetworkGatewayMixin,
-                  portbindings_db.PortBindingMixin,
-                  portsecurity_db.PortSecurityDbMixin,
-                  qos_db.QoSDbMixin,
-                  securitygroups_db.SecurityGroupDbMixin):
-
-    supported_extension_aliases = ["allowed-address-pairs",
-                                   "binding",
-                                   "dvr",
-                                   "ext-gw-mode",
-                                   "extraroute",
-                                   "mac-learning",
-                                   "multi-provider",
-                                   "network-gateway",
-                                   "nvp-qos",
-                                   "port-security",
-                                   "provider",
-                                   "qos-queue",
-                                   "quotas",
-                                   "external-net",
-                                   "router",
-                                   "security-group"]
-
-    __native_bulk_support = True
-    __native_pagination_support = True
-    __native_sorting_support = True
-
-    # Map nova zones to cluster for easy retrieval
-    novazone_cluster_map = {}
-
-    def __init__(self):
-        super(NsxPluginV2, self).__init__()
-        config.validate_config_options()
-        # TODO(salv-orlando): Replace These dicts with
-        # collections.defaultdict for better handling of default values
-        # Routines for managing logical ports in NSX
-        self.port_special_owners = [l3_db.DEVICE_OWNER_ROUTER_GW,
-                                    l3_db.DEVICE_OWNER_ROUTER_INTF]
-        self._port_drivers = {
-            'create': {l3_db.DEVICE_OWNER_ROUTER_GW:
-                       self._nsx_create_ext_gw_port,
-                       l3_db.DEVICE_OWNER_FLOATINGIP:
-                       self._nsx_create_fip_port,
-                       l3_db.DEVICE_OWNER_ROUTER_INTF:
-                       self._nsx_create_router_port,
-                       networkgw_db.DEVICE_OWNER_NET_GW_INTF:
-                       self._nsx_create_l2_gw_port,
-                       'default': self._nsx_create_port},
-            'delete': {l3_db.DEVICE_OWNER_ROUTER_GW:
-                       self._nsx_delete_ext_gw_port,
-                       l3_db.DEVICE_OWNER_ROUTER_INTF:
-                       self._nsx_delete_router_port,
-                       l3_db.DEVICE_OWNER_FLOATINGIP:
-                       self._nsx_delete_fip_port,
-                       networkgw_db.DEVICE_OWNER_NET_GW_INTF:
-                       self._nsx_delete_port,
-                       'default': self._nsx_delete_port}
-        }
-
-        neutron_extensions.append_api_extensions_path([vmware.NSX_EXT_PATH])
-        self.nsx_opts = cfg.CONF.NSX
-        self.nsx_sync_opts = cfg.CONF.NSX_SYNC
-        self.cluster = nsx_utils.create_nsx_cluster(
-            cfg.CONF,
-            self.nsx_opts.concurrent_connections,
-            self.nsx_opts.nsx_gen_timeout)
-
-        self.base_binding_dict = {
-            pbin.VIF_TYPE: pbin.VIF_TYPE_OVS,
-            pbin.VIF_DETAILS: {
-                # TODO(rkukura): Replace with new VIF security details
-                pbin.CAP_PORT_FILTER:
-                'security-group' in self.supported_extension_aliases}}
-
-        self._extend_fault_map()
-        self.setup_dhcpmeta_access()
-        # Set this flag to false as the default gateway has not
-        # been yet updated from the config file
-        self._is_default_net_gw_in_sync = False
-        # Create a synchronizer instance for backend sync
-        self._synchronizer = sync.NsxSynchronizer(
-            self.safe_reference, self.cluster,
-            self.nsx_sync_opts.state_sync_interval,
-            self.nsx_sync_opts.min_sync_req_delay,
-            self.nsx_sync_opts.min_chunk_size,
-            self.nsx_sync_opts.max_random_sync_delay)
-        self.start_periodic_dhcp_agent_status_check()
-
-    def _ensure_default_network_gateway(self):
-        if self._is_default_net_gw_in_sync:
-            return
-        # Add the gw in the db as default, and unset any previous default
-        def_l2_gw_uuid = self.cluster.default_l2_gw_service_uuid
-        try:
-            ctx = q_context.get_admin_context()
-            self._unset_default_network_gateways(ctx)
-            if not def_l2_gw_uuid:
-                return
-            try:
-                def_network_gw = self._get_network_gateway(ctx,
-                                                           def_l2_gw_uuid)
-            except networkgw_db.GatewayNotFound:
-                # Create in DB only - don't go to backend
-                def_gw_data = {'id': def_l2_gw_uuid,
-                               'name': 'default L2 gateway service',
-                               'devices': []}
-                gw_res_name = networkgw.GATEWAY_RESOURCE_NAME.replace('-', '_')
-                def_network_gw = super(
-                    NsxPluginV2, self).create_network_gateway(
-                        ctx, {gw_res_name: def_gw_data})
-            # In any case set is as default
-            self._set_default_network_gateway(ctx, def_network_gw['id'])
-            # Ensure this method is executed only once
-            self._is_default_net_gw_in_sync = True
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Unable to process default l2 gw service: "
-                                  "%s"),
-                              def_l2_gw_uuid)
-
-    def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None):
-        """Build ip_addresses data structure for logical router port.
-
-        No need to perform validation on IPs - this has already been
-        done in the l3_db mixin class.
-        """
-        ip_addresses = []
-        for ip in fixed_ips:
-            if not subnet_ids or (ip['subnet_id'] in subnet_ids):
-                subnet = self._get_subnet(context, ip['subnet_id'])
-                ip_prefix = '%s/%s' % (ip['ip_address'],
-                                       subnet['cidr'].split('/')[1])
-                ip_addresses.append(ip_prefix)
-        return ip_addresses
-
-    def _create_and_attach_router_port(self, cluster, context,
-                                       nsx_router_id, port_data,
-                                       attachment_type, attachment,
-                                       attachment_vlan=None,
-                                       subnet_ids=None):
-        # Use a fake IP address if gateway port is not 'real'
-        ip_addresses = (port_data.get('fake_ext_gw') and
-                        ['0.0.0.0/31'] or
-                        self._build_ip_address_list(context,
-                                                    port_data['fixed_ips'],
-                                                    subnet_ids))
-        try:
-            lrouter_port = routerlib.create_router_lport(
-                cluster, nsx_router_id, port_data.get('tenant_id', 'fake'),
-                port_data.get('id', 'fake'), port_data.get('name', 'fake'),
-                port_data.get('admin_state_up', True), ip_addresses,
-                port_data.get('mac_address'))
-            LOG.debug("Created NSX router port:%s", lrouter_port['uuid'])
-        except api_exc.NsxApiException:
-            LOG.exception(_LE("Unable to create port on NSX logical router "
-                              "%s"),
-                          nsx_router_id)
-            raise nsx_exc.NsxPluginException(
-                err_msg=_("Unable to create logical router port for neutron "
-                          "port id %(port_id)s on router %(nsx_router_id)s") %
-                {'port_id': port_data.get('id'),
-                 'nsx_router_id': nsx_router_id})
-        self._update_router_port_attachment(cluster, context, nsx_router_id,
-                                            port_data, lrouter_port['uuid'],
-                                            attachment_type, attachment,
-                                            attachment_vlan)
-        return lrouter_port
-
-    def _update_router_gw_info(self, context, router_id, info):
-        # NOTE(salvatore-orlando): We need to worry about rollback of NSX
-        # configuration in case of failures in the process
-        # Ref. LP bug 1102301
-        router = self._get_router(context, router_id)
-        # Check whether SNAT rule update should be triggered
-        # NSX also supports multiple external networks so there is also
-        # the possibility that NAT rules should be replaced
-        current_ext_net_id = router.gw_port_id and router.gw_port.network_id
-        new_ext_net_id = info and info.get('network_id')
-        # SNAT should be enabled unless info['enable_snat'] is
-        # explicitly set to false
-        enable_snat = new_ext_net_id and info.get('enable_snat', True)
-        # Remove if ext net removed, changed, or if snat disabled
-        remove_snat_rules = (current_ext_net_id and
-                             new_ext_net_id != current_ext_net_id or
-                             router.enable_snat and not enable_snat)
-        # Add rules if snat is enabled, and if either the external network
-        # changed or snat was previously disabled
-        # NOTE: enable_snat == True implies new_ext_net_id != None
-        add_snat_rules = (enable_snat and
-                          (new_ext_net_id != current_ext_net_id or
-                           not router.enable_snat))
-        router = super(NsxPluginV2, self)._update_router_gw_info(
-            context, router_id, info, router=router)
-        # Add/Remove SNAT rules as needed
-        # Create an elevated context for dealing with metadata access
-        # cidrs which are created within admin context
-        ctx_elevated = context.elevated()
-        if remove_snat_rules or add_snat_rules:
-            cidrs = self._find_router_subnets_cidrs(ctx_elevated, router_id)
-        nsx_router_id = nsx_utils.get_nsx_router_id(
-            context.session, self.cluster, router_id)
-        if remove_snat_rules:
-            # Be safe and concede NAT rules might not exist.
-            # Therefore use min_num_expected=0
-            for cidr in cidrs:
-                routerlib.delete_nat_rules_by_match(
-                    self.cluster, nsx_router_id, "SourceNatRule",
-                    max_num_expected=1, min_num_expected=0,
-                    raise_on_len_mismatch=False,
-                    source_ip_addresses=cidr)
-        if add_snat_rules:
-            ip_addresses = self._build_ip_address_list(
-                ctx_elevated, router.gw_port['fixed_ips'])
-            # Set the SNAT rule for each subnet (only first IP)
-            for cidr in cidrs:
-                cidr_prefix = int(cidr.split('/')[1])
-                routerlib.create_lrouter_snat_rule(
-                    self.cluster, nsx_router_id,
-                    ip_addresses[0].split('/')[0],
-                    ip_addresses[0].split('/')[0],
-                    order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix,
-                    match_criteria={'source_ip_addresses': cidr})
-
-    def _update_router_port_attachment(self, cluster, context,
-                                       nsx_router_id, port_data,
-                                       nsx_router_port_id,
-                                       attachment_type,
-                                       attachment,
-                                       attachment_vlan=None):
-        if not nsx_router_port_id:
-            nsx_router_port_id = self._find_router_gw_port(context, port_data)
-        try:
-            routerlib.plug_router_port_attachment(cluster, nsx_router_id,
-                                                  nsx_router_port_id,
-                                                  attachment,
-                                                  attachment_type,
-                                                  attachment_vlan)
-            LOG.debug("Attached %(att)s to NSX router port %(port)s",
-                      {'att': attachment, 'port': nsx_router_port_id})
-        except api_exc.NsxApiException:
-            # Must remove NSX logical port
-            routerlib.delete_router_lport(cluster, nsx_router_id,
-                                          nsx_router_port_id)
-            LOG.exception(_LE("Unable to plug attachment in NSX logical "
-                              "router port %(r_port_id)s, associated with "
-                              "Neutron %(q_port_id)s"),
-                          {'r_port_id': nsx_router_port_id,
-                           'q_port_id': port_data.get('id')})
-            raise nsx_exc.NsxPluginException(
-                err_msg=(_("Unable to plug attachment in router port "
-                           "%(r_port_id)s for neutron port id %(q_port_id)s "
-                           "on router %(router_id)s") %
-                         {'r_port_id': nsx_router_port_id,
-                          'q_port_id': port_data.get('id'),
-                          'router_id': nsx_router_id}))
-
-    def _get_port_by_device_id(self, context, device_id, device_owner):
-        """Retrieve ports associated with a specific device id.
-
-        Used for retrieving all neutron ports attached to a given router.
-        """
-        port_qry = context.session.query(models_v2.Port)
-        return port_qry.filter_by(
-            device_id=device_id,
-            device_owner=device_owner,).all()
-
-    def _find_router_subnets_cidrs(self, context, router_id):
-        """Retrieve subnets attached to the specified router."""
-        ports = self._get_port_by_device_id(context, router_id,
-                                            l3_db.DEVICE_OWNER_ROUTER_INTF)
-        # No need to check for overlapping CIDRs
-        cidrs = []
-        for port in ports:
-            for ip in port.get('fixed_ips', []):
-                cidrs.append(self._get_subnet(context,
-                                              ip.subnet_id).cidr)
-        return cidrs
-
-    def _nsx_find_lswitch_for_port(self, context, port_data):
-        network = self._get_network(context, port_data['network_id'])
-        network_bindings = nsx_db.get_network_bindings(
-            context.session, port_data['network_id'])
-        max_ports = self.nsx_opts.max_lp_per_overlay_ls
-        allow_extra_lswitches = False
-        for network_binding in network_bindings:
-            if network_binding.binding_type in (c_utils.NetworkTypes.FLAT,
-                                                c_utils.NetworkTypes.VLAN):
-                max_ports = self.nsx_opts.max_lp_per_bridged_ls
-                allow_extra_lswitches = True
-                break
-        try:
-            return self._handle_lswitch_selection(
-                context, self.cluster, network, network_bindings,
-                max_ports, allow_extra_lswitches)
-        except api_exc.NsxApiException:
-            err_desc = _("An exception occurred while selecting logical "
-                         "switch for the port")
-            LOG.exception(err_desc)
-            raise nsx_exc.NsxPluginException(err_msg=err_desc)
-
-    def _nsx_create_port_helper(self, session, ls_uuid, port_data,
-                                do_port_security=True):
-        # Convert Neutron security groups identifiers into NSX security
-        # profiles identifiers
-        nsx_sec_profile_ids = [
-            nsx_utils.get_nsx_security_group_id(
-                session, self.cluster, neutron_sg_id) for
-            neutron_sg_id in (port_data[ext_sg.SECURITYGROUPS] or [])]
-        return switchlib.create_lport(self.cluster,
-                                      ls_uuid,
-                                      port_data['tenant_id'],
-                                      port_data['id'],
-                                      port_data['name'],
-                                      port_data['device_id'],
-                                      port_data['admin_state_up'],
-                                      port_data['mac_address'],
-                                      port_data['fixed_ips'],
-                                      port_data[psec.PORTSECURITY],
-                                      nsx_sec_profile_ids,
-                                      port_data.get(qos.QUEUE),
-                                      port_data.get(mac_ext.MAC_LEARNING),
-                                      port_data.get(addr_pair.ADDRESS_PAIRS))
-
-    def _handle_create_port_exception(self, context, port_id,
-                                      ls_uuid, lp_uuid):
-        with excutils.save_and_reraise_exception():
-            # rollback nsx logical port only if it was successfully
-            # created on NSX. Should this command fail the original
-            # exception will be raised.
-            if lp_uuid:
-                # Remove orphaned port from NSX
-                switchlib.delete_port(self.cluster, ls_uuid, lp_uuid)
-            # rollback the neutron-nsx port mapping
-            nsx_db.delete_neutron_nsx_port_mapping(context.session,
-                                                   port_id)
-            LOG.exception(_LE("An exception occurred while creating the "
-                              "neutron port %s on the NSX plaform"), port_id)
-
-    def _nsx_create_port(self, context, port_data):
-        """Driver for creating a logical switch port on NSX platform."""
-        # FIXME(salvatore-orlando): On the NSX platform we do not really have
-        # external networks. So if as user tries and create a "regular" VIF
-        # port on an external network we are unable to actually create.
-        # However, in order to not break unit tests, we need to still create
-        # the DB object and return success
-        if self._network_is_external(context, port_data['network_id']):
-            LOG.info(_LI("NSX plugin does not support regular VIF ports on "
-                         "external networks. Port %s will be down."),
-                     port_data['network_id'])
-            # No need to actually update the DB state - the default is down
-            return port_data
-        lport = None
-        selected_lswitch = None
-        try:
-            selected_lswitch = self._nsx_find_lswitch_for_port(context,
-                                                               port_data)
-            lport = self._nsx_create_port_helper(context.session,
-                                                 selected_lswitch['uuid'],
-                                                 port_data,
-                                                 True)
-            nsx_db.add_neutron_nsx_port_mapping(
-                context.session, port_data['id'],
-                selected_lswitch['uuid'], lport['uuid'])
-            if port_data['device_owner'] not in self.port_special_owners:
-                switchlib.plug_vif_interface(
-                    self.cluster, selected_lswitch['uuid'],
-                    lport['uuid'], "VifAttachment", port_data['id'])
-            LOG.debug("_nsx_create_port completed for port %(name)s "
-                      "on network %(network_id)s. The new port id is "
-                      "%(id)s.", port_data)
-        except (api_exc.NsxApiException, n_exc.NeutronException):
-            self._handle_create_port_exception(
-                context, port_data['id'],
-                selected_lswitch and selected_lswitch['uuid'],
-                lport and lport['uuid'])
-        except db_exc.DBError as e:
-            if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and
-                isinstance(e.inner_exception, sql_exc.IntegrityError)):
-                LOG.warning(
-                    _LW("Concurrent network deletion detected; Back-end "
-                        "Port %(nsx_id)s creation to be rolled back for "
-                        "Neutron port: %(neutron_id)s"),
-                    {'nsx_id': lport['uuid'],
-                     'neutron_id': port_data['id']})
-                if selected_lswitch and lport:
-                    try:
-                        switchlib.delete_port(self.cluster,
-                                              selected_lswitch['uuid'],
-                                              lport['uuid'])
-                    except n_exc.NotFound:
-                        LOG.debug("NSX Port %s already gone", lport['uuid'])
-
-    def _nsx_delete_port(self, context, port_data):
-        # FIXME(salvatore-orlando): On the NSX platform we do not really have
-        # external networks. So deleting regular ports from external networks
-        # does not make sense. However we cannot raise as this would break
-        # unit tests.
-        if self._network_is_external(context, port_data['network_id']):
-            LOG.info(_LI("NSX plugin does not support regular VIF ports on "
-                         "external networks. Port %s will be down."),
-                     port_data['network_id'])
-            return
-        nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
-            context.session, self.cluster, port_data['id'])
-        if not nsx_port_id:
-            LOG.debug("Port '%s' was already deleted on NSX platform", id)
-            return
-        # TODO(bgh): if this is a bridged network and the lswitch we just got
-        # back will have zero ports after the delete we should garbage collect
-        # the lswitch.
-        try:
-            switchlib.delete_port(self.cluster, nsx_switch_id, nsx_port_id)
-            LOG.debug("_nsx_delete_port completed for port %(port_id)s "
-                      "on network %(net_id)s",
-                      {'port_id': port_data['id'],
-                       'net_id': port_data['network_id']})
-        except n_exc.NotFound:
-            LOG.warning(_LW("Port %s not found in NSX"), port_data['id'])
-
-    def _nsx_delete_router_port(self, context, port_data):
-        # Delete logical router port
-        nsx_router_id = nsx_utils.get_nsx_router_id(
-            context.session, self.cluster, port_data['device_id'])
-        nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
-            context.session, self.cluster, port_data['id'])
-        if not nsx_port_id:
-            LOG.warn(
-                _LW("Neutron port %(port_id)s not found on NSX backend. "
-                    "Terminating delete operation. A dangling router port "
-                    "might have been left on router %(router_id)s"),
-                {'port_id': port_data['id'],
-                 'router_id': nsx_router_id})
-            return
-        try:
-            routerlib.delete_peer_router_lport(self.cluster,
-                                               nsx_router_id,
-                                               nsx_switch_id,
-                                               nsx_port_id)
-        except api_exc.NsxApiException:
-            # Do not raise because the issue might as well be that the
-            # router has already been deleted, so there would be nothing
-            # to do here
-            LOG.exception(_LE("Ignoring exception as this means the peer "
-                              "for port '%s' has already been deleted."),
-                          nsx_port_id)
-
-        # Delete logical switch port
-        self._nsx_delete_port(context, port_data)
-
-    def _nsx_create_router_port(self, context, port_data):
-        """Driver for creating a switch port to be connected to a router."""
-        # No router ports on external networks!
-        if self._network_is_external(context, port_data['network_id']):
-            raise nsx_exc.NsxPluginException(
-                err_msg=(_("It is not allowed to create router interface "
-                           "ports on external networks as '%s'") %
-                         port_data['network_id']))
-        ls_port = None
-        selected_lswitch = None
-        try:
-            selected_lswitch = self._nsx_find_lswitch_for_port(
-                context, port_data)
-            # Do not apply port security here!
-            ls_port = self._nsx_create_port_helper(
-                context.session, selected_lswitch['uuid'],
-                port_data, False)
-            # Assuming subnet being attached is on first fixed ip
-            # element in port data
-            subnet_id = port_data['fixed_ips'][0]['subnet_id']
-            nsx_router_id = nsx_utils.get_nsx_router_id(
-                context.session, self.cluster, port_data['device_id'])
-            # Create peer port on logical router
-            self._create_and_attach_router_port(
-                self.cluster, context, nsx_router_id, port_data,
-                "PatchAttachment", ls_port['uuid'],
-                subnet_ids=[subnet_id])
-            nsx_db.add_neutron_nsx_port_mapping(
-                context.session, port_data['id'],
-                selected_lswitch['uuid'], ls_port['uuid'])
-            LOG.debug("_nsx_create_router_port completed for port "
-                      "%(name)s on network %(network_id)s. The new "
-                      "port id is %(id)s.",
-                      port_data)
-        except (api_exc.NsxApiException, n_exc.NeutronException):
-            self._handle_create_port_exception(
-                context, port_data['id'],
-                selected_lswitch and selected_lswitch['uuid'],
-                ls_port and ls_port['uuid'])
-
-    def _find_router_gw_port(self, context, port_data):
-        router_id = port_data['device_id']
-        if not router_id:
-            raise n_exc.BadRequest(_("device_id field must be populated in "
-                                   "order to create an external gateway "
-                                   "port for network %s"),
-                                   port_data['network_id'])
-        nsx_router_id = nsx_utils.get_nsx_router_id(
-            context.session, self.cluster, router_id)
-        lr_port = routerlib.find_router_gw_port(context, self.cluster,
-                                                nsx_router_id)
-        if not lr_port:
-            raise nsx_exc.NsxPluginException(
-                err_msg=(_("The gateway port for the NSX router %s "
-                           "was not found on the backend")
-                         % nsx_router_id))
-        return lr_port
-
-    @lockutils.synchronized('vmware', 'neutron-')
-    def _nsx_create_ext_gw_port(self, context, port_data):
-        """Driver for creating an external gateway port on NSX platform."""
-        # TODO(salvatore-orlando): Handle NSX resource
-        # rollback when something goes not quite as expected
-        lr_port = self._find_router_gw_port(context, port_data)
-        ip_addresses = self._build_ip_address_list(context,
-                                                   port_data['fixed_ips'])
-        # This operation actually always updates a NSX logical port
-        # instead of creating one. This is because the gateway port
-        # is created at the same time as the NSX logical router, otherwise
-        # the fabric status of the NSX router will be down.
-        # admin_status should always be up for the gateway port
-        # regardless of what the user specifies in neutron
-        nsx_router_id = nsx_utils.get_nsx_router_id(
-            context.session, self.cluster, port_data['device_id'])
-        routerlib.update_router_lport(self.cluster,
-                                      nsx_router_id,
-                                      lr_port['uuid'],
-                                      port_data['tenant_id'],
-                                      port_data['id'],
-                                      port_data['name'],
-                                      True,
-                                      ip_addresses)
-        ext_network = self.get_network(context, port_data['network_id'])
-        if ext_network.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.L3_EXT:
-            # Update attachment
-            physical_network = (ext_network[pnet.PHYSICAL_NETWORK] or
-                                self.cluster.default_l3_gw_service_uuid)
-            self._update_router_port_attachment(
-                self.cluster, context, nsx_router_id, port_data,
-                lr_port['uuid'],
-                "L3GatewayAttachment",
-                physical_network,
-                ext_network[pnet.SEGMENTATION_ID])
-
-        LOG.debug("_nsx_create_ext_gw_port completed on external network "
-                  "%(ext_net_id)s, attached to router:%(router_id)s. "
-                  "NSX port id is %(nsx_port_id)s",
-                  {'ext_net_id': port_data['network_id'],
-                   'router_id': nsx_router_id,
-                   'nsx_port_id': lr_port['uuid']})
-
-    @lockutils.synchronized('vmware', 'neutron-')
-    def _nsx_delete_ext_gw_port(self, context, port_data):
-        # TODO(salvatore-orlando): Handle NSX resource
-        # rollback when something goes not quite as expected
-        try:
-            lr_port = self._find_router_gw_port(context, port_data)
-            # Delete is actually never a real delete, otherwise the NSX
-            # logical router will stop working
-            router_id = port_data['device_id']
-            nsx_router_id = nsx_utils.get_nsx_router_id(
-                context.session, self.cluster, router_id)
-            routerlib.update_router_lport(self.cluster,
-                                          nsx_router_id,
-                                          lr_port['uuid'],
-                                          port_data['tenant_id'],
-                                          port_data['id'],
-                                          port_data['name'],
-                                          True,
-                                          ['0.0.0.0/31'])
-            # Reset attachment
-            self._update_router_port_attachment(
-                self.cluster, context, nsx_router_id, port_data,
-                lr_port['uuid'],
-                "L3GatewayAttachment",
-                self.cluster.default_l3_gw_service_uuid)
-            LOG.debug("_nsx_delete_ext_gw_port completed on external network "
-                      "%(ext_net_id)s, attached to NSX router:%(router_id)s",
-                      {'ext_net_id': port_data['network_id'],
-                       'router_id': nsx_router_id})
-        except n_exc.NotFound:
-            LOG.debug("Logical router resource %s not found "
-                      "on NSX platform : the router may have "
-                      "already been deleted",
-                      port_data['device_id'])
-        except api_exc.NsxApiException:
-            raise nsx_exc.NsxPluginException(
-                err_msg=_("Unable to update logical router"
-                          "on NSX Platform"))
-
-    def _nsx_create_l2_gw_port(self, context, port_data):
-        """Create a switch port, and attach it to a L2 gateway attachment."""
-        # FIXME(salvatore-orlando): On the NSX platform we do not really have
-        # external networks. So if as user tries and create a "regular" VIF
-        # port on an external network we are unable to actually create.
-        # However, in order to not break unit tests, we need to still create
-        # the DB object and return success
-        if self._network_is_external(context, port_data['network_id']):
-            LOG.info(_LI("NSX plugin does not support regular VIF ports on "
-                         "external networks. Port %s will be down."),
-                     port_data['network_id'])
-            # No need to actually update the DB state - the default is down
-            return port_data
-        lport = None
-        try:
-            selected_lswitch = self._nsx_find_lswitch_for_port(
-                context, port_data)
-            lport = self._nsx_create_port_helper(
-                context.session,
-                selected_lswitch['uuid'],
-                port_data,
-                True)
-            nsx_db.add_neutron_nsx_port_mapping(
-                context.session, port_data['id'],
-                selected_lswitch['uuid'], lport['uuid'])
-            l2gwlib.plug_l2_gw_service(
-                self.cluster,
-                selected_lswitch['uuid'],
-                lport['uuid'],
-                port_data['device_id'],
-                int(port_data.get('gw:segmentation_id') or 0))
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                if lport:
-                    switchlib.delete_port(self.cluster,
-                                          selected_lswitch['uuid'],
-                                          lport['uuid'])
-        LOG.debug("_nsx_create_l2_gw_port completed for port %(name)s "
-                  "on network %(network_id)s. The new port id "
-                  "is %(id)s.", port_data)
-
-    def _nsx_create_fip_port(self, context, port_data):
-        # As we do not create ports for floating IPs in NSX,
-        # this is a no-op driver
-        pass
-
-    def _nsx_delete_fip_port(self, context, port_data):
-        # As we do not create ports for floating IPs in NSX,
-        # this is a no-op driver
-        pass
-
-    def _extend_fault_map(self):
-        """Extends the Neutron Fault Map.
-
-        Exceptions specific to the NSX Plugin are mapped to standard
-        HTTP Exceptions.
-        """
-        base.FAULT_MAP.update({nsx_exc.InvalidNovaZone:
-                               webob.exc.HTTPBadRequest,
-                               nsx_exc.NoMorePortsException:
-                               webob.exc.HTTPBadRequest,
-                               nsx_exc.MaintenanceInProgress:
-                               webob.exc.HTTPServiceUnavailable,
-                               nsx_exc.InvalidSecurityCertificate:
-                               webob.exc.HTTPBadRequest})
-
-    def _validate_provider_create(self, context, network):
-        segments = network.get(mpnet.SEGMENTS)
-        if not attr.is_attr_set(segments):
-            return
-
-        mpnet.check_duplicate_segments(segments)
-        for segment in segments:
-            network_type = segment.get(pnet.NETWORK_TYPE)
-            physical_network = segment.get(pnet.PHYSICAL_NETWORK)
-            physical_network_set = attr.is_attr_set(physical_network)
-            segmentation_id = segment.get(pnet.SEGMENTATION_ID)
-            network_type_set = attr.is_attr_set(network_type)
-            segmentation_id_set = attr.is_attr_set(segmentation_id)
-
-            # If the physical_network_uuid isn't passed in use the default one.
-            if not physical_network_set:
-                physical_network = cfg.CONF.default_tz_uuid
-
-            err_msg = None
-            if not network_type_set:
-                err_msg = _("%s required") % pnet.NETWORK_TYPE
-            elif network_type in (c_utils.NetworkTypes.GRE,
-                                  c_utils.NetworkTypes.STT,
-                                  c_utils.NetworkTypes.FLAT):
-                if segmentation_id_set:
-                    err_msg = _("Segmentation ID cannot be specified with "
-                                "flat network type")
-            elif network_type == c_utils.NetworkTypes.VLAN:
-                if not segmentation_id_set:
-                    err_msg = _("Segmentation ID must be specified with "
-                                "vlan network type")
-                elif (segmentation_id_set and
-                      not utils.is_valid_vlan_tag(segmentation_id)):
-                    err_msg = (_("%(segmentation_id)s out of range "
-                                 "(%(min_id)s through %(max_id)s)") %
-                               {'segmentation_id': segmentation_id,
-                                'min_id': constants.MIN_VLAN_TAG,
-                                'max_id': constants.MAX_VLAN_TAG})
-                else:
-                    # Verify segment is not already allocated
-                    bindings = (
-                        nsx_db.get_network_bindings_by_vlanid_and_physical_net(
-                            context.session, segmentation_id,
-                            physical_network)
-                    )
-                    if bindings:
-                        raise n_exc.VlanIdInUse(
-                            vlan_id=segmentation_id,
-                            physical_network=physical_network)
-            elif network_type == c_utils.NetworkTypes.L3_EXT:
-                if (segmentation_id_set and
-                    not utils.is_valid_vlan_tag(segmentation_id)):
-                    err_msg = (_("%(segmentation_id)s out of range "
-                                 "(%(min_id)s through %(max_id)s)") %
-                               {'segmentation_id': segmentation_id,
-                                'min_id': constants.MIN_VLAN_TAG,
-                                'max_id': constants.MAX_VLAN_TAG})
-            else:
-                err_msg = (_("%(net_type_param)s %(net_type_value)s not "
-                             "supported") %
-                           {'net_type_param': pnet.NETWORK_TYPE,
-                            'net_type_value': network_type})
-            if err_msg:
-                raise n_exc.InvalidInput(error_message=err_msg)
-            # TODO(salvatore-orlando): Validate tranport zone uuid
-            # which should be specified in physical_network
-
-    def _extend_network_dict_provider(self, context, network,
-                                      multiprovider=None, bindings=None):
-        if not bindings:
-            bindings = nsx_db.get_network_bindings(context.session,
-                                                   network['id'])
-        if not multiprovider:
-            multiprovider = nsx_db.is_multiprovider_network(context.session,
-                                                            network['id'])
-        # With NSX plugin 'normal' overlay networks will have no binding
-        # TODO(salvatore-orlando) make sure users can specify a distinct
-        # phy_uuid as 'provider network' for STT net type
-        if bindings:
-            if not multiprovider:
-                # network came in through provider networks api
-                network[pnet.NETWORK_TYPE] = bindings[0].binding_type
-                network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid
-                network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id
-            else:
-                # network come in though multiprovider networks api
-                network[mpnet.SEGMENTS] = [
-                    {pnet.NETWORK_TYPE: binding.binding_type,
-                     pnet.PHYSICAL_NETWORK: binding.phy_uuid,
-                     pnet.SEGMENTATION_ID: binding.vlan_id}
-                    for binding in bindings]
-
-    def extend_port_dict_binding(self, port_res, port_db):
-        super(NsxPluginV2, self).extend_port_dict_binding(port_res, port_db)
-        port_res[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL
-
-    def _handle_lswitch_selection(self, context, cluster, network,
-                                  network_bindings, max_ports,
-                                  allow_extra_lswitches):
-        lswitches = nsx_utils.fetch_nsx_switches(
-            context.session, cluster, network.id)
-        try:
-            return [ls for ls in lswitches
-                    if (ls['_relations']['LogicalSwitchStatus']
-                        ['lport_count'] < max_ports)].pop(0)
-        except IndexError:
-            # Too bad, no switch available
-            LOG.debug("No switch has available ports (%d checked)",
-                      len(lswitches))
-        if allow_extra_lswitches:
-            # The 'main' logical switch is either the only one available
-            # or the one where the 'multi_lswitch' tag was set
-            while lswitches:
-                main_ls = lswitches.pop(0)
-                tag_dict = dict((x['scope'], x['tag'])
-                                for x in main_ls['tags'])
-                if 'multi_lswitch' in tag_dict:
-                    break
-            else:
-                # by construction this statement is hit if there is only one
-                # logical switch and the multi_lswitch tag has not been set.
-                # The tag must therefore be added.
-                tags = main_ls['tags']
-                tags.append({'tag': 'True', 'scope': 'multi_lswitch'})
-                switchlib.update_lswitch(cluster,
-                                         main_ls['uuid'],
-                                         main_ls['display_name'],
-                                         network['tenant_id'],
-                                         tags=tags)
-            transport_zone_config = self._convert_to_nsx_transport_zones(
-                cluster, network, bindings=network_bindings)
-            selected_lswitch = switchlib.create_lswitch(
-                cluster, network.id, network.tenant_id,
-                "%s-ext-%s" % (network.name, len(lswitches)),
-                transport_zone_config)
-            # add a mapping between the neutron network and the newly
-            # created logical switch
-            nsx_db.add_neutron_nsx_network_mapping(
-                context.session, network.id, selected_lswitch['uuid'])
-            return selected_lswitch
-        else:
-            LOG.error(_LE("Maximum number of logical ports reached for "
-                          "logical network %s"), network.id)
-            raise nsx_exc.NoMorePortsException(network=network.id)
-
-    def _convert_to_nsx_transport_zones(self, cluster, network=None,
-                                        bindings=None):
-        # TODO(salv-orlando): Remove this method and call nsx-utils direct
-        return nsx_utils.convert_to_nsx_transport_zones(
-            cluster.default_tz_uuid, network, bindings,
-            default_transport_type=cfg.CONF.NSX.default_transport_type)
-
-    def _convert_to_transport_zones_dict(self, network):
-        """Converts the provider request body to multiprovider.
-        Returns: True if request is multiprovider False if provider
-        and None if neither.
-        """
-        if any(attr.is_attr_set(network.get(f))
-               for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
-                         pnet.SEGMENTATION_ID)):
-            if attr.is_attr_set(network.get(mpnet.SEGMENTS)):
-                raise mpnet.SegmentsSetInConjunctionWithProviders()
-            # convert to transport zone list
-            network[mpnet.SEGMENTS] = [
-                {pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE],
-                 pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK],
-                 pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}]
-            del network[pnet.NETWORK_TYPE]
-            del network[pnet.PHYSICAL_NETWORK]
-            del network[pnet.SEGMENTATION_ID]
-            return False
-        if attr.is_attr_set(mpnet.SEGMENTS):
-            return True
-
-    def create_network(self, context, network):
-        net_data = network['network']
-        tenant_id = self._get_tenant_id_for_create(context, net_data)
-        self._ensure_default_security_group(context, tenant_id)
-        # Process the provider network extension
-        provider_type = self._convert_to_transport_zones_dict(net_data)
-        self._validate_provider_create(context, net_data)
-        # Replace ATTR_NOT_SPECIFIED with None before sending to NSX
-        for key, value in network['network'].iteritems():
-            if value is attr.ATTR_NOT_SPECIFIED:
-                net_data[key] = None
-        # FIXME(arosen) implement admin_state_up = False in NSX
-        if net_data['admin_state_up'] is False:
-            LOG.warning(_LW("Network with admin_state_up=False are not yet "
-                            "supported by this plugin. Ignoring setting for "
-                            "network %s"), net_data.get('name', '<unknown>'))
-        transport_zone_config = self._convert_to_nsx_transport_zones(
-            self.cluster, net_data)
-        external = net_data.get(ext_net_extn.EXTERNAL)
-        # NOTE(salv-orlando): Pre-generating uuid for Neutron
-        # network. This will be removed once the network create operation
-        # becomes an asynchronous task
-        net_data['id'] = str(uuid.uuid4())
-        if (not attr.is_attr_set(external) or
-            attr.is_attr_set(external) and not external):
-            lswitch = switchlib.create_lswitch(
-                self.cluster, net_data['id'],
-                tenant_id, net_data.get('name'),
-                transport_zone_config,
-                shared=net_data.get(attr.SHARED))
-
-        with context.session.begin(subtransactions=True):
-            new_net = super(NsxPluginV2, self).create_network(context,
-                                                              network)
-            # Process port security extension
-            self._process_network_port_security_create(
-                context, net_data, new_net)
-            # DB Operations for setting the network as external
-            self._process_l3_create(context, new_net, net_data)
-            # Process QoS queue extension
-            net_queue_id = net_data.get(qos.QUEUE)
-            if net_queue_id:
-                # Raises if not found
-                self.get_qos_queue(context, net_queue_id)
-                self._process_network_queue_mapping(
-                    context, new_net, net_queue_id)
-            # Add mapping between neutron network and NSX switch
-            if (not attr.is_attr_set(external) or
-                attr.is_attr_set(external) and not external):
-                nsx_db.add_neutron_nsx_network_mapping(
-                    context.session, new_net['id'],
-                    lswitch['uuid'])
-            if (net_data.get(mpnet.SEGMENTS) and
-                isinstance(provider_type, bool)):
-                net_bindings = []
-                for tz in net_data[mpnet.SEGMENTS]:
-                    segmentation_id = tz.get(pnet.SEGMENTATION_ID, 0)
-                    segmentation_id_set = attr.is_attr_set(segmentation_id)
-                    if not segmentation_id_set:
-                        segmentation_id = 0
-                    net_bindings.append(nsx_db.add_network_binding(
-                        context.session, new_net['id'],
-                        tz.get(pnet.NETWORK_TYPE),
-                        tz.get(pnet.PHYSICAL_NETWORK),
-                        segmentation_id))
-                if provider_type:
-                    nsx_db.set_multiprovider_network(context.session,
-                                                     new_net['id'])
-                self._extend_network_dict_provider(context, new_net,
-                                                   provider_type,
-                                                   net_bindings)
-        self.handle_network_dhcp_access(context, new_net,
-                                        action='create_network')
-        return new_net
-
-    def delete_network(self, context, id):
-        external = self._network_is_external(context, id)
-        # Before removing entry from Neutron DB, retrieve NSX switch
-        # identifiers for removing them from backend
-        if not external:
-            lswitch_ids = nsx_utils.get_nsx_switch_ids(
-                context.session, self.cluster, id)
-        with context.session.begin(subtransactions=True):
-            self._process_l3_delete(context, id)
-            nsx_db.delete_network_bindings(context.session, id)
-            super(NsxPluginV2, self).delete_network(context, id)
-
-        # Do not go to NSX for external networks
-        if not external:
-            try:
-                switchlib.delete_networks(self.cluster, id, lswitch_ids)
-            except n_exc.NotFound:
-                LOG.warning(_LW("The following logical switches were not "
-                                "found on the NSX backend:%s"), lswitch_ids)
-        self.handle_network_dhcp_access(context, id, action='delete_network')
-        LOG.debug("Delete network complete for network: %s", id)
-
-    def get_network(self, context, id, fields=None):
-        with context.session.begin(subtransactions=True):
-            # goto to the plugin DB and fetch the network
-            network = self._get_network(context, id)
-            if (self.nsx_sync_opts.always_read_status or
-                fields and 'status' in fields):
-                # External networks are not backed by nsx lswitches
-                if not network.external:
-                    # Perform explicit state synchronization
-                    self._synchronizer.synchronize_network(context, network)
-            # Don't do field selection here otherwise we won't be able
-            # to add provider networks fields
-            net_result = self._make_network_dict(network)
-            self._extend_network_dict_provider(context, net_result)
-        return self._fields(net_result, fields)
-
-    def get_networks(self, context, filters=None, fields=None,
-                     sorts=None, limit=None, marker=None,
-                     page_reverse=False):
-        filters = filters or {}
-        with context.session.begin(subtransactions=True):
-            networks = (
-                super(NsxPluginV2, self).get_networks(
-                    context, filters, fields, sorts,
-                    limit, marker, page_reverse))
-            for net in networks:
-                self._extend_network_dict_provider(context, net)
-        return [self._fields(network, fields) for network in networks]
-
-    def update_network(self, context, id, network):
-        pnet._raise_if_updates_provider_attributes(network['network'])
-        if network["network"].get("admin_state_up") is False:
-            raise NotImplementedError(_("admin_state_up=False networks "
-                                        "are not supported."))
-        with context.session.begin(subtransactions=True):
-            net = super(NsxPluginV2, self).update_network(context, id, network)
-            if psec.PORTSECURITY in network['network']:
-                self._process_network_port_security_update(
-                    context, network['network'], net)
-            net_queue_id = network['network'].get(qos.QUEUE)
-            if net_queue_id:
-                self._delete_network_queue_mapping(context, id)
-                self._process_network_queue_mapping(context, net, net_queue_id)
-            self._process_l3_update(context, net, network['network'])
-            self._extend_network_dict_provider(context, net)
-        # If provided, update port name on backend; treat backend failures as
-        # not critical (log error, but do not raise)
-        if 'name' in network['network']:
-            # in case of chained switches update name only for the first one
-            nsx_switch_ids = nsx_utils.get_nsx_switch_ids(
-                context.session, self.cluster, id)
-            if not nsx_switch_ids or len(nsx_switch_ids) < 1:
-                LOG.warn(_LW("Unable to find NSX mappings for neutron "
-                             "network:%s"), id)
-            try:
-                switchlib.update_lswitch(self.cluster,
-                                         nsx_switch_ids[0],
-                                         network['network']['name'])
-            except api_exc.NsxApiException as e:
-                LOG.warn(_LW("Logical switch update on NSX backend failed. "
-                             "Neutron network id:%(net_id)s; "
-                             "NSX lswitch id:%(lswitch_id)s;"
-                             "Error:%(error)s"),
-                         {'net_id': id, 'lswitch_id': nsx_switch_ids[0],
-                          'error': e})
-
-        return net
-
-    def create_port(self, context, port):
-        # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED
-        # then we pass the port to the policy engine. The reason why we don't
-        # pass the value to the policy engine when the port is
-        # ATTR_NOT_SPECIFIED is for the case where a port is created on a
-        # shared network that is not owned by the tenant.
-        port_data = port['port']
-        # Set port status as 'DOWN'. This will be updated by backend sync.
-        port_data['status'] = constants.PORT_STATUS_DOWN
-        with context.session.begin(subtransactions=True):
-            # First we allocate port in neutron database
-            neutron_db = super(NsxPluginV2, self).create_port(context, port)
-            neutron_port_id = neutron_db['id']
-            # Update fields obtained from neutron db (eg: MAC address)
-            port["port"].update(neutron_db)
-            self.handle_port_metadata_access(context, neutron_db)
-            # port security extension checks
-            (port_security, has_ip) = self._determine_port_security_and_has_ip(
-                context, port_data)
-            port_data[psec.PORTSECURITY] = port_security
-            self._process_port_port_security_create(
-                context, port_data, neutron_db)
-            # allowed address pair checks
-            if attr.is_attr_set(port_data.get(addr_pair.ADDRESS_PAIRS)):
-                if not port_security:
-                    raise addr_pair.AddressPairAndPortSecurityRequired()
-                else:
-                    self._process_create_allowed_address_pairs(
-                        context, neutron_db,
-                        port_data[addr_pair.ADDRESS_PAIRS])
-            else:
-                # remove ATTR_NOT_SPECIFIED
-                port_data[addr_pair.ADDRESS_PAIRS] = []
-
-            # security group extension checks
-            # NOTE: check_update_has_security_groups works fine for
-            # create operations as well
-            if port_security and has_ip:
-                self._ensure_default_security_group_on_port(context, port)
-            elif self._check_update_has_security_groups(
-                 {'port': port_data}):
-                raise psec.PortSecurityAndIPRequiredForSecurityGroups()
-            port_data[ext_sg.SECURITYGROUPS] = (
-                self._get_security_groups_on_port(context, port))
-            self._process_port_create_security_group(
-                context, port_data, port_data[ext_sg.SECURITYGROUPS])
-            # QoS extension checks
-            port_queue_id = self._check_for_queue_and_create(
-                context, port_data)
-            self._process_port_queue_mapping(
-                context, port_data, port_queue_id)
-            if (isinstance(port_data.get(mac_ext.MAC_LEARNING), bool)):
-                self._create_mac_learning_state(context, port_data)
-            elif mac_ext.MAC_LEARNING in port_data:
-                port_data.pop(mac_ext.MAC_LEARNING)
-            self._process_portbindings_create_and_update(context,
-                                                         port['port'],
-                                                         port_data)
-            # For some reason the port bindings DB mixin does not handle
-            # the VNIC_TYPE attribute, which is required by nova for
-            # setting up VIFs.
-            context.session.flush()
-            port_data[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL
-
-        # DB Operation is complete, perform NSX operation
-        try:
-            port_data = port['port'].copy()
-            port_create_func = self._port_drivers['create'].get(
-                port_data['device_owner'],
-                self._port_drivers['create']['default'])
-            port_create_func(context, port_data)
-            LOG.debug("port created on NSX backend for tenant "
-                      "%(tenant_id)s: (%(id)s)", port_data)
-        except n_exc.NotFound:
-            LOG.warning(_LW("Logical switch for network %s was not "
-                            "found in NSX."), port_data['network_id'])
-            # Put port in error on neutron DB
-            with context.session.begin(subtransactions=True):
-                port = self._get_port(context, neutron_port_id)
-                port_data['status'] = constants.PORT_STATUS_ERROR
-                port['status'] = port_data['status']
-                context.session.add(port)
-        except Exception:
-            # Port must be removed from neutron DB
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Unable to create port or set port "
-                              "attachment in NSX."))
-                with context.session.begin(subtransactions=True):
-                    self._delete_port(context, neutron_port_id)
-
-        self.handle_port_dhcp_access(context, port_data, action='create_port')
-        return port_data
-
-    def update_port(self, context, id, port):
-        delete_security_groups = self._check_update_deletes_security_groups(
-            port)
-        has_security_groups = self._check_update_has_security_groups(port)
-        delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
-            port)
-        has_addr_pairs = self._check_update_has_allowed_address_pairs(port)
-
-        with context.session.begin(subtransactions=True):
-            ret_port = super(NsxPluginV2, self).update_port(
-                context, id, port)
-            # Save current mac learning state to check whether it's
-            # being updated or not
-            old_mac_learning_state = ret_port.get(mac_ext.MAC_LEARNING)
-            # copy values over - except fixed_ips as
-            # they've already been processed
-            port['port'].pop('fixed_ips', None)
-            ret_port.update(port['port'])
-            tenant_id = self._get_tenant_id_for_create(context, ret_port)
-
-            # populate port_security setting
-            if psec.PORTSECURITY not in port['port']:
-                ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
-                    context, id)
-            has_ip = self._ip_on_port(ret_port)
-            # validate port security and allowed address pairs
-            if not ret_port[psec.PORTSECURITY]:
-                #  has address pairs in request
-                if has_addr_pairs:
-                    raise addr_pair.AddressPairAndPortSecurityRequired()
-                elif not delete_addr_pairs:
-                    # check if address pairs are in db
-                    ret_port[addr_pair.ADDRESS_PAIRS] = (
-                        self.get_allowed_address_pairs(context, id))
-                    if ret_port[addr_pair.ADDRESS_PAIRS]:
-                        raise addr_pair.AddressPairAndPortSecurityRequired()
-
-            if (delete_addr_pairs or has_addr_pairs):
-                # delete address pairs and read them in
-                self._delete_allowed_address_pairs(context, id)
-                self._process_create_allowed_address_pairs(
-                    context, ret_port, ret_port[addr_pair.ADDRESS_PAIRS])
-            # checks if security groups were updated adding/modifying
-            # security groups, port security is set and port has ip
-            if not (has_ip and ret_port[psec.PORTSECURITY]):
-                if has_security_groups:
-                    raise psec.PortSecurityAndIPRequiredForSecurityGroups()
-                # Update did not have security groups passed in. Check
-                # that port does not have any security groups already on it.
-                filters = {'port_id': [id]}
-                security_groups = (
-                    super(NsxPluginV2, self)._get_port_security_group_bindings(
-                        context, filters)
-                )
-                if security_groups and not delete_security_groups:
-                    raise psec.PortSecurityPortHasSecurityGroup()
-
-            if (delete_security_groups or has_security_groups):
-                # delete the port binding and read it with the new rules.
-                self._delete_port_security_group_bindings(context, id)
-                sgids = self._get_security_groups_on_port(context, port)
-                self._process_port_create_security_group(context, ret_port,
-                                                         sgids)
-
-            if psec.PORTSECURITY in port['port']:
-                self._process_port_port_security_update(
-                    context, port['port'], ret_port)
-
-            port_queue_id = self._check_for_queue_and_create(
-                context, ret_port)
-            # Populate the mac learning attribute
-            new_mac_learning_state = port['port'].get(mac_ext.MAC_LEARNING)
-            if (new_mac_learning_state is not None and
-                old_mac_learning_state != new_mac_learning_state):
-                self._update_mac_learning_state(context, id,
-                                                new_mac_learning_state)
-                ret_port[mac_ext.MAC_LEARNING] = new_mac_learning_state
-            self._delete_port_queue_mapping(context, ret_port['id'])
-            self._process_port_queue_mapping(context, ret_port,
-                                             port_queue_id)
-            LOG.debug("Updating port: %s", port)
-            nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
-                context.session, self.cluster, id)
-            # Convert Neutron security groups identifiers into NSX security
-            # profiles identifiers
-            nsx_sec_profile_ids = [
-                nsx_utils.get_nsx_security_group_id(
-                    context.session, self.cluster, neutron_sg_id) for
-                neutron_sg_id in (ret_port[ext_sg.SECURITYGROUPS] or [])]
-
-            if nsx_port_id:
-                try:
-                    switchlib.update_port(
-                        self.cluster, nsx_switch_id, nsx_port_id,
-                        id, tenant_id,
-                        ret_port['name'],
-                        ret_port['device_id'],
-                        ret_port['admin_state_up'],
-                        ret_port['mac_address'],
-                        ret_port['fixed_ips'],
-                        ret_port[psec.PORTSECURITY],
-                        nsx_sec_profile_ids,
-                        ret_port[qos.QUEUE],
-                        ret_port.get(mac_ext.MAC_LEARNING),
-                        ret_port.get(addr_pair.ADDRESS_PAIRS))
-
-                    # Update the port status from nsx. If we fail here hide it
-                    # since the port was successfully updated but we were not
-                    # able to retrieve the status.
-                    ret_port['status'] = switchlib.get_port_status(
-                        self.cluster, nsx_switch_id,
-                        nsx_port_id)
-                # FIXME(arosen) improve exception handling.
-                except Exception:
-                    ret_port['status'] = constants.PORT_STATUS_ERROR
-                    LOG.exception(_LE("Unable to update port id: %s."),
-                                  nsx_port_id)
-
-            # If nsx_port_id is not in database or in nsx put in error state.
-            else:
-                ret_port['status'] = constants.PORT_STATUS_ERROR
-
-            self._process_portbindings_create_and_update(context,
-                                                         port['port'],
-                                                         ret_port)
-        return ret_port
-
-    def delete_port(self, context, id, l3_port_check=True,
-                    nw_gw_port_check=True):
-        """Deletes a port on a specified Virtual Network.
-
-        If the port contains a remote interface attachment, the remote
-        interface is first un-plugged and then the port is deleted.
-
-        :param context: neutron request context
-        :param id: UUID of the port to delete
-        :param l3_port_check: when True, refuse deletion of ports owned
-            by an L3 router
-        :param nw_gw_port_check: when True, refuse deletion of ports
-            owned by a layer-2 network gateway
-        :returns: None
-        :raises: exception.PortInUse
-        :raises: exception.PortNotFound
-        :raises: exception.NetworkNotFound
-        """
-        # if needed, check to see if this is a port owned by
-        # a l3 router.  If so, we should prevent deletion here
-        if l3_port_check:
-            self.prevent_l3_port_deletion(context, id)
-        neutron_db_port = self.get_port(context, id)
-        # Perform the same check for ports owned by layer-2 gateways
-        if nw_gw_port_check:
-            self.prevent_network_gateway_port_deletion(context,
-                                                       neutron_db_port)
-        # Dispatch to the device_owner-specific delete driver, falling
-        # back to the 'default' driver when no specific one is registered
-        port_delete_func = self._port_drivers['delete'].get(
-            neutron_db_port['device_owner'],
-            self._port_drivers['delete']['default'])
-
-        port_delete_func(context, neutron_db_port)
-        self.disassociate_floatingips(context, id)
-        with context.session.begin(subtransactions=True):
-            # Fetch queue bindings before the port row is removed below
-            queue = self._get_port_queue_bindings(context, {'port_id': [id]})
-            # metadata_dhcp_host_route
-            self.handle_port_metadata_access(
-                context, neutron_db_port, is_delete=True)
-            super(NsxPluginV2, self).delete_port(context, id)
-            # Delete qos queue if possible
-            if queue:
-                self.delete_qos_queue(context, queue[0]['queue_id'], False)
-        self.handle_port_dhcp_access(
-            context, neutron_db_port, action='delete_port')
-
-    def get_port(self, context, id, fields=None):
-        """Return the port, refreshing 'status' from NSX when required.
-
-        An explicit backend synchronization is performed when the
-        'always_read_status' sync option is set, or when the caller
-        explicitly asked for the 'status' field; otherwise this falls
-        through to the plain DB read done by the superclass.
-        """
-        with context.session.begin(subtransactions=True):
-            # NOTE: 'or' binds looser than 'and', so this reads as
-            # always_read_status or (fields and 'status' in fields)
-            if (self.nsx_sync_opts.always_read_status or
-                fields and 'status' in fields):
-                # Perform explicit state synchronization
-                db_port = self._get_port(context, id)
-                self._synchronizer.synchronize_port(
-                    context, db_port)
-                return self._make_port_dict(db_port, fields)
-            else:
-                return super(NsxPluginV2, self).get_port(context, id, fields)
-
-    def get_router(self, context, id, fields=None):
-        """Return the router, refreshing 'status' from NSX when required.
-
-        An explicit backend synchronization is performed when the
-        'always_read_status' sync option is set, or when the caller
-        explicitly asked for the 'status' field.
-        """
-        # NOTE: 'or' binds looser than 'and', so this reads as
-        # always_read_status or (fields and 'status' in fields)
-        if (self.nsx_sync_opts.always_read_status or
-            fields and 'status' in fields):
-            db_router = self._get_router(context, id)
-            # Perform explicit state synchronization
-            self._synchronizer.synchronize_router(
-                context, db_router)
-            return self._make_router_dict(db_router, fields)
-        else:
-            return super(NsxPluginV2, self).get_router(context, id, fields)
-
-    def _create_lrouter(self, context, router, nexthop):
-        """Create a logical router on the NSX backend.
-
-        Also creates the L3 gateway port attached to the cluster's
-        default L3 gateway service; the backend router is removed again
-        if that port cannot be created.
-
-        :param router: neutron router data (must carry 'id' and 'name')
-        :param nexthop: default gateway IP to configure on the backend
-        :returns: the backend router dict with 'status' set to ACTIVE
-        :raises: n_exc.BadRequest, nsx_exc.NsxPluginException
-        """
-        tenant_id = self._get_tenant_id_for_create(context, router)
-        distributed = router.get('distributed')
-        try:
-            lrouter = routerlib.create_lrouter(
-                self.cluster, router['id'],
-                tenant_id, router['name'], nexthop,
-                distributed=attr.is_attr_set(distributed) and distributed)
-        except nsx_exc.InvalidVersion:
-            msg = _("Cannot create a distributed router with the NSX "
-                    "platform currently in execution. Please, try "
-                    "without specifying the 'distributed' attribute.")
-            LOG.exception(msg)
-            raise n_exc.BadRequest(resource='router', msg=msg)
-        except api_exc.NsxApiException:
-            err_msg = _("Unable to create logical router on NSX Platform")
-            LOG.exception(err_msg)
-            raise nsx_exc.NsxPluginException(err_msg=err_msg)
-
-        # Create the port here - and update it later if we have gw_info
-        try:
-            self._create_and_attach_router_port(
-                self.cluster, context, lrouter['uuid'], {'fake_ext_gw': True},
-                "L3GatewayAttachment",
-                self.cluster.default_l3_gw_service_uuid)
-        except nsx_exc.NsxPluginException:
-            LOG.exception(_LE("Unable to create L3GW port on logical router "
-                              "%(router_uuid)s. Verify Default Layer-3 "
-                              "Gateway service %(def_l3_gw_svc)s id is "
-                              "correct"),
-                          {'router_uuid': lrouter['uuid'],
-                           'def_l3_gw_svc':
-                           self.cluster.default_l3_gw_service_uuid})
-            # Try and remove logical router from NSX
-            routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
-            # Return user a 500 with an apt message
-            raise nsx_exc.NsxPluginException(
-                err_msg=(_("Unable to create router %s on NSX backend") %
-                         router['id']))
-        lrouter['status'] = plugin_const.ACTIVE
-        return lrouter
-
-    def create_router(self, context, router):
-        """Create a neutron router backed by an NSX logical router.
-
-        The backend router is created first, outside any DB
-        transaction; the neutron row then reuses a pre-generated UUID
-        so both sides share the same identifier. If setting the
-        external gateway fails afterwards, the router is deleted again
-        to preserve atomicity.
-        """
-        # NOTE(salvatore-orlando): We completely override this method in
-        # order to be able to use the NSX ID as Neutron ID
-        # TODO(salvatore-orlando): Propose upstream patch for allowing
-        # 3rd parties to specify IDs as we do with l2 plugin
-        r = router['router']
-        has_gw_info = False
-        tenant_id = self._get_tenant_id_for_create(context, r)
-        # default value to set - nsx wants it (even if we don't have it)
-        nexthop = NSX_DEFAULT_NEXTHOP
-        # if external gateway info are set, then configure nexthop to
-        # default external gateway
-        if 'external_gateway_info' in r and r.get('external_gateway_info'):
-            has_gw_info = True
-            gw_info = r['external_gateway_info']
-            del r['external_gateway_info']
-            # The following DB read will be performed again when updating
-            # gateway info. This is not great, but still better than
-            # creating NSX router here and updating it later
-            network_id = (gw_info.get('network_id', None) if gw_info
-                          else None)
-            if network_id:
-                ext_net = self._get_network(context, network_id)
-                if not ext_net.external:
-                    msg = (_("Network '%s' is not a valid external "
-                             "network") % network_id)
-                    raise n_exc.BadRequest(resource='router', msg=msg)
-                if ext_net.subnets:
-                    ext_subnet = ext_net.subnets[0]
-                    nexthop = ext_subnet.gateway_ip
-        # NOTE(salv-orlando): Pre-generating uuid for Neutron
-        # router. This will be removed once the router create operation
-        # becomes an asynchronous task
-        neutron_router_id = str(uuid.uuid4())
-        r['id'] = neutron_router_id
-        lrouter = self._create_lrouter(context, r, nexthop)
-        # Update 'distributed' with value returned from NSX
-        # This will be useful for setting the value if the API request
-        # did not specify any value for the 'distributed' attribute
-        # Platforms older than 3.x do not support the attribute
-        r['distributed'] = lrouter.get('distributed', False)
-        # TODO(salv-orlando): Deal with backend object removal in case
-        # of db failures
-        with context.session.begin(subtransactions=True):
-            # Transaction nesting is needed to avoid foreign key violations
-            # when processing the distributed router binding
-            with context.session.begin(subtransactions=True):
-                router_db = l3_db.Router(id=neutron_router_id,
-                                         tenant_id=tenant_id,
-                                         name=r['name'],
-                                         admin_state_up=r['admin_state_up'],
-                                         status=lrouter['status'])
-                context.session.add(router_db)
-                self._process_extra_attr_router_create(context, router_db, r)
-                # Ensure neutron router is moved into the transaction's buffer
-                context.session.flush()
-                # Add mapping between neutron and nsx identifiers
-                nsx_db.add_neutron_nsx_router_mapping(
-                    context.session, router_db['id'], lrouter['uuid'])
-
-        if has_gw_info:
-            # NOTE(salv-orlando): This operation has been moved out of the
-            # database transaction since it performs several NSX queries,
-            # thus increasing the risk of deadlocks between eventlet and
-            # sqlalchemy operations.
-            # Set external gateway and remove router in case of failure
-            try:
-                self._update_router_gw_info(context, router_db['id'], gw_info)
-            except (n_exc.NeutronException, api_exc.NsxApiException):
-                with excutils.save_and_reraise_exception():
-                    # As setting gateway failed, the router must be deleted
-                    # in order to ensure atomicity
-                    router_id = router_db['id']
-                    LOG.warn(_LW("Failed to set gateway info for router being "
-                                 "created:%s - removing router"), router_id)
-                    self.delete_router(context, router_id)
-                    LOG.info(_LI("Create router failed while setting external "
-                                 "gateway. Router:%s has been removed from "
-                                 "DB and backend"),
-                             router_id)
-        return self._make_router_dict(router_db)
-
-    def _update_lrouter(self, context, router_id, name, nexthop, routes=None):
-        """Update name/nexthop/routes of the mapped NSX logical router.
-
-        :returns: the value returned by the backend call; callers use it
-            as the previous set of explicit routes for rollback purposes
-        """
-        nsx_router_id = nsx_utils.get_nsx_router_id(
-            context.session, self.cluster, router_id)
-        return routerlib.update_lrouter(
-            self.cluster, nsx_router_id, name,
-            nexthop, routes=routes)
-
-    def _update_lrouter_routes(self, context, router_id, routes):
-        """Replace the explicit static routes on the mapped NSX router."""
-        nsx_router_id = nsx_utils.get_nsx_router_id(
-            context.session, self.cluster, router_id)
-        routerlib.update_explicit_routes_lrouter(
-            self.cluster, nsx_router_id, routes)
-
-    def update_router(self, context, router_id, router):
-        """Update a neutron router together with its NSX logical router.
-
-        The NSX backend is updated first; if the subsequent neutron DB
-        update fails with a route-related error, the previous explicit
-        routes are restored on the backend.
-        """
-        # Either nexthop is updated or should be kept as it was before
-        r = router['router']
-        nexthop = None
-        if 'external_gateway_info' in r and r.get('external_gateway_info'):
-            gw_info = r['external_gateway_info']
-            # The following DB read will be performed again when updating
-            # gateway info. This is not great, but still better than
-            # creating NSX router here and updating it later
-            network_id = (gw_info.get('network_id', None) if gw_info
-                          else None)
-            if network_id:
-                ext_net = self._get_network(context, network_id)
-                if not ext_net.external:
-                    msg = (_("Network '%s' is not a valid external "
-                             "network") % network_id)
-                    raise n_exc.BadRequest(resource='router', msg=msg)
-                if ext_net.subnets:
-                    ext_subnet = ext_net.subnets[0]
-                    nexthop = ext_subnet.gateway_ip
-        try:
-            # Reject a default route in 'routes': the default gateway is
-            # managed exclusively through external_gateway_info
-            for route in r.get('routes', []):
-                if route['destination'] == '0.0.0.0/0':
-                    msg = _("'routes' cannot contain route '0.0.0.0/0', "
-                            "this must be updated through the default "
-                            "gateway attribute")
-                    raise n_exc.BadRequest(resource='router', msg=msg)
-            previous_routes = self._update_lrouter(
-                context, router_id, r.get('name'),
-                nexthop, routes=r.get('routes'))
-        # NOTE(salv-orlando): The exception handling below is not correct, but
-        # unfortunately nsxlib raises a neutron notfound exception when an
-        # object is not found in the underlying backend
-        except n_exc.NotFound:
-            # Put the router in ERROR status
-            with context.session.begin(subtransactions=True):
-                router_db = self._get_router(context, router_id)
-                router_db['status'] = constants.NET_STATUS_ERROR
-            raise nsx_exc.NsxPluginException(
-                err_msg=_("Logical router %s not found "
-                          "on NSX Platform") % router_id)
-        except api_exc.NsxApiException:
-            raise nsx_exc.NsxPluginException(
-                err_msg=_("Unable to update logical router on NSX Platform"))
-        except nsx_exc.InvalidVersion:
-            msg = _("Request cannot contain 'routes' with the NSX "
-                    "platform currently in execution. Please, try "
-                    "without specifying the static routes.")
-            LOG.exception(msg)
-            raise n_exc.BadRequest(resource='router', msg=msg)
-        try:
-            return super(NsxPluginV2, self).update_router(context,
-                                                          router_id, router)
-        except (extraroute.InvalidRoutes,
-                extraroute.RouterInterfaceInUseByRoute,
-                extraroute.RoutesExhausted):
-            with excutils.save_and_reraise_exception():
-                # revert changes made to NSX
-                self._update_lrouter_routes(
-                    context, router_id, previous_routes)
-
-    def _delete_lrouter(self, context, router_id, nsx_router_id):
-        """Delete the logical router from the NSX backend."""
-        # The neutron router id (router_id) is ignored in this routine,
-        # but used in plugins deriving from this one
-        routerlib.delete_lrouter(self.cluster, nsx_router_id)
-
-    def delete_router(self, context, router_id):
-        """Delete a neutron router and its NSX logical router.
-
-        In-use checks (floating IPs, router interface ports) run inside
-        a DB transaction; the backend router is then removed (not fatal
-        if already gone), the NSX mapping is cleared, and finally the
-        neutron DB record is deleted.
-        """
-        with context.session.begin(subtransactions=True):
-            # TODO(salv-orlando): This call should have no effect on delete
-            # router, but if it does, it should not happen within a
-            # transaction, and it should be restored on rollback
-            self.handle_router_metadata_access(
-                context, router_id, interface=None)
-            # Pre-delete checks
-            # NOTE(salv-orlando): These checks will be repeated anyway when
-            # calling the superclass. This is wasteful, but is the simplest
-            # way of ensuring a consistent removal of the router both in
-            # the neutron Database and in the NSX backend.
-            # TODO(salv-orlando): split pre-delete checks and actual
-            # deletion in superclass.
-
-            # Ensure that the router is not used
-            fips = self.get_floatingips_count(
-                context.elevated(), filters={'router_id': [router_id]})
-            if fips:
-                raise l3.RouterInUse(router_id=router_id)
-
-            device_filter = {'device_id': [router_id],
-                             'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
-            ports = self._core_plugin.get_ports_count(context.elevated(),
-                                                      filters=device_filter)
-            if ports:
-                raise l3.RouterInUse(router_id=router_id)
-
-        nsx_router_id = nsx_utils.get_nsx_router_id(
-            context.session, self.cluster, router_id)
-        # It is safe to remove the router from the database, so remove it
-        # from the backend
-        try:
-            self._delete_lrouter(context, router_id, nsx_router_id)
-        except n_exc.NotFound:
-            # This is not a fatal error, but needs to be logged
-            LOG.warning(_LW("Logical router '%s' not found "
-                            "on NSX Platform"), router_id)
-        except api_exc.NsxApiException:
-            raise nsx_exc.NsxPluginException(
-                err_msg=(_("Unable to delete logical router '%s' "
-                           "on NSX Platform") % nsx_router_id))
-        # Remove the NSX mapping first in order to ensure a mapping to
-        # a non-existent NSX router is not left in the DB in case of
-        # failure while removing the router from the neutron DB
-        try:
-            nsx_db.delete_neutron_nsx_router_mapping(
-                context.session, router_id)
-        except db_exc.DBError as d_exc:
-            # Do not make this error fatal
-            LOG.warn(_LW("Unable to remove NSX mapping for Neutron router "
-                         "%(router_id)s because of the following exception:"
-                         "%(d_exc)s"), {'router_id': router_id,
-                                        'd_exc': str(d_exc)})
-        # Perform the actual delete on the Neutron DB
-        super(NsxPluginV2, self).delete_router(context, router_id)
-
-    def _add_subnet_snat_rule(self, context, router, subnet):
-        """Create a backend SNAT rule mapping the subnet to the GW IP.
-
-        No-op when the router has no gateway port or SNAT is disabled.
-        """
-        gw_port = router.gw_port
-        if gw_port and router.enable_snat:
-            # There is a chance gw_port might have multiple IPs
-            # In that case we will consider only the first one
-            if gw_port.get('fixed_ips'):
-                snat_ip = gw_port['fixed_ips'][0]['ip_address']
-                cidr_prefix = int(subnet['cidr'].split('/')[1])
-                nsx_router_id = nsx_utils.get_nsx_router_id(
-                    context.session, self.cluster, router['id'])
-                # order is offset by the prefix length — presumably so
-                # more specific subnets take precedence; TODO confirm
-                # NSX rule-ordering semantics
-                routerlib.create_lrouter_snat_rule(
-                    self.cluster, nsx_router_id, snat_ip, snat_ip,
-                    order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix,
-                    match_criteria={'source_ip_addresses': subnet['cidr']})
-
-    def _delete_subnet_snat_rule(self, context, router, subnet):
-        """Delete the backend SNAT rule matching the subnet CIDR, if any."""
-        # Remove SNAT rule if external gateway is configured
-        if router.gw_port:
-            nsx_router_id = nsx_utils.get_nsx_router_id(
-                context.session, self.cluster, router['id'])
-            # raise_on_len_mismatch=False: tolerate the rule being absent
-            routerlib.delete_nat_rules_by_match(
-                self.cluster, nsx_router_id, "SourceNatRule",
-                max_num_expected=1, min_num_expected=1,
-                raise_on_len_mismatch=False,
-                source_ip_addresses=subnet['cidr'])
-
-    def add_router_interface(self, context, router_id, interface_info):
-        """Add an interface to a router, wiring it up on the NSX backend.
-
-        When the interface is specified by port_id, the existing
-        lswitch port attachment is replaced by a patch attachment to a
-        newly created logical router port. A SNAT rule for the subnet
-        is added if the router has an external gateway, plus a no-SNAT
-        rule for traffic destined to the subnet itself.
-        """
-        # When adding interface by port_id we need to create the
-        # peer port on the nsx logical router in this routine
-        port_id = interface_info.get('port_id')
-        router_iface_info = super(NsxPluginV2, self).add_router_interface(
-            context, router_id, interface_info)
-        # router_iface_info will always have a subnet_id attribute
-        subnet_id = router_iface_info['subnet_id']
-        nsx_router_id = nsx_utils.get_nsx_router_id(
-            context.session, self.cluster, router_id)
-        if port_id:
-            port_data = self.get_port(context, port_id)
-            # If security groups are present we need to remove them as
-            # this is a router port and disable port security.
-            if port_data['security_groups']:
-                self.update_port(context, port_id,
-                                 {'port': {'security_groups': [],
-                                           psec.PORTSECURITY: False}})
-            nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
-                context.session, self.cluster, port_id)
-            # Unplug current attachment from lswitch port
-            switchlib.plug_vif_interface(self.cluster, nsx_switch_id,
-                                         nsx_port_id, "NoAttachment")
-            # Create logical router port and plug patch attachment
-            self._create_and_attach_router_port(
-                self.cluster, context, nsx_router_id, port_data,
-                "PatchAttachment", nsx_port_id, subnet_ids=[subnet_id])
-        subnet = self._get_subnet(context, subnet_id)
-        # If there is an external gateway we need to configure the SNAT rule.
-        # Fetch router from DB
-        router = self._get_router(context, router_id)
-        self._add_subnet_snat_rule(context, router, subnet)
-        routerlib.create_lrouter_nosnat_rule(
-            self.cluster, nsx_router_id,
-            order=NSX_NOSNAT_RULES_ORDER,
-            match_criteria={'destination_ip_addresses': subnet['cidr']})
-
-        # Ensure the NSX logical router has a connection to a 'metadata access'
-        # network (with a proxy listening on its DHCP port), by creating it
-        # if needed.
-        self.handle_router_metadata_access(
-            context, router_id, interface=router_iface_info)
-        LOG.debug("Add_router_interface completed for subnet:%(subnet_id)s "
-                  "and router:%(router_id)s",
-                  {'subnet_id': subnet_id, 'router_id': router_id})
-        return router_iface_info
-
-    def remove_router_interface(self, context, router_id, interface_info):
-        """Remove a router interface and clean up its NSX NAT rules.
-
-        Duplicates part of the base class logic in order to resolve the
-        subnet (needed for SNAT rule removal) before the port is gone.
-        """
-        # The code below is duplicated from base class, but comes handy
-        # as we need to retrieve the router port id before removing the port
-        subnet = None
-        subnet_id = None
-        if 'port_id' in interface_info:
-            port_id = interface_info['port_id']
-            # find subnet_id - it is need for removing the SNAT rule
-            port = self._get_port(context, port_id)
-            if port.get('fixed_ips'):
-                subnet_id = port['fixed_ips'][0]['subnet_id']
-            if not (port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF and
-                    port['device_id'] == router_id):
-                raise l3.RouterInterfaceNotFound(router_id=router_id,
-                                                 port_id=port_id)
-        elif 'subnet_id' in interface_info:
-            subnet_id = interface_info['subnet_id']
-            subnet = self._get_subnet(context, subnet_id)
-            rport_qry = context.session.query(models_v2.Port)
-            ports = rport_qry.filter_by(
-                device_id=router_id,
-                device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
-                network_id=subnet['network_id'])
-            for p in ports:
-                if p['fixed_ips'][0]['subnet_id'] == subnet_id:
-                    port_id = p['id']
-                    break
-            else:
-                raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
-                                                          subnet_id=subnet_id)
-        # Finally remove the data from the Neutron DB
-        # This will also destroy the port on the logical switch
-        info = super(NsxPluginV2, self).remove_router_interface(
-            context, router_id, interface_info)
-
-        try:
-            # Ensure the connection to the 'metadata access network'
-            # is removed (with the network) if this is the last subnet
-            # on the router
-            self.handle_router_metadata_access(
-                context, router_id, interface=info)
-            if not subnet:
-                subnet = self._get_subnet(context, subnet_id)
-            router = self._get_router(context, router_id)
-            # If router is enabled_snat = False there are no snat rules to
-            # delete.
-            if router.enable_snat:
-                self._delete_subnet_snat_rule(context, router, subnet)
-            # Relax the minimum expected number as the nosnat rules
-            # do not exist in 2.x deployments
-            nsx_router_id = nsx_utils.get_nsx_router_id(
-                context.session, self.cluster, router_id)
-            routerlib.delete_nat_rules_by_match(
-                self.cluster, nsx_router_id, "NoSourceNatRule",
-                max_num_expected=1, min_num_expected=0,
-                raise_on_len_mismatch=False,
-                destination_ip_addresses=subnet['cidr'])
-        except n_exc.NotFound:
-            LOG.error(_LE("Logical router resource %s not found "
-                          "on NSX platform"), router_id)
-        except api_exc.NsxApiException:
-            # NOTE(review): the concatenated message below is missing a
-            # space between "router" and "on"
-            raise nsx_exc.NsxPluginException(
-                err_msg=(_("Unable to update logical router"
-                           "on NSX Platform")))
-        return info
-
-    def _retrieve_and_delete_nat_rules(self, context, floating_ip_address,
-                                       internal_ip, nsx_router_id,
-                                       min_num_rules_expected=0):
-        """Finds and removes NAT rules from a NSX router.
-
-        Removes the DNAT rule matching the floating IP and the SNAT
-        rules matching the internal IP. A rule-count mismatch reported
-        by the backend is logged but not surfaced to the caller.
-
-        :param floating_ip_address: floating IP whose DNAT rule must go
-        :param internal_ip: fixed IP whose SNAT rules must go
-        :param nsx_router_id: backend identifier of the logical router
-        :param min_num_rules_expected: minimum number of rules expected
-            for each match (0 tolerates already-removed rules)
-        """
-        # NOTE(salv-orlando): The context parameter is ignored in this method
-        # but used by derived classes
-        try:
-            # Remove DNAT rule for the floating IP
-            routerlib.delete_nat_rules_by_match(
-                self.cluster, nsx_router_id, "DestinationNatRule",
-                max_num_expected=1,
-                min_num_expected=min_num_rules_expected,
-                destination_ip_addresses=floating_ip_address)
-
-            # Remove SNAT rules for the floating IP
-            routerlib.delete_nat_rules_by_match(
-                self.cluster, nsx_router_id, "SourceNatRule",
-                max_num_expected=1,
-                min_num_expected=min_num_rules_expected,
-                source_ip_addresses=internal_ip)
-            routerlib.delete_nat_rules_by_match(
-                self.cluster, nsx_router_id, "SourceNatRule",
-                max_num_expected=1,
-                min_num_expected=min_num_rules_expected,
-                destination_ip_addresses=internal_ip)
-
-        except api_exc.NsxApiException:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("An error occurred while removing NAT rules "
-                                  "on the NSX platform for floating ip:%s"),
-                              floating_ip_address)
-        except nsx_exc.NatRuleMismatch:
-            # Do not surface to the user
-            LOG.warning(_LW("An incorrect number of matching NAT rules "
-                            "was found on the NSX platform"))
-
-    def _remove_floatingip_address(self, context, fip_db):
-        """Remove the floating IP address from the NSX gateway port.
-
-        The IPs currently configured on the neutron floating port are
-        removed from the logical router's external gateway port.
-        """
-        # Remove floating IP address from logical router port
-        # Fetch logical port of router's external gateway
-        router_id = fip_db.router_id
-        nsx_router_id = nsx_utils.get_nsx_router_id(
-            context.session, self.cluster, router_id)
-        nsx_gw_port_id = routerlib.find_router_gw_port(
-            context, self.cluster, nsx_router_id)['uuid']
-        ext_neutron_port_db = self._get_port(context.elevated(),
-                                             fip_db.floating_port_id)
-        nsx_floating_ips = self._build_ip_address_list(
-            context.elevated(), ext_neutron_port_db['fixed_ips'])
-        routerlib.update_lrouter_port_ips(self.cluster,
-                                          nsx_router_id,
-                                          nsx_gw_port_id,
-                                          ips_to_add=[],
-                                          ips_to_remove=nsx_floating_ips)
-
-    def _get_fip_assoc_data(self, context, fip, floatingip_db):
-        """Validate and resolve floating IP association data.
-
-        :returns: (port_id, internal_ip, router_id); all None when the
-            request does not carry a port_id
-        :raises: n_exc.BadRequest when fixed_ip_address is given without
-            a port_id
-        :raises: l3.FloatingIPPortAlreadyAssociated when the target
-            (port, network, fixed ip) is already bound to a floating IP
-        """
-        if fip.get('fixed_ip_address') and not fip.get('port_id'):
-            msg = _("fixed_ip_address cannot be specified without a port_id")
-            raise n_exc.BadRequest(resource='floatingip', msg=msg)
-        port_id = internal_ip = router_id = None
-        if fip.get('port_id'):
-            fip_qry = context.session.query(l3_db.FloatingIP)
-            port_id, internal_ip, router_id = self.get_assoc_data(
-                context,
-                fip,
-                floatingip_db['floating_network_id'])
-            try:
-                # If this query succeeds, an association already exists
-                # and the request must be rejected
-                fip_qry.filter_by(
-                    fixed_port_id=fip['port_id'],
-                    floating_network_id=floatingip_db['floating_network_id'],
-                    fixed_ip_address=internal_ip).one()
-                raise l3.FloatingIPPortAlreadyAssociated(
-                    port_id=fip['port_id'],
-                    fip_id=floatingip_db['id'],
-                    floating_ip_address=floatingip_db['floating_ip_address'],
-                    fixed_ip=floatingip_db['fixed_ip_address'],
-                    net_id=floatingip_db['floating_network_id'])
-            except sa_exc.NoResultFound:
-                # No existing association: this is the expected path
-                pass
-        return (port_id, internal_ip, router_id)
-
-    def _floatingip_status(self, floatingip_db, associated):
-        if (associated and
-            floatingip_db['status'] != constants.FLOATINGIP_STATUS_ACTIVE):
-            return constants.FLOATINGIP_STATUS_ACTIVE
-        elif (not associated and
-              floatingip_db['status'] != constants.FLOATINGIP_STATUS_DOWN):
-            return constants.FLOATINGIP_STATUS_DOWN
-        # in any case ensure the status is not reset by this method!
-        return floatingip_db['status']
-
-    def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
-        """Update floating IP association data.
-
-        Overrides method from base class.
-        The method is augmented for creating NAT rules in the process.
-        """
-        # Store router currently serving the floating IP
-        old_router_id = floatingip_db.router_id
-        port_id, internal_ip, router_id = self._get_fip_assoc_data(
-            context, fip, floatingip_db)
-        floating_ip = floatingip_db['floating_ip_address']
-        # If there's no association router_id will be None
-        if router_id:
-            nsx_router_id = nsx_utils.get_nsx_router_id(
-                context.session, self.cluster, router_id)
-            self._retrieve_and_delete_nat_rules(
-                context, floating_ip, internal_ip, nsx_router_id)
-            # Fetch logical port of router's external gateway
-        # Fetch logical port of router's external gateway
-        nsx_floating_ips = self._build_ip_address_list(
-            context.elevated(), external_port['fixed_ips'])
-        floating_ip = floatingip_db['floating_ip_address']
-        # Retrieve and delete existing NAT rules, if any
-        if old_router_id:
-            nsx_old_router_id = nsx_utils.get_nsx_router_id(
-                context.session, self.cluster, old_router_id)
-            # Retrieve the current internal ip
-            _p, _s, old_internal_ip = self._internal_fip_assoc_data(
-                context, {'id': floatingip_db.id,
-                          'port_id': floatingip_db.fixed_port_id,
-                          'fixed_ip_address': floatingip_db.fixed_ip_address,
-                          'tenant_id': floatingip_db.tenant_id})
-            nsx_gw_port_id = routerlib.find_router_gw_port(
-                context, self.cluster, nsx_old_router_id)['uuid']
-            self._retrieve_and_delete_nat_rules(
-                context, floating_ip, old_internal_ip, nsx_old_router_id)
-            routerlib.update_lrouter_port_ips(
-                self.cluster, nsx_old_router_id, nsx_gw_port_id,
-                ips_to_add=[], ips_to_remove=nsx_floating_ips)
-
-        if router_id:
-            nsx_gw_port_id = routerlib.find_router_gw_port(
-                context, self.cluster, nsx_router_id)['uuid']
-            # Re-create NAT rules only if a port id is specified
-            if fip.get('port_id'):
-                try:
-                    # Setup DNAT rules for the floating IP
-                    routerlib.create_lrouter_dnat_rule(
-                        self.cluster, nsx_router_id, internal_ip,
-                        order=NSX_FLOATINGIP_NAT_RULES_ORDER,
-                        match_criteria={'destination_ip_addresses':
-                                        floating_ip})
-                    # Setup SNAT rules for the floating IP
-                    # Create a SNAT rule for enabling connectivity to the
-                    # floating IP from the same network as the internal port
-                    # Find subnet id for internal_ip from fixed_ips
-                    internal_port = self._get_port(context, port_id)
-                    # Checks not needed on statements below since otherwise
-                    # _internal_fip_assoc_data would have raised
-                    subnet_ids = [ip['subnet_id'] for ip in
-                                  internal_port['fixed_ips'] if
-                                  ip['ip_address'] == internal_ip]
-                    internal_subnet_cidr = self._build_ip_address_list(
-                        context, internal_port['fixed_ips'],
-                        subnet_ids=subnet_ids)[0]
-                    routerlib.create_lrouter_snat_rule(
-                        self.cluster, nsx_router_id, floating_ip, floating_ip,
-                        order=NSX_NOSNAT_RULES_ORDER - 1,
-                        match_criteria={'source_ip_addresses':
-                                        internal_subnet_cidr,
-                                        'destination_ip_addresses':
-                                        internal_ip})
-                    # setup snat rule such that src ip of a IP packet when
-                    # using floating is the floating ip itself.
-                    routerlib.create_lrouter_snat_rule(
-                        self.cluster, nsx_router_id, floating_ip, floating_ip,
-                        order=NSX_FLOATINGIP_NAT_RULES_ORDER,
-                        match_criteria={'source_ip_addresses': internal_ip})
-
-                    # Add Floating IP address to router_port
-                    routerlib.update_lrouter_port_ips(
-                        self.cluster, nsx_router_id, nsx_gw_port_id,
-                        ips_to_add=nsx_floating_ips, ips_to_remove=[])
-                except api_exc.NsxApiException:
-                    LOG.exception(_LE("An error occurred while creating NAT "
-                                      "rules on the NSX platform for floating "
-                                      "ip:%(floating_ip)s mapped to "
-                                      "internal ip:%(internal_ip)s"),
-                                  {'floating_ip': floating_ip,
-                                   'internal_ip': internal_ip})
-                    msg = _("Failed to update NAT rules for floatingip update")
-                    raise nsx_exc.NsxPluginException(err_msg=msg)
-        # Update also floating ip status (no need to call base class method)
-        floatingip_db.update(
-            {'fixed_ip_address': internal_ip,
-             'fixed_port_id': port_id,
-             'router_id': router_id,
-             'status': self._floatingip_status(floatingip_db, router_id)})
-
-    def delete_floatingip(self, context, id):
-        fip_db = self._get_floatingip(context, id)
-        # Check whether the floating ip is associated or not
-        if fip_db.fixed_port_id:
-            nsx_router_id = nsx_utils.get_nsx_router_id(
-                context.session, self.cluster, fip_db.router_id)
-            self._retrieve_and_delete_nat_rules(context,
-                                                fip_db.floating_ip_address,
-                                                fip_db.fixed_ip_address,
-                                                nsx_router_id,
-                                                min_num_rules_expected=1)
-            # Remove floating IP address from logical router port
-            self._remove_floatingip_address(context, fip_db)
-        return super(NsxPluginV2, self).delete_floatingip(context, id)
-
-    def disassociate_floatingips(self, context, port_id):
-        try:
-            fip_qry = context.session.query(l3_db.FloatingIP)
-            fip_dbs = fip_qry.filter_by(fixed_port_id=port_id)
-
-            for fip_db in fip_dbs:
-                nsx_router_id = nsx_utils.get_nsx_router_id(
-                    context.session, self.cluster, fip_db.router_id)
-                self._retrieve_and_delete_nat_rules(context,
-                                                    fip_db.floating_ip_address,
-                                                    fip_db.fixed_ip_address,
-                                                    nsx_router_id,
-                                                    min_num_rules_expected=1)
-                self._remove_floatingip_address(context, fip_db)
-        except sa_exc.NoResultFound:
-            LOG.debug("The port '%s' is not associated with floating IPs",
-                      port_id)
-        except n_exc.NotFound:
-            LOG.warning(_LW("Nat rules not found in nsx for port: %s"), id)
-
-        # NOTE(ihrachys): L3 agent notifications don't make sense for
-        # NSX VMWare plugin since there is no L3 agent in such setup, so
-        # disabling them here.
-        super(NsxPluginV2, self).disassociate_floatingips(
-            context, port_id, do_notify=False)
-
-    def create_network_gateway(self, context, network_gateway):
-        """Create a layer-2 network gateway.
-
-        Create the gateway service on NSX platform and corresponding data
-        structures in Neutron database.
-        """
-        gw_data = network_gateway[networkgw.GATEWAY_RESOURCE_NAME]
-        tenant_id = self._get_tenant_id_for_create(context, gw_data)
-        # Ensure the default gateway in the config file is in sync with the db
-        self._ensure_default_network_gateway()
-        # Validate provided gateway device list
-        self._validate_device_list(context, tenant_id, gw_data)
-        devices = gw_data['devices']
-        # Populate default physical network where not specified
-        for device in devices:
-            if not device.get('interface_name'):
-                device['interface_name'] = self.cluster.default_interface_name
-        try:
-            # Replace Neutron device identifiers with NSX identifiers
-            dev_map = dict((dev['id'], dev['interface_name']) for
-                           dev in devices)
-            nsx_devices = []
-            for db_device in self._query_gateway_devices(
-                context, filters={'id': [device['id'] for device in devices]}):
-                nsx_devices.append(
-                    {'id': db_device['nsx_id'],
-                     'interface_name': dev_map[db_device['id']]})
-            nsx_res = l2gwlib.create_l2_gw_service(
-                self.cluster, tenant_id, gw_data['name'], nsx_devices)
-            nsx_uuid = nsx_res.get('uuid')
-        except api_exc.Conflict:
-            raise nsx_exc.L2GatewayAlreadyInUse(gateway=gw_data['name'])
-        except api_exc.NsxApiException:
-            err_msg = _("Unable to create l2_gw_service for: %s") % gw_data
-            LOG.exception(err_msg)
-            raise nsx_exc.NsxPluginException(err_msg=err_msg)
-        gw_data['id'] = nsx_uuid
-        return super(NsxPluginV2, self).create_network_gateway(
-            context, network_gateway, validate_device_list=False)
-
-    def delete_network_gateway(self, context, gateway_id):
-        """Remove a layer-2 network gateway.
-
-        Remove the gateway service from NSX platform and corresponding data
-        structures in Neutron database.
-        """
-        # Ensure the default gateway in the config file is in sync with the db
-        self._ensure_default_network_gateway()
-        with context.session.begin(subtransactions=True):
-            try:
-                super(NsxPluginV2, self).delete_network_gateway(
-                    context, gateway_id)
-                l2gwlib.delete_l2_gw_service(self.cluster, gateway_id)
-            except api_exc.ResourceNotFound:
-                # Do not cause a 500 to be returned to the user if
-                # the corresponding NSX resource does not exist
-                LOG.exception(_LE("Unable to remove gateway service from "
-                                  "NSX plaform - the resource was not found"))
-
-    def get_network_gateway(self, context, id, fields=None):
-        # Ensure the default gateway in the config file is in sync with the db
-        self._ensure_default_network_gateway()
-        return super(NsxPluginV2, self).get_network_gateway(context,
-                                                            id, fields)
-
-    def get_network_gateways(self, context, filters=None, fields=None,
-                             sorts=None, limit=None, marker=None,
-                             page_reverse=False):
-        # Ensure the default gateway in the config file is in sync with the db
-        self._ensure_default_network_gateway()
-        # Ensure the tenant_id attribute is populated on returned gateways
-        return super(NsxPluginV2, self).get_network_gateways(
-            context, filters, fields, sorts, limit, marker, page_reverse)
-
-    def update_network_gateway(self, context, id, network_gateway):
-        # Ensure the default gateway in the config file is in sync with the db
-        self._ensure_default_network_gateway()
-        # Update gateway on backend when there's a name change
-        name = network_gateway[networkgw.GATEWAY_RESOURCE_NAME].get('name')
-        if name:
-            try:
-                l2gwlib.update_l2_gw_service(self.cluster, id, name)
-            except api_exc.NsxApiException:
-                # Consider backend failures as non-fatal, but still warn
-                # because this might indicate something dodgy is going on
-                LOG.warn(_LW("Unable to update name on NSX backend "
-                             "for network gateway: %s"), id)
-        return super(NsxPluginV2, self).update_network_gateway(
-            context, id, network_gateway)
-
-    def connect_network(self, context, network_gateway_id,
-                        network_mapping_info):
-        # Ensure the default gateway in the config file is in sync with the db
-        self._ensure_default_network_gateway()
-        try:
-            return super(NsxPluginV2, self).connect_network(
-                context, network_gateway_id, network_mapping_info)
-        except api_exc.Conflict:
-            raise nsx_exc.L2GatewayAlreadyInUse(gateway=network_gateway_id)
-
-    def disconnect_network(self, context, network_gateway_id,
-                           network_mapping_info):
-        # Ensure the default gateway in the config file is in sync with the db
-        self._ensure_default_network_gateway()
-        return super(NsxPluginV2, self).disconnect_network(
-            context, network_gateway_id, network_mapping_info)
-
-    def _get_nsx_device_id(self, context, device_id):
-        return self._get_gateway_device(context, device_id)['nsx_id']
-
-    def _rollback_gw_device(self, context, device_id, gw_data=None,
-                            new_status=None, is_create=False):
-        LOG.error(_LE("Rolling back database changes for gateway device %s "
-                      "because of an error in the NSX backend"), device_id)
-        with context.session.begin(subtransactions=True):
-            query = self._model_query(
-                context, nsx_models.NetworkGatewayDevice).filter(
-                    nsx_models.NetworkGatewayDevice.id == device_id)
-            if is_create:
-                query.delete(synchronize_session=False)
-            else:
-                super(NsxPluginV2, self).update_gateway_device(
-                    context, device_id,
-                    {networkgw.DEVICE_RESOURCE_NAME: gw_data})
-                if new_status:
-                    query.update({'status': new_status},
-                                 synchronize_session=False)
-
-    # TODO(salv-orlando): Handlers for Gateway device operations should be
-    # moved into the appropriate nsx_handlers package once the code for the
-    # blueprint nsx-async-backend-communication merges
-    def create_gateway_device_handler(self, context, gateway_device,
-                                      client_certificate):
-        neutron_id = gateway_device['id']
-        try:
-            nsx_res = l2gwlib.create_gateway_device(
-                self.cluster,
-                gateway_device['tenant_id'],
-                gateway_device['name'],
-                neutron_id,
-                self.cluster.default_tz_uuid,
-                gateway_device['connector_type'],
-                gateway_device['connector_ip'],
-                client_certificate)
-
-            # Fetch status (it needs another NSX API call)
-            device_status = nsx_utils.get_nsx_device_status(self.cluster,
-                                                            nsx_res['uuid'])
-
-            # set NSX GW device in neutron database and update status
-            with context.session.begin(subtransactions=True):
-                query = self._model_query(
-                    context, nsx_models.NetworkGatewayDevice).filter(
-                        nsx_models.NetworkGatewayDevice.id == neutron_id)
-                query.update({'status': device_status,
-                              'nsx_id': nsx_res['uuid']},
-                             synchronize_session=False)
-            LOG.debug("Neutron gateway device: %(neutron_id)s; "
-                      "NSX transport node identifier: %(nsx_id)s; "
-                      "Operational status: %(status)s.",
-                      {'neutron_id': neutron_id,
-                       'nsx_id': nsx_res['uuid'],
-                       'status': device_status})
-            return device_status
-        except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException):
-            with excutils.save_and_reraise_exception():
-                self._rollback_gw_device(context, neutron_id, is_create=True)
-
-    def update_gateway_device_handler(self, context, gateway_device,
-                                      old_gateway_device_data,
-                                      client_certificate):
-        nsx_id = gateway_device['nsx_id']
-        neutron_id = gateway_device['id']
-        try:
-            l2gwlib.update_gateway_device(
-                self.cluster,
-                nsx_id,
-                gateway_device['tenant_id'],
-                gateway_device['name'],
-                neutron_id,
-                self.cluster.default_tz_uuid,
-                gateway_device['connector_type'],
-                gateway_device['connector_ip'],
-                client_certificate)
-
-            # Fetch status (it needs another NSX API call)
-            device_status = nsx_utils.get_nsx_device_status(self.cluster,
-                                                            nsx_id)
-            # update status
-            with context.session.begin(subtransactions=True):
-                query = self._model_query(
-                    context, nsx_models.NetworkGatewayDevice).filter(
-                        nsx_models.NetworkGatewayDevice.id == neutron_id)
-                query.update({'status': device_status},
-                             synchronize_session=False)
-            LOG.debug("Neutron gateway device: %(neutron_id)s; "
-                      "NSX transport node identifier: %(nsx_id)s; "
-                      "Operational status: %(status)s.",
-                      {'neutron_id': neutron_id,
-                       'nsx_id': nsx_id,
-                       'status': device_status})
-            return device_status
-        except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException):
-            with excutils.save_and_reraise_exception():
-                self._rollback_gw_device(context, neutron_id,
-                                         gw_data=old_gateway_device_data)
-        except n_exc.NotFound:
-            # The gateway device was probably deleted in the backend.
-            # The DB change should be rolled back and the status must
-            # be put in error
-            with excutils.save_and_reraise_exception():
-                self._rollback_gw_device(context, neutron_id,
-                                         gw_data=old_gateway_device_data,
-                                         new_status=networkgw_db.ERROR)
-
-    def get_gateway_device(self, context, device_id, fields=None):
-        # Get device from database
-        gw_device = super(NsxPluginV2, self).get_gateway_device(
-            context, device_id, fields, include_nsx_id=True)
-        # Fetch status from NSX
-        nsx_id = gw_device['nsx_id']
-        device_status = nsx_utils.get_nsx_device_status(self.cluster, nsx_id)
-        # TODO(salv-orlando): Asynchronous sync for gateway device status
-        # Update status in database
-        with context.session.begin(subtransactions=True):
-            query = self._model_query(
-                context, nsx_models.NetworkGatewayDevice).filter(
-                    nsx_models.NetworkGatewayDevice.id == device_id)
-            query.update({'status': device_status},
-                         synchronize_session=False)
-        gw_device['status'] = device_status
-        return gw_device
-
-    def get_gateway_devices(self, context, filters=None, fields=None,
-                            sorts=None, limit=None, marker=None,
-                            page_reverse=False):
-        # Get devices from database
-        devices = super(NsxPluginV2, self).get_gateway_devices(
-            context, filters, fields, include_nsx_id=True)
-        # Fetch operational status from NSX, filter by tenant tag
-        # TODO(salv-orlando): Asynchronous sync for gateway device status
-        tenant_id = context.tenant_id if not context.is_admin else None
-        nsx_statuses = nsx_utils.get_nsx_device_statuses(self.cluster,
-                                                         tenant_id)
-        # Update statuses in database
-        with context.session.begin(subtransactions=True):
-            for device in devices:
-                new_status = nsx_statuses.get(device['nsx_id'])
-                if new_status:
-                    device['status'] = new_status
-        return devices
-
-    def create_gateway_device(self, context, gateway_device):
-        # NOTE(salv-orlando): client-certificate will not be stored
-        # in the database
-        device_data = gateway_device[networkgw.DEVICE_RESOURCE_NAME]
-        client_certificate = device_data.pop('client_certificate')
-        gw_device = super(NsxPluginV2, self).create_gateway_device(
-            context, gateway_device)
-        # DB operation was successful, perform NSX operation
-        gw_device['status'] = self.create_gateway_device_handler(
-            context, gw_device, client_certificate)
-        return gw_device
-
-    def update_gateway_device(self, context, device_id,
-                              gateway_device):
-        # NOTE(salv-orlando): client-certificate will not be stored
-        # in the database
-        client_certificate = (
-            gateway_device[networkgw.DEVICE_RESOURCE_NAME].pop(
-                'client_certificate', None))
-        # Retrieve current state from DB in case a rollback should be needed
-        old_gw_device_data = super(NsxPluginV2, self).get_gateway_device(
-            context, device_id, include_nsx_id=True)
-        gw_device = super(NsxPluginV2, self).update_gateway_device(
-            context, device_id, gateway_device, include_nsx_id=True)
-        # DB operation was successful, perform NSX operation
-        gw_device['status'] = self.update_gateway_device_handler(
-            context, gw_device, old_gw_device_data, client_certificate)
-        gw_device.pop('nsx_id')
-        return gw_device
-
-    def delete_gateway_device(self, context, device_id):
-        nsx_device_id = self._get_nsx_device_id(context, device_id)
-        super(NsxPluginV2, self).delete_gateway_device(
-            context, device_id)
-        # DB operation was successful, perform NSX operation
-        # TODO(salv-orlando): State consistency with neutron DB
-        # should be ensured even in case of backend failures
-        try:
-            l2gwlib.delete_gateway_device(self.cluster, nsx_device_id)
-        except n_exc.NotFound:
-            LOG.warn(_LW("Removal of gateway device: %(neutron_id)s failed on "
-                         "NSX backend (NSX id:%(nsx_id)s) because the NSX "
-                         "resource was not found"),
-                     {'neutron_id': device_id, 'nsx_id': nsx_device_id})
-        except api_exc.NsxApiException:
-            with excutils.save_and_reraise_exception():
-                # In this case a 500 should be returned
-                LOG.exception(_LE("Removal of gateway device: %(neutron_id)s "
-                                  "failed on NSX backend (NSX id:%(nsx_id)s). "
-                                  "Neutron and NSX states have diverged."),
-                              {'neutron_id': device_id,
-                               'nsx_id': nsx_device_id})
-
-    def create_security_group(self, context, security_group, default_sg=False):
-        """Create security group.
-
-        If default_sg is true that means we are creating a default security
-        group and we don't need to check if one exists.
-        """
-        s = security_group.get('security_group')
-
-        tenant_id = self._get_tenant_id_for_create(context, s)
-        if not default_sg:
-            self._ensure_default_security_group(context, tenant_id)
-        # NOTE(salv-orlando): Pre-generating Neutron ID for security group.
-        neutron_id = str(uuid.uuid4())
-        nsx_secgroup = secgrouplib.create_security_profile(
-            self.cluster, tenant_id, neutron_id, s)
-        with context.session.begin(subtransactions=True):
-            s['id'] = neutron_id
-            sec_group = super(NsxPluginV2, self).create_security_group(
-                context, security_group, default_sg)
-            context.session.flush()
-            # Add mapping between neutron and nsx identifiers
-            nsx_db.add_neutron_nsx_security_group_mapping(
-                context.session, neutron_id, nsx_secgroup['uuid'])
-        return sec_group
-
-    def update_security_group(self, context, secgroup_id, security_group):
-        secgroup = (super(NsxPluginV2, self).
-                    update_security_group(context,
-                                          secgroup_id,
-                                          security_group))
-        if ('name' in security_group['security_group'] and
-            secgroup['name'] != 'default'):
-            nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id(
-                context.session, self.cluster, secgroup_id)
-            try:
-                name = security_group['security_group']['name']
-                secgrouplib.update_security_profile(
-                    self.cluster, nsx_sec_profile_id, name)
-            except (n_exc.NotFound, api_exc.NsxApiException) as e:
-                # Reverting the DB change is not really worthwhile
-                # for a mismatch between names. It's the rules that
-                # we care about.
-                LOG.error(_LE('Error while updating security profile '
-                              '%(uuid)s with name %(name)s: %(error)s.'),
-                          {'uuid': secgroup_id, 'name': name, 'error': e})
-        return secgroup
-
-    def delete_security_group(self, context, security_group_id):
-        """Delete a security group.
-
-        :param security_group_id: security group rule to remove.
-        """
-        with context.session.begin(subtransactions=True):
-            security_group = super(NsxPluginV2, self).get_security_group(
-                context, security_group_id)
-            if not security_group:
-                raise ext_sg.SecurityGroupNotFound(id=security_group_id)
-
-            if security_group['name'] == 'default' and not context.is_admin:
-                raise ext_sg.SecurityGroupCannotRemoveDefault()
-
-            filters = {'security_group_id': [security_group['id']]}
-            if super(NsxPluginV2, self)._get_port_security_group_bindings(
-                context, filters):
-                raise ext_sg.SecurityGroupInUse(id=security_group['id'])
-            nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id(
-                context.session, self.cluster, security_group_id)
-
-            try:
-                secgrouplib.delete_security_profile(
-                    self.cluster, nsx_sec_profile_id)
-            except n_exc.NotFound:
-                # The security profile was not found on the backend
-                # do not fail in this case.
-                LOG.warning(_LW("The NSX security profile %(sec_profile_id)s, "
-                                "associated with the Neutron security group "
-                                "%(sec_group_id)s was not found on the "
-                                "backend"),
-                            {'sec_profile_id': nsx_sec_profile_id,
-                             'sec_group_id': security_group_id})
-            except api_exc.NsxApiException:
-                # Raise and fail the operation, as there is a problem which
-                # prevented the sec group from being removed from the backend
-                LOG.exception(_LE("An exception occurred while removing the "
-                                  "NSX security profile %(sec_profile_id)s, "
-                                  "associated with Netron security group "
-                                  "%(sec_group_id)s"),
-                              {'sec_profile_id': nsx_sec_profile_id,
-                               'sec_group_id': security_group_id})
-                raise nsx_exc.NsxPluginException(
-                    _("Unable to remove security group %s from backend"),
-                    security_group['id'])
-            return super(NsxPluginV2, self).delete_security_group(
-                context, security_group_id)
-
-    def _validate_security_group_rules(self, context, rules):
-        for rule in rules['security_group_rules']:
-            r = rule.get('security_group_rule')
-            port_based_proto = (self._get_ip_proto_number(r['protocol'])
-                                in securitygroups_db.IP_PROTOCOL_MAP.values())
-            if (not port_based_proto and
-                (r['port_range_min'] is not None or
-                 r['port_range_max'] is not None)):
-                msg = (_("Port values not valid for "
-                         "protocol: %s") % r['protocol'])
-                raise n_exc.BadRequest(resource='security_group_rule',
-                                       msg=msg)
-        return super(NsxPluginV2, self)._validate_security_group_rules(context,
-                                                                       rules)
-
-    def create_security_group_rule(self, context, security_group_rule):
-        """Create a single security group rule."""
-        bulk_rule = {'security_group_rules': [security_group_rule]}
-        return self.create_security_group_rule_bulk(context, bulk_rule)[0]
-
-    def create_security_group_rule_bulk(self, context, security_group_rule):
-        """Create security group rules.
-
-        :param security_group_rule: list of rules to create
-        """
-        s = security_group_rule.get('security_group_rules')
-
-        # TODO(arosen) is there anyway we could avoid having the update of
-        # the security group rules in nsx outside of this transaction?
-        with context.session.begin(subtransactions=True):
-            security_group_id = self._validate_security_group_rules(
-                context, security_group_rule)
-            # Check to make sure security group exists
-            security_group = super(NsxPluginV2, self).get_security_group(
-                context, security_group_id)
-
-            if not security_group:
-                raise ext_sg.SecurityGroupNotFound(id=security_group_id)
-            # Check for duplicate rules
-            self._check_for_duplicate_rules(context, s)
-            # gather all the existing security group rules since we need all
-            # of them to PUT to NSX.
-            existing_rules = self.get_security_group_rules(
-                context, {'security_group_id': [security_group['id']]})
-            combined_rules = sg_utils.merge_security_group_rules_with_current(
-                context.session, self.cluster, s, existing_rules)
-            nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id(
-                context.session, self.cluster, security_group_id)
-            secgrouplib.update_security_group_rules(self.cluster,
-                                                    nsx_sec_profile_id,
-                                                    combined_rules)
-            return super(
-                NsxPluginV2, self).create_security_group_rule_bulk_native(
-                    context, security_group_rule)
-
-    def delete_security_group_rule(self, context, sgrid):
-        """Delete a security group rule
-        :param sgrid: security group id to remove.
-        """
-        with context.session.begin(subtransactions=True):
-            # determine security profile id
-            security_group_rule = (
-                super(NsxPluginV2, self).get_security_group_rule(
-                    context, sgrid))
-            if not security_group_rule:
-                raise ext_sg.SecurityGroupRuleNotFound(id=sgrid)
-
-            sgid = security_group_rule['security_group_id']
-            current_rules = self.get_security_group_rules(
-                context, {'security_group_id': [sgid]})
-            current_rules_nsx = sg_utils.get_security_group_rules_nsx_format(
-                context.session, self.cluster, current_rules, True)
-
-            sg_utils.remove_security_group_with_id_and_id_field(
-                current_rules_nsx, sgrid)
-            nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id(
-                context.session, self.cluster, sgid)
-            secgrouplib.update_security_group_rules(
-                self.cluster, nsx_sec_profile_id, current_rules_nsx)
-            return super(NsxPluginV2, self).delete_security_group_rule(context,
-                                                                       sgrid)
-
-    def create_qos_queue(self, context, qos_queue, check_policy=True):
-        q = qos_queue.get('qos_queue')
-        self._validate_qos_queue(context, q)
-        q['id'] = queuelib.create_lqueue(self.cluster, q)
-        return super(NsxPluginV2, self).create_qos_queue(context, qos_queue)
-
-    def delete_qos_queue(self, context, queue_id, raise_in_use=True):
-        filters = {'queue_id': [queue_id]}
-        queues = self._get_port_queue_bindings(context, filters)
-        if queues:
-            if raise_in_use:
-                raise qos.QueueInUseByPort()
-            else:
-                return
-        queuelib.delete_lqueue(self.cluster, queue_id)
-        return super(NsxPluginV2, self).delete_qos_queue(context, queue_id)
diff --git a/neutron/plugins/vmware/shell/__init__.py b/neutron/plugins/vmware/shell/__init__.py
deleted file mode 100644 (file)
index e0b15b8..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2014 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-from neutron.plugins.vmware.shell import commands as cmd
-from neutronclient import shell
-
-
-class NsxManage(shell.NeutronShell):
-
-    def __init__(self, api_version):
-        super(NsxManage, self).__init__(api_version)
-        self.command_manager.add_command('net-migrate', cmd.NetworkMigrate)
-        self.command_manager.add_command('net-report', cmd.NetworkReport)
-
-    def build_option_parser(self, description, version):
-        parser = super(NsxManage, self).build_option_parser(
-            description, version)
-        return parser
-
-    def initialize_app(self, argv):
-        super(NsxManage, self).initialize_app(argv)
-        self.client = self.client_manager.neutron
-
-
-def main():
-    return NsxManage(shell.NEUTRON_API_VERSION).run(sys.argv[1:])
diff --git a/neutron/plugins/vmware/shell/commands.py b/neutron/plugins/vmware/shell/commands.py
deleted file mode 100644 (file)
index bd6706f..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2014 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from neutronclient.neutron import v2_0 as client
-
-LSN_PATH = '/lsns'
-
-
-def print_report(write_func, report):
-    write_func(_("\nService type = %s\n") % report['report']['type'])
-    services = ','.join(report['report']['services'])
-    ports = ','.join(report['report']['ports'])
-    write_func(_("Service uuids = %s\n") % services)
-    write_func(_("Port uuids = %s\n\n") % ports)
-
-
-class NetworkReport(client.NeutronCommand):
-    """Retrieve network migration report."""
-
-    def get_parser(self, prog_name):
-        parser = super(NetworkReport, self).get_parser(prog_name)
-        parser.add_argument('network', metavar='network',
-                            help=_('ID or name of network to run report on'))
-        return parser
-
-    def run(self, parsed_args):
-        net = parsed_args.network
-        net_id = client.find_resourceid_by_name_or_id(self.app.client,
-                                                      'network', net)
-        res = self.app.client.get("%s/%s" % (LSN_PATH, net_id))
-        if res:
-            self.app.stdout.write(_('Migration report is:\n'))
-            print_report(self.app.stdout.write, res['lsn'])
-
-
-class NetworkMigrate(client.NeutronCommand):
-    """Perform network migration."""
-
-    def get_parser(self, prog_name):
-        parser = super(NetworkMigrate, self).get_parser(prog_name)
-        parser.add_argument('network', metavar='network',
-                            help=_('ID or name of network to migrate'))
-        return parser
-
-    def run(self, parsed_args):
-        net = parsed_args.network
-        net_id = client.find_resourceid_by_name_or_id(self.app.client,
-                                                      'network', net)
-        body = {'network': net_id}
-        res = self.app.client.post(LSN_PATH, body={'lsn': body})
-        if res:
-            self.app.stdout.write(_('Migration has been successful:\n'))
-            print_report(self.app.stdout.write, res['lsn'])
diff --git a/neutron/plugins/vmware/vshield/__init__.py b/neutron/plugins/vmware/vshield/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/vmware/vshield/common/VcnsApiClient.py b/neutron/plugins/vmware/vshield/common/VcnsApiClient.py
deleted file mode 100644 (file)
index e064310..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2013 VMware, Inc
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import base64
-
-import eventlet
-from oslo_serialization import jsonutils
-
-from neutron.plugins.vmware.vshield.common import exceptions
-
-httplib2 = eventlet.import_patched('httplib2')
-
-
-def xmldumps(obj):
-    config = ""
-    if isinstance(obj, dict):
-        for key, value in obj.iteritems():
-            cfg = "<%s>%s</%s>" % (key, xmldumps(value), key)
-            config += cfg
-    elif isinstance(obj, list):
-        for value in obj:
-            config += xmldumps(value)
-    else:
-        config = obj
-
-    return config
-
-
-class VcnsApiHelper(object):
-    errors = {
-        303: exceptions.ResourceRedirect,
-        400: exceptions.RequestBad,
-        403: exceptions.Forbidden,
-        404: exceptions.ResourceNotFound,
-        415: exceptions.MediaTypeUnsupport,
-        503: exceptions.ServiceUnavailable
-    }
-
-    def __init__(self, address, user, password, format='json'):
-        self.authToken = base64.encodestring("%s:%s" % (user, password))
-        self.user = user
-        self.passwd = password
-        self.address = address
-        self.format = format
-        if format == 'json':
-            self.encode = jsonutils.dumps
-        else:
-            self.encode = xmldumps
-
-    def request(self, method, uri, params=None):
-        uri = self.address + uri
-        http = httplib2.Http()
-        http.disable_ssl_certificate_validation = True
-        headers = {
-            'Content-Type': 'application/' + self.format,
-            'Accept': 'application/' + 'json',
-            'Authorization': 'Basic ' + self.authToken
-        }
-        body = self.encode(params) if params else None
-        header, response = http.request(uri, method,
-                                        body=body, headers=headers)
-        status = int(header['status'])
-        if 200 <= status < 300:
-            return header, response
-        if status in self.errors:
-            cls = self.errors[status]
-        else:
-            cls = exceptions.VcnsApiException
-        raise cls(uri=uri, status=status, header=header, response=response)
diff --git a/neutron/plugins/vmware/vshield/common/__init__.py b/neutron/plugins/vmware/vshield/common/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/vmware/vshield/common/constants.py b/neutron/plugins/vmware/vshield/common/constants.py
deleted file mode 100644 (file)
index 1c2aa25..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-EDGE_ID = 'edge_id'
-ROUTER_ID = 'router_id'
-
-# Interface
-EXTERNAL_VNIC_INDEX = 0
-INTERNAL_VNIC_INDEX = 1
-EXTERNAL_VNIC_NAME = "external"
-INTERNAL_VNIC_NAME = "internal"
-
-INTEGRATION_LR_IPADDRESS = "169.254.2.1/28"
-INTEGRATION_EDGE_IPADDRESS = "169.254.2.3"
-INTEGRATION_SUBNET_NETMASK = "255.255.255.240"
-
-# SNAT rule location
-PREPEND = 0
-APPEND = -1
-
-# error code
-VCNS_ERROR_CODE_EDGE_NOT_RUNNING = 10013
-
-SUFFIX_LENGTH = 8
-
-
-# router status by number
-class RouterStatus(object):
-    ROUTER_STATUS_ACTIVE = 0
-    ROUTER_STATUS_DOWN = 1
-    ROUTER_STATUS_PENDING_CREATE = 2
-    ROUTER_STATUS_PENDING_DELETE = 3
-    ROUTER_STATUS_ERROR = 4
diff --git a/neutron/plugins/vmware/vshield/common/exceptions.py b/neutron/plugins/vmware/vshield/common/exceptions.py
deleted file mode 100644 (file)
index e90ca18..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2013 VMware, Inc
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.common import exceptions
-
-
-class VcnsException(exceptions.NeutronException):
-    pass
-
-
-class VcnsGeneralException(VcnsException):
-    def __init__(self, message):
-        self.message = message
-        super(VcnsGeneralException, self).__init__()
-
-
-class VcnsBadRequest(exceptions.BadRequest):
-    pass
-
-
-class VcnsNotFound(exceptions.NotFound):
-    message = _('%(resource)s not found: %(msg)s')
-
-
-class VcnsApiException(VcnsException):
-    message = _("An unknown exception %(status)s occurred: %(response)s.")
-
-    def __init__(self, **kwargs):
-        super(VcnsApiException, self).__init__(**kwargs)
-
-        self.status = kwargs.get('status')
-        self.header = kwargs.get('header')
-        self.response = kwargs.get('response')
-
-
-class ResourceRedirect(VcnsApiException):
-    message = _("Resource %(uri)s has been redirected")
-
-
-class RequestBad(VcnsApiException):
-    message = _("Request %(uri)s is Bad, response %(response)s")
-
-
-class Forbidden(VcnsApiException):
-    message = _("Forbidden: %(uri)s")
-
-
-class ResourceNotFound(VcnsApiException):
-    message = _("Resource %(uri)s not found")
-
-
-class MediaTypeUnsupport(VcnsApiException):
-    message = _("Media Type %(uri)s is not supported")
-
-
-class ServiceUnavailable(VcnsApiException):
-    message = _("Service Unavailable: %(uri)s")
diff --git a/neutron/plugins/vmware/vshield/edge_appliance_driver.py b/neutron/plugins/vmware/vshield/edge_appliance_driver.py
deleted file mode 100644 (file)
index 2bdd22d..0000000
+++ /dev/null
@@ -1,661 +0,0 @@
-# Copyright 2013 VMware, Inc
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-
-from neutron.i18n import _LE
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware.vshield.common import constants as vcns_const
-from neutron.plugins.vmware.vshield.common import exceptions
-from neutron.plugins.vmware.vshield.tasks import constants
-from neutron.plugins.vmware.vshield.tasks import tasks
-
-LOG = logging.getLogger(__name__)
-
-
-class EdgeApplianceDriver(object):
-    def __init__(self):
-        # store the last task per edge that has the latest config
-        self.updated_task = {
-            'nat': {},
-            'route': {},
-        }
-
-    def _assemble_edge(self, name, appliance_size="compact",
-                       deployment_container_id=None, datacenter_moid=None,
-                       enable_aesni=True, hypervisor_assist=False,
-                       enable_fips=False, remote_access=False):
-        edge = {
-            'name': name,
-            'fqdn': name,
-            'hypervisorAssist': hypervisor_assist,
-            'type': 'gatewayServices',
-            'enableAesni': enable_aesni,
-            'enableFips': enable_fips,
-            'cliSettings': {
-                'remoteAccess': remote_access
-            },
-            'appliances': {
-                'applianceSize': appliance_size
-            },
-            'vnics': {
-                'vnics': []
-            }
-        }
-        if deployment_container_id:
-            edge['appliances']['deploymentContainerId'] = (
-                deployment_container_id)
-        if datacenter_moid:
-            edge['datacenterMoid'] = datacenter_moid
-
-        return edge
-
-    def _assemble_edge_appliance(self, resource_pool_id, datastore_id):
-        appliance = {}
-        if resource_pool_id:
-            appliance['resourcePoolId'] = resource_pool_id
-        if datastore_id:
-            appliance['datastoreId'] = datastore_id
-        return appliance
-
-    def _assemble_edge_vnic(self, name, index, portgroup_id,
-                            primary_address=None, subnet_mask=None,
-                            secondary=None,
-                            type="internal",
-                            enable_proxy_arp=False,
-                            enable_send_redirects=True,
-                            is_connected=True,
-                            mtu=1500):
-        vnic = {
-            'index': index,
-            'name': name,
-            'type': type,
-            'portgroupId': portgroup_id,
-            'mtu': mtu,
-            'enableProxyArp': enable_proxy_arp,
-            'enableSendRedirects': enable_send_redirects,
-            'isConnected': is_connected
-        }
-        if primary_address and subnet_mask:
-            address_group = {
-                'primaryAddress': primary_address,
-                'subnetMask': subnet_mask
-            }
-            if secondary:
-                address_group['secondaryAddresses'] = {
-                    'ipAddress': secondary,
-                    'type': 'IpAddressesDto'
-                }
-
-            vnic['addressGroups'] = {
-                'addressGroups': [address_group]
-            }
-
-        return vnic
-
-    def _edge_status_to_level(self, status):
-        if status == 'GREEN':
-            status_level = vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE
-        elif status in ('GREY', 'YELLOW'):
-            status_level = vcns_const.RouterStatus.ROUTER_STATUS_DOWN
-        else:
-            status_level = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
-        return status_level
-
-    def _enable_loadbalancer(self, edge):
-        if not edge.get('featureConfigs') or (
-            not edge['featureConfigs'].get('features')):
-            edge['featureConfigs'] = {'features': []}
-        edge['featureConfigs']['features'].append(
-            {'featureType': 'loadbalancer_4.0',
-             'enabled': True})
-
-    def get_edge_status(self, edge_id):
-        try:
-            response = self.vcns.get_edge_status(edge_id)[1]
-            status_level = self._edge_status_to_level(
-                response['edgeStatus'])
-        except exceptions.VcnsApiException as e:
-            LOG.exception(_LE("VCNS: Failed to get edge status:\n%s"),
-                          e.response)
-            status_level = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
-            try:
-                desc = jsonutils.loads(e.response)
-                if desc.get('errorCode') == (
-                    vcns_const.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
-                    status_level = (
-                        vcns_const.RouterStatus.ROUTER_STATUS_DOWN)
-            except ValueError:
-                LOG.exception(e.response)
-
-        return status_level
-
-    def get_edges_statuses(self):
-        edges_status_level = {}
-        edges = self._get_edges()
-        for edge in edges['edgePage'].get('data', []):
-            edge_id = edge['id']
-            status = edge['edgeStatus']
-            edges_status_level[edge_id] = self._edge_status_to_level(status)
-
-        return edges_status_level
-
-    def _update_interface(self, task):
-        edge_id = task.userdata['edge_id']
-        config = task.userdata['config']
-        LOG.debug("VCNS: start updating vnic %s", config)
-        try:
-            self.vcns.update_interface(edge_id, config)
-        except exceptions.VcnsApiException as e:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("VCNS: Failed to update vnic %(config)s:\n"
-                                  "%(response)s"), {
-                                    'config': config,
-                                    'response': e.response})
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("VCNS: Failed to update vnic %d"),
-                              config['index'])
-
-        return constants.TaskStatus.COMPLETED
-
-    def update_interface(self, router_id, edge_id, index, network,
-                         address=None, netmask=None, secondary=None,
-                         jobdata=None):
-        LOG.debug("VCNS: update vnic %(index)d: %(addr)s %(netmask)s", {
-            'index': index, 'addr': address, 'netmask': netmask})
-        if index == vcns_const.EXTERNAL_VNIC_INDEX:
-            name = vcns_const.EXTERNAL_VNIC_NAME
-            intf_type = 'uplink'
-        elif index == vcns_const.INTERNAL_VNIC_INDEX:
-            name = vcns_const.INTERNAL_VNIC_NAME
-            intf_type = 'internal'
-        else:
-            msg = _("Vnic %d currently not supported") % index
-            raise exceptions.VcnsGeneralException(msg)
-
-        config = self._assemble_edge_vnic(
-            name, index, network, address, netmask, secondary, type=intf_type)
-
-        userdata = {
-            'edge_id': edge_id,
-            'config': config,
-            'jobdata': jobdata
-        }
-        task_name = "update-interface-%s-%d" % (edge_id, index)
-        task = tasks.Task(task_name, router_id,
-                          self._update_interface, userdata=userdata)
-        task.add_result_monitor(self.callbacks.interface_update_result)
-        self.task_manager.add(task)
-        return task
-
-    def _deploy_edge(self, task):
-        userdata = task.userdata
-        name = userdata['router_name']
-        LOG.debug("VCNS: start deploying edge %s", name)
-        request = userdata['request']
-        try:
-            header = self.vcns.deploy_edge(request)[0]
-            objuri = header['location']
-            job_id = objuri[objuri.rfind("/") + 1:]
-            response = self.vcns.get_edge_id(job_id)[1]
-            edge_id = response['edgeId']
-            LOG.debug("VCNS: deploying edge %s", edge_id)
-            userdata['edge_id'] = edge_id
-            status = constants.TaskStatus.PENDING
-        except exceptions.VcnsApiException:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("VCNS: deploy edge failed for router %s."),
-                              name)
-
-        return status
-
-    def _status_edge(self, task):
-        edge_id = task.userdata['edge_id']
-        try:
-            response = self.vcns.get_edge_deploy_status(edge_id)[1]
-            task.userdata['retries'] = 0
-            system_status = response.get('systemStatus', None)
-            if system_status is None:
-                status = constants.TaskStatus.PENDING
-            elif system_status == 'good':
-                status = constants.TaskStatus.COMPLETED
-            else:
-                status = constants.TaskStatus.ERROR
-        except exceptions.VcnsApiException:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("VCNS: Edge %s status query failed."),
-                              edge_id)
-        except Exception:
-            retries = task.userdata.get('retries', 0) + 1
-            if retries < 3:
-                task.userdata['retries'] = retries
-                LOG.exception(_LE("VCNS: Unable to retrieve edge %(edge_id)s "
-                                  "status. Retry %(retries)d."),
-                              {'edge_id': edge_id,
-                               'retries': retries})
-                status = constants.TaskStatus.PENDING
-            else:
-                LOG.exception(_LE("VCNS: Unable to retrieve edge %s status. "
-                                 "Abort."), edge_id)
-                status = constants.TaskStatus.ERROR
-        LOG.debug("VCNS: Edge %s status", edge_id)
-        return status
-
-    def _result_edge(self, task):
-        router_name = task.userdata['router_name']
-        edge_id = task.userdata.get('edge_id')
-        if task.status != constants.TaskStatus.COMPLETED:
-            LOG.error(_LE("VCNS: Failed to deploy edge %(edge_id)s "
-                          "for %(name)s, status %(status)d"), {
-                            'edge_id': edge_id,
-                            'name': router_name,
-                            'status': task.status
-                        })
-        else:
-            LOG.debug("VCNS: Edge %(edge_id)s deployed for "
-                      "router %(name)s", {
-                          'edge_id': edge_id, 'name': router_name
-                      })
-
-    def _delete_edge(self, task):
-        edge_id = task.userdata['edge_id']
-        LOG.debug("VCNS: start destroying edge %s", edge_id)
-        status = constants.TaskStatus.COMPLETED
-        if edge_id:
-            try:
-                self.vcns.delete_edge(edge_id)
-            except exceptions.ResourceNotFound:
-                pass
-            except exceptions.VcnsApiException as e:
-                LOG.exception(_LE("VCNS: Failed to delete %(edge_id)s:\n"
-                                  "%(response)s"),
-                              {'edge_id': edge_id, 'response': e.response})
-                status = constants.TaskStatus.ERROR
-            except Exception:
-                LOG.exception(_LE("VCNS: Failed to delete %s"), edge_id)
-                status = constants.TaskStatus.ERROR
-
-        return status
-
-    def _get_edges(self):
-        try:
-            return self.vcns.get_edges()[1]
-        except exceptions.VcnsApiException as e:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("VCNS: Failed to get edges:\n%s"),
-                              e.response)
-
-    def deploy_edge(self, router_id, name, internal_network, jobdata=None,
-                    wait_for_exec=False, loadbalancer_enable=True):
-        task_name = 'deploying-%s' % name
-        edge_name = name
-        edge = self._assemble_edge(
-            edge_name, datacenter_moid=self.datacenter_moid,
-            deployment_container_id=self.deployment_container_id,
-            appliance_size='large', remote_access=True)
-        appliance = self._assemble_edge_appliance(self.resource_pool_id,
-                                                  self.datastore_id)
-        if appliance:
-            edge['appliances']['appliances'] = [appliance]
-
-        vnic_external = self._assemble_edge_vnic(
-            vcns_const.EXTERNAL_VNIC_NAME, vcns_const.EXTERNAL_VNIC_INDEX,
-            self.external_network, type="uplink")
-        edge['vnics']['vnics'].append(vnic_external)
-        vnic_inside = self._assemble_edge_vnic(
-            vcns_const.INTERNAL_VNIC_NAME, vcns_const.INTERNAL_VNIC_INDEX,
-            internal_network,
-            vcns_const.INTEGRATION_EDGE_IPADDRESS,
-            vcns_const.INTEGRATION_SUBNET_NETMASK,
-            type="internal")
-        edge['vnics']['vnics'].append(vnic_inside)
-        if loadbalancer_enable:
-            self._enable_loadbalancer(edge)
-        userdata = {
-            'request': edge,
-            'router_name': name,
-            'jobdata': jobdata
-        }
-        task = tasks.Task(task_name, router_id,
-                          self._deploy_edge,
-                          status_callback=self._status_edge,
-                          result_callback=self._result_edge,
-                          userdata=userdata)
-        task.add_executed_monitor(self.callbacks.edge_deploy_started)
-        task.add_result_monitor(self.callbacks.edge_deploy_result)
-        self.task_manager.add(task)
-
-        if wait_for_exec:
-            # wait until the deploy task is executed so edge_id is available
-            task.wait(constants.TaskState.EXECUTED)
-
-        return task
-
-    def delete_edge(self, router_id, edge_id, jobdata=None):
-        task_name = 'delete-%s' % edge_id
-        userdata = {
-            'router_id': router_id,
-            'edge_id': edge_id,
-            'jobdata': jobdata
-        }
-        task = tasks.Task(task_name, router_id, self._delete_edge,
-                          userdata=userdata)
-        task.add_result_monitor(self.callbacks.edge_delete_result)
-        self.task_manager.add(task)
-        return task
-
-    def _assemble_nat_rule(self, action, original_address,
-                           translated_address,
-                           vnic_index=vcns_const.EXTERNAL_VNIC_INDEX,
-                           enabled=True):
-        nat_rule = {}
-        nat_rule['action'] = action
-        nat_rule['vnic'] = vnic_index
-        nat_rule['originalAddress'] = original_address
-        nat_rule['translatedAddress'] = translated_address
-        nat_rule['enabled'] = enabled
-        return nat_rule
-
-    def get_nat_config(self, edge_id):
-        try:
-            return self.vcns.get_nat_config(edge_id)[1]
-        except exceptions.VcnsApiException as e:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("VCNS: Failed to get nat config:\n%s"),
-                              e.response)
-
-    def _create_nat_rule(self, task):
-        # TODO(fank): use POST for optimization
-        #             return rule_id for future reference
-        rule = task.userdata['rule']
-        LOG.debug("VCNS: start creating nat rules: %s", rule)
-        edge_id = task.userdata['edge_id']
-        nat = self.get_nat_config(edge_id)
-        location = task.userdata['location']
-
-        del nat['version']
-
-        if location is None or location == vcns_const.APPEND:
-            nat['rules']['natRulesDtos'].append(rule)
-        else:
-            nat['rules']['natRulesDtos'].insert(location, rule)
-
-        try:
-            self.vcns.update_nat_config(edge_id, nat)
-            status = constants.TaskStatus.COMPLETED
-        except exceptions.VcnsApiException as e:
-            LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
-                          e.response)
-            status = constants.TaskStatus.ERROR
-
-        return status
-
-    def create_snat_rule(self, router_id, edge_id, src, translated,
-                         jobdata=None, location=None):
-        LOG.debug("VCNS: create snat rule %(src)s/%(translated)s", {
-            'src': src, 'translated': translated})
-        snat_rule = self._assemble_nat_rule("snat", src, translated)
-        userdata = {
-            'router_id': router_id,
-            'edge_id': edge_id,
-            'rule': snat_rule,
-            'location': location,
-            'jobdata': jobdata
-        }
-        task_name = "create-snat-%s-%s-%s" % (edge_id, src, translated)
-        task = tasks.Task(task_name, router_id, self._create_nat_rule,
-                          userdata=userdata)
-        task.add_result_monitor(self.callbacks.snat_create_result)
-        self.task_manager.add(task)
-        return task
-
-    def _delete_nat_rule(self, task):
-        # TODO(fank): pass in rule_id for optimization
-        #             handle routes update for optimization
-        edge_id = task.userdata['edge_id']
-        address = task.userdata['address']
-        addrtype = task.userdata['addrtype']
-        LOG.debug("VCNS: start deleting %(type)s rules: %(addr)s", {
-            'type': addrtype, 'addr': address})
-        nat = self.get_nat_config(edge_id)
-        del nat['version']
-        status = constants.TaskStatus.COMPLETED
-        for nat_rule in nat['rules']['natRulesDtos']:
-            if nat_rule[addrtype] == address:
-                rule_id = nat_rule['ruleId']
-                try:
-                    self.vcns.delete_nat_rule(edge_id, rule_id)
-                except exceptions.VcnsApiException as e:
-                    LOG.exception(_LE("VCNS: Failed to delete snat rule:\n"
-                                      "%s"), e.response)
-                    status = constants.TaskStatus.ERROR
-
-        return status
-
-    def delete_snat_rule(self, router_id, edge_id, src, jobdata=None):
-        LOG.debug("VCNS: delete snat rule %s", src)
-        userdata = {
-            'edge_id': edge_id,
-            'address': src,
-            'addrtype': 'originalAddress',
-            'jobdata': jobdata
-        }
-        task_name = "delete-snat-%s-%s" % (edge_id, src)
-        task = tasks.Task(task_name, router_id, self._delete_nat_rule,
-                          userdata=userdata)
-        task.add_result_monitor(self.callbacks.snat_delete_result)
-        self.task_manager.add(task)
-        return task
-
-    def create_dnat_rule(self, router_id, edge_id, dst, translated,
-                         jobdata=None, location=None):
-        # TODO(fank): use POST for optimization
-        #             return rule_id for future reference
-        LOG.debug("VCNS: create dnat rule %(dst)s/%(translated)s", {
-            'dst': dst, 'translated': translated})
-        dnat_rule = self._assemble_nat_rule(
-            "dnat", dst, translated)
-        userdata = {
-            'router_id': router_id,
-            'edge_id': edge_id,
-            'rule': dnat_rule,
-            'location': location,
-            'jobdata': jobdata
-        }
-        task_name = "create-dnat-%s-%s-%s" % (edge_id, dst, translated)
-        task = tasks.Task(task_name, router_id, self._create_nat_rule,
-                          userdata=userdata)
-        task.add_result_monitor(self.callbacks.dnat_create_result)
-        self.task_manager.add(task)
-        return task
-
-    def delete_dnat_rule(self, router_id, edge_id, translated,
-                         jobdata=None):
-        # TODO(fank): pass in rule_id for optimization
-        LOG.debug("VCNS: delete dnat rule %s", translated)
-        userdata = {
-            'edge_id': edge_id,
-            'address': translated,
-            'addrtype': 'translatedAddress',
-            'jobdata': jobdata
-        }
-        task_name = "delete-dnat-%s-%s" % (edge_id, translated)
-        task = tasks.Task(task_name, router_id, self._delete_nat_rule,
-                          userdata=userdata)
-        task.add_result_monitor(self.callbacks.dnat_delete_result)
-        self.task_manager.add(task)
-        return task
-
-    def _update_nat_rule(self, task):
-        # TODO(fank): use POST for optimization
-        #             return rule_id for future reference
-        edge_id = task.userdata['edge_id']
-        if task != self.updated_task['nat'][edge_id]:
-            # this task does not have the latest config, abort now
-            # for speedup
-            return constants.TaskStatus.ABORT
-
-        rules = task.userdata['rules']
-        LOG.debug("VCNS: start updating nat rules: %s", rules)
-
-        nat = {
-            'featureType': 'nat',
-            'rules': {
-                'natRulesDtos': rules
-            }
-        }
-
-        try:
-            self.vcns.update_nat_config(edge_id, nat)
-            status = constants.TaskStatus.COMPLETED
-        except exceptions.VcnsApiException as e:
-            LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
-                          e.response)
-            status = constants.TaskStatus.ERROR
-
-        return status
-
-    def update_nat_rules(self, router_id, edge_id, snats, dnats,
-                         jobdata=None):
-        LOG.debug("VCNS: update nat rule\n"
-                  "SNAT:%(snat)s\n"
-                  "DNAT:%(dnat)s\n", {
-                      'snat': snats, 'dnat': dnats})
-        nat_rules = []
-
-        for dnat in dnats:
-            nat_rules.append(self._assemble_nat_rule(
-                'dnat', dnat['dst'], dnat['translated']))
-            nat_rules.append(self._assemble_nat_rule(
-                'snat', dnat['translated'], dnat['dst']))
-
-        for snat in snats:
-            nat_rules.append(self._assemble_nat_rule(
-                'snat', snat['src'], snat['translated']))
-
-        userdata = {
-            'edge_id': edge_id,
-            'rules': nat_rules,
-            'jobdata': jobdata,
-        }
-        task_name = "update-nat-%s" % edge_id
-        task = tasks.Task(task_name, router_id, self._update_nat_rule,
-                          userdata=userdata)
-        task.add_result_monitor(self.callbacks.nat_update_result)
-        self.updated_task['nat'][edge_id] = task
-        self.task_manager.add(task)
-        return task
-
-    def _update_routes(self, task):
-        edge_id = task.userdata['edge_id']
-        if (task != self.updated_task['route'][edge_id] and
-            task.userdata.get('skippable', True)):
-            # this task does not have the latest config, abort now
-            # for speedup
-            return constants.TaskStatus.ABORT
-        gateway = task.userdata['gateway']
-        routes = task.userdata['routes']
-        LOG.debug("VCNS: start updating routes for %s", edge_id)
-        static_routes = []
-        for route in routes:
-            static_routes.append({
-                "description": "",
-                "vnic": vcns_const.INTERNAL_VNIC_INDEX,
-                "network": route['cidr'],
-                "nextHop": route['nexthop']
-            })
-        request = {
-            "staticRoutes": {
-                "staticRoutes": static_routes
-            }
-        }
-        if gateway:
-            request["defaultRoute"] = {
-                "description": "default-gateway",
-                "gatewayAddress": gateway,
-                "vnic": vcns_const.EXTERNAL_VNIC_INDEX
-            }
-        try:
-            self.vcns.update_routes(edge_id, request)
-            status = constants.TaskStatus.COMPLETED
-        except exceptions.VcnsApiException as e:
-            LOG.exception(_LE("VCNS: Failed to update routes:\n%s"),
-                          e.response)
-            status = constants.TaskStatus.ERROR
-
-        return status
-
-    def update_routes(self, router_id, edge_id, gateway, routes,
-                      skippable=True, jobdata=None):
-        if gateway:
-            gateway = gateway.split('/')[0]
-
-        userdata = {
-            'edge_id': edge_id,
-            'gateway': gateway,
-            'routes': routes,
-            'skippable': skippable,
-            'jobdata': jobdata
-        }
-        task_name = "update-routes-%s" % (edge_id)
-        task = tasks.Task(task_name, router_id, self._update_routes,
-                          userdata=userdata)
-        task.add_result_monitor(self.callbacks.routes_update_result)
-        self.updated_task['route'][edge_id] = task
-        self.task_manager.add(task)
-        return task
-
-    def create_lswitch(self, name, tz_config, tags=None,
-                       port_isolation=False, replication_mode="service"):
-        lsconfig = {
-            'display_name': utils.check_and_truncate(name),
-            "tags": tags or [],
-            "type": "LogicalSwitchConfig",
-            "_schema": "/ws.v1/schema/LogicalSwitchConfig",
-            "transport_zones": tz_config
-        }
-        if port_isolation is bool:
-            lsconfig["port_isolation_enabled"] = port_isolation
-        if replication_mode:
-            lsconfig["replication_mode"] = replication_mode
-
-        response = self.vcns.create_lswitch(lsconfig)[1]
-        return response
-
-    def delete_lswitch(self, lswitch_id):
-        self.vcns.delete_lswitch(lswitch_id)
-
-    def get_loadbalancer_config(self, edge_id):
-        try:
-            header, response = self.vcns.get_loadbalancer_config(
-                edge_id)
-        except exceptions.VcnsApiException:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to get service config"))
-        return response
-
-    def enable_service_loadbalancer(self, edge_id):
-        config = self.get_loadbalancer_config(
-            edge_id)
-        if not config['enabled']:
-            config['enabled'] = True
-        try:
-            self.vcns.enable_service_loadbalancer(edge_id, config)
-        except exceptions.VcnsApiException:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to enable loadbalancer "
-                                  "service config"))
diff --git a/neutron/plugins/vmware/vshield/tasks/__init__.py b/neutron/plugins/vmware/vshield/tasks/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/plugins/vmware/vshield/tasks/constants.py b/neutron/plugins/vmware/vshield/tasks/constants.py
deleted file mode 100644 (file)
index f5322e0..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
class TaskStatus(object):
    """Execution status reported for a task.

    Execution and status callbacks return one of these values to tell
    the task manager how the current task is doing; the same values are
    used to report the final execution result.
    """
    PENDING = 1    # still running / awaiting the backend
    COMPLETED = 2  # finished successfully
    ERROR = 3      # finished with an error
    ABORT = 4      # superseded by a newer task or cancelled at shutdown
-
-
class TaskState(object):
    """Lifecycle state of a task within the task manager.

    NONE: the task is still in the queue
    START: the task is pulled from the queue and about to be executed
    EXECUTED: the task has been executed
    STATUS: periodic status checks are running for this task
    RESULT: the task has finished and its result is ready
    """
    NONE = -1
    START = 0
    EXECUTED = 1
    STATUS = 2
    RESULT = 3
diff --git a/neutron/plugins/vmware/vshield/tasks/tasks.py b/neutron/plugins/vmware/vshield/tasks/tasks.py
deleted file mode 100644 (file)
index 5012abd..0000000
+++ /dev/null
@@ -1,394 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import collections
-import uuid
-
-from eventlet import event
-from eventlet import greenthread
-
-from neutron.common import exceptions
-from neutron.i18n import _LE, _LI
-from neutron.openstack.common import log as logging
-from neutron.openstack.common import loopingcall
-from neutron.plugins.vmware.vshield.tasks import constants
-
# Default task-status polling interval, in milliseconds (TaskManager.start
# divides by 1000.0 to obtain seconds for the looping call).
DEFAULT_INTERVAL = 1000

LOG = logging.getLogger(__name__)
-
-
def nop(task):
    """Default no-op callback: report the task as completed."""
    return constants.TaskStatus.COMPLETED
-
-
class TaskException(exceptions.NeutronException):
    """Base exception for task errors.

    Accepts an optional *message* template which, when given, replaces
    the class-level message before NeutronException interpolates it with
    the remaining keyword arguments.
    """

    def __init__(self, message=None, **kwargs):
        if message is not None:
            self.message = message

        super(TaskException, self).__init__(**kwargs)
-
-
class InvalidState(TaskException):
    """Raised by Task.wait() when asked to wait on an unwaitable state."""
    message = _("Invalid state %(state)d")
-
-
class TaskStateSkipped(TaskException):
    """Delivered to waiters of a state the task jumped past."""
    message = _("State %(state)d skipped. Current state %(current)d")
-
-
class Task(object):
    """A unit of work executed asynchronously by TaskManager.

    A task carries three user callbacks (execute, periodic status check,
    result) plus per-state monitor lists, and supports eventlet-based
    waiting on state transitions.
    """

    def __init__(self, name, resource_id, execute_callback,
                 status_callback=nop, result_callback=nop, userdata=None):
        self.name = name
        self.resource_id = resource_id
        self._execute_callback = execute_callback
        self._status_callback = status_callback
        self._result_callback = result_callback
        self.userdata = userdata
        self.id = None        # assigned by TaskManager.add()
        self.status = None    # last TaskStatus reported

        # Observers invoked on each waitable state transition.
        self._monitors = {
            constants.TaskState.START: [],
            constants.TaskState.EXECUTED: [],
            constants.TaskState.RESULT: []
        }
        # One eventlet Event slot per waitable state, indexed by state value.
        self._states = [None, None, None, None]
        self._state = constants.TaskState.NONE

    def _add_monitor(self, action, func):
        # Returns self to allow chained registration.
        self._monitors[action].append(func)
        return self

    def _move_state(self, state):
        """Advance to *state* and wake (or fail) any registered waiters."""
        self._state = state
        if self._states[state] is not None:
            # Wake the waiter registered for exactly this state.
            e = self._states[state]
            self._states[state] = None
            e.send()

        for s in range(state):
            if self._states[s] is not None:
                # Waiters on earlier states can never be woken now; fail
                # them with TaskStateSkipped instead of hanging forever.
                e = self._states[s]
                self._states[s] = None
                e.send_exception(
                    TaskStateSkipped(state=s, current=self._state))

    def _invoke_monitor(self, state):
        """Call every monitor for *state*, then advance to that state.

        Monitor exceptions are logged and swallowed so one failing
        observer cannot stall the task pipeline.
        """
        for func in self._monitors[state]:
            try:
                func(self)
            except Exception:
                LOG.exception(_LE("Task %(task)s encountered exception in "
                                  "%(func)s at state %(state)s"),
                              {'task': str(self),
                               'func': str(func),
                               'state': state})

        self._move_state(state)

        return self

    def _start(self):
        return self._invoke_monitor(constants.TaskState.START)

    def _executed(self):
        return self._invoke_monitor(constants.TaskState.EXECUTED)

    def _update_status(self, status):
        # NOTE(review): returns self when the status is unchanged but None
        # when it changes; callers ignore the return value either way.
        if self.status == status:
            return self

        self.status = status

    def _finished(self):
        return self._invoke_monitor(constants.TaskState.RESULT)

    def add_start_monitor(self, func):
        return self._add_monitor(constants.TaskState.START, func)

    def add_executed_monitor(self, func):
        return self._add_monitor(constants.TaskState.EXECUTED, func)

    def add_result_monitor(self, func):
        return self._add_monitor(constants.TaskState.RESULT, func)

    def wait(self, state):
        """Block until the task reaches *state*.

        Only START, EXECUTED and RESULT are waitable; STATUS is internal.
        Raises InvalidState for anything else, and may raise
        TaskStateSkipped if the task jumps past the awaited state.
        """
        if (state < constants.TaskState.START or
            state > constants.TaskState.RESULT or
            state == constants.TaskState.STATUS):
            raise InvalidState(state=state)

        if state <= self._state:
            # we already passed this current state, so no wait
            return

        e = event.Event()
        self._states[state] = e
        e.wait()

    def __repr__(self):
        return "Task-%s-%s-%s" % (
            self.name, self.resource_id, self.id)
-
-
-class TaskManager(object):
-
-    _instance = None
-    _default_interval = DEFAULT_INTERVAL
-
-    def __init__(self, interval=None):
-        self._interval = interval or TaskManager._default_interval
-
-        # A queue to pass tasks from other threads
-        self._tasks_queue = collections.deque()
-
-        # A dict to store resource -> resource's tasks
-        self._tasks = {}
-
-        # Current task being executed in main thread
-        self._main_thread_exec_task = None
-
-        # New request event
-        self._req = event.Event()
-
-        # TaskHandler stopped event
-        self._stopped = False
-
-        # Periodic function trigger
-        self._monitor = None
-        self._monitor_busy = False
-
-        # Thread handling the task request
-        self._thread = None
-
-    def _execute(self, task):
-        """Execute task."""
-        LOG.debug("Start task %s", str(task))
-        task._start()
-        try:
-            status = task._execute_callback(task)
-        except Exception:
-            LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
-                          {'task': str(task),
-                           'cb': str(task._execute_callback)})
-            status = constants.TaskStatus.ERROR
-
-        LOG.debug("Task %(task)s return %(status)s", {
-            'task': str(task),
-            'status': status})
-
-        task._update_status(status)
-        task._executed()
-
-        return status
-
-    def _result(self, task):
-        """Notify task execution result."""
-        try:
-            task._result_callback(task)
-        except Exception:
-            LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
-                          {'task': str(task),
-                           'cb': str(task._result_callback)})
-
-        LOG.debug("Task %(task)s return %(status)s",
-                  {'task': str(task), 'status': task.status})
-
-        task._finished()
-
-    def _check_pending_tasks(self):
-        """Check all pending tasks status."""
-        for resource_id in self._tasks.keys():
-            if self._stopped:
-                # Task manager is stopped, return now
-                return
-
-            tasks = self._tasks[resource_id]
-            # only the first task is executed and pending
-            task = tasks[0]
-            try:
-                status = task._status_callback(task)
-            except Exception:
-                LOG.exception(_LE("Task %(task)s encountered exception in "
-                                  "%(cb)s"),
-                              {'task': str(task),
-                               'cb': str(task._status_callback)})
-                status = constants.TaskStatus.ERROR
-            task._update_status(status)
-            if status != constants.TaskStatus.PENDING:
-                self._dequeue(task, True)
-
-    def _enqueue(self, task):
-        if task.resource_id in self._tasks:
-            # append to existing resource queue for ordered processing
-            self._tasks[task.resource_id].append(task)
-        else:
-            # put the task to a new resource queue
-            tasks = collections.deque()
-            tasks.append(task)
-            self._tasks[task.resource_id] = tasks
-
-    def _dequeue(self, task, run_next):
-        self._result(task)
-        tasks = self._tasks[task.resource_id]
-        tasks.remove(task)
-        if not tasks:
-            # no more tasks for this resource
-            del self._tasks[task.resource_id]
-            return
-
-        if run_next:
-            # process next task for this resource
-            while tasks:
-                task = tasks[0]
-                status = self._execute(task)
-                if status == constants.TaskStatus.PENDING:
-                    break
-                self._dequeue(task, False)
-
-    def _abort(self):
-        """Abort all tasks."""
-        # put all tasks haven't been received by main thread to queue
-        # so the following abort handling can cover them
-        for t in self._tasks_queue:
-            self._enqueue(t)
-        self._tasks_queue.clear()
-
-        for resource_id in self._tasks.keys():
-            tasks = list(self._tasks[resource_id])
-            for task in tasks:
-                task._update_status(constants.TaskStatus.ABORT)
-                self._dequeue(task, False)
-
-    def _get_task(self):
-        """Get task request."""
-        while True:
-            for t in self._tasks_queue:
-                return self._tasks_queue.popleft()
-            self._req.wait()
-            self._req.reset()
-
-    def run(self):
-        while True:
-            try:
-                if self._stopped:
-                    # Gracefully terminate this thread if the _stopped
-                    # attribute was set to true
-                    LOG.info(_LI("Stopping TaskManager"))
-                    break
-
-                # get a task from queue, or timeout for periodic status check
-                task = self._get_task()
-                if task.resource_id in self._tasks:
-                    # this resource already has some tasks under processing,
-                    # append the task to same queue for ordered processing
-                    self._enqueue(task)
-                    continue
-
-                try:
-                    self._main_thread_exec_task = task
-                    self._execute(task)
-                finally:
-                    self._main_thread_exec_task = None
-                    if task.status is None:
-                        # The thread is killed during _execute(). To guarantee
-                        # the task been aborted correctly, put it to the queue.
-                        self._enqueue(task)
-                    elif task.status != constants.TaskStatus.PENDING:
-                        self._result(task)
-                    else:
-                        self._enqueue(task)
-            except Exception:
-                LOG.exception(_LE("TaskManager terminating because "
-                                  "of an exception"))
-                break
-
-    def add(self, task):
-        task.id = uuid.uuid1()
-        self._tasks_queue.append(task)
-        if not self._req.ready():
-            self._req.send()
-        return task.id
-
-    def stop(self):
-        if self._thread is None:
-            return
-        self._stopped = True
-        self._thread.kill()
-        self._thread = None
-        # Stop looping call and abort running tasks
-        self._monitor.stop()
-        if self._monitor_busy:
-            self._monitor.wait()
-        self._abort()
-        LOG.info(_LI("TaskManager terminated"))
-
-    def has_pending_task(self):
-        if self._tasks_queue or self._tasks or self._main_thread_exec_task:
-            return True
-        else:
-            return False
-
-    def show_pending_tasks(self):
-        for task in self._tasks_queue:
-            LOG.info(str(task))
-        for resource, tasks in self._tasks.iteritems():
-            for task in tasks:
-                LOG.info(str(task))
-        if self._main_thread_exec_task:
-            LOG.info(str(self._main_thread_exec_task))
-
-    def count(self):
-        count = 0
-        for resource_id, tasks in self._tasks.iteritems():
-            count += len(tasks)
-        return count
-
-    def start(self, interval=None):
-        def _inner():
-            self.run()
-
-        def _loopingcall_callback():
-            self._monitor_busy = True
-            try:
-                self._check_pending_tasks()
-            except Exception:
-                LOG.exception(_LE("Exception in _check_pending_tasks"))
-            self._monitor_busy = False
-
-        if self._thread is not None:
-            return self
-
-        if interval is None or interval == 0:
-            interval = self._interval
-
-        self._stopped = False
-        self._thread = greenthread.spawn(_inner)
-        self._monitor = loopingcall.FixedIntervalLoopingCall(
-            _loopingcall_callback)
-        self._monitor.start(interval / 1000.0,
-                            interval / 1000.0)
-        # To allow the created thread start running
-        greenthread.sleep(0)
-
-        return self
-
-    @classmethod
-    def set_default_interval(cls, interval):
-        cls._default_interval = interval
diff --git a/neutron/plugins/vmware/vshield/vcns.py b/neutron/plugins/vmware/vshield/vcns.py
deleted file mode 100644 (file)
index e9323aa..0000000
+++ /dev/null
@@ -1,303 +0,0 @@
-# Copyright 2013 VMware, Inc
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils
-
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.vshield.common import VcnsApiClient
-
-LOG = logging.getLogger(__name__)
-
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
# Base path shared by all vShield Edge API calls.
URI_PREFIX = "/api/4.0/edges"

# FWaaS (edge firewall) service and resource names
FIREWALL_SERVICE = "firewall/config"
FIREWALL_RULE_RESOURCE = "rules"

# LBaaS (edge load balancer) service and resource names
LOADBALANCER_SERVICE = "loadbalancer/config"
VIP_RESOURCE = "virtualservers"
POOL_RESOURCE = "pools"
MONITOR_RESOURCE = "monitors"
APP_PROFILE_RESOURCE = "applicationprofiles"

# IPsec VPNaaS service name
IPSEC_VPN_SERVICE = 'ipsec/config'
-
-
class Vcns(object):
    """Thin REST client for the vShield Edge (VCNS) API.

    Each public method maps to one HTTP call and returns the
    (header, content) pair produced by do_request().
    """

    def __init__(self, address, user, password):
        self.address = address
        self.user = user
        self.password = password
        self.jsonapi_client = VcnsApiClient.VcnsApiHelper(address, user,
                                                          password, 'json')

    def do_request(self, method, uri, params=None, format='json', **kwargs):
        """Issue an HTTP request and return (header, content).

        The content is JSON-decoded unless kwargs['decode'] is falsy or
        the response body is empty.
        """
        LOG.debug("VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')", {
                  'method': method,
                  'uri': uri,
                  'body': jsonutils.dumps(params)})
        if format == 'json':
            header, content = self.jsonapi_client.request(method, uri, params)
        else:
            # NOTE(review): self.xmlapi_client is never initialized anywhere
            # in this class, so any non-json format raises AttributeError
            # here -- confirm before relying on XML support.
            header, content = self.xmlapi_client.request(method, uri, params)
        LOG.debug("Header: '%s'", header)
        LOG.debug("Content: '%s'", content)
        if content == '':
            return header, {}
        if kwargs.get('decode', True):
            content = jsonutils.loads(content)
        return header, content

    def deploy_edge(self, request):
        # Asynchronous deployment; the response body is a job reference,
        # not JSON, hence decode=False.
        uri = URI_PREFIX + "?async=true"
        return self.do_request(HTTP_POST, uri, request, decode=False)

    def get_edge_id(self, job_id):
        uri = URI_PREFIX + "/jobs/%s" % job_id
        return self.do_request(HTTP_GET, uri, decode=True)

    def get_edge_deploy_status(self, edge_id):
        uri = URI_PREFIX + "/%s/status?getlatest=false" % edge_id
        # Bug fix: decode was passed as the string "True" (truthy, but the
        # wrong type); use the boolean like every other call site.
        return self.do_request(HTTP_GET, uri, decode=True)

    def delete_edge(self, edge_id):
        uri = "%s/%s" % (URI_PREFIX, edge_id)
        return self.do_request(HTTP_DELETE, uri)

    def update_interface(self, edge_id, vnic):
        uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic['index'])
        return self.do_request(HTTP_PUT, uri, vnic, decode=True)

    def get_nat_config(self, edge_id):
        uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id)
        return self.do_request(HTTP_GET, uri, decode=True)

    def update_nat_config(self, edge_id, nat):
        uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id)
        return self.do_request(HTTP_PUT, uri, nat, decode=True)

    def delete_nat_rule(self, edge_id, rule_id):
        uri = "%s/%s/nat/config/rules/%s" % (URI_PREFIX, edge_id, rule_id)
        return self.do_request(HTTP_DELETE, uri, decode=True)

    def get_edge_status(self, edge_id):
        uri = "%s/%s/status?getlatest=false" % (URI_PREFIX, edge_id)
        return self.do_request(HTTP_GET, uri, decode=True)

    def get_edges(self):
        uri = URI_PREFIX
        return self.do_request(HTTP_GET, uri, decode=True)

    def update_routes(self, edge_id, routes):
        uri = "%s/%s/routing/config/static" % (URI_PREFIX, edge_id)
        return self.do_request(HTTP_PUT, uri, routes)

    def create_lswitch(self, lsconfig):
        uri = "/api/ws.v1/lswitch"
        return self.do_request(HTTP_POST, uri, lsconfig, decode=True)

    def delete_lswitch(self, lswitch_id):
        uri = "/api/ws.v1/lswitch/%s" % lswitch_id
        return self.do_request(HTTP_DELETE, uri)

    def get_loadbalancer_config(self, edge_id):
        uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE)
        return self.do_request(HTTP_GET, uri, decode=True)

    def enable_service_loadbalancer(self, edge_id, config):
        uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE)
        return self.do_request(HTTP_PUT, uri, config)

    #
    # Edge FWaaS call helpers
    #
    def update_firewall(self, edge_id, fw_req):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE)
        return self.do_request(HTTP_PUT, uri, fw_req)

    def delete_firewall(self, edge_id):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE, None)
        return self.do_request(HTTP_DELETE, uri)

    def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE,
            FIREWALL_RULE_RESOURCE,
            vcns_rule_id)
        return self.do_request(HTTP_PUT, uri, fwr_req)

    def delete_firewall_rule(self, edge_id, vcns_rule_id):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE,
            FIREWALL_RULE_RESOURCE,
            vcns_rule_id)
        return self.do_request(HTTP_DELETE, uri)

    def add_firewall_rule_above(self, edge_id, ref_vcns_rule_id, fwr_req):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE,
            FIREWALL_RULE_RESOURCE)
        uri += "?aboveRuleId=" + ref_vcns_rule_id
        return self.do_request(HTTP_POST, uri, fwr_req)

    def add_firewall_rule(self, edge_id, fwr_req):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE,
            FIREWALL_RULE_RESOURCE)
        return self.do_request(HTTP_POST, uri, fwr_req)

    def get_firewall(self, edge_id):
        uri = self._build_uri_path(edge_id, FIREWALL_SERVICE)
        return self.do_request(HTTP_GET, uri, decode=True)

    def get_firewall_rule(self, edge_id, vcns_rule_id):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE,
            FIREWALL_RULE_RESOURCE,
            vcns_rule_id)
        return self.do_request(HTTP_GET, uri, decode=True)

    #
    # Edge LBaaS call helpers
    #
    def create_vip(self, edge_id, vip_new):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            VIP_RESOURCE)
        return self.do_request(HTTP_POST, uri, vip_new)

    def get_vip(self, edge_id, vip_vseid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            VIP_RESOURCE, vip_vseid)
        return self.do_request(HTTP_GET, uri, decode=True)

    def update_vip(self, edge_id, vip_vseid, vip_new):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            VIP_RESOURCE, vip_vseid)
        return self.do_request(HTTP_PUT, uri, vip_new)

    def delete_vip(self, edge_id, vip_vseid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            VIP_RESOURCE, vip_vseid)
        return self.do_request(HTTP_DELETE, uri)

    def create_pool(self, edge_id, pool_new):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            POOL_RESOURCE)
        return self.do_request(HTTP_POST, uri, pool_new)

    def get_pool(self, edge_id, pool_vseid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            POOL_RESOURCE, pool_vseid)
        return self.do_request(HTTP_GET, uri, decode=True)

    def update_pool(self, edge_id, pool_vseid, pool_new):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            POOL_RESOURCE, pool_vseid)
        return self.do_request(HTTP_PUT, uri, pool_new)

    def delete_pool(self, edge_id, pool_vseid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            POOL_RESOURCE, pool_vseid)
        return self.do_request(HTTP_DELETE, uri)

    def create_health_monitor(self, edge_id, monitor_new):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            MONITOR_RESOURCE)
        return self.do_request(HTTP_POST, uri, monitor_new)

    def get_health_monitor(self, edge_id, monitor_vseid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            MONITOR_RESOURCE, monitor_vseid)
        return self.do_request(HTTP_GET, uri, decode=True)

    def update_health_monitor(self, edge_id, monitor_vseid, monitor_new):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            MONITOR_RESOURCE,
            monitor_vseid)
        return self.do_request(HTTP_PUT, uri, monitor_new)

    def delete_health_monitor(self, edge_id, monitor_vseid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            MONITOR_RESOURCE,
            monitor_vseid)
        return self.do_request(HTTP_DELETE, uri)

    def create_app_profile(self, edge_id, app_profile):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            APP_PROFILE_RESOURCE)
        return self.do_request(HTTP_POST, uri, app_profile)

    def update_app_profile(self, edge_id, app_profileid, app_profile):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            APP_PROFILE_RESOURCE, app_profileid)
        return self.do_request(HTTP_PUT, uri, app_profile)

    def delete_app_profile(self, edge_id, app_profileid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            APP_PROFILE_RESOURCE,
            app_profileid)
        return self.do_request(HTTP_DELETE, uri)

    #
    # IPsec VPNaaS call helpers
    #
    def update_ipsec_config(self, edge_id, ipsec_config):
        uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE)
        return self.do_request(HTTP_PUT, uri, ipsec_config)

    def delete_ipsec_config(self, edge_id):
        uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE)
        return self.do_request(HTTP_DELETE, uri)

    def get_ipsec_config(self, edge_id):
        uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE)
        return self.do_request(HTTP_GET, uri)

    def _build_uri_path(self, edge_id,
                        service,
                        resource=None,
                        resource_id=None,
                        parent_resource_id=None,
                        fields=None,
                        relations=None,
                        filters=None,
                        types=None,
                        is_attachment=False):
        """Build an edge-service URI.

        Only edge_id, service, resource and resource_id are used; the
        remaining parameters are accepted for signature compatibility
        and ignored.
        """
        uri_prefix = "%s/%s/%s" % (URI_PREFIX, edge_id, service)
        if resource:
            res_path = resource
            if resource_id:
                res_path += "/%s" % resource_id
            uri_path = "%s/%s" % (uri_prefix, res_path)
        else:
            uri_path = uri_prefix
        return uri_path
diff --git a/neutron/plugins/vmware/vshield/vcns_driver.py b/neutron/plugins/vmware/vshield/vcns_driver.py
deleted file mode 100644 (file)
index 4282912..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2013 VMware, Inc
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.common import config  # noqa
-from neutron.plugins.vmware.vshield import edge_appliance_driver
-from neutron.plugins.vmware.vshield.tasks import tasks
-from neutron.plugins.vmware.vshield import vcns
-
-LOG = logging.getLogger(__name__)
-
-
-class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver):
-
-    def __init__(self, callbacks):
-        super(VcnsDriver, self).__init__()
-
-        self.callbacks = callbacks
-        self.vcns_uri = cfg.CONF.vcns.manager_uri
-        self.vcns_user = cfg.CONF.vcns.user
-        self.vcns_passwd = cfg.CONF.vcns.password
-        self.datacenter_moid = cfg.CONF.vcns.datacenter_moid
-        self.deployment_container_id = cfg.CONF.vcns.deployment_container_id
-        self.resource_pool_id = cfg.CONF.vcns.resource_pool_id
-        self.datastore_id = cfg.CONF.vcns.datastore_id
-        self.external_network = cfg.CONF.vcns.external_network
-        interval = cfg.CONF.vcns.task_status_check_interval
-        self.task_manager = tasks.TaskManager(interval)
-        self.task_manager.start()
-        self.vcns = vcns.Vcns(self.vcns_uri, self.vcns_user, self.vcns_passwd)
diff --git a/neutron/tests/unit/vmware/__init__.py b/neutron/tests/unit/vmware/__init__.py
deleted file mode 100644 (file)
index eb6887d..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2013 OpenStack Foundation.
-#
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from neutron.plugins.vmware.api_client import client as nsx_client
-from neutron.plugins.vmware.api_client import eventlet_client
-from neutron.plugins.vmware import extensions
-import neutron.plugins.vmware.plugin as neutron_plugin
-from neutron.plugins.vmware.vshield import vcns
-
-
-plugin = neutron_plugin.NsxPlugin
-api_client = nsx_client.NsxApiClient
-evt_client = eventlet_client.EventletApiClient
-vcns_class = vcns.Vcns
-
-STUBS_PATH = os.path.join(os.path.dirname(__file__), 'etc')
-NSXEXT_PATH = os.path.dirname(extensions.__file__)
-NSXAPI_NAME = '%s.%s' % (api_client.__module__, api_client.__name__)
-PLUGIN_NAME = '%s.%s' % (plugin.__module__, plugin.__name__)
-CLIENT_NAME = '%s.%s' % (evt_client.__module__, evt_client.__name__)
-VCNS_NAME = '%s.%s' % (vcns_class.__module__, vcns_class.__name__)
-
-
-def get_fake_conf(filename):
-    return os.path.join(STUBS_PATH, filename)
-
-
-def nsx_method(method_name, module_name='nsxlib'):
-    return '%s.%s.%s' % ('neutron.plugins.vmware', module_name, method_name)
diff --git a/neutron/tests/unit/vmware/apiclient/__init__.py b/neutron/tests/unit/vmware/apiclient/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/vmware/apiclient/fake.py b/neutron/tests/unit/vmware/apiclient/fake.py
deleted file mode 100644 (file)
index 25a9ebf..0000000
+++ /dev/null
@@ -1,661 +0,0 @@
-# Copyright 2012 VMware, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils
-import six.moves.urllib.parse as urlparse
-
-from neutron.openstack.common import log as logging
-from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.api_client import exception as api_exc
-
-
-LOG = logging.getLogger(__name__)
-MAX_NAME_LEN = 40
-
-
-def _validate_name(name):
-    if name and len(name) > MAX_NAME_LEN:
-        raise Exception("Logical switch name exceeds %d characters",
-                        MAX_NAME_LEN)
-
-
-def _validate_resource(body):
-    _validate_name(body.get('display_name'))
-
-
-class FakeClient(object):
-
-    LSWITCH_RESOURCE = 'lswitch'
-    LPORT_RESOURCE = 'lport'
-    LROUTER_RESOURCE = 'lrouter'
-    NAT_RESOURCE = 'nat'
-    LQUEUE_RESOURCE = 'lqueue'
-    SECPROF_RESOURCE = 'securityprofile'
-    LSWITCH_STATUS = 'lswitchstatus'
-    LROUTER_STATUS = 'lrouterstatus'
-    LSWITCH_LPORT_RESOURCE = 'lswitch_lport'
-    LROUTER_LPORT_RESOURCE = 'lrouter_lport'
-    LROUTER_NAT_RESOURCE = 'lrouter_nat'
-    LSWITCH_LPORT_STATUS = 'lswitch_lportstatus'
-    LSWITCH_LPORT_ATT = 'lswitch_lportattachment'
-    LROUTER_LPORT_STATUS = 'lrouter_lportstatus'
-    LROUTER_LPORT_ATT = 'lrouter_lportattachment'
-    GWSERVICE_RESOURCE = 'gatewayservice'
-
-    RESOURCES = [LSWITCH_RESOURCE, LROUTER_RESOURCE, LQUEUE_RESOURCE,
-                 LPORT_RESOURCE, NAT_RESOURCE, SECPROF_RESOURCE,
-                 GWSERVICE_RESOURCE]
-
-    FAKE_GET_RESPONSES = {
-        LSWITCH_RESOURCE: "fake_get_lswitch.json",
-        LSWITCH_LPORT_RESOURCE: "fake_get_lswitch_lport.json",
-        LSWITCH_LPORT_STATUS: "fake_get_lswitch_lport_status.json",
-        LSWITCH_LPORT_ATT: "fake_get_lswitch_lport_att.json",
-        LROUTER_RESOURCE: "fake_get_lrouter.json",
-        LROUTER_LPORT_RESOURCE: "fake_get_lrouter_lport.json",
-        LROUTER_LPORT_STATUS: "fake_get_lrouter_lport_status.json",
-        LROUTER_LPORT_ATT: "fake_get_lrouter_lport_att.json",
-        LROUTER_STATUS: "fake_get_lrouter_status.json",
-        LROUTER_NAT_RESOURCE: "fake_get_lrouter_nat.json",
-        SECPROF_RESOURCE: "fake_get_security_profile.json",
-        LQUEUE_RESOURCE: "fake_get_lqueue.json",
-        GWSERVICE_RESOURCE: "fake_get_gwservice.json"
-    }
-
-    FAKE_POST_RESPONSES = {
-        LSWITCH_RESOURCE: "fake_post_lswitch.json",
-        LROUTER_RESOURCE: "fake_post_lrouter.json",
-        LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
-        LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
-        LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
-        SECPROF_RESOURCE: "fake_post_security_profile.json",
-        LQUEUE_RESOURCE: "fake_post_lqueue.json",
-        GWSERVICE_RESOURCE: "fake_post_gwservice.json"
-    }
-
-    FAKE_PUT_RESPONSES = {
-        LSWITCH_RESOURCE: "fake_post_lswitch.json",
-        LROUTER_RESOURCE: "fake_post_lrouter.json",
-        LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
-        LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
-        LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
-        LSWITCH_LPORT_ATT: "fake_put_lswitch_lport_att.json",
-        LROUTER_LPORT_ATT: "fake_put_lrouter_lport_att.json",
-        SECPROF_RESOURCE: "fake_post_security_profile.json",
-        LQUEUE_RESOURCE: "fake_post_lqueue.json",
-        GWSERVICE_RESOURCE: "fake_post_gwservice.json"
-    }
-
-    MANAGED_RELATIONS = {
-        LSWITCH_RESOURCE: [],
-        LROUTER_RESOURCE: [],
-        LSWITCH_LPORT_RESOURCE: ['LogicalPortAttachment'],
-        LROUTER_LPORT_RESOURCE: ['LogicalPortAttachment'],
-    }
-
-    _validators = {
-        LSWITCH_RESOURCE: _validate_resource,
-        LSWITCH_LPORT_RESOURCE: _validate_resource,
-        LROUTER_LPORT_RESOURCE: _validate_resource,
-        SECPROF_RESOURCE: _validate_resource,
-        LQUEUE_RESOURCE: _validate_resource,
-        GWSERVICE_RESOURCE: _validate_resource
-    }
-
-    def __init__(self, fake_files_path):
-        self.fake_files_path = fake_files_path
-        self._fake_lswitch_dict = {}
-        self._fake_lrouter_dict = {}
-        self._fake_lswitch_lport_dict = {}
-        self._fake_lrouter_lport_dict = {}
-        self._fake_lrouter_nat_dict = {}
-        self._fake_lswitch_lportstatus_dict = {}
-        self._fake_lrouter_lportstatus_dict = {}
-        self._fake_securityprofile_dict = {}
-        self._fake_lqueue_dict = {}
-        self._fake_gatewayservice_dict = {}
-
-    def _get_tag(self, resource, scope):
-        tags = [tag['tag'] for tag in resource['tags']
-                if tag['scope'] == scope]
-        return len(tags) > 0 and tags[0]
-
-    def _get_filters(self, querystring):
-        if not querystring:
-            return (None, None, None, None)
-        params = urlparse.parse_qs(querystring)
-        tag_filter = None
-        attr_filter = None
-        if 'tag' in params and 'tag_scope' in params:
-            tag_filter = {'scope': params['tag_scope'][0],
-                          'tag': params['tag'][0]}
-        elif 'uuid' in params:
-            attr_filter = {'uuid': params['uuid'][0]}
-        # Handle page length and page cursor parameter
-        page_len = params.get('_page_length')
-        page_cursor = params.get('_page_cursor')
-        if page_len:
-            page_len = int(page_len[0])
-        else:
-            # Explicitly set it to None (avoid 0 or empty list)
-            page_len = None
-        return (tag_filter, attr_filter, page_len, page_cursor)
-
-    def _add_lswitch(self, body):
-        fake_lswitch = jsonutils.loads(body)
-        fake_lswitch['uuid'] = uuidutils.generate_uuid()
-        self._fake_lswitch_dict[fake_lswitch['uuid']] = fake_lswitch
-        # put the tenant_id and the zone_uuid in the main dict
-        # for simplifying templating
-        zone_uuid = fake_lswitch['transport_zones'][0]['zone_uuid']
-        fake_lswitch['zone_uuid'] = zone_uuid
-        fake_lswitch['tenant_id'] = self._get_tag(fake_lswitch, 'os_tid')
-        fake_lswitch['lport_count'] = 0
-        # set status value
-        fake_lswitch['status'] = 'true'
-        return fake_lswitch
-
-    def _build_lrouter(self, body, uuid=None):
-        fake_lrouter = jsonutils.loads(body)
-        if uuid:
-            fake_lrouter['uuid'] = uuid
-        fake_lrouter['tenant_id'] = self._get_tag(fake_lrouter, 'os_tid')
-        default_nexthop = fake_lrouter['routing_config'].get(
-            'default_route_next_hop')
-        if default_nexthop:
-            fake_lrouter['default_next_hop'] = default_nexthop.get(
-                'gateway_ip_address', '0.0.0.0')
-        else:
-            fake_lrouter['default_next_hop'] = '0.0.0.0'
-        # NOTE(salv-orlando): We won't make the Fake NSX API client
-        # aware of NSX version. The long term plan is to replace it
-        # with behavioral mocking of NSX API requests
-        if 'distributed' not in fake_lrouter:
-            fake_lrouter['distributed'] = False
-        distributed_json = ('"distributed": %s,' %
-                            str(fake_lrouter['distributed']).lower())
-        fake_lrouter['distributed_json'] = distributed_json
-        return fake_lrouter
-
-    def _add_lrouter(self, body):
-        fake_lrouter = self._build_lrouter(body,
-                                           uuidutils.generate_uuid())
-        self._fake_lrouter_dict[fake_lrouter['uuid']] = fake_lrouter
-        fake_lrouter['lport_count'] = 0
-        # set status value
-        fake_lrouter['status'] = 'true'
-        return fake_lrouter
-
-    def _add_lqueue(self, body):
-        fake_lqueue = jsonutils.loads(body)
-        fake_lqueue['uuid'] = uuidutils.generate_uuid()
-        self._fake_lqueue_dict[fake_lqueue['uuid']] = fake_lqueue
-        return fake_lqueue
-
-    def _add_lswitch_lport(self, body, ls_uuid):
-        fake_lport = jsonutils.loads(body)
-        new_uuid = uuidutils.generate_uuid()
-        fake_lport['uuid'] = new_uuid
-        # put the tenant_id and the ls_uuid in the main dict
-        # for simplifying templating
-        fake_lport['ls_uuid'] = ls_uuid
-        fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
-        fake_lport['neutron_port_id'] = self._get_tag(fake_lport,
-                                                      'q_port_id')
-        fake_lport['neutron_device_id'] = self._get_tag(fake_lport, 'vm_id')
-        fake_lport['att_type'] = "NoAttachment"
-        fake_lport['att_info_json'] = ''
-        self._fake_lswitch_lport_dict[fake_lport['uuid']] = fake_lport
-
-        fake_lswitch = self._fake_lswitch_dict[ls_uuid]
-        fake_lswitch['lport_count'] += 1
-        fake_lport_status = fake_lport.copy()
-        fake_lport_status['ls_tenant_id'] = fake_lswitch['tenant_id']
-        fake_lport_status['ls_uuid'] = fake_lswitch['uuid']
-        fake_lport_status['ls_name'] = fake_lswitch['display_name']
-        fake_lport_status['ls_zone_uuid'] = fake_lswitch['zone_uuid']
-        # set status value
-        fake_lport['status'] = 'true'
-        self._fake_lswitch_lportstatus_dict[new_uuid] = fake_lport_status
-        return fake_lport
-
-    def _build_lrouter_lport(self, body, new_uuid=None, lr_uuid=None):
-        fake_lport = jsonutils.loads(body)
-        if new_uuid:
-            fake_lport['uuid'] = new_uuid
-        # put the tenant_id and the le_uuid in the main dict
-        # for simplifying templating
-        if lr_uuid:
-            fake_lport['lr_uuid'] = lr_uuid
-        fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
-        fake_lport['neutron_port_id'] = self._get_tag(fake_lport,
-                                                      'q_port_id')
-        # replace ip_address with its json dump
-        if 'ip_addresses' in fake_lport:
-            ip_addresses_json = jsonutils.dumps(fake_lport['ip_addresses'])
-            fake_lport['ip_addresses_json'] = ip_addresses_json
-        return fake_lport
-
-    def _add_lrouter_lport(self, body, lr_uuid):
-        new_uuid = uuidutils.generate_uuid()
-        fake_lport = self._build_lrouter_lport(body, new_uuid, lr_uuid)
-        self._fake_lrouter_lport_dict[fake_lport['uuid']] = fake_lport
-        try:
-            fake_lrouter = self._fake_lrouter_dict[lr_uuid]
-        except KeyError:
-            raise api_exc.ResourceNotFound()
-        fake_lrouter['lport_count'] += 1
-        fake_lport_status = fake_lport.copy()
-        fake_lport_status['lr_tenant_id'] = fake_lrouter['tenant_id']
-        fake_lport_status['lr_uuid'] = fake_lrouter['uuid']
-        fake_lport_status['lr_name'] = fake_lrouter['display_name']
-        self._fake_lrouter_lportstatus_dict[new_uuid] = fake_lport_status
-        return fake_lport
-
-    def _add_securityprofile(self, body):
-        fake_securityprofile = jsonutils.loads(body)
-        fake_securityprofile['uuid'] = uuidutils.generate_uuid()
-        fake_securityprofile['tenant_id'] = self._get_tag(
-            fake_securityprofile, 'os_tid')
-
-        fake_securityprofile['nova_spid'] = self._get_tag(fake_securityprofile,
-                                                          'nova_spid')
-        self._fake_securityprofile_dict[fake_securityprofile['uuid']] = (
-            fake_securityprofile)
-        return fake_securityprofile
-
-    def _add_lrouter_nat(self, body, lr_uuid):
-        fake_nat = jsonutils.loads(body)
-        new_uuid = uuidutils.generate_uuid()
-        fake_nat['uuid'] = new_uuid
-        fake_nat['lr_uuid'] = lr_uuid
-        self._fake_lrouter_nat_dict[fake_nat['uuid']] = fake_nat
-        if 'match' in fake_nat:
-            match_json = jsonutils.dumps(fake_nat['match'])
-            fake_nat['match_json'] = match_json
-        return fake_nat
-
-    def _add_gatewayservice(self, body):
-        fake_gwservice = jsonutils.loads(body)
-        fake_gwservice['uuid'] = str(uuidutils.generate_uuid())
-        fake_gwservice['tenant_id'] = self._get_tag(
-            fake_gwservice, 'os_tid')
-        # FIXME(salvatore-orlando): For simplicity we're managing only a
-        # single device. Extend the fake client for supporting multiple devices
-        first_gw = fake_gwservice['gateways'][0]
-        fake_gwservice['transport_node_uuid'] = first_gw['transport_node_uuid']
-        fake_gwservice['device_id'] = first_gw['device_id']
-        self._fake_gatewayservice_dict[fake_gwservice['uuid']] = (
-            fake_gwservice)
-        return fake_gwservice
-
-    def _build_relation(self, src, dst, resource_type, relation):
-        if relation not in self.MANAGED_RELATIONS[resource_type]:
-            return  # Relation is not desired in output
-        if '_relations' not in src or not src['_relations'].get(relation):
-            return  # Item does not have relation
-        relation_data = src['_relations'].get(relation)
-        dst_relations = dst.get('_relations', {})
-        dst_relations[relation] = relation_data
-        dst['_relations'] = dst_relations
-
-    def _fill_attachment(self, att_data, ls_uuid=None,
-                         lr_uuid=None, lp_uuid=None):
-        new_data = att_data.copy()
-        for k in ('ls_uuid', 'lr_uuid', 'lp_uuid'):
-            if locals().get(k):
-                new_data[k] = locals()[k]
-
-        def populate_field(field_name):
-            if field_name in att_data:
-                new_data['%s_field' % field_name] = ('"%s" : "%s",'
-                                                     % (field_name,
-                                                        att_data[field_name]))
-                del new_data[field_name]
-            else:
-                new_data['%s_field' % field_name] = ""
-
-        for field in ['vif_uuid', 'peer_port_href', 'vlan_id',
-                      'peer_port_uuid', 'l3_gateway_service_uuid']:
-            populate_field(field)
-        return new_data
-
-    def _get_resource_type(self, path):
-        """Get resource type.
-
-        Identifies resource type and relevant uuids in the uri
-
-        /ws.v1/lswitch/xxx
-        /ws.v1/lswitch/xxx/status
-        /ws.v1/lswitch/xxx/lport/yyy
-        /ws.v1/lswitch/xxx/lport/yyy/status
-        /ws.v1/lrouter/zzz
-        /ws.v1/lrouter/zzz/status
-        /ws.v1/lrouter/zzz/lport/www
-        /ws.v1/lrouter/zzz/lport/www/status
-        /ws.v1/lqueue/xxx
-        """
-        # The first element will always be 'ws.v1' - so we just discard it
-        uri_split = path.split('/')[1:]
-        # parse uri_split backwards
-        suffix = ""
-        idx = len(uri_split) - 1
-        if 'status' in uri_split[idx]:
-            suffix = "status"
-            idx = idx - 1
-        elif 'attachment' in uri_split[idx]:
-            suffix = "attachment"
-            idx = idx - 1
-        # then check if we have an uuid
-        uuids = []
-        if uri_split[idx].replace('-', '') not in self.RESOURCES:
-            uuids.append(uri_split[idx])
-            idx = idx - 1
-        resource_type = "%s%s" % (uri_split[idx], suffix)
-        if idx > 1:
-            uuids.insert(0, uri_split[idx - 1])
-            resource_type = "%s_%s" % (uri_split[idx - 2], resource_type)
-        return (resource_type.replace('-', ''), uuids)
-
-    def _list(self, resource_type, response_file,
-              parent_uuid=None, query=None, relations=None):
-        (tag_filter, attr_filter,
-         page_len, page_cursor) = self._get_filters(query)
-        # result_count attribute in response should appear only when
-        # page_cursor is not specified
-        do_result_count = not page_cursor
-        with open("%s/%s" % (self.fake_files_path, response_file)) as f:
-            response_template = f.read()
-            res_dict = getattr(self, '_fake_%s_dict' % resource_type)
-            if parent_uuid == '*':
-                parent_uuid = None
-            # NSX raises ResourceNotFound if lswitch doesn't exist and is not *
-            elif not res_dict and resource_type == self.LSWITCH_LPORT_RESOURCE:
-                raise api_exc.ResourceNotFound()
-
-            def _attr_match(res_uuid):
-                if not attr_filter:
-                    return True
-                item = res_dict[res_uuid]
-                for (attr, value) in attr_filter.iteritems():
-                    if item.get(attr) != value:
-                        return False
-                return True
-
-            def _tag_match(res_uuid):
-                if not tag_filter:
-                    return True
-                return any([x['scope'] == tag_filter['scope'] and
-                            x['tag'] == tag_filter['tag']
-                            for x in res_dict[res_uuid]['tags']])
-
-            def _lswitch_match(res_uuid):
-                # verify that the switch exists
-                if parent_uuid and parent_uuid not in self._fake_lswitch_dict:
-                    raise Exception(_("lswitch:%s not found") % parent_uuid)
-                if (not parent_uuid
-                    or res_dict[res_uuid].get('ls_uuid') == parent_uuid):
-                    return True
-                return False
-
-            def _lrouter_match(res_uuid):
-                # verify that the router exists
-                if parent_uuid and parent_uuid not in self._fake_lrouter_dict:
-                    raise api_exc.ResourceNotFound()
-                if (not parent_uuid or
-                    res_dict[res_uuid].get('lr_uuid') == parent_uuid):
-                    return True
-                return False
-
-            def _cursor_match(res_uuid, page_cursor):
-                if not page_cursor:
-                    return True
-                if page_cursor == res_uuid:
-                    # always return True once page_cursor has been found
-                    page_cursor = None
-                    return True
-                return False
-
-            def _build_item(resource):
-                item = jsonutils.loads(response_template % resource)
-                if relations:
-                    for relation in relations:
-                        self._build_relation(resource, item,
-                                             resource_type, relation)
-                return item
-
-            for item in res_dict.itervalues():
-                if 'tags' in item:
-                    item['tags_json'] = jsonutils.dumps(item['tags'])
-            if resource_type in (self.LSWITCH_LPORT_RESOURCE,
-                                 self.LSWITCH_LPORT_ATT,
-                                 self.LSWITCH_LPORT_STATUS):
-                parent_func = _lswitch_match
-            elif resource_type in (self.LROUTER_LPORT_RESOURCE,
-                                   self.LROUTER_LPORT_ATT,
-                                   self.LROUTER_NAT_RESOURCE,
-                                   self.LROUTER_LPORT_STATUS):
-                parent_func = _lrouter_match
-            else:
-                parent_func = lambda x: True
-
-            items = [_build_item(res_dict[res_uuid])
-                     for res_uuid in res_dict
-                     if (parent_func(res_uuid) and
-                         _tag_match(res_uuid) and
-                         _attr_match(res_uuid) and
-                         _cursor_match(res_uuid, page_cursor))]
-            # Rather inefficient, but hey this is just a mock!
-            next_cursor = None
-            total_items = len(items)
-            if page_len:
-                try:
-                    next_cursor = items[page_len]['uuid']
-                except IndexError:
-                    next_cursor = None
-                items = items[:page_len]
-            response_dict = {'results': items}
-            if next_cursor:
-                response_dict['page_cursor'] = next_cursor
-            if do_result_count:
-                response_dict['result_count'] = total_items
-            return jsonutils.dumps(response_dict)
-
-    def _show(self, resource_type, response_file,
-              uuid1, uuid2=None, relations=None):
-        target_uuid = uuid2 or uuid1
-        if resource_type.endswith('attachment'):
-            resource_type = resource_type[:resource_type.index('attachment')]
-        with open("%s/%s" % (self.fake_files_path, response_file)) as f:
-            response_template = f.read()
-            res_dict = getattr(self, '_fake_%s_dict' % resource_type)
-            for item in res_dict.itervalues():
-                if 'tags' in item:
-                    item['tags_json'] = jsonutils.dumps(item['tags'])
-
-                # replace sec prof rules with their json dump
-                def jsonify_rules(rule_key):
-                    if rule_key in item:
-                        rules_json = jsonutils.dumps(item[rule_key])
-                        item['%s_json' % rule_key] = rules_json
-                jsonify_rules('logical_port_egress_rules')
-                jsonify_rules('logical_port_ingress_rules')
-
-            items = [jsonutils.loads(response_template % res_dict[res_uuid])
-                     for res_uuid in res_dict if res_uuid == target_uuid]
-            if items:
-                return jsonutils.dumps(items[0])
-            raise api_exc.ResourceNotFound()
-
-    def handle_get(self, url):
-        #TODO(salvatore-orlando): handle field selection
-        parsedurl = urlparse.urlparse(url)
-        (res_type, uuids) = self._get_resource_type(parsedurl.path)
-        relations = urlparse.parse_qs(parsedurl.query).get('relations')
-        response_file = self.FAKE_GET_RESPONSES.get(res_type)
-        if not response_file:
-            raise api_exc.NsxApiException()
-        if 'lport' in res_type or 'nat' in res_type:
-            if len(uuids) > 1:
-                return self._show(res_type, response_file, uuids[0],
-                                  uuids[1], relations=relations)
-            else:
-                return self._list(res_type, response_file, uuids[0],
-                                  query=parsedurl.query, relations=relations)
-        elif ('lswitch' in res_type or
-              'lrouter' in res_type or
-              self.SECPROF_RESOURCE in res_type or
-              self.LQUEUE_RESOURCE in res_type or
-              'gatewayservice' in res_type):
-            LOG.debug("UUIDS:%s", uuids)
-            if uuids:
-                return self._show(res_type, response_file, uuids[0],
-                                  relations=relations)
-            else:
-                return self._list(res_type, response_file,
-                                  query=parsedurl.query,
-                                  relations=relations)
-        else:
-            raise Exception("unknown resource:%s" % res_type)
-
-    def handle_post(self, url, body):
-        parsedurl = urlparse.urlparse(url)
-        (res_type, uuids) = self._get_resource_type(parsedurl.path)
-        response_file = self.FAKE_POST_RESPONSES.get(res_type)
-        if not response_file:
-            raise Exception("resource not found")
-        with open("%s/%s" % (self.fake_files_path, response_file)) as f:
-            response_template = f.read()
-            add_resource = getattr(self, '_add_%s' % res_type)
-            body_json = jsonutils.loads(body)
-            val_func = self._validators.get(res_type)
-            if val_func:
-                val_func(body_json)
-            args = [body]
-            if uuids:
-                args.append(uuids[0])
-            response = response_template % add_resource(*args)
-            return response
-
-    def handle_put(self, url, body):
-        parsedurl = urlparse.urlparse(url)
-        (res_type, uuids) = self._get_resource_type(parsedurl.path)
-        response_file = self.FAKE_PUT_RESPONSES.get(res_type)
-        if not response_file:
-            raise Exception("resource not found")
-        with open("%s/%s" % (self.fake_files_path, response_file)) as f:
-            response_template = f.read()
-            # Manage attachment operations
-            is_attachment = False
-            if res_type.endswith('attachment'):
-                is_attachment = True
-                res_type = res_type[:res_type.index('attachment')]
-            res_dict = getattr(self, '_fake_%s_dict' % res_type)
-            body_json = jsonutils.loads(body)
-            val_func = self._validators.get(res_type)
-            if val_func:
-                val_func(body_json)
-            try:
-                resource = res_dict[uuids[-1]]
-            except KeyError:
-                raise api_exc.ResourceNotFound()
-            if not is_attachment:
-                edit_resource = getattr(self, '_build_%s' % res_type, None)
-                if edit_resource:
-                    body_json = edit_resource(body)
-                resource.update(body_json)
-            else:
-                relations = resource.get("_relations", {})
-                body_2 = jsonutils.loads(body)
-                resource['att_type'] = body_2['type']
-                relations['LogicalPortAttachment'] = body_2
-                resource['_relations'] = relations
-                if body_2['type'] == "PatchAttachment":
-                    # We need to do a trick here
-                    if self.LROUTER_RESOURCE in res_type:
-                        res_type_2 = res_type.replace(self.LROUTER_RESOURCE,
-                                                      self.LSWITCH_RESOURCE)
-                    elif self.LSWITCH_RESOURCE in res_type:
-                        res_type_2 = res_type.replace(self.LSWITCH_RESOURCE,
-                                                      self.LROUTER_RESOURCE)
-                    res_dict_2 = getattr(self, '_fake_%s_dict' % res_type_2)
-                    body_2['peer_port_uuid'] = uuids[-1]
-                    resource_2 = \
-                        res_dict_2[jsonutils.loads(body)['peer_port_uuid']]
-                    relations_2 = resource_2.get("_relations")
-                    if not relations_2:
-                        relations_2 = {}
-                    relations_2['LogicalPortAttachment'] = body_2
-                    resource_2['_relations'] = relations_2
-                    resource['peer_port_uuid'] = body_2['peer_port_uuid']
-                    resource['att_info_json'] = (
-                        "\"peer_port_uuid\": \"%s\"," %
-                        resource_2['uuid'])
-                    resource_2['att_info_json'] = (
-                        "\"peer_port_uuid\": \"%s\"," %
-                        body_2['peer_port_uuid'])
-                elif body_2['type'] == "L3GatewayAttachment":
-                    resource['attachment_gwsvc_uuid'] = (
-                        body_2['l3_gateway_service_uuid'])
-                    resource['vlan_id'] = body_2.get('vlan_id')
-                elif body_2['type'] == "L2GatewayAttachment":
-                    resource['attachment_gwsvc_uuid'] = (
-                        body_2['l2_gateway_service_uuid'])
-                elif body_2['type'] == "VifAttachment":
-                    resource['vif_uuid'] = body_2['vif_uuid']
-                    resource['att_info_json'] = (
-                        "\"vif_uuid\": \"%s\"," % body_2['vif_uuid'])
-
-            if not is_attachment:
-                response = response_template % resource
-            else:
-                if res_type == self.LROUTER_LPORT_RESOURCE:
-                    lr_uuid = uuids[0]
-                    ls_uuid = None
-                elif res_type == self.LSWITCH_LPORT_RESOURCE:
-                    ls_uuid = uuids[0]
-                    lr_uuid = None
-                lp_uuid = uuids[1]
-                response = response_template % self._fill_attachment(
-                    jsonutils.loads(body), ls_uuid, lr_uuid, lp_uuid)
-            return response
-
-    def handle_delete(self, url):
-        parsedurl = urlparse.urlparse(url)
-        (res_type, uuids) = self._get_resource_type(parsedurl.path)
-        response_file = self.FAKE_PUT_RESPONSES.get(res_type)
-        if not response_file:
-            raise Exception("resource not found")
-        res_dict = getattr(self, '_fake_%s_dict' % res_type)
-        try:
-            del res_dict[uuids[-1]]
-        except KeyError:
-            raise api_exc.ResourceNotFound()
-        return ""
-
-    def fake_request(self, *args, **kwargs):
-        method = args[0]
-        handler = getattr(self, "handle_%s" % method.lower())
-        return handler(*args[1:])
-
-    def reset_all(self):
-        self._fake_lswitch_dict.clear()
-        self._fake_lrouter_dict.clear()
-        self._fake_lswitch_lport_dict.clear()
-        self._fake_lrouter_lport_dict.clear()
-        self._fake_lswitch_lportstatus_dict.clear()
-        self._fake_lrouter_lportstatus_dict.clear()
-        self._fake_lqueue_dict.clear()
-        self._fake_securityprofile_dict.clear()
-        self._fake_gatewayservice_dict.clear()
diff --git a/neutron/tests/unit/vmware/apiclient/test_api_common.py b/neutron/tests/unit/vmware/apiclient/test_api_common.py
deleted file mode 100644 (file)
index 5ea40d0..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2011 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import httplib
-
-from neutron.plugins.vmware import api_client
-from neutron.tests import base
-
-
-class ApiCommonTest(base.BaseTestCase):
-
-    def test_ctrl_conn_to_str(self):
-        conn = httplib.HTTPSConnection('localhost', 4242, timeout=0)
-        self.assertTrue(
-            api_client.ctrl_conn_to_str(conn) == 'https://localhost:4242')
-
-        conn = httplib.HTTPConnection('localhost', 4242, timeout=0)
-        self.assertTrue(
-            api_client.ctrl_conn_to_str(conn) == 'http://localhost:4242')
-
-        self.assertRaises(TypeError, api_client.ctrl_conn_to_str,
-                          ('not an httplib.HTTPSConnection'))
diff --git a/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py b/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py
deleted file mode 100644 (file)
index ddd0bfd..0000000
+++ /dev/null
@@ -1,333 +0,0 @@
-# Copyright (C) 2009-2012 VMware, Inc. All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import httplib
-import random
-
-import eventlet
-from eventlet.green import urllib2
-import mock
-
-from neutron.i18n import _LI
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.api_client import eventlet_client as client
-from neutron.plugins.vmware.api_client import eventlet_request as request
-from neutron.tests import base
-from neutron.tests.unit import vmware
-
-
-LOG = logging.getLogger(__name__)
-
-
-REQUEST_TIMEOUT = 1
-
-
-def fetch(url):
-    return urllib2.urlopen(url).read()
-
-
-class ApiRequestEventletTest(base.BaseTestCase):
-
-    def setUp(self):
-
-        super(ApiRequestEventletTest, self).setUp()
-        self.client = client.EventletApiClient(
-            [("127.0.0.1", 4401, True)], "admin", "admin")
-        self.url = "/ws.v1/_debug"
-        self.req = request.EventletApiRequest(self.client, self.url)
-
-    def tearDown(self):
-        self.client = None
-        self.req = None
-        super(ApiRequestEventletTest, self).tearDown()
-
-    def test_construct_eventlet_api_request(self):
-        e = request.EventletApiRequest(self.client, self.url)
-        self.assertIsNotNone(e)
-
-    def test_apirequest_spawn(self):
-        def x(id):
-            eventlet.greenthread.sleep(random.random())
-            LOG.info(_LI('spawned: %d'), id)
-
-        for i in range(10):
-            request.EventletApiRequest._spawn(x, i)
-
-    def test_apirequest_start(self):
-        for i in range(10):
-            a = request.EventletApiRequest(
-                self.client, self.url)
-            a._handle_request = mock.Mock()
-            a.start()
-            eventlet.greenthread.sleep(0.1)
-            LOG.info(_LI('_handle_request called: %s'),
-                     a._handle_request.called)
-        request.EventletApiRequest.joinall()
-
-    def test_join_with_handle_request(self):
-        self.req._handle_request = mock.Mock()
-        self.req.start()
-        self.req.join()
-        self.assertTrue(self.req._handle_request.called)
-
-    def test_join_without_handle_request(self):
-        self.req._handle_request = mock.Mock()
-        self.req.join()
-        self.assertFalse(self.req._handle_request.called)
-
-    def test_copy(self):
-        req = self.req.copy()
-        for att in [
-                '_api_client', '_url', '_method', '_body', '_headers',
-                '_http_timeout', '_request_timeout', '_retries',
-                '_redirects', '_auto_login']:
-            self.assertTrue(getattr(req, att) is getattr(self.req, att))
-
-    def test_request_error(self):
-        self.assertIsNone(self.req.request_error)
-
-    def test_run_and_handle_request(self):
-        self.req._request_timeout = None
-        self.req._handle_request = mock.Mock()
-        self.req.start()
-        self.req.join()
-        self.assertTrue(self.req._handle_request.called)
-
-    def test_run_and_timeout(self):
-        def my_handle_request():
-            LOG.info('my_handle_request() self: %s' % self.req)
-            LOG.info('my_handle_request() dir(self): %s' % dir(self.req))
-            eventlet.greenthread.sleep(REQUEST_TIMEOUT * 2)
-
-        with mock.patch.object(
-            self.req,
-            '_handle_request',
-            new=my_handle_request
-        ):
-            self.req._request_timeout = REQUEST_TIMEOUT
-            self.req.start()
-            self.assertIsNone(self.req.join())
-
-    def prep_issue_request(self):
-        mysock = mock.Mock()
-        mysock.gettimeout.return_value = 4242
-
-        myresponse = mock.Mock()
-        myresponse.read.return_value = 'body'
-        myresponse.getheaders.return_value = 'headers'
-        myresponse.status = httplib.MOVED_PERMANENTLY
-
-        myconn = mock.Mock()
-        myconn.request.return_value = None
-        myconn.sock = mysock
-        myconn.getresponse.return_value = myresponse
-        myconn.__str__ = mock.Mock()
-        myconn.__str__.return_value = 'myconn string'
-
-        req = self.req
-        req._redirect_params = mock.Mock()
-        req._redirect_params.return_value = (myconn, 'url')
-        req._request_str = mock.Mock()
-        req._request_str.return_value = 'http://cool/cool'
-
-        client = self.client
-        client.need_login = False
-        client._auto_login = False
-        client._auth_cookie = False
-        client.acquire_connection = mock.Mock()
-        client.acquire_connection.return_value = myconn
-        client.release_connection = mock.Mock()
-
-        return (mysock, myresponse, myconn)
-
-    def test_issue_request_trigger_exception(self):
-        (mysock, myresponse, myconn) = self.prep_issue_request()
-        self.client.acquire_connection.return_value = None
-
-        self.req._issue_request()
-        self.assertIsInstance(self.req._request_error, Exception)
-        self.assertTrue(self.client.acquire_connection.called)
-
-    def test_issue_request_handle_none_sock(self):
-        (mysock, myresponse, myconn) = self.prep_issue_request()
-        myconn.sock = None
-        self.req.start()
-        self.assertIsNone(self.req.join())
-        self.assertTrue(self.client.acquire_connection.called)
-
-    def test_issue_request_exceed_maximum_retries(self):
-        (mysock, myresponse, myconn) = self.prep_issue_request()
-        self.req.start()
-        self.assertIsNone(self.req.join())
-        self.assertTrue(self.client.acquire_connection.called)
-
-    def test_issue_request_trigger_non_redirect(self):
-        (mysock, myresponse, myconn) = self.prep_issue_request()
-        myresponse.status = httplib.OK
-        self.req.start()
-        self.assertIsNone(self.req.join())
-        self.assertTrue(self.client.acquire_connection.called)
-
-    def test_issue_request_trigger_internal_server_error(self):
-        (mysock, myresponse, myconn) = self.prep_issue_request()
-        self.req._redirect_params.return_value = (myconn, None)
-        self.req.start()
-        self.assertIsNone(self.req.join())
-        self.assertTrue(self.client.acquire_connection.called)
-
-    def test_redirect_params_break_on_location(self):
-        myconn = mock.Mock()
-        (conn, retval) = self.req._redirect_params(
-            myconn, [('location', None)])
-        self.assertIsNone(retval)
-
-    def test_redirect_params_parse_a_url(self):
-        myconn = mock.Mock()
-        (conn, retval) = self.req._redirect_params(
-            myconn, [('location', '/path/a/b/c')])
-        self.assertIsNotNone(retval)
-
-    def test_redirect_params_invalid_redirect_location(self):
-        myconn = mock.Mock()
-        (conn, retval) = self.req._redirect_params(
-            myconn, [('location', '+path/a/b/c')])
-        self.assertIsNone(retval)
-
-    def test_redirect_params_invalid_scheme(self):
-        myconn = mock.Mock()
-        (conn, retval) = self.req._redirect_params(
-            myconn, [('location', 'invalidscheme://hostname:1/path')])
-        self.assertIsNone(retval)
-
-    def test_redirect_params_setup_https_with_cooki(self):
-        with mock.patch(vmware.CLIENT_NAME) as mock_client:
-            api_client = mock_client.return_value
-            self.req._api_client = api_client
-            myconn = mock.Mock()
-            (conn, retval) = self.req._redirect_params(
-                myconn, [('location', 'https://host:1/path')])
-
-            self.assertIsNotNone(retval)
-            self.assertTrue(api_client.acquire_redirect_connection.called)
-
-    def test_redirect_params_setup_htttps_and_query(self):
-        with mock.patch(vmware.CLIENT_NAME) as mock_client:
-            api_client = mock_client.return_value
-            self.req._api_client = api_client
-            myconn = mock.Mock()
-            (conn, retval) = self.req._redirect_params(myconn, [
-                ('location', 'https://host:1/path?q=1')])
-
-            self.assertIsNotNone(retval)
-            self.assertTrue(api_client.acquire_redirect_connection.called)
-
-    def test_redirect_params_setup_https_connection_no_cookie(self):
-        with mock.patch(vmware.CLIENT_NAME) as mock_client:
-            api_client = mock_client.return_value
-            self.req._api_client = api_client
-            myconn = mock.Mock()
-            (conn, retval) = self.req._redirect_params(myconn, [
-                ('location', 'https://host:1/path')])
-
-            self.assertIsNotNone(retval)
-            self.assertTrue(api_client.acquire_redirect_connection.called)
-
-    def test_redirect_params_setup_https_and_query_no_cookie(self):
-        with mock.patch(vmware.CLIENT_NAME) as mock_client:
-            api_client = mock_client.return_value
-            self.req._api_client = api_client
-            myconn = mock.Mock()
-            (conn, retval) = self.req._redirect_params(
-                myconn, [('location', 'https://host:1/path?q=1')])
-            self.assertIsNotNone(retval)
-            self.assertTrue(api_client.acquire_redirect_connection.called)
-
-    def test_redirect_params_path_only_with_query(self):
-        with mock.patch(vmware.CLIENT_NAME) as mock_client:
-            api_client = mock_client.return_value
-            api_client.wait_for_login.return_value = None
-            api_client.auth_cookie = None
-            api_client.acquire_connection.return_value = True
-            myconn = mock.Mock()
-            (conn, retval) = self.req._redirect_params(myconn, [
-                ('location', '/path?q=1')])
-            self.assertIsNotNone(retval)
-
-    def test_handle_request_auto_login(self):
-        self.req._auto_login = True
-        self.req._api_client = mock.Mock()
-        self.req._api_client.need_login = True
-        self.req._request_str = mock.Mock()
-        self.req._request_str.return_value = 'http://cool/cool'
-        self.req.spawn = mock.Mock()
-        self.req._handle_request()
-
-    def test_handle_request_auto_login_unauth(self):
-        self.req._auto_login = True
-        self.req._api_client = mock.Mock()
-        self.req._api_client.need_login = True
-        self.req._request_str = mock.Mock()
-        self.req._request_str.return_value = 'http://cool/cool'
-
-        import socket
-        resp = httplib.HTTPResponse(socket.socket())
-        resp.status = httplib.UNAUTHORIZED
-        mywaiter = mock.Mock()
-        mywaiter.wait = mock.Mock()
-        mywaiter.wait.return_value = resp
-        self.req.spawn = mock.Mock(return_value=mywaiter)
-        self.req._handle_request()
-
-    def test_construct_eventlet_login_request(self):
-        r = request.LoginRequestEventlet(self.client, 'user', 'password')
-        self.assertIsNotNone(r)
-
-    def test_session_cookie_session_cookie_retrieval(self):
-        r = request.LoginRequestEventlet(self.client, 'user', 'password')
-        r.successful = mock.Mock()
-        r.successful.return_value = True
-        r.value = mock.Mock()
-        r.value.get_header = mock.Mock()
-        r.value.get_header.return_value = 'cool'
-        self.assertIsNotNone(r.session_cookie())
-
-    def test_session_cookie_not_retrieved(self):
-        r = request.LoginRequestEventlet(self.client, 'user', 'password')
-        r.successful = mock.Mock()
-        r.successful.return_value = False
-        r.value = mock.Mock()
-        r.value.get_header = mock.Mock()
-        r.value.get_header.return_value = 'cool'
-        self.assertIsNone(r.session_cookie())
-
-    def test_construct_eventlet_get_api_providers_request(self):
-        r = request.GetApiProvidersRequestEventlet(self.client)
-        self.assertIsNotNone(r)
-
-    def test_api_providers_none_api_providers(self):
-        r = request.GetApiProvidersRequestEventlet(self.client)
-        r.successful = mock.Mock(return_value=False)
-        self.assertIsNone(r.api_providers())
-
-    def test_api_providers_non_none_api_providers(self):
-        r = request.GetApiProvidersRequestEventlet(self.client)
-        r.value = mock.Mock()
-        r.value.body = """{
-          "results": [
-            { "roles": [
-              { "role": "api_provider",
-                "listen_addr": "pssl:1.1.1.1:1" }]}]}"""
-        r.successful = mock.Mock(return_value=True)
-        LOG.info('%s' % r.api_providers())
-        self.assertIsNotNone(r.api_providers())
diff --git a/neutron/tests/unit/vmware/db/__init__.py b/neutron/tests/unit/vmware/db/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/vmware/db/test_lsn_db.py b/neutron/tests/unit/vmware/db/test_lsn_db.py
deleted file mode 100644 (file)
index 5684093..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2014 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from sqlalchemy import orm
-
-from neutron import context
-from neutron.plugins.vmware.common import exceptions as p_exc
-from neutron.plugins.vmware.dbexts import lsn_db
-from neutron.plugins.vmware.dbexts import nsx_models
-from neutron.tests.unit import testlib_api
-
-
-class LSNTestCase(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(LSNTestCase, self).setUp()
-        self.ctx = context.get_admin_context()
-        self.net_id = 'foo_network_id'
-        self.lsn_id = 'foo_lsn_id'
-        self.lsn_port_id = 'foo_port_id'
-        self.subnet_id = 'foo_subnet_id'
-        self.mac_addr = 'aa:bb:cc:dd:ee:ff'
-
-    def test_lsn_add(self):
-        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
-        lsn = (self.ctx.session.query(nsx_models.Lsn).
-               filter_by(lsn_id=self.lsn_id).one())
-        self.assertEqual(self.lsn_id, lsn.lsn_id)
-
-    def test_lsn_remove(self):
-        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
-        lsn_db.lsn_remove(self.ctx, self.lsn_id)
-        q = self.ctx.session.query(nsx_models.Lsn).filter_by(
-            lsn_id=self.lsn_id)
-        self.assertRaises(orm.exc.NoResultFound, q.one)
-
-    def test_lsn_remove_for_network(self):
-        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
-        lsn_db.lsn_remove_for_network(self.ctx, self.net_id)
-        q = self.ctx.session.query(nsx_models.Lsn).filter_by(
-            lsn_id=self.lsn_id)
-        self.assertRaises(orm.exc.NoResultFound, q.one)
-
-    def test_lsn_get_for_network(self):
-        result = lsn_db.lsn_get_for_network(self.ctx, self.net_id,
-                                            raise_on_err=False)
-        self.assertIsNone(result)
-
-    def test_lsn_get_for_network_raise_not_found(self):
-        self.assertRaises(p_exc.LsnNotFound,
-                          lsn_db.lsn_get_for_network,
-                          self.ctx, self.net_id)
-
-    def test_lsn_port_add(self):
-        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
-        lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id,
-                                    self.subnet_id, self.mac_addr, self.lsn_id)
-        result = (self.ctx.session.query(nsx_models.LsnPort).
-                  filter_by(lsn_port_id=self.lsn_port_id).one())
-        self.assertEqual(self.lsn_port_id, result.lsn_port_id)
-
-    def test_lsn_port_get_for_mac(self):
-        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
-        lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id,
-                                    self.subnet_id, self.mac_addr, self.lsn_id)
-        result = lsn_db.lsn_port_get_for_mac(self.ctx, self.mac_addr)
-        self.assertEqual(self.mac_addr, result.mac_addr)
-
-    def test_lsn_port_get_for_mac_raise_not_found(self):
-        self.assertRaises(p_exc.LsnPortNotFound,
-                          lsn_db.lsn_port_get_for_mac,
-                          self.ctx, self.mac_addr)
-
-    def test_lsn_port_get_for_subnet(self):
-        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
-        lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id,
-                                    self.subnet_id, self.mac_addr, self.lsn_id)
-        result = lsn_db.lsn_port_get_for_subnet(self.ctx, self.subnet_id)
-        self.assertEqual(self.subnet_id, result.sub_id)
-
-    def test_lsn_port_get_for_subnet_raise_not_found(self):
-        self.assertRaises(p_exc.LsnPortNotFound,
-                          lsn_db.lsn_port_get_for_subnet,
-                          self.ctx, self.mac_addr)
-
-    def test_lsn_port_remove(self):
-        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
-        lsn_db.lsn_port_remove(self.ctx, self.lsn_port_id)
-        q = (self.ctx.session.query(nsx_models.LsnPort).
-             filter_by(lsn_port_id=self.lsn_port_id))
-        self.assertRaises(orm.exc.NoResultFound, q.one)
diff --git a/neutron/tests/unit/vmware/db/test_nsx_db.py b/neutron/tests/unit/vmware/db/test_nsx_db.py
deleted file mode 100644 (file)
index c9be8a6..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2013 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_db import exception as d_exc
-
-from neutron import context
-from neutron.db import models_v2
-from neutron.plugins.vmware.dbexts import db as nsx_db
-from neutron.plugins.vmware.dbexts import nsx_models
-from neutron.tests.unit import testlib_api
-
-
-class NsxDBTestCase(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(NsxDBTestCase, self).setUp()
-        self.ctx = context.get_admin_context()
-
-    def _setup_neutron_network_and_port(self, network_id, port_id):
-        with self.ctx.session.begin(subtransactions=True):
-            self.ctx.session.add(models_v2.Network(id=network_id))
-            port = models_v2.Port(id=port_id,
-                                  network_id=network_id,
-                                  mac_address='foo_mac_address',
-                                  admin_state_up=True,
-                                  status='ACTIVE',
-                                  device_id='',
-                                  device_owner='')
-            self.ctx.session.add(port)
-
-    def test_add_neutron_nsx_port_mapping_handle_duplicate_constraint(self):
-        neutron_net_id = 'foo_neutron_network_id'
-        neutron_port_id = 'foo_neutron_port_id'
-        nsx_port_id = 'foo_nsx_port_id'
-        nsx_switch_id = 'foo_nsx_switch_id'
-        self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id)
-
-        nsx_db.add_neutron_nsx_port_mapping(
-            self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id)
-        # Call the method twice to trigger a db duplicate constraint error
-        nsx_db.add_neutron_nsx_port_mapping(
-            self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id)
-        result = (self.ctx.session.query(nsx_models.NeutronNsxPortMapping).
-                  filter_by(neutron_id=neutron_port_id).one())
-        self.assertEqual(nsx_port_id, result.nsx_port_id)
-        self.assertEqual(neutron_port_id, result.neutron_id)
-
-    def test_add_neutron_nsx_port_mapping_raise_on_duplicate_constraint(self):
-        neutron_net_id = 'foo_neutron_network_id'
-        neutron_port_id = 'foo_neutron_port_id'
-        nsx_port_id_1 = 'foo_nsx_port_id_1'
-        nsx_port_id_2 = 'foo_nsx_port_id_2'
-        nsx_switch_id = 'foo_nsx_switch_id'
-        self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id)
-
-        nsx_db.add_neutron_nsx_port_mapping(
-            self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id_1)
-        # Call the method twice to trigger a db duplicate constraint error,
-        # this time with a different nsx port id!
-        self.assertRaises(d_exc.DBDuplicateEntry,
-                          nsx_db.add_neutron_nsx_port_mapping,
-                          self.ctx.session, neutron_port_id,
-                          nsx_switch_id, nsx_port_id_2)
-
-    def test_add_neutron_nsx_port_mapping_raise_integrity_constraint(self):
-        neutron_port_id = 'foo_neutron_port_id'
-        nsx_port_id = 'foo_nsx_port_id'
-        nsx_switch_id = 'foo_nsx_switch_id'
-        self.assertRaises(d_exc.DBError,
-                          nsx_db.add_neutron_nsx_port_mapping,
-                          self.ctx.session, neutron_port_id,
-                          nsx_switch_id, nsx_port_id)
diff --git a/neutron/tests/unit/vmware/etc/fake_get_gwservice.json b/neutron/tests/unit/vmware/etc/fake_get_gwservice.json
deleted file mode 100644 (file)
index 5c8f9a3..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "display_name": "%(display_name)s",
-  "_href": "/ws.v1/gateway-service/%(uuid)s",
-  "tags": %(tags_json)s,
-  "_schema": "/ws.v1/schema/L2GatewayServiceConfig",
-  "gateways": [
-    {
-      "transport_node_uuid": "%(transport_node_uuid)s",
-      "type": "L2Gateway",
-      "device_id": "%(device_id)s"
-    }
-  ],
-  "type": "L2GatewayServiceConfig",
-  "uuid": "%(uuid)s"
-}
diff --git a/neutron/tests/unit/vmware/etc/fake_get_lqueue.json b/neutron/tests/unit/vmware/etc/fake_get_lqueue.json
deleted file mode 100644 (file)
index 414945b..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-   "display_name": "%(display_name)s",
-   "uuid": "%(uuid)s",
-   "type": "LogicalSwitchConfig",
-   "_schema": "/ws.v1/schema/LogicalQueueConfig",
-   "dscp": "%(dscp)s",
-   "max_bandwidth_rate": "%(max_bandwidth_rate)s",
-   "min_bandwidth_rate": "%(min_bandwidth_rate)s",
-   "qos_marking": "%(qos_marking)s",
-   "_href": "/ws.v1/lqueue/%(uuid)s"
-}
diff --git a/neutron/tests/unit/vmware/etc/fake_get_lrouter.json b/neutron/tests/unit/vmware/etc/fake_get_lrouter.json
deleted file mode 100644 (file)
index 9425ad6..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-{
-  "display_name": "%(display_name)s",
-  %(distributed_json)s
-  "uuid": "%(uuid)s",
-  "tags": %(tags_json)s,
-  "routing_config": {
-    "type": "SingleDefaultRouteImplicitRoutingConfig",
-    "_schema": "/ws.v1/schema/SingleDefaultRouteImplicitRoutingConfig",
-    "default_route_next_hop": {
-      "type": "RouterNextHop",
-      "_schema": "/ws.v1/schema/RouterNextHop",
-      "gateway_ip_address": "%(default_next_hop)s"
-    }
-  },
-  "_schema": "/ws.v1/schema/LogicalRouterConfig",
-  "_relations": {
-    "LogicalRouterStatus": {
-      "_href": "/ws.v1/lrouter/%(uuid)s/status",
-      "lport_admin_up_count": %(lport_count)d,
-      "_schema": "/ws.v1/schema/LogicalRouterStatus",
-      "lport_count": %(lport_count)d,
-      "fabric_status": %(status)s,
-      "type": "LogicalRouterStatus",
-      "lport_link_up_count": %(lport_count)d
-    }
-  },
-  "type": "LogicalRouterConfig",
-  "_href": "/ws.v1/lrouter/%(uuid)s"
-}
\ No newline at end of file
diff --git a/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport.json b/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport.json
deleted file mode 100644 (file)
index df9fcbe..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "display_name": "%(display_name)s",
-  "admin_status_enabled": "%(admin_status_enabled)s",
-  "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(uuid)s",
-  "tags":
-    [{"scope": "q_port_id", "tag": "%(neutron_port_id)s"},
-     {"scope": "os_tid", "tag": "%(tenant_id)s"}],
-  "ip_addresses": %(ip_addresses_json)s,
-  "_schema": "/ws.v1/schema/LogicalRouterPortConfig",
-  "type": "LogicalRouterPortConfig",
-  "uuid": "%(uuid)s"
-}
diff --git a/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport_att.json b/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport_att.json
deleted file mode 100644 (file)
index bc5723d..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "LogicalPortAttachment":
-    {
-      %(peer_port_href_field)s
-      %(peer_port_uuid_field)s
-      %(l3_gateway_service_uuid_field)s
-      %(vlan_id)s
-      "type": "%(type)s",
-      "schema": "/ws.v1/schema/%(type)s"
-    }
-}
\ No newline at end of file
diff --git a/neutron/tests/unit/vmware/etc/fake_get_lrouter_nat.json b/neutron/tests/unit/vmware/etc/fake_get_lrouter_nat.json
deleted file mode 100644 (file)
index 5f7c8ba..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "_href": "/ws.v1/lrouter/%(lr_uuid)s/nat/%(uuid)s",
-  "type": "%(type)s",
-  "match": %(match_json)s,
-  "uuid": "%(uuid)s"
-}
\ No newline at end of file
diff --git a/neutron/tests/unit/vmware/etc/fake_get_lswitch.json b/neutron/tests/unit/vmware/etc/fake_get_lswitch.json
deleted file mode 100644 (file)
index a55d508..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-{"display_name": "%(display_name)s",
- "_href": "/ws.v1/lswitch/%(uuid)s",
- "_schema": "/ws.v1/schema/LogicalSwitchConfig",
- "_relations": {"LogicalSwitchStatus":
-     {"fabric_status": %(status)s,
-      "type": "LogicalSwitchStatus",
-      "lport_count": %(lport_count)d,
-      "_href": "/ws.v1/lswitch/%(uuid)s/status",
-      "_schema": "/ws.v1/schema/LogicalSwitchStatus"}},
- "type": "LogicalSwitchConfig",
- "tags": %(tags_json)s,
- "uuid": "%(uuid)s"}
diff --git a/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport.json b/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport.json
deleted file mode 100644 (file)
index 3e5cb90..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-{"display_name": "%(display_name)s",
-   "_relations":
-   {"LogicalPortStatus":
-      {"type": "LogicalSwitchPortStatus",
-       "admin_status_enabled": true,
-       "fabric_status_up": %(status)s,
-       "link_status_up": %(status)s,
-       "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s/status",
-       "_schema": "/ws.v1/schema/LogicalSwitchPortStatus"},
-    "LogicalSwitchConfig":
-      {"uuid": "%(ls_uuid)s"},
-    "LogicalPortAttachment":
-      {
-       "type": "%(att_type)s",
-       %(att_info_json)s
-       "schema": "/ws.v1/schema/%(att_type)s"
-      }
-   },
- "tags":
-   [{"scope": "q_port_id", "tag": "%(neutron_port_id)s"},
-    {"scope": "vm_id", "tag": "%(neutron_device_id)s"},
-    {"scope": "os_tid", "tag": "%(tenant_id)s"}],
- "uuid": "%(uuid)s",
- "admin_status_enabled": "%(admin_status_enabled)s",
- "type": "LogicalSwitchPortConfig",
- "_schema": "/ws.v1/schema/LogicalSwitchPortConfig",
- "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s"
- }
diff --git a/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_att.json b/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_att.json
deleted file mode 100644 (file)
index cd1788b..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-  "LogicalPortAttachment":
-    {
-      "type": "%(att_type)s",
-      "schema": "/ws.v1/schema/%(att_type)s"
-    }
-}
\ No newline at end of file
diff --git a/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_status.json b/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_status.json
deleted file mode 100644 (file)
index 0df7971..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-{"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s",
- "lswitch":
-    {"display_name": "%(ls_name)s",
-     "uuid": "%(ls_uuid)s",
-     "tags": [
-        {"scope": "os_tid",
-         "tag": "%(ls_tenant_id)s"}
-     ],
-     "type": "LogicalSwitchConfig",
-     "_schema": "/ws.v1/schema/LogicalSwitchConfig",
-     "port_isolation_enabled": false,
-     "transport_zones": [
-        {"zone_uuid": "%(ls_zone_uuid)s",
-         "transport_type": "stt"}
-     ],
-     "_href": "/ws.v1/lswitch/%(ls_uuid)s"},
- "link_status_up": false,
- "_schema": "/ws.v1/schema/LogicalSwitchPortStatus",
- "admin_status_enabled": true,
- "fabric_status_up": true,
- "link_status_up": true,
- "type": "LogicalSwitchPortStatus"
-}
diff --git a/neutron/tests/unit/vmware/etc/fake_get_security_profile.json b/neutron/tests/unit/vmware/etc/fake_get_security_profile.json
deleted file mode 100644 (file)
index 898e493..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "display_name": "%(display_name)s",
-   "_href": "/ws.v1/security-profile/%(uuid)s",
-   "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"},
-            {"scope": "nova_spid", "tag": "%(nova_spid)s"}],
-   "logical_port_egress_rules": %(logical_port_egress_rules_json)s,
-   "_schema": "/ws.v1/schema/SecurityProfileConfig",
-   "logical_port_ingress_rules": %(logical_port_ingress_rules_json)s,
-   "uuid": "%(uuid)s"
-}
diff --git a/neutron/tests/unit/vmware/etc/fake_post_gwservice.json b/neutron/tests/unit/vmware/etc/fake_post_gwservice.json
deleted file mode 100644 (file)
index 72292fd..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "display_name": "%(display_name)s",
-  "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}],
-  "gateways": [
-    {
-      "transport_node_uuid": "%(transport_node_uuid)s",
-      "device_id": "%(device_id)s",
-      "type": "L2Gateway"
-    }
-  ],
-  "type": "L2GatewayServiceConfig",
-  "uuid": "%(uuid)s"
-}
diff --git a/neutron/tests/unit/vmware/etc/fake_post_lqueue.json b/neutron/tests/unit/vmware/etc/fake_post_lqueue.json
deleted file mode 100644 (file)
index 414945b..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-   "display_name": "%(display_name)s",
-   "uuid": "%(uuid)s",
-   "type": "LogicalSwitchConfig",
-   "_schema": "/ws.v1/schema/LogicalQueueConfig",
-   "dscp": "%(dscp)s",
-   "max_bandwidth_rate": "%(max_bandwidth_rate)s",
-   "min_bandwidth_rate": "%(min_bandwidth_rate)s",
-   "qos_marking": "%(qos_marking)s",
-   "_href": "/ws.v1/lqueue/%(uuid)s"
-}
diff --git a/neutron/tests/unit/vmware/etc/fake_post_lrouter.json b/neutron/tests/unit/vmware/etc/fake_post_lrouter.json
deleted file mode 100644 (file)
index dbe2811..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-  "display_name": "%(display_name)s",
-  %(distributed_json)s
-  "uuid": "%(uuid)s",
-  "tags": [
-    {
-      "scope": "os_tid",
-      "tag": "%(tenant_id)s"
-    }
-  ],
-  "routing_config": {
-    "type": "SingleDefaultRouteImplicitRoutingConfig",
-    "_schema": "/ws.v1/schema/SingleDefaultRouteImplicitRoutingConfig",
-    "default_route_next_hop": {
-      "type": "RouterNextHop",
-      "_schema": "/ws.v1/schema/RouterNextHop",
-      "gateway_ip_address": "%(default_next_hop)s"
-    }
-  },
-  "_schema": "/ws.v1/schema/LogicalRouterConfig",
-  "type": "LogicalRouterConfig",
-  "_href": "/ws.v1/lrouter/%(uuid)s"
-}
\ No newline at end of file
diff --git a/neutron/tests/unit/vmware/etc/fake_post_lrouter_lport.json b/neutron/tests/unit/vmware/etc/fake_post_lrouter_lport.json
deleted file mode 100644 (file)
index bcb13ae..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "display_name": "%(display_name)s",
-  "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(uuid)s",
-  "_schema": "/ws.v1/schema/LogicalRouterPortConfig",
-  "mac_address": "00:00:00:00:00:00",
-  "admin_status_enabled": true,
-  "ip_addresses": %(ip_addresses_json)s,
-  "type": "LogicalRouterPortConfig",
-  "uuid": "%(uuid)s"
-}
\ No newline at end of file
diff --git a/neutron/tests/unit/vmware/etc/fake_post_lrouter_nat.json b/neutron/tests/unit/vmware/etc/fake_post_lrouter_nat.json
deleted file mode 100644 (file)
index 5f7c8ba..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "_href": "/ws.v1/lrouter/%(lr_uuid)s/nat/%(uuid)s",
-  "type": "%(type)s",
-  "match": %(match_json)s,
-  "uuid": "%(uuid)s"
-}
\ No newline at end of file
diff --git a/neutron/tests/unit/vmware/etc/fake_post_lswitch.json b/neutron/tests/unit/vmware/etc/fake_post_lswitch.json
deleted file mode 100644 (file)
index 7d8f9e3..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-   "display_name": "%(display_name)s",
-   "uuid": "%(uuid)s",
-   "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}],
-   "type": "LogicalSwitchConfig",
-   "_schema": "/ws.v1/schema/LogicalSwitchConfig",
-   "port_isolation_enabled": false,
-   "transport_zones": [
-      {"zone_uuid": "%(zone_uuid)s",
-      "transport_type": "stt"}],
-   "_href": "/ws.v1/lswitch/%(uuid)s"
-}
diff --git a/neutron/tests/unit/vmware/etc/fake_post_lswitch_lport.json b/neutron/tests/unit/vmware/etc/fake_post_lswitch_lport.json
deleted file mode 100644 (file)
index cc8decf..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "display_name": "%(uuid)s",
- "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s",
- "security_profiles": [],
- "tags":
-    [{"scope": "q_port_id", "tag": "%(neutron_port_id)s"},
-     {"scope": "vm_id", "tag": "%(neutron_device_id)s"},
-     {"scope": "os_tid", "tag": "%(tenant_id)s"}],
- "portno": 1,
- "queue_uuid": null,
- "_schema": "/ws.v1/schema/LogicalSwitchPortConfig",
- "mirror_targets": [],
- "allowed_address_pairs": [],
- "admin_status_enabled": true,
- "type": "LogicalSwitchPortConfig",
- "uuid": "%(uuid)s"
-}
diff --git a/neutron/tests/unit/vmware/etc/fake_post_security_profile.json b/neutron/tests/unit/vmware/etc/fake_post_security_profile.json
deleted file mode 100644 (file)
index 594da33..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "display_name": "%(display_name)s",
-   "_href": "/ws.v1/security-profile/%(uuid)s",
-   "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"},
-            {"scope": "nova_spid", "tag": "%(nova_spid)s"}],
-   "logical_port_egress_rules": [],
-   "_schema": "/ws.v1/schema/SecurityProfileConfig",
-   "logical_port_ingress_rules": [],
-   "uuid": "%(uuid)s"
-}
diff --git a/neutron/tests/unit/vmware/etc/fake_put_lrouter_lport_att.json b/neutron/tests/unit/vmware/etc/fake_put_lrouter_lport_att.json
deleted file mode 100644 (file)
index c58fa41..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "LogicalPortAttachment":
-    {
-      %(peer_port_href_field)s
-      %(peer_port_uuid_field)s
-      %(l3_gateway_service_uuid_field)s
-      %(vlan_id_field)s
-      "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(lp_uuid)s/attachment",
-      "type": "%(type)s",
-      "schema": "/ws.v1/schema/%(type)s"
-    }
-}
\ No newline at end of file
diff --git a/neutron/tests/unit/vmware/etc/fake_put_lswitch_lport_att.json b/neutron/tests/unit/vmware/etc/fake_put_lswitch_lport_att.json
deleted file mode 100644 (file)
index dd0daa3..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "LogicalPortAttachment":
-    {
-      %(peer_port_href_field)s
-      %(peer_port_uuid_field)s
-      %(vif_uuid_field)s
-      "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(lp_uuid)s/attachment",
-      "type": "%(type)s",
-      "schema": "/ws.v1/schema/%(type)s"
-    }
-}
\ No newline at end of file
diff --git a/neutron/tests/unit/vmware/etc/neutron.conf.test b/neutron/tests/unit/vmware/etc/neutron.conf.test
deleted file mode 100644 (file)
index 4fd0092..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-[DEFAULT]
-# Show more verbose log output (sets INFO log level output)
-verbose = True
-
-# Show debugging output in logs (sets DEBUG log level output)
-debug = False
-
-# Address to bind the API server
-bind_host = 0.0.0.0
-
-# Port the bind the API server to
-bind_port = 9696
-
-# MISSING Path to the extensions
-# api_extensions_path =
-
-# Paste configuration file
-api_paste_config = api-paste.ini.test
-
-# The messaging module to use, defaults to kombu.
-rpc_backend = fake
-
-lock_path = $state_path/lock
-
-[database]
-connection = 'sqlite://'
diff --git a/neutron/tests/unit/vmware/etc/nsx.ini.agentless.test b/neutron/tests/unit/vmware/etc/nsx.ini.agentless.test
deleted file mode 100644 (file)
index d69df72..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-[DEFAULT]
-default_tz_uuid = fake_tz_uuid
-nova_zone_id = whatever
-nsx_controllers = fake_1, fake_2
-nsx_user = foo
-nsx_password = bar
-default_l3_gw_service_uuid = whatever
-default_l2_gw_service_uuid = whatever
-default_service_cluster_uuid = whatever
-default_interface_name = whatever
-http_timeout = 13
-redirects = 12
-retries = 11
-
-[NSX]
-agent_mode = agentless
diff --git a/neutron/tests/unit/vmware/etc/nsx.ini.basic.test b/neutron/tests/unit/vmware/etc/nsx.ini.basic.test
deleted file mode 100644 (file)
index c8fb988..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-[DEFAULT]
-default_tz_uuid = fake_tz_uuid
-nsx_controllers=fake_1,fake_2
-nsx_user=foo
-nsx_password=bar
diff --git a/neutron/tests/unit/vmware/etc/nsx.ini.combined.test b/neutron/tests/unit/vmware/etc/nsx.ini.combined.test
deleted file mode 100644 (file)
index 079a738..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-[DEFAULT]
-default_tz_uuid = fake_tz_uuid
-nova_zone_id = whatever
-nsx_controllers = fake_1, fake_2
-nsx_user = foo
-nsx_password = bar
-default_l3_gw_service_uuid = whatever
-default_l2_gw_service_uuid = whatever
-default_service_cluster_uuid = whatever
-default_interface_name = whatever
-http_timeout = 13
-redirects = 12
-retries = 11
-
-[NSX]
-agent_mode = combined
diff --git a/neutron/tests/unit/vmware/etc/nsx.ini.full.test b/neutron/tests/unit/vmware/etc/nsx.ini.full.test
deleted file mode 100644 (file)
index a11a86d..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-[DEFAULT]
-default_tz_uuid = fake_tz_uuid
-nova_zone_id = whatever
-nsx_controllers = fake_1, fake_2
-nsx_user = foo
-nsx_password = bar
-default_l3_gw_service_uuid = whatever
-default_l2_gw_service_uuid = whatever
-default_interface_name = whatever
-http_timeout = 13
-redirects = 12
-retries = 11
diff --git a/neutron/tests/unit/vmware/etc/nsx.ini.test b/neutron/tests/unit/vmware/etc/nsx.ini.test
deleted file mode 100644 (file)
index 1bb959b..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-[DEFAULT]
-default_tz_uuid = fake_tz_uuid
-nsx_controllers=fake_1, fake_2
-nsx_user=foo
-nsx_password=bar
-default_l3_gw_service_uuid = whatever
-default_l2_gw_service_uuid = whatever
diff --git a/neutron/tests/unit/vmware/etc/nvp.ini.full.test b/neutron/tests/unit/vmware/etc/nvp.ini.full.test
deleted file mode 100644 (file)
index 83de593..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-[DEFAULT]
-default_tz_uuid = fake_tz_uuid
-nova_zone_id = whatever
-nvp_controllers = fake_1, fake_2
-nvp_user = foo
-nvp_password = bar
-default_l3_gw_service_uuid = whatever
-default_l2_gw_service_uuid = whatever
-default_interface_name = whatever
-http_timeout = 3
-redirects = 2
-retries = 2
diff --git a/neutron/tests/unit/vmware/etc/vcns.ini.test b/neutron/tests/unit/vmware/etc/vcns.ini.test
deleted file mode 100644 (file)
index 38b3361..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-[vcns]
-manager_uri = https://fake-host
-user = fake-user
-passwordd = fake-password
-datacenter_moid = fake-moid
-resource_pool_id = fake-resgroup
-datastore_id = fake-datastore
-external_network = fake-ext-net
-task_status_check_interval = 100
diff --git a/neutron/tests/unit/vmware/extensions/__init__.py b/neutron/tests/unit/vmware/extensions/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/vmware/extensions/test_addresspairs.py b/neutron/tests/unit/vmware/extensions/test_addresspairs.py
deleted file mode 100644 (file)
index 11d84fe..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron.extensions import allowedaddresspairs as addr_pair
-from neutron.tests.unit import test_extension_allowedaddresspairs as ext_pairs
-from neutron.tests.unit.vmware import test_nsx_plugin
-
-
-class TestAllowedAddressPairs(test_nsx_plugin.NsxPluginV2TestCase,
-                              ext_pairs.TestAllowedAddressPairs):
-
-    # TODO(arosen): move to ext_pairs.TestAllowedAddressPairs once all
-    # plugins do this correctly.
-    def test_create_port_no_allowed_address_pairs(self):
-        with self.network() as net:
-            res = self._create_port(self.fmt, net['network']['id'])
-            port = self.deserialize(self.fmt, res)
-            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], [])
-            self._delete('ports', port['port']['id'])
diff --git a/neutron/tests/unit/vmware/extensions/test_maclearning.py b/neutron/tests/unit/vmware/extensions/test_maclearning.py
deleted file mode 100644 (file)
index e2688aa..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import mock
-
-from oslo_config import cfg
-
-from neutron.api.v2 import attributes
-from neutron.common import test_lib
-from neutron import context
-from neutron.extensions import agent
-from neutron.plugins.vmware.api_client import version
-from neutron.plugins.vmware.common import sync
-from neutron.tests.unit import test_db_plugin
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware.apiclient import fake
-
-
-class MacLearningExtensionManager(object):
-
-    def get_resources(self):
-        # Add the resources to the global attribute map
-        # This is done here as the setup process won't
-        # initialize the main API router which extends
-        # the global attribute map
-        attributes.RESOURCE_ATTRIBUTE_MAP.update(
-            agent.RESOURCE_ATTRIBUTE_MAP)
-        return agent.Agent.get_resources()
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class MacLearningDBTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
-    fmt = 'json'
-
-    def setUp(self):
-        test_lib.test_config['config_files'] = [
-            vmware.get_fake_conf('nsx.ini.full.test')]
-        cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
-        # Save the original RESOURCE_ATTRIBUTE_MAP
-        self.saved_attr_map = {}
-        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
-            self.saved_attr_map[resource] = attrs.copy()
-        ext_mgr = MacLearningExtensionManager()
-        # mock api client
-        self.fc = fake.FakeClient(vmware.STUBS_PATH)
-        self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True)
-        instance = self.mock_nsx.start()
-        # Avoid runs of the synchronizer looping call
-        patch_sync = mock.patch.object(sync, '_start_loopingcall')
-        patch_sync.start()
-
-        # Emulate tests against NSX 2.x
-        instance.return_value.get_version.return_value = version.Version("3.0")
-        instance.return_value.request.side_effect = self.fc.fake_request
-        cfg.CONF.set_override('metadata_mode', None, 'NSX')
-        self.addCleanup(self.fc.reset_all)
-        self.addCleanup(self.restore_resource_attribute_map)
-        super(MacLearningDBTestCase, self).setUp(plugin=vmware.PLUGIN_NAME,
-                                                 ext_mgr=ext_mgr)
-        self.adminContext = context.get_admin_context()
-
-    def restore_resource_attribute_map(self):
-        # Restore the original RESOURCE_ATTRIBUTE_MAP
-        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
-
-    def test_create_with_mac_learning(self):
-        with self.port(arg_list=('mac_learning_enabled',),
-                       mac_learning_enabled=True) as port:
-            # Validate create operation response
-            self.assertEqual(True, port['port']['mac_learning_enabled'])
-            # Verify that db operation successfully set mac learning state
-            req = self.new_show_request('ports', port['port']['id'], self.fmt)
-            sport = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(True, sport['port']['mac_learning_enabled'])
-
-    def test_create_and_show_port_without_mac_learning(self):
-        with self.port() as port:
-            req = self.new_show_request('ports', port['port']['id'], self.fmt)
-            sport = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertNotIn('mac_learning_enabled', sport['port'])
-
-    def test_update_port_with_mac_learning(self):
-        with self.port(arg_list=('mac_learning_enabled',),
-                       mac_learning_enabled=False) as port:
-            data = {'port': {'mac_learning_enabled': True}}
-            req = self.new_update_request('ports', data, port['port']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(True, res['port']['mac_learning_enabled'])
-
-    def test_update_preexisting_port_with_mac_learning(self):
-        with self.port() as port:
-            req = self.new_show_request('ports', port['port']['id'], self.fmt)
-            sport = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertNotIn('mac_learning_enabled', sport['port'])
-            data = {'port': {'mac_learning_enabled': True}}
-            req = self.new_update_request('ports', data, port['port']['id'])
-            # Validate update operation response
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(True, res['port']['mac_learning_enabled'])
-            # Verify that db operation successfully updated mac learning state
-            req = self.new_show_request('ports', port['port']['id'], self.fmt)
-            sport = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(True, sport['port']['mac_learning_enabled'])
-
-    def test_list_ports(self):
-        # for this test we need to enable overlapping ips
-        cfg.CONF.set_default('allow_overlapping_ips', True)
-        with contextlib.nested(self.port(arg_list=('mac_learning_enabled',),
-                                         mac_learning_enabled=True),
-                               self.port(arg_list=('mac_learning_enabled',),
-                                         mac_learning_enabled=True),
-                               self.port(arg_list=('mac_learning_enabled',),
-                                         mac_learning_enabled=True)):
-            for port in self._list('ports')['ports']:
-                self.assertEqual(True, port['mac_learning_enabled'])
-
-    def test_show_port(self):
-        with self.port(arg_list=('mac_learning_enabled',),
-                       mac_learning_enabled=True) as p:
-            port_res = self._show('ports', p['port']['id'])['port']
-            self.assertEqual(True, port_res['mac_learning_enabled'])
diff --git a/neutron/tests/unit/vmware/extensions/test_networkgw.py b/neutron/tests/unit/vmware/extensions/test_networkgw.py
deleted file mode 100644 (file)
index bc2c580..0000000
+++ /dev/null
@@ -1,1109 +0,0 @@
-# Copyright 2012 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import mock
-
-from oslo_config import cfg
-from webob import exc
-import webtest
-
-from neutron.api import extensions
-from neutron.api.v2 import attributes
-from neutron import context
-from neutron.db import api as db_api
-from neutron.db import db_base_plugin_v2
-from neutron import manager
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.dbexts import networkgw_db
-from neutron.plugins.vmware.dbexts import nsx_models
-from neutron.plugins.vmware.extensions import networkgw
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
-from neutron import quota
-from neutron.tests import base
-from neutron.tests.unit import test_api_v2
-from neutron.tests.unit import test_db_plugin
-from neutron.tests.unit import test_extensions
-from neutron.tests.unit import testlib_plugin
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware import test_nsx_plugin
-
-_uuid = test_api_v2._uuid
-_get_path = test_api_v2._get_path
-
-
-class TestExtensionManager(object):
-
-    def get_resources(self):
-        # Add the resources to the global attribute map
-        # This is done here as the setup process won't
-        # initialize the main API router which extends
-        # the global attribute map
-        attributes.RESOURCE_ATTRIBUTE_MAP.update(
-            networkgw.RESOURCE_ATTRIBUTE_MAP)
-        return networkgw.Networkgw.get_resources()
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class NetworkGatewayExtensionTestCase(base.BaseTestCase,
-                                      testlib_plugin.PluginSetupHelper):
-
-    def setUp(self):
-        super(NetworkGatewayExtensionTestCase, self).setUp()
-        plugin = '%s.%s' % (networkgw.__name__,
-                            networkgw.NetworkGatewayPluginBase.__name__)
-        self._gw_resource = networkgw.GATEWAY_RESOURCE_NAME
-        self._dev_resource = networkgw.DEVICE_RESOURCE_NAME
-
-        # Ensure existing ExtensionManager is not used
-        extensions.PluginAwareExtensionManager._instance = None
-
-        # Create the default configurations
-        self.config_parse()
-
-        # Update the plugin and extensions path
-        self.setup_coreplugin(plugin)
-
-        _plugin_patcher = mock.patch(plugin, autospec=True)
-        self.plugin = _plugin_patcher.start()
-
-        # Instantiate mock plugin and enable extensions
-        manager.NeutronManager.get_plugin().supported_extension_aliases = (
-            [networkgw.EXT_ALIAS])
-        ext_mgr = TestExtensionManager()
-        extensions.PluginAwareExtensionManager._instance = ext_mgr
-        self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
-        self.api = webtest.TestApp(self.ext_mdw)
-
-        quota.QUOTAS._driver = None
-        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
-                              group='QUOTAS')
-
-    def test_network_gateway_create(self):
-        nw_gw_id = _uuid()
-        data = {self._gw_resource: {'name': 'nw-gw',
-                                    'tenant_id': _uuid(),
-                                    'devices': [{'id': _uuid(),
-                                                 'interface_name': 'xxx'}]}}
-        return_value = data[self._gw_resource].copy()
-        return_value.update({'id': nw_gw_id})
-        instance = self.plugin.return_value
-        instance.create_network_gateway.return_value = return_value
-        res = self.api.post_json(_get_path(networkgw.NETWORK_GATEWAYS), data)
-        instance.create_network_gateway.assert_called_with(
-            mock.ANY, network_gateway=data)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        self.assertIn(self._gw_resource, res.json)
-        nw_gw = res.json[self._gw_resource]
-        self.assertEqual(nw_gw['id'], nw_gw_id)
-
-    def _test_network_gateway_create_with_error(
-        self, data, error_code=exc.HTTPBadRequest.code):
-        res = self.api.post_json(_get_path(networkgw.NETWORK_GATEWAYS), data,
-                                 expect_errors=True)
-        self.assertEqual(res.status_int, error_code)
-
-    def test_network_gateway_create_invalid_device_spec(self):
-        data = {self._gw_resource: {'name': 'nw-gw',
-                                    'tenant_id': _uuid(),
-                                    'devices': [{'id': _uuid(),
-                                                 'invalid': 'xxx'}]}}
-        self._test_network_gateway_create_with_error(data)
-
-    def test_network_gateway_create_extra_attr_in_device_spec(self):
-        data = {self._gw_resource: {'name': 'nw-gw',
-                                    'tenant_id': _uuid(),
-                                    'devices':
-                                    [{'id': _uuid(),
-                                      'interface_name': 'xxx',
-                                      'extra_attr': 'onetoomany'}]}}
-        self._test_network_gateway_create_with_error(data)
-
-    def test_network_gateway_update(self):
-        nw_gw_name = 'updated'
-        data = {self._gw_resource: {'name': nw_gw_name}}
-        nw_gw_id = _uuid()
-        return_value = {'id': nw_gw_id,
-                        'name': nw_gw_name}
-
-        instance = self.plugin.return_value
-        instance.update_network_gateway.return_value = return_value
-        res = self.api.put_json(
-            _get_path('%s/%s' % (networkgw.NETWORK_GATEWAYS, nw_gw_id)), data)
-        instance.update_network_gateway.assert_called_with(
-            mock.ANY, nw_gw_id, network_gateway=data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        self.assertIn(self._gw_resource, res.json)
-        nw_gw = res.json[self._gw_resource]
-        self.assertEqual(nw_gw['id'], nw_gw_id)
-        self.assertEqual(nw_gw['name'], nw_gw_name)
-
-    def test_network_gateway_delete(self):
-        nw_gw_id = _uuid()
-        instance = self.plugin.return_value
-        res = self.api.delete(_get_path('%s/%s' % (networkgw.NETWORK_GATEWAYS,
-                                                   nw_gw_id)))
-
-        instance.delete_network_gateway.assert_called_with(mock.ANY,
-                                                           nw_gw_id)
-        self.assertEqual(res.status_int, exc.HTTPNoContent.code)
-
-    def test_network_gateway_get(self):
-        nw_gw_id = _uuid()
-        return_value = {self._gw_resource: {'name': 'test',
-                                            'devices':
-                                            [{'id': _uuid(),
-                                              'interface_name': 'xxx'}],
-                                            'id': nw_gw_id}}
-        instance = self.plugin.return_value
-        instance.get_network_gateway.return_value = return_value
-
-        res = self.api.get(_get_path('%s/%s' % (networkgw.NETWORK_GATEWAYS,
-                                                nw_gw_id)))
-
-        instance.get_network_gateway.assert_called_with(mock.ANY,
-                                                        nw_gw_id,
-                                                        fields=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_network_gateway_list(self):
-        nw_gw_id = _uuid()
-        return_value = [{self._gw_resource: {'name': 'test',
-                                             'devices':
-                                             [{'id': _uuid(),
-                                               'interface_name': 'xxx'}],
-                                             'id': nw_gw_id}}]
-        instance = self.plugin.return_value
-        instance.get_network_gateways.return_value = return_value
-
-        res = self.api.get(_get_path(networkgw.NETWORK_GATEWAYS))
-
-        instance.get_network_gateways.assert_called_with(mock.ANY,
-                                                         fields=mock.ANY,
-                                                         filters=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_network_gateway_connect(self):
-        nw_gw_id = _uuid()
-        nw_id = _uuid()
-        gw_port_id = _uuid()
-        mapping_data = {'network_id': nw_id,
-                        'segmentation_type': 'vlan',
-                        'segmentation_id': '999'}
-        return_value = {'connection_info': {
-                        'network_gateway_id': nw_gw_id,
-                        'port_id': gw_port_id,
-                        'network_id': nw_id}}
-        instance = self.plugin.return_value
-        instance.connect_network.return_value = return_value
-        res = self.api.put_json(_get_path('%s/%s/connect_network' %
-                                          (networkgw.NETWORK_GATEWAYS,
-                                           nw_gw_id)),
-                                mapping_data)
-        instance.connect_network.assert_called_with(mock.ANY,
-                                                    nw_gw_id,
-                                                    mapping_data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        nw_conn_res = res.json['connection_info']
-        self.assertEqual(nw_conn_res['port_id'], gw_port_id)
-        self.assertEqual(nw_conn_res['network_id'], nw_id)
-
-    def test_network_gateway_disconnect(self):
-        nw_gw_id = _uuid()
-        nw_id = _uuid()
-        mapping_data = {'network_id': nw_id}
-        instance = self.plugin.return_value
-        res = self.api.put_json(_get_path('%s/%s/disconnect_network' %
-                                          (networkgw.NETWORK_GATEWAYS,
-                                           nw_gw_id)),
-                                mapping_data)
-        instance.disconnect_network.assert_called_with(mock.ANY,
-                                                       nw_gw_id,
-                                                       mapping_data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_gateway_device_get(self):
-        gw_dev_id = _uuid()
-        return_value = {self._dev_resource: {'name': 'test',
-                                             'connector_type': 'stt',
-                                             'connector_ip': '1.1.1.1',
-                                             'id': gw_dev_id}}
-        instance = self.plugin.return_value
-        instance.get_gateway_device.return_value = return_value
-
-        res = self.api.get(_get_path('%s/%s' % (networkgw.GATEWAY_DEVICES,
-                                                gw_dev_id)))
-
-        instance.get_gateway_device.assert_called_with(mock.ANY,
-                                                       gw_dev_id,
-                                                       fields=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_gateway_device_list(self):
-        gw_dev_id = _uuid()
-        return_value = [{self._dev_resource: {'name': 'test',
-                                              'connector_type': 'stt',
-                                              'connector_ip': '1.1.1.1',
-                                              'id': gw_dev_id}}]
-        instance = self.plugin.return_value
-        instance.get_gateway_devices.return_value = return_value
-
-        res = self.api.get(_get_path(networkgw.GATEWAY_DEVICES))
-
-        instance.get_gateway_devices.assert_called_with(mock.ANY,
-                                                        fields=mock.ANY,
-                                                        filters=mock.ANY)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-
-    def test_gateway_device_create(self):
-        gw_dev_id = _uuid()
-        data = {self._dev_resource: {'name': 'test-dev',
-                                     'tenant_id': _uuid(),
-                                     'client_certificate': 'xyz',
-                                     'connector_type': 'stt',
-                                     'connector_ip': '1.1.1.1'}}
-        return_value = data[self._dev_resource].copy()
-        return_value.update({'id': gw_dev_id})
-        instance = self.plugin.return_value
-        instance.create_gateway_device.return_value = return_value
-        res = self.api.post_json(_get_path(networkgw.GATEWAY_DEVICES), data)
-        instance.create_gateway_device.assert_called_with(
-            mock.ANY, gateway_device=data)
-        self.assertEqual(res.status_int, exc.HTTPCreated.code)
-        self.assertIn(self._dev_resource, res.json)
-        gw_dev = res.json[self._dev_resource]
-        self.assertEqual(gw_dev['id'], gw_dev_id)
-
-    def _test_gateway_device_create_with_error(
-        self, data, error_code=exc.HTTPBadRequest.code):
-        res = self.api.post_json(_get_path(networkgw.GATEWAY_DEVICES), data,
-                                 expect_errors=True)
-        self.assertEqual(res.status_int, error_code)
-
-    def test_gateway_device_create_invalid_connector_type(self):
-        data = {self._gw_resource: {'name': 'test-dev',
-                                    'client_certificate': 'xyz',
-                                    'tenant_id': _uuid(),
-                                    'connector_type': 'invalid',
-                                    'connector_ip': '1.1.1.1'}}
-        self._test_gateway_device_create_with_error(data)
-
-    def test_gateway_device_create_invalid_connector_ip(self):
-        data = {self._gw_resource: {'name': 'test-dev',
-                                    'client_certificate': 'xyz',
-                                    'tenant_id': _uuid(),
-                                    'connector_type': 'stt',
-                                    'connector_ip': 'invalid'}}
-        self._test_gateway_device_create_with_error(data)
-
-    def test_gateway_device_create_extra_attr_in_device_spec(self):
-        data = {self._gw_resource: {'name': 'test-dev',
-                                    'client_certificate': 'xyz',
-                                    'tenant_id': _uuid(),
-                                    'alien_attribute': 'E.T.',
-                                    'connector_type': 'stt',
-                                    'connector_ip': '1.1.1.1'}}
-        self._test_gateway_device_create_with_error(data)
-
-    def test_gateway_device_update(self):
-        gw_dev_name = 'updated'
-        data = {self._dev_resource: {'name': gw_dev_name}}
-        gw_dev_id = _uuid()
-        return_value = {'id': gw_dev_id,
-                        'name': gw_dev_name}
-
-        instance = self.plugin.return_value
-        instance.update_gateway_device.return_value = return_value
-        res = self.api.put_json(
-            _get_path('%s/%s' % (networkgw.GATEWAY_DEVICES, gw_dev_id)), data)
-        instance.update_gateway_device.assert_called_with(
-            mock.ANY, gw_dev_id, gateway_device=data)
-        self.assertEqual(res.status_int, exc.HTTPOk.code)
-        self.assertIn(self._dev_resource, res.json)
-        gw_dev = res.json[self._dev_resource]
-        self.assertEqual(gw_dev['id'], gw_dev_id)
-        self.assertEqual(gw_dev['name'], gw_dev_name)
-
-    def test_gateway_device_delete(self):
-        gw_dev_id = _uuid()
-        instance = self.plugin.return_value
-        res = self.api.delete(_get_path('%s/%s' % (networkgw.GATEWAY_DEVICES,
-                                                   gw_dev_id)))
-        instance.delete_gateway_device.assert_called_with(mock.ANY, gw_dev_id)
-        self.assertEqual(res.status_int, exc.HTTPNoContent.code)
-
-
-class NetworkGatewayDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
-    """Unit tests for Network Gateway DB support."""
-
-    def setUp(self, plugin=None, ext_mgr=None):
-        if not plugin:
-            plugin = '%s.%s' % (__name__, TestNetworkGatewayPlugin.__name__)
-        if not ext_mgr:
-            ext_mgr = TestExtensionManager()
-        self.gw_resource = networkgw.GATEWAY_RESOURCE_NAME
-        self.dev_resource = networkgw.DEVICE_RESOURCE_NAME
-
-        super(NetworkGatewayDbTestCase, self).setUp(plugin=plugin,
-                                                    ext_mgr=ext_mgr)
-
-    def _create_network_gateway(self, fmt, tenant_id, name=None,
-                                devices=None, arg_list=None, **kwargs):
-        data = {self.gw_resource: {'tenant_id': tenant_id,
-                                   'devices': devices}}
-        if name:
-            data[self.gw_resource]['name'] = name
-        for arg in arg_list or ():
-            # Arg must be present and not empty
-            if kwargs.get(arg):
-                data[self.gw_resource][arg] = kwargs[arg]
-        nw_gw_req = self.new_create_request(networkgw.NETWORK_GATEWAYS,
-                                            data, fmt)
-        if (kwargs.get('set_context') and tenant_id):
-            # create a specific auth context for this request
-            nw_gw_req.environ['neutron.context'] = context.Context(
-                '', tenant_id)
-        return nw_gw_req.get_response(self.ext_api)
-
-    @contextlib.contextmanager
-    def _network_gateway(self, name='gw1', devices=None,
-                         fmt='json', tenant_id=_uuid()):
-        device = None
-        if not devices:
-            device_res = self._create_gateway_device(
-                fmt, tenant_id, 'stt', '1.1.1.1', 'xxxxxx',
-                name='whatever')
-            if device_res.status_int >= 400:
-                raise exc.HTTPClientError(code=device_res.status_int)
-            device = self.deserialize(fmt, device_res)
-            devices = [{'id': device[self.dev_resource]['id'],
-                        'interface_name': 'xyz'}]
-
-        res = self._create_network_gateway(fmt, tenant_id, name=name,
-                                           devices=devices)
-        if res.status_int >= 400:
-            raise exc.HTTPClientError(code=res.status_int)
-        network_gateway = self.deserialize(fmt, res)
-        yield network_gateway
-
-        self._delete(networkgw.NETWORK_GATEWAYS,
-                     network_gateway[self.gw_resource]['id'])
-        if device:
-            self._delete(networkgw.GATEWAY_DEVICES,
-                         device[self.dev_resource]['id'])
-
-    def _create_gateway_device(self, fmt, tenant_id,
-                               connector_type, connector_ip,
-                               client_certificate, name=None,
-                               set_context=False):
-        data = {self.dev_resource: {'tenant_id': tenant_id,
-                                    'connector_type': connector_type,
-                                    'connector_ip': connector_ip,
-                                    'client_certificate': client_certificate}}
-        if name:
-            data[self.dev_resource]['name'] = name
-        gw_dev_req = self.new_create_request(networkgw.GATEWAY_DEVICES,
-                                             data, fmt)
-        if (set_context and tenant_id):
-            # create a specific auth context for this request
-            gw_dev_req.environ['neutron.context'] = context.Context(
-                '', tenant_id)
-        return gw_dev_req.get_response(self.ext_api)
-
-    def _update_gateway_device(self, fmt, gateway_device_id,
-                               connector_type=None, connector_ip=None,
-                               client_certificate=None, name=None,
-                               set_context=False, tenant_id=None):
-        data = {self.dev_resource: {}}
-        if connector_type:
-            data[self.dev_resource]['connector_type'] = connector_type
-        if connector_ip:
-            data[self.dev_resource]['connector_ip'] = connector_ip
-        if client_certificate:
-            data[self.dev_resource]['client_certificate'] = client_certificate
-        if name:
-            data[self.dev_resource]['name'] = name
-        gw_dev_req = self.new_update_request(networkgw.GATEWAY_DEVICES,
-                                             data, gateway_device_id, fmt)
-        if (set_context and tenant_id):
-            # create a specific auth context for this request
-            gw_dev_req.environ['neutron.context'] = context.Context(
-                '', tenant_id)
-        return gw_dev_req.get_response(self.ext_api)
-
-    @contextlib.contextmanager
-    def _gateway_device(self, name='gw_dev',
-                        connector_type='stt',
-                        connector_ip='1.1.1.1',
-                        client_certificate='xxxxxxxxxxxxxxx',
-                        fmt='json', tenant_id=_uuid()):
-        res = self._create_gateway_device(
-            fmt,
-            tenant_id,
-            connector_type=connector_type,
-            connector_ip=connector_ip,
-            client_certificate=client_certificate,
-            name=name)
-        if res.status_int >= 400:
-            raise exc.HTTPClientError(code=res.status_int)
-        gateway_device = self.deserialize(fmt, res)
-        yield gateway_device
-
-        self._delete(networkgw.GATEWAY_DEVICES,
-                     gateway_device[self.dev_resource]['id'])
-
-    def _gateway_action(self, action, network_gateway_id, network_id,
-                        segmentation_type, segmentation_id=None,
-                        expected_status=exc.HTTPOk.code):
-        connection_data = {'network_id': network_id,
-                           'segmentation_type': segmentation_type}
-        if segmentation_id:
-            connection_data['segmentation_id'] = segmentation_id
-
-        req = self.new_action_request(networkgw.NETWORK_GATEWAYS,
-                                      connection_data,
-                                      network_gateway_id,
-                                      "%s_network" % action)
-        res = req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, expected_status)
-        return self.deserialize('json', res)
-
-    def _test_connect_and_disconnect_network(self, segmentation_type,
-                                             segmentation_id=None):
-        with self._network_gateway() as gw:
-            with self.network() as net:
-                body = self._gateway_action('connect',
-                                            gw[self.gw_resource]['id'],
-                                            net['network']['id'],
-                                            segmentation_type,
-                                            segmentation_id)
-                self.assertIn('connection_info', body)
-                connection_info = body['connection_info']
-                for attr in ('network_id', 'port_id',
-                             'network_gateway_id'):
-                    self.assertIn(attr, connection_info)
-                # fetch port and confirm device_id
-                gw_port_id = connection_info['port_id']
-                port_body = self._show('ports', gw_port_id)
-                self.assertEqual(port_body['port']['device_id'],
-                                 gw[self.gw_resource]['id'])
-                # Clean up - otherwise delete will fail
-                body = self._gateway_action('disconnect',
-                                            gw[self.gw_resource]['id'],
-                                            net['network']['id'],
-                                            segmentation_type,
-                                            segmentation_id)
-                # Check associated port has been deleted too
-                body = self._show('ports', gw_port_id,
-                                  expected_code=exc.HTTPNotFound.code)
-
-    def test_create_network_gateway(self):
-        tenant_id = _uuid()
-        with contextlib.nested(
-            self._gateway_device(name='dev_1',
-                                 tenant_id=tenant_id),
-            self._gateway_device(name='dev_2',
-                                 tenant_id=tenant_id)) as (dev_1, dev_2):
-            name = 'test-gw'
-            dev_1_id = dev_1[self.dev_resource]['id']
-            dev_2_id = dev_2[self.dev_resource]['id']
-            devices = [{'id': dev_1_id, 'interface_name': 'xxx'},
-                       {'id': dev_2_id, 'interface_name': 'yyy'}]
-            keys = [('devices', devices), ('name', name)]
-            with self._network_gateway(name=name,
-                                       devices=devices,
-                                       tenant_id=tenant_id) as gw:
-                for k, v in keys:
-                    self.assertEqual(gw[self.gw_resource][k], v)
-
-    def test_create_network_gateway_no_interface_name(self):
-        tenant_id = _uuid()
-        with self._gateway_device(tenant_id=tenant_id) as dev:
-            name = 'test-gw'
-            devices = [{'id': dev[self.dev_resource]['id']}]
-            exp_devices = devices
-            exp_devices[0]['interface_name'] = 'breth0'
-            keys = [('devices', exp_devices), ('name', name)]
-            with self._network_gateway(name=name,
-                                       devices=devices,
-                                       tenant_id=tenant_id) as gw:
-                for k, v in keys:
-                    self.assertEqual(gw[self.gw_resource][k], v)
-
-    def test_create_network_gateway_not_owned_device_raises_404(self):
-        # Create a device with a different tenant identifier
-        with self._gateway_device(name='dev', tenant_id=_uuid()) as dev:
-            name = 'test-gw'
-            dev_id = dev[self.dev_resource]['id']
-            devices = [{'id': dev_id, 'interface_name': 'xxx'}]
-            res = self._create_network_gateway(
-                'json', _uuid(), name=name, devices=devices)
-            self.assertEqual(404, res.status_int)
-
-    def test_create_network_gateway_non_existent_device_raises_404(self):
-        name = 'test-gw'
-        devices = [{'id': _uuid(), 'interface_name': 'xxx'}]
-        res = self._create_network_gateway(
-            'json', _uuid(), name=name, devices=devices)
-        self.assertEqual(404, res.status_int)
-
-    def test_delete_network_gateway(self):
-        tenant_id = _uuid()
-        with self._gateway_device(tenant_id=tenant_id) as dev:
-            name = 'test-gw'
-            device_id = dev[self.dev_resource]['id']
-            devices = [{'id': device_id,
-                        'interface_name': 'xxx'}]
-            with self._network_gateway(name=name,
-                                       devices=devices,
-                                       tenant_id=tenant_id) as gw:
-                # Nothing to do here - just let the gateway go
-                gw_id = gw[self.gw_resource]['id']
-        # Verify nothing left on db
-        session = db_api.get_session()
-        dev_query = session.query(
-            nsx_models.NetworkGatewayDevice).filter(
-                nsx_models.NetworkGatewayDevice.id == device_id)
-        self.assertIsNone(dev_query.first())
-        gw_query = session.query(nsx_models.NetworkGateway).filter(
-            nsx_models.NetworkGateway.id == gw_id)
-        self.assertIsNone(gw_query.first())
-
-    def test_update_network_gateway(self):
-        with self._network_gateway() as gw:
-            data = {self.gw_resource: {'name': 'new_name'}}
-            req = self.new_update_request(networkgw.NETWORK_GATEWAYS,
-                                          data,
-                                          gw[self.gw_resource]['id'])
-            res = self.deserialize('json', req.get_response(self.ext_api))
-            self.assertEqual(res[self.gw_resource]['name'],
-                             data[self.gw_resource]['name'])
-
-    def test_get_network_gateway(self):
-        with self._network_gateway(name='test-gw') as gw:
-            req = self.new_show_request(networkgw.NETWORK_GATEWAYS,
-                                        gw[self.gw_resource]['id'])
-            res = self.deserialize('json', req.get_response(self.ext_api))
-            self.assertEqual(res[self.gw_resource]['name'],
-                             gw[self.gw_resource]['name'])
-
-    def test_list_network_gateways(self):
-        with self._network_gateway(name='test-gw-1') as gw1:
-            with self._network_gateway(name='test_gw_2') as gw2:
-                req = self.new_list_request(networkgw.NETWORK_GATEWAYS)
-                res = self.deserialize('json', req.get_response(self.ext_api))
-                key = self.gw_resource + 's'
-                self.assertEqual(len(res[key]), 2)
-                self.assertEqual(res[key][0]['name'],
-                                 gw1[self.gw_resource]['name'])
-                self.assertEqual(res[key][1]['name'],
-                                 gw2[self.gw_resource]['name'])
-
-    def _test_list_network_gateway_with_multiple_connections(
-        self, expected_gateways=1):
-        with self._network_gateway() as gw:
-            with self.network() as net_1:
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 555)
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 777)
-                req = self.new_list_request(networkgw.NETWORK_GATEWAYS)
-                res = self.deserialize('json', req.get_response(self.ext_api))
-                key = self.gw_resource + 's'
-                self.assertEqual(len(res[key]), expected_gateways)
-                for item in res[key]:
-                    self.assertIn('ports', item)
-                    if item['id'] == gw[self.gw_resource]['id']:
-                        gw_ports = item['ports']
-                self.assertEqual(len(gw_ports), 2)
-                segmentation_ids = [555, 777]
-                for gw_port in gw_ports:
-                    self.assertEqual('vlan', gw_port['segmentation_type'])
-                    self.assertIn(gw_port['segmentation_id'], segmentation_ids)
-                    segmentation_ids.remove(gw_port['segmentation_id'])
-                # Required cleanup
-                self._gateway_action('disconnect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 555)
-                self._gateway_action('disconnect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 777)
-
-    def test_list_network_gateway_with_multiple_connections(self):
-        self._test_list_network_gateway_with_multiple_connections()
-
-    def test_connect_and_disconnect_network(self):
-        self._test_connect_and_disconnect_network('flat')
-
-    def test_connect_and_disconnect_network_no_seg_type(self):
-        self._test_connect_and_disconnect_network(None)
-
-    def test_connect_and_disconnect_network_vlan_with_segmentation_id(self):
-        self._test_connect_and_disconnect_network('vlan', 999)
-
-    def test_connect_and_disconnect_network_vlan_without_segmentation_id(self):
-        self._test_connect_and_disconnect_network('vlan')
-
-    def test_connect_network_multiple_times(self):
-        with self._network_gateway() as gw:
-            with self.network() as net_1:
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 555)
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 777)
-                self._gateway_action('disconnect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 555)
-                self._gateway_action('disconnect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 777)
-
-    def test_connect_network_multiple_gateways(self):
-        with self._network_gateway() as gw_1:
-            with self._network_gateway() as gw_2:
-                with self.network() as net_1:
-                    self._gateway_action('connect',
-                                         gw_1[self.gw_resource]['id'],
-                                         net_1['network']['id'],
-                                         'vlan', 555)
-                    self._gateway_action('connect',
-                                         gw_2[self.gw_resource]['id'],
-                                         net_1['network']['id'],
-                                         'vlan', 555)
-                    self._gateway_action('disconnect',
-                                         gw_1[self.gw_resource]['id'],
-                                         net_1['network']['id'],
-                                         'vlan', 555)
-                    self._gateway_action('disconnect',
-                                         gw_2[self.gw_resource]['id'],
-                                         net_1['network']['id'],
-                                         'vlan', 555)
-
-    def test_connect_network_mapping_in_use_returns_409(self):
-        with self._network_gateway() as gw:
-            with self.network() as net_1:
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 555)
-                with self.network() as net_2:
-                    self._gateway_action('connect',
-                                         gw[self.gw_resource]['id'],
-                                         net_2['network']['id'],
-                                         'vlan', 555,
-                                         expected_status=exc.HTTPConflict.code)
-                # Clean up - otherwise delete will fail
-                self._gateway_action('disconnect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 555)
-
-    def test_connect_network_vlan_invalid_seg_id_returns_400(self):
-        with self._network_gateway() as gw:
-            with self.network() as net:
-                # above upper bound
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     net['network']['id'],
-                                     'vlan', 4095,
-                                     expected_status=exc.HTTPBadRequest.code)
-                # below lower bound (0 is valid for NSX plugin)
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     net['network']['id'],
-                                     'vlan', -1,
-                                     expected_status=exc.HTTPBadRequest.code)
-
-    def test_connect_invalid_network_returns_400(self):
-        with self._network_gateway() as gw:
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     'hohoho',
-                                     'vlan', 555,
-                                     expected_status=exc.HTTPBadRequest.code)
-
-    def test_connect_unspecified_network_returns_400(self):
-        with self._network_gateway() as gw:
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     None,
-                                     'vlan', 555,
-                                     expected_status=exc.HTTPBadRequest.code)
-
-    def test_disconnect_network_ambiguous_returns_409(self):
-        with self._network_gateway() as gw:
-            with self.network() as net_1:
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 555)
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 777)
-                # This should raise
-                self._gateway_action('disconnect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan',
-                                     expected_status=exc.HTTPConflict.code)
-                self._gateway_action('disconnect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 555)
-                self._gateway_action('disconnect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 777)
-
-    def test_delete_active_gateway_port_returns_409(self):
-        with self._network_gateway() as gw:
-            with self.network() as net_1:
-                body = self._gateway_action('connect',
-                                            gw[self.gw_resource]['id'],
-                                            net_1['network']['id'],
-                                            'vlan', 555)
-                # fetch port id and try to delete it
-                gw_port_id = body['connection_info']['port_id']
-                self._delete('ports', gw_port_id,
-                             expected_code=exc.HTTPConflict.code)
-                body = self._gateway_action('disconnect',
-                                            gw[self.gw_resource]['id'],
-                                            net_1['network']['id'],
-                                            'vlan', 555)
-
-    def test_delete_network_gateway_active_connections_returns_409(self):
-        with self._network_gateway() as gw:
-            with self.network() as net_1:
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'flat')
-                self._delete(networkgw.NETWORK_GATEWAYS,
-                             gw[self.gw_resource]['id'],
-                             expected_code=exc.HTTPConflict.code)
-                self._gateway_action('disconnect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'flat')
-
-    def test_disconnect_non_existing_connection_returns_404(self):
-        with self._network_gateway() as gw:
-            with self.network() as net_1:
-                self._gateway_action('connect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 555)
-                self._gateway_action('disconnect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 999,
-                                     expected_status=exc.HTTPNotFound.code)
-                self._gateway_action('disconnect',
-                                     gw[self.gw_resource]['id'],
-                                     net_1['network']['id'],
-                                     'vlan', 555)
-
-    def test_create_gateway_device(
-        self, expected_status=networkgw_db.STATUS_UNKNOWN):
-        with self._gateway_device(name='test-dev',
-                                  connector_type='stt',
-                                  connector_ip='1.1.1.1',
-                                  client_certificate='xyz') as dev:
-            self.assertEqual(dev[self.dev_resource]['name'], 'test-dev')
-            self.assertEqual(dev[self.dev_resource]['connector_type'], 'stt')
-            self.assertEqual(dev[self.dev_resource]['connector_ip'], '1.1.1.1')
-            self.assertEqual(dev[self.dev_resource]['status'], expected_status)
-
-    def test_list_gateway_devices(self):
-        with contextlib.nested(
-            self._gateway_device(name='test-dev-1',
-                                 connector_type='stt',
-                                 connector_ip='1.1.1.1',
-                                 client_certificate='xyz'),
-            self._gateway_device(name='test-dev-2',
-                                 connector_type='stt',
-                                 connector_ip='2.2.2.2',
-                                 client_certificate='qwe')) as (dev_1, dev_2):
-            req = self.new_list_request(networkgw.GATEWAY_DEVICES)
-            res = self.deserialize('json', req.get_response(self.ext_api))
-        devices = res[networkgw.GATEWAY_DEVICES.replace('-', '_')]
-        self.assertEqual(len(devices), 2)
-        dev_1 = devices[0]
-        dev_2 = devices[1]
-        self.assertEqual(dev_1['name'], 'test-dev-1')
-        self.assertEqual(dev_2['name'], 'test-dev-2')
-
-    def test_get_gateway_device(
-        self, expected_status=networkgw_db.STATUS_UNKNOWN):
-        with self._gateway_device(name='test-dev',
-                                  connector_type='stt',
-                                  connector_ip='1.1.1.1',
-                                  client_certificate='xyz') as dev:
-            req = self.new_show_request(networkgw.GATEWAY_DEVICES,
-                                        dev[self.dev_resource]['id'])
-            res = self.deserialize('json', req.get_response(self.ext_api))
-        self.assertEqual(res[self.dev_resource]['name'], 'test-dev')
-        self.assertEqual(res[self.dev_resource]['connector_type'], 'stt')
-        self.assertEqual(res[self.dev_resource]['connector_ip'], '1.1.1.1')
-        self.assertEqual(res[self.dev_resource]['status'], expected_status)
-
-    def test_update_gateway_device(
-        self, expected_status=networkgw_db.STATUS_UNKNOWN):
-        with self._gateway_device(name='test-dev',
-                                  connector_type='stt',
-                                  connector_ip='1.1.1.1',
-                                  client_certificate='xyz') as dev:
-            self._update_gateway_device('json', dev[self.dev_resource]['id'],
-                                        connector_type='stt',
-                                        connector_ip='2.2.2.2',
-                                        name='test-dev-upd')
-            req = self.new_show_request(networkgw.GATEWAY_DEVICES,
-                                        dev[self.dev_resource]['id'])
-            res = self.deserialize('json', req.get_response(self.ext_api))
-
-        self.assertEqual(res[self.dev_resource]['name'], 'test-dev-upd')
-        self.assertEqual(res[self.dev_resource]['connector_type'], 'stt')
-        self.assertEqual(res[self.dev_resource]['connector_ip'], '2.2.2.2')
-        self.assertEqual(res[self.dev_resource]['status'], expected_status)
-
-    def test_delete_gateway_device(self):
-        with self._gateway_device(name='test-dev',
-                                  connector_type='stt',
-                                  connector_ip='1.1.1.1',
-                                  client_certificate='xyz') as dev:
-            # Nothing to do here - just note the device id
-            dev_id = dev[self.dev_resource]['id']
-        # Verify nothing left on db
-        session = db_api.get_session()
-        dev_query = session.query(nsx_models.NetworkGatewayDevice)
-        dev_query.filter(nsx_models.NetworkGatewayDevice.id == dev_id)
-        self.assertIsNone(dev_query.first())
-
-
-class TestNetworkGateway(test_nsx_plugin.NsxPluginV2TestCase,
-                         NetworkGatewayDbTestCase):
-
-    def setUp(self, plugin=vmware.PLUGIN_NAME, ext_mgr=None):
-        cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
-        # Mock l2gwlib calls for gateway devices since this resource is not
-        # mocked through the fake NSX API client
-        create_gw_dev_patcher = mock.patch.object(
-            l2gwlib, 'create_gateway_device')
-        update_gw_dev_patcher = mock.patch.object(
-            l2gwlib, 'update_gateway_device')
-        delete_gw_dev_patcher = mock.patch.object(
-            l2gwlib, 'delete_gateway_device')
-        get_gw_dev_status_patcher = mock.patch.object(
-            l2gwlib, 'get_gateway_device_status')
-        get_gw_dev_statuses_patcher = mock.patch.object(
-            l2gwlib, 'get_gateway_devices_status')
-        self.mock_create_gw_dev = create_gw_dev_patcher.start()
-        self.mock_create_gw_dev.return_value = {'uuid': 'callejon'}
-        self.mock_update_gw_dev = update_gw_dev_patcher.start()
-        delete_gw_dev_patcher.start()
-        self.mock_get_gw_dev_status = get_gw_dev_status_patcher.start()
-        get_gw_dev_statuses = get_gw_dev_statuses_patcher.start()
-        get_gw_dev_statuses.return_value = {}
-
-        super(TestNetworkGateway,
-              self).setUp(plugin=plugin, ext_mgr=ext_mgr)
-
-    def test_create_network_gateway_name_exceeds_40_chars(self):
-        name = 'this_is_a_gateway_whose_name_is_longer_than_40_chars'
-        with self._network_gateway(name=name) as nw_gw:
-            # Assert Neutron name is not truncated
-            self.assertEqual(nw_gw[self.gw_resource]['name'], name)
-
-    def test_update_network_gateway_with_name_calls_backend(self):
-        with mock.patch.object(
-            nsxlib.l2gateway, 'update_l2_gw_service') as mock_update_gw:
-            with self._network_gateway(name='cavani') as nw_gw:
-                nw_gw_id = nw_gw[self.gw_resource]['id']
-                self._update(networkgw.NETWORK_GATEWAYS, nw_gw_id,
-                             {self.gw_resource: {'name': 'higuain'}})
-                mock_update_gw.assert_called_once_with(
-                    mock.ANY, nw_gw_id, 'higuain')
-
-    def test_update_network_gateway_without_name_does_not_call_backend(self):
-        with mock.patch.object(
-            nsxlib.l2gateway, 'update_l2_gw_service') as mock_update_gw:
-            with self._network_gateway(name='something') as nw_gw:
-                nw_gw_id = nw_gw[self.gw_resource]['id']
-                self._update(networkgw.NETWORK_GATEWAYS, nw_gw_id,
-                             {self.gw_resource: {}})
-                self.assertEqual(mock_update_gw.call_count, 0)
-
-    def test_update_network_gateway_name_exceeds_40_chars(self):
-        new_name = 'this_is_a_gateway_whose_name_is_longer_than_40_chars'
-        with self._network_gateway(name='something') as nw_gw:
-            nw_gw_id = nw_gw[self.gw_resource]['id']
-            self._update(networkgw.NETWORK_GATEWAYS, nw_gw_id,
-                         {self.gw_resource: {'name': new_name}})
-            req = self.new_show_request(networkgw.NETWORK_GATEWAYS,
-                                        nw_gw_id)
-            res = self.deserialize('json', req.get_response(self.ext_api))
-            # Assert Neutron name is not truncated
-            self.assertEqual(new_name, res[self.gw_resource]['name'])
-            # Assert NSX name is truncated
-            self.assertEqual(
-                new_name[:40],
-                self.fc._fake_gatewayservice_dict[nw_gw_id]['display_name'])
-
-    def test_create_network_gateway_nsx_error_returns_500(self):
-        def raise_nsx_api_exc(*args, **kwargs):
-            raise api_exc.NsxApiException()
-
-        with mock.patch.object(nsxlib.l2gateway,
-                               'create_l2_gw_service',
-                               new=raise_nsx_api_exc):
-            tenant_id = _uuid()
-            with self._gateway_device(tenant_id=tenant_id) as dev:
-                res = self._create_network_gateway(
-                    self.fmt,
-                    tenant_id,
-                    name='yyy',
-                    devices=[{'id': dev[self.dev_resource]['id']}])
-            self.assertEqual(500, res.status_int)
-
-    def test_create_network_gateway_nsx_error_returns_409(self):
-        with mock.patch.object(nsxlib.l2gateway,
-                               'create_l2_gw_service',
-                               side_effect=api_exc.Conflict):
-            tenant_id = _uuid()
-            with self._gateway_device(tenant_id=tenant_id) as dev:
-                res = self._create_network_gateway(
-                    self.fmt,
-                    tenant_id,
-                    name='yyy',
-                    devices=[{'id': dev[self.dev_resource]['id']}])
-            self.assertEqual(409, res.status_int)
-
-    def test_list_network_gateways(self):
-        with self._network_gateway(name='test-gw-1') as gw1:
-            with self._network_gateway(name='test_gw_2') as gw2:
-                req = self.new_list_request(networkgw.NETWORK_GATEWAYS)
-                res = self.deserialize('json', req.get_response(self.ext_api))
-                # Ensure we always get the list in the same order
-                gateways = sorted(
-                    res[self.gw_resource + 's'], key=lambda k: k['name'])
-                self.assertEqual(len(gateways), 3)
-                # We expect the default gateway too
-                self.assertEqual(gateways[0]['default'], True)
-                self.assertEqual(gateways[1]['name'],
-                                 gw1[self.gw_resource]['name'])
-                self.assertEqual(gateways[2]['name'],
-                                 gw2[self.gw_resource]['name'])
-
-    def test_list_network_gateway_with_multiple_connections(self):
-        self._test_list_network_gateway_with_multiple_connections(
-            expected_gateways=2)
-
-    def test_show_network_gateway_nsx_error_returns_404(self):
-        invalid_id = 'b5afd4a9-eb71-4af7-a082-8fc625a35b61'
-        req = self.new_show_request(networkgw.NETWORK_GATEWAYS, invalid_id)
-        res = req.get_response(self.ext_api)
-        self.assertEqual(exc.HTTPNotFound.code, res.status_int)
-
-    def test_create_gateway_device(self):
-        self.mock_get_gw_dev_status.return_value = True
-        super(TestNetworkGateway, self).test_create_gateway_device(
-            expected_status=networkgw_db.STATUS_ACTIVE)
-
-    def test_create_gateway_device_status_down(self):
-        self.mock_get_gw_dev_status.return_value = False
-        super(TestNetworkGateway, self).test_create_gateway_device(
-            expected_status=networkgw_db.STATUS_DOWN)
-
-    def test_create_gateway_device_invalid_cert_returns_400(self):
-        self.mock_create_gw_dev.side_effect = (
-            nsx_exc.InvalidSecurityCertificate)
-        res = self._create_gateway_device(
-            'json',
-            _uuid(),
-            connector_type='stt',
-            connector_ip='1.1.1.1',
-            client_certificate='invalid_certificate',
-            name='whatever')
-        self.assertEqual(res.status_int, 400)
-
-    def test_get_gateway_device(self):
-        self.mock_get_gw_dev_status.return_value = True
-        super(TestNetworkGateway, self).test_get_gateway_device(
-            expected_status=networkgw_db.STATUS_ACTIVE)
-
-    def test_get_gateway_device_status_down(self):
-        self.mock_get_gw_dev_status.return_value = False
-        super(TestNetworkGateway, self).test_get_gateway_device(
-            expected_status=networkgw_db.STATUS_DOWN)
-
-    def test_update_gateway_device(self):
-        self.mock_get_gw_dev_status.return_value = True
-        super(TestNetworkGateway, self).test_update_gateway_device(
-            expected_status=networkgw_db.STATUS_ACTIVE)
-
-    def test_update_gateway_device_status_down(self):
-        self.mock_get_gw_dev_status.return_value = False
-        super(TestNetworkGateway, self).test_update_gateway_device(
-            expected_status=networkgw_db.STATUS_DOWN)
-
-    def test_update_gateway_device_invalid_cert_returns_400(self):
-        with self._gateway_device(
-            name='whaterver',
-            connector_type='stt',
-            connector_ip='1.1.1.1',
-            client_certificate='iminvalidbutiitdoesnotmatter') as dev:
-            self.mock_update_gw_dev.side_effect = (
-                nsx_exc.InvalidSecurityCertificate)
-            res = self._update_gateway_device(
-                'json',
-                dev[self.dev_resource]['id'],
-                client_certificate='invalid_certificate')
-            self.assertEqual(res.status_int, 400)
-
-
-class TestNetworkGatewayPlugin(db_base_plugin_v2.NeutronDbPluginV2,
-                               networkgw_db.NetworkGatewayMixin):
-    """Simple plugin class for testing db support for network gateway ext."""
-
-    supported_extension_aliases = ["network-gateway"]
-
-    def __init__(self, **args):
-        super(TestNetworkGatewayPlugin, self).__init__(**args)
-        extensions.append_api_extensions_path([vmware.NSXEXT_PATH])
-
-    def delete_port(self, context, id, nw_gw_port_check=True):
-        if nw_gw_port_check:
-            port = self._get_port(context, id)
-            self.prevent_network_gateway_port_deletion(context, port)
-        super(TestNetworkGatewayPlugin, self).delete_port(context, id)
diff --git a/neutron/tests/unit/vmware/extensions/test_portsecurity.py b/neutron/tests/unit/vmware/extensions/test_portsecurity.py
deleted file mode 100644 (file)
index 6b07b39..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-
-from neutron.common import test_lib
-from neutron.plugins.vmware.common import sync
-from neutron.tests.unit import test_extension_portsecurity as psec
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware.apiclient import fake
-
-
-class PortSecurityTestCase(psec.PortSecurityDBTestCase):
-
-    def setUp(self):
-        test_lib.test_config['config_files'] = [
-            vmware.get_fake_conf('nsx.ini.test')]
-        # mock api client
-        self.fc = fake.FakeClient(vmware.STUBS_PATH)
-        self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True)
-        instance = self.mock_nsx.start()
-        instance.return_value.login.return_value = "the_cookie"
-        # Avoid runs of the synchronizer looping call
-        patch_sync = mock.patch.object(sync, '_start_loopingcall')
-        patch_sync.start()
-
-        instance.return_value.request.side_effect = self.fc.fake_request
-        super(PortSecurityTestCase, self).setUp(vmware.PLUGIN_NAME)
-        self.addCleanup(self.fc.reset_all)
-        self.addCleanup(self.mock_nsx.stop)
-        self.addCleanup(patch_sync.stop)
-
-
-class TestPortSecurity(PortSecurityTestCase, psec.TestPortSecurity):
-        pass
diff --git a/neutron/tests/unit/vmware/extensions/test_providernet.py b/neutron/tests/unit/vmware/extensions/test_providernet.py
deleted file mode 100644 (file)
index e26e9f4..0000000
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_config import cfg
-import webob.exc
-
-from neutron.extensions import multiprovidernet as mpnet
-from neutron.extensions import providernet as pnet
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware import test_nsx_plugin
-
-
-class TestProvidernet(test_nsx_plugin.NsxPluginV2TestCase):
-
-    def test_create_delete_provider_network_default_physical_net(self):
-        data = {'network': {'name': 'net1',
-                            'admin_state_up': True,
-                            'tenant_id': 'admin',
-                            pnet.NETWORK_TYPE: 'vlan',
-                            pnet.SEGMENTATION_ID: 411}}
-        network_req = self.new_create_request('networks', data, self.fmt)
-        net = self.deserialize(self.fmt, network_req.get_response(self.api))
-        self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan')
-        self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411)
-        req = self.new_delete_request('networks', net['network']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
-
-    def test_create_provider_network(self):
-        data = {'network': {'name': 'net1',
-                            'admin_state_up': True,
-                            'tenant_id': 'admin',
-                            pnet.NETWORK_TYPE: 'vlan',
-                            pnet.SEGMENTATION_ID: 411,
-                            pnet.PHYSICAL_NETWORK: 'physnet1'}}
-        network_req = self.new_create_request('networks', data, self.fmt)
-        net = self.deserialize(self.fmt, network_req.get_response(self.api))
-        self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan')
-        self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411)
-        self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet1')
-
-        # Test that we can create another provider network using the same
-        # vlan_id on another physical network.
-        data['network'][pnet.PHYSICAL_NETWORK] = 'physnet2'
-        network_req = self.new_create_request('networks', data, self.fmt)
-        net = self.deserialize(self.fmt, network_req.get_response(self.api))
-        self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan')
-        self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411)
-        self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet2')
-
-
-class TestMultiProviderNetworks(test_nsx_plugin.NsxPluginV2TestCase):
-
-    def setUp(self, plugin=None):
-        cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
-        super(TestMultiProviderNetworks, self).setUp()
-
-    def test_create_network_provider(self):
-        data = {'network': {'name': 'net1',
-                            pnet.NETWORK_TYPE: 'vlan',
-                            pnet.PHYSICAL_NETWORK: 'physnet1',
-                            pnet.SEGMENTATION_ID: 1,
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        network = self.deserialize(self.fmt,
-                                   network_req.get_response(self.api))
-        self.assertEqual(network['network'][pnet.NETWORK_TYPE], 'vlan')
-        self.assertEqual(network['network'][pnet.PHYSICAL_NETWORK], 'physnet1')
-        self.assertEqual(network['network'][pnet.SEGMENTATION_ID], 1)
-        self.assertNotIn(mpnet.SEGMENTS, network['network'])
-
-    def test_create_network_provider_flat(self):
-        data = {'network': {'name': 'net1',
-                            pnet.NETWORK_TYPE: 'flat',
-                            pnet.PHYSICAL_NETWORK: 'physnet1',
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        network = self.deserialize(self.fmt,
-                                   network_req.get_response(self.api))
-        self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE])
-        self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
-        self.assertEqual(0, network['network'][pnet.SEGMENTATION_ID])
-        self.assertNotIn(mpnet.SEGMENTS, network['network'])
-
-    def test_create_network_single_multiple_provider(self):
-        data = {'network': {'name': 'net1',
-                            mpnet.SEGMENTS:
-                            [{pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1',
-                              pnet.SEGMENTATION_ID: 1}],
-                            'tenant_id': 'tenant_one'}}
-        net_req = self.new_create_request('networks', data)
-        network = self.deserialize(self.fmt, net_req.get_response(self.api))
-        for provider_field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
-                               pnet.SEGMENTATION_ID]:
-            self.assertNotIn(provider_field, network['network'])
-        tz = network['network'][mpnet.SEGMENTS][0]
-        self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan')
-        self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1')
-        self.assertEqual(tz[pnet.SEGMENTATION_ID], 1)
-
-        # Tests get_network()
-        net_req = self.new_show_request('networks', network['network']['id'])
-        network = self.deserialize(self.fmt, net_req.get_response(self.api))
-        tz = network['network'][mpnet.SEGMENTS][0]
-        self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan')
-        self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1')
-        self.assertEqual(tz[pnet.SEGMENTATION_ID], 1)
-
-    def test_create_network_multprovider(self):
-        data = {'network': {'name': 'net1',
-                            mpnet.SEGMENTS:
-                            [{pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1',
-                              pnet.SEGMENTATION_ID: 1},
-                             {pnet.NETWORK_TYPE: 'stt',
-                              pnet.PHYSICAL_NETWORK: 'physnet1'}],
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        network = self.deserialize(self.fmt,
-                                   network_req.get_response(self.api))
-        tz = network['network'][mpnet.SEGMENTS]
-        for tz in data['network'][mpnet.SEGMENTS]:
-            for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
-                          pnet.SEGMENTATION_ID]:
-                self.assertEqual(tz.get(field), tz.get(field))
-
-        # Tests get_network()
-        net_req = self.new_show_request('networks', network['network']['id'])
-        network = self.deserialize(self.fmt, net_req.get_response(self.api))
-        tz = network['network'][mpnet.SEGMENTS]
-        for tz in data['network'][mpnet.SEGMENTS]:
-            for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
-                          pnet.SEGMENTATION_ID]:
-                self.assertEqual(tz.get(field), tz.get(field))
-
-    def test_create_network_with_provider_and_multiprovider_fail(self):
-        data = {'network': {'name': 'net1',
-                            mpnet.SEGMENTS:
-                            [{pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1',
-                              pnet.SEGMENTATION_ID: 1}],
-                            pnet.NETWORK_TYPE: 'vlan',
-                            pnet.PHYSICAL_NETWORK: 'physnet1',
-                            pnet.SEGMENTATION_ID: 1,
-                            'tenant_id': 'tenant_one'}}
-
-        network_req = self.new_create_request('networks', data)
-        res = network_req.get_response(self.api)
-        self.assertEqual(res.status_int, 400)
-
-    def test_create_network_duplicate_segments(self):
-        data = {'network': {'name': 'net1',
-                            mpnet.SEGMENTS:
-                            [{pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1',
-                              pnet.SEGMENTATION_ID: 1},
-                             {pnet.NETWORK_TYPE: 'vlan',
-                              pnet.PHYSICAL_NETWORK: 'physnet1',
-                              pnet.SEGMENTATION_ID: 1}],
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        res = network_req.get_response(self.api)
-        self.assertEqual(res.status_int, 400)
diff --git a/neutron/tests/unit/vmware/extensions/test_qosqueues.py b/neutron/tests/unit/vmware/extensions/test_qosqueues.py
deleted file mode 100644 (file)
index 50dd439..0000000
+++ /dev/null
@@ -1,276 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-import mock
-from oslo_config import cfg
-import webob.exc
-
-from neutron import context
-from neutron.plugins.vmware.dbexts import qos_db
-from neutron.plugins.vmware.extensions import qos as ext_qos
-from neutron.plugins.vmware import nsxlib
-from neutron.tests.unit import test_extensions
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware import test_nsx_plugin
-
-
-class QoSTestExtensionManager(object):
-
-    def get_resources(self):
-        return ext_qos.Qos.get_resources()
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class TestQoSQueue(test_nsx_plugin.NsxPluginV2TestCase):
-
-    def setUp(self, plugin=None):
-        cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
-        super(TestQoSQueue, self).setUp()
-        ext_mgr = QoSTestExtensionManager()
-        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
-
-    def _create_qos_queue(self, fmt, body, **kwargs):
-        qos_queue = self.new_create_request('qos-queues', body)
-        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            qos_queue.environ['neutron.context'] = context.Context(
-                '', kwargs['tenant_id'])
-
-        return qos_queue.get_response(self.ext_api)
-
-    @contextlib.contextmanager
-    def qos_queue(self, name='foo', min='0', max='10',
-                  qos_marking=None, dscp='0', default=None):
-
-        body = {'qos_queue': {'tenant_id': 'tenant',
-                              'name': name,
-                              'min': min,
-                              'max': max}}
-
-        if qos_marking:
-            body['qos_queue']['qos_marking'] = qos_marking
-        if dscp:
-            body['qos_queue']['dscp'] = dscp
-        if default:
-            body['qos_queue']['default'] = default
-        res = self._create_qos_queue('json', body)
-        qos_queue = self.deserialize('json', res)
-        if res.status_int >= 400:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-
-        yield qos_queue
-
-    def test_create_qos_queue(self):
-        with self.qos_queue(name='fake_lqueue', min=34, max=44,
-                            qos_marking='untrusted', default=False) as q:
-            self.assertEqual(q['qos_queue']['name'], 'fake_lqueue')
-            self.assertEqual(q['qos_queue']['min'], 34)
-            self.assertEqual(q['qos_queue']['max'], 44)
-            self.assertEqual(q['qos_queue']['qos_marking'], 'untrusted')
-            self.assertFalse(q['qos_queue']['default'])
-
-    def test_create_trusted_qos_queue(self):
-        with mock.patch.object(qos_db.LOG, 'info') as log:
-            with mock.patch.object(nsxlib, 'do_request',
-                                   return_value={"uuid": "fake_queue"}):
-                with self.qos_queue(name='fake_lqueue', min=34, max=44,
-                                    qos_marking='trusted', default=False) as q:
-                    self.assertIsNone(q['qos_queue']['dscp'])
-                    self.assertTrue(log.called)
-
-    def test_create_qos_queue_name_exceeds_40_chars(self):
-        name = 'this_is_a_queue_whose_name_is_longer_than_40_chars'
-        with self.qos_queue(name=name) as queue:
-            # Assert Neutron name is not truncated
-            self.assertEqual(queue['qos_queue']['name'], name)
-
-    def test_create_qos_queue_default(self):
-        with self.qos_queue(default=True) as q:
-            self.assertTrue(q['qos_queue']['default'])
-
-    def test_create_qos_queue_two_default_queues_fail(self):
-        with self.qos_queue(default=True):
-            body = {'qos_queue': {'tenant_id': 'tenant',
-                                  'name': 'second_default_queue',
-                                  'default': True}}
-            res = self._create_qos_queue('json', body)
-            self.assertEqual(res.status_int, 409)
-
-    def test_create_port_with_queue(self):
-        with self.qos_queue(default=True) as q1:
-            res = self._create_network('json', 'net1', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net1 = self.deserialize('json', res)
-            self.assertEqual(net1['network'][ext_qos.QUEUE],
-                             q1['qos_queue']['id'])
-            device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
-            with self.port(device_id=device_id) as p:
-                self.assertEqual(len(p['port'][ext_qos.QUEUE]), 36)
-
-    def test_create_shared_queue_networks(self):
-        with self.qos_queue(default=True) as q1:
-            res = self._create_network('json', 'net1', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net1 = self.deserialize('json', res)
-            self.assertEqual(net1['network'][ext_qos.QUEUE],
-                             q1['qos_queue']['id'])
-            res = self._create_network('json', 'net2', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net2 = self.deserialize('json', res)
-            self.assertEqual(net1['network'][ext_qos.QUEUE],
-                             q1['qos_queue']['id'])
-            device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
-            res = self._create_port('json', net1['network']['id'],
-                                    device_id=device_id)
-            port1 = self.deserialize('json', res)
-            res = self._create_port('json', net2['network']['id'],
-                                    device_id=device_id)
-            port2 = self.deserialize('json', res)
-            self.assertEqual(port1['port'][ext_qos.QUEUE],
-                             port2['port'][ext_qos.QUEUE])
-
-            self._delete('ports', port1['port']['id'])
-            self._delete('ports', port2['port']['id'])
-
-    def test_remove_queue_in_use_fail(self):
-        with self.qos_queue() as q1:
-            res = self._create_network('json', 'net1', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net1 = self.deserialize('json', res)
-            device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
-            res = self._create_port('json', net1['network']['id'],
-                                    device_id=device_id)
-            port = self.deserialize('json', res)
-            self._delete('qos-queues', port['port'][ext_qos.QUEUE], 409)
-
-    def test_update_network_new_queue(self):
-        with self.qos_queue() as q1:
-            res = self._create_network('json', 'net1', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net1 = self.deserialize('json', res)
-            with self.qos_queue() as new_q:
-                data = {'network': {ext_qos.QUEUE: new_q['qos_queue']['id']}}
-                req = self.new_update_request('networks', data,
-                                              net1['network']['id'])
-                res = req.get_response(self.api)
-                net1 = self.deserialize('json', res)
-                self.assertEqual(net1['network'][ext_qos.QUEUE],
-                                 new_q['qos_queue']['id'])
-
-    def test_update_port_adding_device_id(self):
-        with self.qos_queue() as q1:
-            res = self._create_network('json', 'net1', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net1 = self.deserialize('json', res)
-            device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
-            res = self._create_port('json', net1['network']['id'])
-            port = self.deserialize('json', res)
-            self.assertIsNone(port['port'][ext_qos.QUEUE])
-
-            data = {'port': {'device_id': device_id}}
-            req = self.new_update_request('ports', data,
-                                          port['port']['id'])
-
-            res = req.get_response(self.api)
-            port = self.deserialize('json', res)
-            self.assertEqual(len(port['port'][ext_qos.QUEUE]), 36)
-
-    def test_get_port_with_qos_not_admin(self):
-        body = {'qos_queue': {'tenant_id': 'not_admin',
-                              'name': 'foo', 'min': 20, 'max': 20}}
-        res = self._create_qos_queue('json', body, tenant_id='not_admin')
-        q1 = self.deserialize('json', res)
-        res = self._create_network('json', 'net1', True,
-                                   arg_list=(ext_qos.QUEUE, 'tenant_id',),
-                                   queue_id=q1['qos_queue']['id'],
-                                   tenant_id="not_admin")
-        net1 = self.deserialize('json', res)
-        self.assertEqual(len(net1['network'][ext_qos.QUEUE]), 36)
-        res = self._create_port('json', net1['network']['id'],
-                                tenant_id='not_admin', set_context=True)
-
-        port = self.deserialize('json', res)
-        self.assertNotIn(ext_qos.QUEUE, port['port'])
-
-    def test_dscp_value_out_of_range(self):
-        body = {'qos_queue': {'tenant_id': 'admin', 'dscp': '64',
-                              'name': 'foo', 'min': 20, 'max': 20}}
-        res = self._create_qos_queue('json', body)
-        self.assertEqual(res.status_int, 400)
-
-    def test_dscp_value_with_qos_marking_trusted_returns_400(self):
-        body = {'qos_queue': {'tenant_id': 'admin', 'dscp': '1',
-                              'qos_marking': 'trusted',
-                              'name': 'foo', 'min': 20, 'max': 20}}
-        res = self._create_qos_queue('json', body)
-        self.assertEqual(res.status_int, 400)
-
-    def test_non_admin_cannot_create_queue(self):
-        body = {'qos_queue': {'tenant_id': 'not_admin',
-                              'name': 'foo', 'min': 20, 'max': 20}}
-        res = self._create_qos_queue('json', body, tenant_id='not_admin',
-                                     set_context=True)
-        self.assertEqual(res.status_int, 403)
-
-    def test_update_port_non_admin_does_not_show_queue_id(self):
-        body = {'qos_queue': {'tenant_id': 'not_admin',
-                              'name': 'foo', 'min': 20, 'max': 20}}
-        res = self._create_qos_queue('json', body, tenant_id='not_admin')
-        q1 = self.deserialize('json', res)
-        res = self._create_network('json', 'net1', True,
-                                   arg_list=(ext_qos.QUEUE,),
-                                   tenant_id='not_admin',
-                                   queue_id=q1['qos_queue']['id'])
-
-        net1 = self.deserialize('json', res)
-        res = self._create_port('json', net1['network']['id'],
-                                tenant_id='not_admin', set_context=True)
-        port = self.deserialize('json', res)
-        device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
-        data = {'port': {'device_id': device_id}}
-        neutron_context = context.Context('', 'not_admin')
-        port = self._update('ports', port['port']['id'], data,
-                            neutron_context=neutron_context)
-        self.assertNotIn(ext_qos.QUEUE, port['port'])
-
-    def test_rxtx_factor(self):
-        with self.qos_queue(max=10) as q1:
-
-            res = self._create_network('json', 'net1', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net1 = self.deserialize('json', res)
-            res = self._create_port('json', net1['network']['id'],
-                                    arg_list=(ext_qos.RXTX_FACTOR,),
-                                    rxtx_factor=2, device_id='1')
-            port = self.deserialize('json', res)
-            req = self.new_show_request('qos-queues',
-                                        port['port'][ext_qos.QUEUE])
-            res = req.get_response(self.ext_api)
-            queue = self.deserialize('json', res)
-            self.assertEqual(queue['qos_queue']['max'], 20)
diff --git a/neutron/tests/unit/vmware/nsxlib/__init__.py b/neutron/tests/unit/vmware/nsxlib/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/vmware/nsxlib/base.py b/neutron/tests/unit/vmware/nsxlib/base.py
deleted file mode 100644 (file)
index 679db7b..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (c) 2014 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import mock
-
-from neutron.plugins.vmware.api_client import client
-from neutron.plugins.vmware.api_client import exception
-from neutron.plugins.vmware.api_client import version
-from neutron.plugins.vmware.common import config  # noqa
-from neutron.plugins.vmware import nsx_cluster as cluster
-from neutron.tests import base
-from neutron.tests.unit import test_api_v2
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware.apiclient import fake
-
-_uuid = test_api_v2._uuid
-
-
-class NsxlibTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        self.fc = fake.FakeClient(vmware.STUBS_PATH)
-        self.mock_nsxapi = mock.patch(vmware.NSXAPI_NAME, autospec=True)
-        instance = self.mock_nsxapi.start()
-        instance.return_value.login.return_value = "the_cookie"
-        fake_version = getattr(self, 'fake_version', "3.0")
-        instance.return_value.get_version.return_value = (
-            version.Version(fake_version))
-
-        instance.return_value.request.side_effect = self.fc.fake_request
-        self.fake_cluster = cluster.NSXCluster(
-            name='fake-cluster', nsx_controllers=['1.1.1.1:999'],
-            default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar')
-        self.fake_cluster.api_client = client.NsxApiClient(
-            ('1.1.1.1', '999', True),
-            self.fake_cluster.nsx_user, self.fake_cluster.nsx_password,
-            self.fake_cluster.http_timeout,
-            self.fake_cluster.retries, self.fake_cluster.redirects)
-
-        super(NsxlibTestCase, self).setUp()
-        self.addCleanup(self.fc.reset_all)
-
-    def _build_tag_dict(self, tags):
-        # This syntax is needed for python 2.6 compatibility
-        return dict((t['scope'], t['tag']) for t in tags)
-
-
-class NsxlibNegativeBaseTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        self.fc = fake.FakeClient(vmware.STUBS_PATH)
-        self.mock_nsxapi = mock.patch(vmware.NSXAPI_NAME, autospec=True)
-        instance = self.mock_nsxapi.start()
-        instance.return_value.login.return_value = "the_cookie"
-        # Choose 3.0, but the version is irrelevant for the aim of
-        # these tests as calls are throwing up errors anyway
-        fake_version = getattr(self, 'fake_version', "3.0")
-        instance.return_value.get_version.return_value = (
-            version.Version(fake_version))
-
-        def _faulty_request(*args, **kwargs):
-            raise exception.NsxApiException()
-
-        instance.return_value.request.side_effect = _faulty_request
-        self.fake_cluster = cluster.NSXCluster(
-            name='fake-cluster', nsx_controllers=['1.1.1.1:999'],
-            default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar')
-        self.fake_cluster.api_client = client.NsxApiClient(
-            ('1.1.1.1', '999', True),
-            self.fake_cluster.nsx_user, self.fake_cluster.nsx_password,
-            self.fake_cluster.http_timeout,
-            self.fake_cluster.retries, self.fake_cluster.redirects)
-
-        super(NsxlibNegativeBaseTestCase, self).setUp()
-        self.addCleanup(self.fc.reset_all)
diff --git a/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py b/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py
deleted file mode 100644 (file)
index ba45f08..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-# Copyright (c) 2014 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import mock
-from oslo_serialization import jsonutils
-
-from neutron.plugins.vmware.api_client import exception
-from neutron.plugins.vmware.common import utils as nsx_utils
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
-from neutron.plugins.vmware.nsxlib import switch as switchlib
-from neutron.tests.unit import test_api_v2
-from neutron.tests.unit.vmware.nsxlib import base
-
-_uuid = test_api_v2._uuid
-
-
-class L2GatewayNegativeTestCase(base.NsxlibNegativeBaseTestCase):
-
-    def test_create_l2_gw_service_on_failure(self):
-        self.assertRaises(exception.NsxApiException,
-                          l2gwlib.create_l2_gw_service,
-                          self.fake_cluster,
-                          'fake-tenant',
-                          'fake-gateway',
-                          [{'id': _uuid(),
-                           'interface_name': 'xxx'}])
-
-    def test_delete_l2_gw_service_on_failure(self):
-        self.assertRaises(exception.NsxApiException,
-                          l2gwlib.delete_l2_gw_service,
-                          self.fake_cluster,
-                          'fake-gateway')
-
-    def test_get_l2_gw_service_on_failure(self):
-        self.assertRaises(exception.NsxApiException,
-                          l2gwlib.get_l2_gw_service,
-                          self.fake_cluster,
-                          'fake-gateway')
-
-    def test_update_l2_gw_service_on_failure(self):
-        self.assertRaises(exception.NsxApiException,
-                          l2gwlib.update_l2_gw_service,
-                          self.fake_cluster,
-                          'fake-gateway',
-                          'pluto')
-
-
-class L2GatewayTestCase(base.NsxlibTestCase):
-
-    def _create_gw_service(self, node_uuid, display_name,
-                           tenant_id='fake_tenant'):
-        return l2gwlib.create_l2_gw_service(self.fake_cluster,
-                                            tenant_id,
-                                            display_name,
-                                            [{'id': node_uuid,
-                                              'interface_name': 'xxx'}])
-
-    def test_create_l2_gw_service(self):
-        display_name = 'fake-gateway'
-        node_uuid = _uuid()
-        response = self._create_gw_service(node_uuid, display_name)
-        self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
-        self.assertEqual(response.get('display_name'), display_name)
-        gateways = response.get('gateways', [])
-        self.assertEqual(len(gateways), 1)
-        self.assertEqual(gateways[0]['type'], 'L2Gateway')
-        self.assertEqual(gateways[0]['device_id'], 'xxx')
-        self.assertEqual(gateways[0]['transport_node_uuid'], node_uuid)
-
-    def test_update_l2_gw_service(self):
-        display_name = 'fake-gateway'
-        new_display_name = 'still-fake-gateway'
-        node_uuid = _uuid()
-        res1 = self._create_gw_service(node_uuid, display_name)
-        gw_id = res1['uuid']
-        res2 = l2gwlib.update_l2_gw_service(
-            self.fake_cluster, gw_id, new_display_name)
-        self.assertEqual(res2['display_name'], new_display_name)
-
-    def test_get_l2_gw_service(self):
-        display_name = 'fake-gateway'
-        node_uuid = _uuid()
-        gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
-        response = l2gwlib.get_l2_gw_service(self.fake_cluster, gw_id)
-        self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
-        self.assertEqual(response.get('display_name'), display_name)
-        self.assertEqual(response.get('uuid'), gw_id)
-
-    def test_list_l2_gw_service(self):
-        gw_ids = []
-        for name in ('fake-1', 'fake-2'):
-            gw_ids.append(self._create_gw_service(_uuid(), name)['uuid'])
-        results = l2gwlib.get_l2_gw_services(self.fake_cluster)
-        self.assertEqual(len(results), 2)
-        self.assertEqual(sorted(gw_ids), sorted([r['uuid'] for r in results]))
-
-    def test_list_l2_gw_service_by_tenant(self):
-        gw_ids = [self._create_gw_service(
-                  _uuid(), name, tenant_id=name)['uuid']
-                  for name in ('fake-1', 'fake-2')]
-        results = l2gwlib.get_l2_gw_services(self.fake_cluster,
-                                             tenant_id='fake-1')
-        self.assertEqual(len(results), 1)
-        self.assertEqual(results[0]['uuid'], gw_ids[0])
-
-    def test_delete_l2_gw_service(self):
-        display_name = 'fake-gateway'
-        node_uuid = _uuid()
-        gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
-        l2gwlib.delete_l2_gw_service(self.fake_cluster, gw_id)
-        results = l2gwlib.get_l2_gw_services(self.fake_cluster)
-        self.assertEqual(len(results), 0)
-
-    def test_plug_l2_gw_port_attachment(self):
-        tenant_id = 'pippo'
-        node_uuid = _uuid()
-        transport_zones_config = [{'zone_uuid': _uuid(),
-                                   'transport_type': 'stt'}]
-        lswitch = switchlib.create_lswitch(
-            self.fake_cluster, _uuid(), tenant_id,
-            'fake-switch', transport_zones_config)
-        gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid']
-        lport = switchlib.create_lport(
-            self.fake_cluster, lswitch['uuid'], tenant_id, _uuid(),
-            'fake-gw-port', gw_id, True)
-        l2gwlib.plug_l2_gw_service(
-            self.fake_cluster, lswitch['uuid'],
-            lport['uuid'], gw_id)
-        uri = nsxlib._build_uri_path(switchlib.LSWITCHPORT_RESOURCE,
-                                     lport['uuid'],
-                                     lswitch['uuid'],
-                                     is_attachment=True)
-        resp_obj = nsxlib.do_request("GET", uri,
-                                     cluster=self.fake_cluster)
-        self.assertIn('LogicalPortAttachment', resp_obj)
-        self.assertEqual(resp_obj['LogicalPortAttachment']['type'],
-                         'L2GatewayAttachment')
-
-    def _create_expected_req_body(self, display_name, neutron_id,
-                                  connector_type, connector_ip,
-                                  client_certificate):
-        body = {
-            "display_name": display_name,
-            "tags": [{"tag": neutron_id, "scope": "q_gw_dev_id"},
-                     {"tag": 'fake_tenant', "scope": "os_tid"},
-                     {"tag": nsx_utils.NEUTRON_VERSION,
-                      "scope": "quantum"}],
-            "transport_connectors": [
-                {"transport_zone_uuid": 'fake_tz_uuid',
-                    "ip_address": connector_ip,
-                    "type": '%sConnector' % connector_type}],
-            "admin_status_enabled": True
-        }
-        body.get("tags").sort()
-        if client_certificate:
-            body["credential"] = {
-                "client_certificate": {
-                    "pem_encoded": client_certificate},
-                "type": "SecurityCertificateCredential"}
-        return body
-
-    def test_create_gw_device(self):
-        # NOTE(salv-orlando): This unit test mocks backend calls rather than
-        # leveraging the fake NSX API client
-        display_name = 'fake-device'
-        neutron_id = 'whatever'
-        connector_type = 'stt'
-        connector_ip = '1.1.1.1'
-        client_certificate = 'this_should_be_a_certificate'
-        with mock.patch.object(nsxlib, 'do_request') as request_mock:
-            expected_req_body = self._create_expected_req_body(
-                display_name, neutron_id, connector_type.upper(),
-                connector_ip, client_certificate)
-            l2gwlib.create_gateway_device(
-                self.fake_cluster, 'fake_tenant', display_name, neutron_id,
-                'fake_tz_uuid', connector_type, connector_ip,
-                client_certificate)
-            request_mock.assert_called_once_with(
-                "POST",
-                "/ws.v1/transport-node",
-                jsonutils.dumps(expected_req_body, sort_keys=True),
-                cluster=self.fake_cluster)
-
-    def test_update_gw_device(self):
-        # NOTE(salv-orlando): This unit test mocks backend calls rather than
-        # leveraging the fake NSX API client
-        display_name = 'fake-device'
-        neutron_id = 'whatever'
-        connector_type = 'stt'
-        connector_ip = '1.1.1.1'
-        client_certificate = 'this_should_be_a_certificate'
-        with mock.patch.object(nsxlib, 'do_request') as request_mock:
-            expected_req_body = self._create_expected_req_body(
-                display_name, neutron_id, connector_type.upper(),
-                connector_ip, client_certificate)
-            l2gwlib.update_gateway_device(
-                self.fake_cluster, 'whatever', 'fake_tenant',
-                display_name, neutron_id,
-                'fake_tz_uuid', connector_type, connector_ip,
-                client_certificate)
-
-            request_mock.assert_called_once_with(
-                "PUT",
-                "/ws.v1/transport-node/whatever",
-                jsonutils.dumps(expected_req_body, sort_keys=True),
-                cluster=self.fake_cluster)
-
-    def test_update_gw_device_without_certificate(self):
-        # NOTE(salv-orlando): This unit test mocks backend calls rather than
-        # leveraging the fake NSX API client
-        display_name = 'fake-device'
-        neutron_id = 'whatever'
-        connector_type = 'stt'
-        connector_ip = '1.1.1.1'
-        with mock.patch.object(nsxlib, 'do_request') as request_mock:
-            expected_req_body = self._create_expected_req_body(
-                display_name, neutron_id, connector_type.upper(),
-                connector_ip, None)
-            l2gwlib.update_gateway_device(
-                self.fake_cluster, 'whatever', 'fake_tenant',
-                display_name, neutron_id,
-                'fake_tz_uuid', connector_type, connector_ip,
-                client_certificate=None)
-
-            request_mock.assert_called_once_with(
-                "PUT",
-                "/ws.v1/transport-node/whatever",
-                jsonutils.dumps(expected_req_body, sort_keys=True),
-                cluster=self.fake_cluster)
-
-    def test_get_gw_device_status(self):
-        # NOTE(salv-orlando): This unit test mocks backend calls rather than
-        # leveraging the fake NSX API client
-        with mock.patch.object(nsxlib, 'do_request') as request_mock:
-            l2gwlib.get_gateway_device_status(self.fake_cluster, 'whatever')
-            request_mock.assert_called_once_with(
-                "GET",
-                "/ws.v1/transport-node/whatever/status",
-                cluster=self.fake_cluster)
-
-    def test_get_gw_devices_status(self):
-        # NOTE(salv-orlando): This unit test mocks backend calls rather than
-        # leveraging the fake NSX API client
-        with mock.patch.object(nsxlib, 'do_request') as request_mock:
-            request_mock.return_value = {
-                'results': [],
-                'page_cursor': None,
-                'result_count': 0}
-            l2gwlib.get_gateway_devices_status(self.fake_cluster)
-            request_mock.assert_called_once_with(
-                "GET",
-                ("/ws.v1/transport-node?fields=uuid,tags&"
-                 "relations=TransportNodeStatus&"
-                 "_page_length=1000&tag_scope=quantum"),
-                cluster=self.fake_cluster)
-
-    def test_get_gw_devices_status_filter_by_tenant(self):
-        # NOTE(salv-orlando): This unit test mocks backend calls rather than
-        # leveraging the fake NSX API client
-        with mock.patch.object(nsxlib, 'do_request') as request_mock:
-            request_mock.return_value = {
-                'results': [],
-                'page_cursor': None,
-                'result_count': 0}
-            l2gwlib.get_gateway_devices_status(self.fake_cluster,
-                                               tenant_id='ssc_napoli')
-            request_mock.assert_called_once_with(
-                "GET",
-                ("/ws.v1/transport-node?fields=uuid,tags&"
-                 "relations=TransportNodeStatus&"
-                 "tag=ssc_napoli&tag_scope=os_tid&"
-                 "_page_length=1000&tag_scope=quantum"),
-                cluster=self.fake_cluster)
-
-    def test_delete_gw_device(self):
-        # NOTE(salv-orlando): This unit test mocks backend calls rather than
-        # leveraging the fake NSX API client
-        with mock.patch.object(nsxlib, 'do_request') as request_mock:
-            l2gwlib.delete_gateway_device(self.fake_cluster, 'whatever')
-            request_mock.assert_called_once_with(
-                "DELETE",
-                "/ws.v1/transport-node/whatever",
-                cluster=self.fake_cluster)
diff --git a/neutron/tests/unit/vmware/nsxlib/test_lsn.py b/neutron/tests/unit/vmware/nsxlib/test_lsn.py
deleted file mode 100644 (file)
index 4945a5d..0000000
+++ /dev/null
@@ -1,370 +0,0 @@
-# Copyright 2013 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-from oslo_serialization import jsonutils
-
-from neutron.common import exceptions
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware.nsxlib import lsn as lsnlib
-from neutron.tests import base
-
-
-class LSNTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(LSNTestCase, self).setUp()
-        self.mock_request_p = mock.patch(
-            'neutron.plugins.vmware.nsxlib.do_request')
-        self.mock_request = self.mock_request_p.start()
-        self.cluster = mock.Mock()
-        self.cluster.default_service_cluster_uuid = 'foo'
-
-    def test_service_cluster_None(self):
-        self.mock_request.return_value = None
-        expected = lsnlib.service_cluster_exists(None, None)
-        self.assertFalse(expected)
-
-    def test_service_cluster_found(self):
-        self.mock_request.return_value = {
-            "results": [
-                {
-                    "_href": "/ws.v1/service-cluster/foo_uuid",
-                    "display_name": "foo_name",
-                    "uuid": "foo_uuid",
-                    "tags": [],
-                    "_schema": "/ws.v1/schema/ServiceClusterConfig",
-                    "gateways": []
-                }
-            ],
-            "result_count": 1
-        }
-        expected = lsnlib.service_cluster_exists(None, 'foo_uuid')
-        self.assertTrue(expected)
-
-    def test_service_cluster_not_found(self):
-        self.mock_request.side_effect = exceptions.NotFound()
-        expected = lsnlib.service_cluster_exists(None, 'foo_uuid')
-        self.assertFalse(expected)
-
-    def test_lsn_for_network_create(self):
-        net_id = "foo_network_id"
-        tags = utils.get_tags(n_network_id=net_id)
-        obj = {"edge_cluster_uuid": "foo", "tags": tags}
-        lsnlib.lsn_for_network_create(self.cluster, net_id)
-        self.mock_request.assert_called_once_with(
-            "POST", "/ws.v1/lservices-node",
-            jsonutils.dumps(obj), cluster=self.cluster)
-
-    def test_lsn_for_network_get(self):
-        net_id = "foo_network_id"
-        lsn_id = "foo_lsn_id"
-        self.mock_request.return_value = {
-            "results": [{"uuid": "foo_lsn_id"}],
-            "result_count": 1
-        }
-        result = lsnlib.lsn_for_network_get(self.cluster, net_id)
-        self.assertEqual(lsn_id, result)
-        self.mock_request.assert_called_once_with(
-            "GET",
-            ("/ws.v1/lservices-node?fields=uuid&tag=%s&"
-             "tag_scope=n_network_id" % net_id),
-            cluster=self.cluster)
-
-    def test_lsn_for_network_get_none(self):
-        net_id = "foo_network_id"
-        self.mock_request.return_value = {
-            "results": [{"uuid": "foo_lsn_id1"}, {"uuid": "foo_lsn_id2"}],
-            "result_count": 2
-        }
-        result = lsnlib.lsn_for_network_get(self.cluster, net_id)
-        self.assertIsNone(result)
-
-    def test_lsn_for_network_get_raise_not_found(self):
-        net_id = "foo_network_id"
-        self.mock_request.return_value = {
-            "results": [], "result_count": 0
-        }
-        self.assertRaises(exceptions.NotFound,
-                          lsnlib.lsn_for_network_get,
-                          self.cluster, net_id)
-
-    def test_lsn_delete(self):
-        lsn_id = "foo_id"
-        lsnlib.lsn_delete(self.cluster, lsn_id)
-        self.mock_request.assert_called_once_with(
-            "DELETE",
-            "/ws.v1/lservices-node/%s" % lsn_id, cluster=self.cluster)
-
-    def _test_lsn_port_host_entries_update(self, lsn_type, hosts_data):
-        lsn_id = 'foo_lsn_id'
-        lsn_port_id = 'foo_lsn_port_id'
-        lsnlib.lsn_port_host_entries_update(
-            self.cluster, lsn_id, lsn_port_id, lsn_type, hosts_data)
-        self.mock_request.assert_called_once_with(
-            'PUT',
-            '/ws.v1/lservices-node/%s/lport/%s/%s' % (lsn_id,
-                                                      lsn_port_id,
-                                                      lsn_type),
-            jsonutils.dumps({'hosts': hosts_data}),
-            cluster=self.cluster)
-
-    def test_lsn_port_dhcp_entries_update(self):
-        hosts_data = [{"ip_address": "11.22.33.44",
-                       "mac_address": "aa:bb:cc:dd:ee:ff"},
-                      {"ip_address": "44.33.22.11",
-                       "mac_address": "ff:ee:dd:cc:bb:aa"}]
-        self._test_lsn_port_host_entries_update("dhcp", hosts_data)
-
-    def test_lsn_port_metadata_entries_update(self):
-        hosts_data = [{"ip_address": "11.22.33.44",
-                       "device_id": "foo_vm_uuid"}]
-        self._test_lsn_port_host_entries_update("metadata-proxy", hosts_data)
-
-    def test_lsn_port_create(self):
-        port_data = {
-            "ip_address": "1.2.3.0/24",
-            "mac_address": "aa:bb:cc:dd:ee:ff",
-            "subnet_id": "foo_subnet_id"
-        }
-        port_id = "foo_port_id"
-        self.mock_request.return_value = {"uuid": port_id}
-        lsn_id = "foo_lsn_id"
-        result = lsnlib.lsn_port_create(self.cluster, lsn_id, port_data)
-        self.assertEqual(result, port_id)
-        tags = utils.get_tags(n_subnet_id=port_data["subnet_id"],
-                              n_mac_address=port_data["mac_address"])
-        port_obj = {
-            "ip_address": port_data["ip_address"],
-            "mac_address": port_data["mac_address"],
-            "type": "LogicalServicesNodePortConfig",
-            "tags": tags
-        }
-        self.mock_request.assert_called_once_with(
-            "POST", "/ws.v1/lservices-node/%s/lport" % lsn_id,
-            jsonutils.dumps(port_obj), cluster=self.cluster)
-
-    def test_lsn_port_delete(self):
-        lsn_id = "foo_lsn_id"
-        lsn_port_id = "foo_port_id"
-        lsnlib.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
-        self.mock_request.assert_called_once_with(
-            "DELETE",
-            "/ws.v1/lservices-node/%s/lport/%s" % (lsn_id, lsn_port_id),
-            cluster=self.cluster)
-
-    def test_lsn_port_get_with_filters(self):
-        lsn_id = "foo_lsn_id"
-        port_id = "foo_port_id"
-        filters = {"tag": "foo_tag", "tag_scope": "foo_scope"}
-        self.mock_request.return_value = {
-            "results": [{"uuid": port_id}],
-            "result_count": 1
-        }
-        result = lsnlib._lsn_port_get(self.cluster, lsn_id, filters)
-        self.assertEqual(result, port_id)
-        self.mock_request.assert_called_once_with(
-            "GET",
-            ("/ws.v1/lservices-node/%s/lport?fields=uuid&tag=%s&"
-             "tag_scope=%s" % (lsn_id, filters["tag"], filters["tag_scope"])),
-            cluster=self.cluster)
-
-    def test_lsn_port_get_with_filters_return_none(self):
-        self.mock_request.return_value = {
-            "results": [{"uuid": "foo1"}, {"uuid": "foo2"}],
-            "result_count": 2
-        }
-        result = lsnlib._lsn_port_get(self.cluster, "lsn_id", None)
-        self.assertIsNone(result)
-
-    def test_lsn_port_get_with_filters_raises_not_found(self):
-        self.mock_request.return_value = {"results": [], "result_count": 0}
-        self.assertRaises(exceptions.NotFound,
-                          lsnlib._lsn_port_get,
-                          self.cluster, "lsn_id", None)
-
-    def test_lsn_port_info_get(self):
-        self.mock_request.return_value = {
-            "tags": [
-                {"scope": "n_mac_address", "tag": "fa:16:3e:27:fd:a0"},
-                {"scope": "n_subnet_id", "tag": "foo_subnet_id"},
-            ],
-            "mac_address": "aa:bb:cc:dd:ee:ff",
-            "ip_address": "0.0.0.0/0",
-            "uuid": "foo_lsn_port_id"
-        }
-        result = lsnlib.lsn_port_info_get(
-            self.cluster, 'foo_lsn_id', 'foo_lsn_port_id')
-        self.mock_request.assert_called_once_with(
-            'GET', '/ws.v1/lservices-node/foo_lsn_id/lport/foo_lsn_port_id',
-            cluster=self.cluster)
-        self.assertIn('subnet_id', result)
-        self.assertIn('mac_address', result)
-
-    def test_lsn_port_info_get_raise_not_found(self):
-        self.mock_request.side_effect = exceptions.NotFound
-        self.assertRaises(exceptions.NotFound,
-                          lsnlib.lsn_port_info_get,
-                          self.cluster, mock.ANY, mock.ANY)
-
-    def test_lsn_port_plug_network(self):
-        lsn_id = "foo_lsn_id"
-        lsn_port_id = "foo_lsn_port_id"
-        lswitch_port_id = "foo_lswitch_port_id"
-        lsnlib.lsn_port_plug_network(
-            self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
-        self.mock_request.assert_called_once_with(
-            "PUT",
-            ("/ws.v1/lservices-node/%s/lport/%s/"
-             "attachment") % (lsn_id, lsn_port_id),
-            jsonutils.dumps({"peer_port_uuid": lswitch_port_id,
-                             "type": "PatchAttachment"}),
-            cluster=self.cluster)
-
-    def test_lsn_port_plug_network_raise_conflict(self):
-        lsn_id = "foo_lsn_id"
-        lsn_port_id = "foo_lsn_port_id"
-        lswitch_port_id = "foo_lswitch_port_id"
-        self.mock_request.side_effect = api_exc.Conflict
-        self.assertRaises(
-            nsx_exc.LsnConfigurationConflict,
-            lsnlib.lsn_port_plug_network,
-            self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
-
-    def _test_lsn_port_dhcp_configure(
-        self, lsn_id, lsn_port_id, is_enabled, opts):
-        lsnlib.lsn_port_dhcp_configure(
-            self.cluster, lsn_id, lsn_port_id, is_enabled, opts)
-        opt_array = [
-            {"name": key, "value": val}
-            for key, val in opts.iteritems()
-        ]
-        self.mock_request.assert_has_calls([
-            mock.call("PUT", "/ws.v1/lservices-node/%s/dhcp" % lsn_id,
-                      jsonutils.dumps({"enabled": is_enabled}),
-                      cluster=self.cluster),
-            mock.call("PUT",
-                      ("/ws.v1/lservices-node/%s/"
-                       "lport/%s/dhcp") % (lsn_id, lsn_port_id),
-                      jsonutils.dumps({"options": opt_array}),
-                      cluster=self.cluster)
-        ])
-
-    def test_lsn_port_dhcp_configure_empty_opts(self):
-        lsn_id = "foo_lsn_id"
-        lsn_port_id = "foo_lsn_port_id"
-        is_enabled = False
-        opts = {}
-        self._test_lsn_port_dhcp_configure(
-            lsn_id, lsn_port_id, is_enabled, opts)
-
-    def test_lsn_port_dhcp_configure_with_opts(self):
-        lsn_id = "foo_lsn_id"
-        lsn_port_id = "foo_lsn_port_id"
-        is_enabled = True
-        opts = {"opt1": "val1", "opt2": "val2"}
-        self._test_lsn_port_dhcp_configure(
-            lsn_id, lsn_port_id, is_enabled, opts)
-
-    def _test_lsn_metadata_configure(
-        self, lsn_id, is_enabled, opts, expected_opts):
-        lsnlib.lsn_metadata_configure(
-            self.cluster, lsn_id, is_enabled, opts)
-        lsn_obj = {"enabled": is_enabled}
-        lsn_obj.update(expected_opts)
-        self.mock_request.assert_has_calls([
-            mock.call("PUT",
-                      "/ws.v1/lservices-node/%s/metadata-proxy" % lsn_id,
-                      jsonutils.dumps(lsn_obj),
-                      cluster=self.cluster),
-        ])
-
-    def test_lsn_port_metadata_configure_empty_secret(self):
-        lsn_id = "foo_lsn_id"
-        is_enabled = True
-        opts = {
-            "metadata_server_ip": "1.2.3.4",
-            "metadata_server_port": "8775"
-        }
-        expected_opts = {
-            "metadata_server_ip": "1.2.3.4",
-            "metadata_server_port": "8775",
-        }
-        self._test_lsn_metadata_configure(
-            lsn_id, is_enabled, opts, expected_opts)
-
-    def test_lsn_metadata_configure_with_secret(self):
-        lsn_id = "foo_lsn_id"
-        is_enabled = True
-        opts = {
-            "metadata_server_ip": "1.2.3.4",
-            "metadata_server_port": "8775",
-            "metadata_proxy_shared_secret": "foo_secret"
-        }
-        expected_opts = {
-            "metadata_server_ip": "1.2.3.4",
-            "metadata_server_port": "8775",
-            "options": [{
-                "name": "metadata_proxy_shared_secret",
-                "value": "foo_secret"
-            }]
-        }
-        self._test_lsn_metadata_configure(
-            lsn_id, is_enabled, opts, expected_opts)
-
-    def _test_lsn_port_host_action(
-            self, lsn_port_action_func, extra_action, action, host):
-        lsn_id = "foo_lsn_id"
-        lsn_port_id = "foo_lsn_port_id"
-        lsn_port_action_func(self.cluster, lsn_id, lsn_port_id, host)
-        self.mock_request.assert_called_once_with(
-            "POST",
-            ("/ws.v1/lservices-node/%s/lport/"
-             "%s/%s?action=%s") % (lsn_id, lsn_port_id, extra_action, action),
-            jsonutils.dumps(host), cluster=self.cluster)
-
-    def test_lsn_port_dhcp_host_add(self):
-        host = {
-            "ip_address": "1.2.3.4",
-            "mac_address": "aa:bb:cc:dd:ee:ff"
-        }
-        self._test_lsn_port_host_action(
-            lsnlib.lsn_port_dhcp_host_add, "dhcp", "add_host", host)
-
-    def test_lsn_port_dhcp_host_remove(self):
-        host = {
-            "ip_address": "1.2.3.4",
-            "mac_address": "aa:bb:cc:dd:ee:ff"
-        }
-        self._test_lsn_port_host_action(
-            lsnlib.lsn_port_dhcp_host_remove, "dhcp", "remove_host", host)
-
-    def test_lsn_port_metadata_host_add(self):
-        host = {
-            "ip_address": "1.2.3.4",
-            "instance_id": "foo_instance_id"
-        }
-        self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_add,
-                                        "metadata-proxy", "add_host", host)
-
-    def test_lsn_port_metadata_host_remove(self):
-        host = {
-            "ip_address": "1.2.3.4",
-            "instance_id": "foo_instance_id"
-        }
-        self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_remove,
-                                        "metadata-proxy", "remove_host", host)
diff --git a/neutron/tests/unit/vmware/nsxlib/test_queue.py b/neutron/tests/unit/vmware/nsxlib/test_queue.py
deleted file mode 100644 (file)
index 1d7e2ea..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright (c) 2014 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import mock
-
-from neutron.common import exceptions
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import queue as queuelib
-from neutron.tests.unit.vmware.nsxlib import base
-
-
-class TestLogicalQueueLib(base.NsxlibTestCase):
-
-    def setUp(self):
-        super(TestLogicalQueueLib, self).setUp()
-        self.fake_queue = {
-            'name': 'fake_queue',
-            'min': 0, 'max': 256,
-            'dscp': 0, 'qos_marking': False
-        }
-
-    def test_create_and_get_lqueue(self):
-        queue_id = queuelib.create_lqueue(
-            self.fake_cluster, self.fake_queue)
-        queue_res = nsxlib.do_request(
-            'GET',
-            nsxlib._build_uri_path('lqueue', resource_id=queue_id),
-            cluster=self.fake_cluster)
-        self.assertEqual(queue_id, queue_res['uuid'])
-        self.assertEqual('fake_queue', queue_res['display_name'])
-
-    def test_create_lqueue_nsx_error_raises(self):
-        def raise_nsx_exc(*args, **kwargs):
-            raise api_exc.NsxApiException()
-
-        with mock.patch.object(nsxlib, 'do_request', new=raise_nsx_exc):
-            self.assertRaises(
-                exceptions.NeutronException, queuelib.create_lqueue,
-                self.fake_cluster, self.fake_queue)
-
-    def test_delete_lqueue(self):
-        queue_id = queuelib.create_lqueue(
-            self.fake_cluster, self.fake_queue)
-        queuelib.delete_lqueue(self.fake_cluster, queue_id)
-        self.assertRaises(exceptions.NotFound,
-                          nsxlib.do_request,
-                          'GET',
-                          nsxlib._build_uri_path(
-                              'lqueue', resource_id=queue_id),
-                          cluster=self.fake_cluster)
-
-    def test_delete_non_existing_lqueue_raises(self):
-        self.assertRaises(exceptions.NeutronException,
-                          queuelib.delete_lqueue,
-                          self.fake_cluster, 'whatever')
diff --git a/neutron/tests/unit/vmware/nsxlib/test_router.py b/neutron/tests/unit/vmware/nsxlib/test_router.py
deleted file mode 100644 (file)
index 9aeb28c..0000000
+++ /dev/null
@@ -1,948 +0,0 @@
-# Copyright (c) 2014 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import mock
-
-from oslo_config import cfg
-
-from neutron.common import exceptions
-from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.api_client import version as version_module
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import router as routerlib
-from neutron.plugins.vmware.nsxlib import switch as switchlib
-from neutron.tests.unit import test_api_v2
-from neutron.tests.unit.vmware.nsxlib import base
-
-_uuid = test_api_v2._uuid
-
-
-class TestNatRules(base.NsxlibTestCase):
-
-    def _test_create_lrouter_dnat_rule(self, version):
-        with mock.patch.object(self.fake_cluster.api_client,
-                               'get_version',
-                               new=lambda: version_module.Version(version)):
-            tenant_id = 'pippo'
-            lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                               uuidutils.generate_uuid(),
-                                               tenant_id,
-                                               'fake_router',
-                                               '192.168.0.1')
-            nat_rule = routerlib.create_lrouter_dnat_rule(
-                self.fake_cluster, lrouter['uuid'], '10.0.0.99',
-                match_criteria={'destination_ip_addresses':
-                                '192.168.0.5'})
-            uri = nsxlib._build_uri_path(routerlib.LROUTERNAT_RESOURCE,
-                                         nat_rule['uuid'],
-                                         lrouter['uuid'])
-            resp_obj = nsxlib.do_request("GET", uri, cluster=self.fake_cluster)
-            self.assertEqual('DestinationNatRule', resp_obj['type'])
-            self.assertEqual('192.168.0.5',
-                             resp_obj['match']['destination_ip_addresses'])
-
-    def test_create_lrouter_dnat_rule_v2(self):
-        self._test_create_lrouter_dnat_rule('2.9')
-
-    def test_create_lrouter_dnat_rule_v31(self):
-        self._test_create_lrouter_dnat_rule('3.1')
-
-
-class TestExplicitLRouters(base.NsxlibTestCase):
-
-    def setUp(self):
-        self.fake_version = '3.2'
-        super(TestExplicitLRouters, self).setUp()
-
-    def _get_lrouter(self, tenant_id, router_name, router_id, relations=None):
-        schema = '/ws.v1/schema/RoutingTableRoutingConfig'
-
-        router = {'display_name': router_name,
-                  'uuid': router_id,
-                  'tags': utils.get_tags(os_tid=tenant_id),
-                  'distributed': False,
-                  'routing_config': {'type': 'RoutingTableRoutingConfig',
-                                     '_schema': schema},
-                  '_schema': schema,
-                  'nat_synchronization_enabled': True,
-                  'replication_mode': 'service',
-                  'type': 'LogicalRouterConfig',
-                  '_href': '/ws.v1/lrouter/%s' % router_id, }
-        if relations:
-            router['_relations'] = relations
-        return router
-
-    def _get_single_route(self, router_id, route_id='fake_route_id_0',
-                          prefix='0.0.0.0/0', next_hop_ip='1.1.1.1'):
-        return {'protocol': 'static',
-                '_href': '/ws.v1/lrouter/%s/rib/%s' % (router_id, route_id),
-                'prefix': prefix,
-                '_schema': '/ws.v1/schema/RoutingTableEntry',
-                'next_hop_ip': next_hop_ip,
-                'action': 'accept',
-                'uuid': route_id}
-
-    def test_prepare_body_with_implicit_routing_config(self):
-        router_name = 'fake_router_name'
-        tenant_id = 'fake_tenant_id'
-        neutron_router_id = 'pipita_higuain'
-        router_type = 'SingleDefaultRouteImplicitRoutingConfig'
-        route_config = {
-            'default_route_next_hop': {'gateway_ip_address': 'fake_address',
-                                       'type': 'RouterNextHop'}, }
-        body = routerlib._prepare_lrouter_body(router_name, neutron_router_id,
-                                               tenant_id, router_type,
-                                               **route_config)
-        expected = {'display_name': 'fake_router_name',
-                    'routing_config': {
-                        'default_route_next_hop':
-                        {'gateway_ip_address': 'fake_address',
-                         'type': 'RouterNextHop'},
-                        'type': 'SingleDefaultRouteImplicitRoutingConfig'},
-                    'tags': utils.get_tags(os_tid='fake_tenant_id',
-                                           q_router_id='pipita_higuain'),
-                    'type': 'LogicalRouterConfig',
-                    'replication_mode': cfg.CONF.NSX.replication_mode}
-        self.assertEqual(expected, body)
-
-    def test_prepare_body_without_routing_config(self):
-        router_name = 'fake_router_name'
-        tenant_id = 'fake_tenant_id'
-        neutron_router_id = 'marekiaro_hamsik'
-        router_type = 'RoutingTableRoutingConfig'
-        body = routerlib._prepare_lrouter_body(router_name, neutron_router_id,
-                                               tenant_id, router_type)
-        expected = {'display_name': 'fake_router_name',
-                    'routing_config': {'type': 'RoutingTableRoutingConfig'},
-                    'tags': utils.get_tags(os_tid='fake_tenant_id',
-                                           q_router_id='marekiaro_hamsik'),
-                    'type': 'LogicalRouterConfig',
-                    'replication_mode': cfg.CONF.NSX.replication_mode}
-        self.assertEqual(expected, body)
-
-    def test_get_lrouter(self):
-        tenant_id = 'fake_tenant_id'
-        router_name = 'fake_router_name'
-        router_id = 'fake_router_id'
-        relations = {
-            'LogicalRouterStatus':
-            {'_href': '/ws.v1/lrouter/%s/status' % router_id,
-             'lport_admin_up_count': 1,
-             '_schema': '/ws.v1/schema/LogicalRouterStatus',
-             'lport_count': 1,
-             'fabric_status': True,
-             'type': 'LogicalRouterStatus',
-             'lport_link_up_count': 0, }, }
-
-        with mock.patch.object(nsxlib, 'do_request',
-                               return_value=self._get_lrouter(tenant_id,
-                                                              router_name,
-                                                              router_id,
-                                                              relations)):
-            lrouter = routerlib.get_lrouter(self.fake_cluster, router_id)
-            self.assertTrue(
-                lrouter['_relations']['LogicalRouterStatus']['fabric_status'])
-
-    def test_create_lrouter(self):
-        tenant_id = 'fake_tenant_id'
-        router_name = 'fake_router_name'
-        router_id = 'fake_router_id'
-        nexthop_ip = '10.0.0.1'
-        with mock.patch.object(
-            nsxlib, 'do_request',
-            return_value=self._get_lrouter(tenant_id,
-                                           router_name,
-                                           router_id)):
-            lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                               uuidutils.generate_uuid(),
-                                               tenant_id,
-                                               router_name, nexthop_ip)
-            self.assertEqual(lrouter['routing_config']['type'],
-                             'RoutingTableRoutingConfig')
-            self.assertNotIn('default_route_next_hop',
-                             lrouter['routing_config'])
-
-    def test_update_lrouter_with_no_routes(self):
-        router_id = 'fake_router_id'
-        new_routes = [{"nexthop": "10.0.0.2",
-                       "destination": "169.254.169.0/30"}, ]
-
-        nsx_routes = [self._get_single_route(router_id)]
-        with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
-                               return_value=nsx_routes):
-            with mock.patch.object(routerlib, 'create_explicit_route_lrouter',
-                                   return_value='fake_uuid'):
-                old_routes = routerlib.update_explicit_routes_lrouter(
-                    self.fake_cluster, router_id, new_routes)
-        self.assertEqual(old_routes, nsx_routes)
-
-    def test_update_lrouter_with_no_routes_raise_nsx_exception(self):
-        router_id = 'fake_router_id'
-        new_routes = [{"nexthop": "10.0.0.2",
-                       "destination": "169.254.169.0/30"}, ]
-
-        nsx_routes = [self._get_single_route(router_id)]
-        with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
-                               return_value=nsx_routes):
-            with mock.patch.object(routerlib, 'create_explicit_route_lrouter',
-                                   side_effect=api_exc.NsxApiException):
-                self.assertRaises(api_exc.NsxApiException,
-                                  routerlib.update_explicit_routes_lrouter,
-                                  self.fake_cluster, router_id, new_routes)
-
-    def test_update_lrouter_with_routes(self):
-        router_id = 'fake_router_id'
-        new_routes = [{"next_hop_ip": "10.0.0.2",
-                       "prefix": "169.254.169.0/30"}, ]
-
-        nsx_routes = [self._get_single_route(router_id),
-                      self._get_single_route(router_id, 'fake_route_id_1',
-                                             '0.0.0.1/24', '10.0.0.3'),
-                      self._get_single_route(router_id, 'fake_route_id_2',
-                                             '0.0.0.2/24', '10.0.0.4'), ]
-
-        with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
-                               return_value=nsx_routes):
-            with mock.patch.object(routerlib, 'delete_explicit_route_lrouter',
-                                   return_value=None):
-                with mock.patch.object(routerlib,
-                                       'create_explicit_route_lrouter',
-                                       return_value='fake_uuid'):
-                    old_routes = routerlib.update_explicit_routes_lrouter(
-                        self.fake_cluster, router_id, new_routes)
-        self.assertEqual(old_routes, nsx_routes)
-
-    def test_update_lrouter_with_routes_raises_nsx_expception(self):
-        router_id = 'fake_router_id'
-        new_routes = [{"nexthop": "10.0.0.2",
-                       "destination": "169.254.169.0/30"}, ]
-
-        nsx_routes = [self._get_single_route(router_id),
-                      self._get_single_route(router_id, 'fake_route_id_1',
-                                             '0.0.0.1/24', '10.0.0.3'),
-                      self._get_single_route(router_id, 'fake_route_id_2',
-                                             '0.0.0.2/24', '10.0.0.4'), ]
-
-        with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
-                               return_value=nsx_routes):
-            with mock.patch.object(routerlib, 'delete_explicit_route_lrouter',
-                                   side_effect=api_exc.NsxApiException):
-                with mock.patch.object(
-                    routerlib, 'create_explicit_route_lrouter',
-                    return_value='fake_uuid'):
-                    self.assertRaises(
-                        api_exc.NsxApiException,
-                        routerlib.update_explicit_routes_lrouter,
-                        self.fake_cluster, router_id, new_routes)
-
-
-class RouterNegativeTestCase(base.NsxlibNegativeBaseTestCase):
-
-    def test_create_lrouter_on_failure(self):
-        self.assertRaises(api_exc.NsxApiException,
-                          routerlib.create_lrouter,
-                          self.fake_cluster,
-                          uuidutils.generate_uuid(),
-                          'pluto',
-                          'fake_router',
-                          'my_hop')
-
-    def test_delete_lrouter_on_failure(self):
-        self.assertRaises(api_exc.NsxApiException,
-                          routerlib.delete_lrouter,
-                          self.fake_cluster,
-                          'fake_router')
-
-    def test_get_lrouter_on_failure(self):
-        self.assertRaises(api_exc.NsxApiException,
-                          routerlib.get_lrouter,
-                          self.fake_cluster,
-                          'fake_router')
-
-    def test_update_lrouter_on_failure(self):
-        self.assertRaises(api_exc.NsxApiException,
-                          routerlib.update_lrouter,
-                          self.fake_cluster,
-                          'fake_router',
-                          'pluto',
-                          'new_hop')
-
-
-class TestLogicalRouters(base.NsxlibTestCase):
-
-    def _verify_lrouter(self, res_lrouter,
-                        expected_uuid,
-                        expected_display_name,
-                        expected_nexthop,
-                        expected_tenant_id,
-                        expected_neutron_id=None,
-                        expected_distributed=None):
-        self.assertEqual(res_lrouter['uuid'], expected_uuid)
-        nexthop = (res_lrouter['routing_config']
-                   ['default_route_next_hop']['gateway_ip_address'])
-        self.assertEqual(nexthop, expected_nexthop)
-        router_tags = self._build_tag_dict(res_lrouter['tags'])
-        self.assertIn('os_tid', router_tags)
-        self.assertEqual(res_lrouter['display_name'], expected_display_name)
-        self.assertEqual(expected_tenant_id, router_tags['os_tid'])
-        if expected_distributed is not None:
-            self.assertEqual(expected_distributed,
-                             res_lrouter['distributed'])
-        if expected_neutron_id:
-            self.assertIn('q_router_id', router_tags)
-            self.assertEqual(expected_neutron_id, router_tags['q_router_id'])
-
-    def test_get_lrouters(self):
-        lrouter_uuids = [routerlib.create_lrouter(
-            self.fake_cluster, 'whatever', 'pippo', 'fake-lrouter-%s' % k,
-            '10.0.0.1')['uuid'] for k in range(3)]
-        routers = routerlib.get_lrouters(self.fake_cluster, 'pippo')
-        for router in routers:
-            self.assertIn(router['uuid'], lrouter_uuids)
-
-    def _create_lrouter(self, version, neutron_id=None, distributed=None):
-        with mock.patch.object(
-            self.fake_cluster.api_client, 'get_version',
-            return_value=version_module.Version(version)):
-            if not neutron_id:
-                neutron_id = uuidutils.generate_uuid()
-            lrouter = routerlib.create_lrouter(
-                self.fake_cluster, neutron_id, 'pippo',
-                'fake-lrouter', '10.0.0.1', distributed=distributed)
-            return routerlib.get_lrouter(self.fake_cluster,
-                                         lrouter['uuid'])
-
-    def test_create_and_get_lrouter_v30(self):
-        neutron_id = uuidutils.generate_uuid()
-        res_lrouter = self._create_lrouter('3.0', neutron_id=neutron_id)
-        self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
-                             'fake-lrouter', '10.0.0.1', 'pippo',
-                             expected_neutron_id=neutron_id)
-
-    def test_create_and_get_lrouter_v31_centralized(self):
-        neutron_id = uuidutils.generate_uuid()
-        res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id,
-                                           distributed=False)
-        self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
-                             'fake-lrouter', '10.0.0.1', 'pippo',
-                             expected_neutron_id=neutron_id,
-                             expected_distributed=False)
-
-    def test_create_and_get_lrouter_v31_distributed(self):
-        neutron_id = uuidutils.generate_uuid()
-        res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id,
-                                           distributed=True)
-        self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
-                             'fake-lrouter', '10.0.0.1', 'pippo',
-                             expected_neutron_id=neutron_id,
-                             expected_distributed=True)
-
-    def test_create_and_get_lrouter_name_exceeds_40chars(self):
-        neutron_id = uuidutils.generate_uuid()
-        display_name = '*' * 50
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           neutron_id,
-                                           'pippo',
-                                           display_name,
-                                           '10.0.0.1')
-        res_lrouter = routerlib.get_lrouter(self.fake_cluster,
-                                            lrouter['uuid'])
-        self._verify_lrouter(res_lrouter, lrouter['uuid'],
-                             '*' * 40, '10.0.0.1', 'pippo',
-                             expected_neutron_id=neutron_id)
-
-    def _test_version_dependent_update_lrouter(self, version):
-        def foo(*args, **kwargs):
-            return version
-
-        foo_func_dict = {
-            'update_lrouter': {
-                2: {-1: foo},
-                3: {-1: foo, 2: foo}
-            }
-        }
-
-        with mock.patch.object(self.fake_cluster.api_client,
-                               'get_version',
-                               return_value=version_module.Version(version)):
-            with mock.patch.dict(routerlib.ROUTER_FUNC_DICT,
-                                 foo_func_dict, clear=True):
-                return routerlib.update_lrouter(
-                    self.fake_cluster, 'foo_router_id', 'foo_router_name',
-                    'foo_nexthop', routes={'foo_destination': 'foo_address'})
-
-    def test_version_dependent_update_lrouter_old_versions(self):
-        self.assertRaises(nsx_exc.InvalidVersion,
-                          self._test_version_dependent_update_lrouter,
-                          "2.9")
-        self.assertRaises(nsx_exc.InvalidVersion,
-                          self._test_version_dependent_update_lrouter,
-                          "3.0")
-        self.assertRaises(nsx_exc.InvalidVersion,
-                          self._test_version_dependent_update_lrouter,
-                          "3.1")
-
-    def test_version_dependent_update_lrouter_new_versions(self):
-        self.assertEqual("3.2",
-                         self._test_version_dependent_update_lrouter("3.2"))
-        self.assertEqual("4.0",
-                         self._test_version_dependent_update_lrouter("4.0"))
-        self.assertEqual("4.1",
-                         self._test_version_dependent_update_lrouter("4.1"))
-
-    def test_update_lrouter_no_nexthop(self):
-        neutron_id = uuidutils.generate_uuid()
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           neutron_id,
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter = routerlib.update_lrouter(self.fake_cluster,
-                                           lrouter['uuid'],
-                                           'new_name',
-                                           None)
-        res_lrouter = routerlib.get_lrouter(self.fake_cluster,
-                                            lrouter['uuid'])
-        self._verify_lrouter(res_lrouter, lrouter['uuid'],
-                             'new_name', '10.0.0.1', 'pippo',
-                             expected_neutron_id=neutron_id)
-
-    def test_update_lrouter(self):
-        neutron_id = uuidutils.generate_uuid()
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           neutron_id,
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter = routerlib.update_lrouter(self.fake_cluster,
-                                           lrouter['uuid'],
-                                           'new_name',
-                                           '192.168.0.1')
-        res_lrouter = routerlib.get_lrouter(self.fake_cluster,
-                                            lrouter['uuid'])
-        self._verify_lrouter(res_lrouter, lrouter['uuid'],
-                             'new_name', '192.168.0.1', 'pippo',
-                             expected_neutron_id=neutron_id)
-
-    def test_update_nonexistent_lrouter_raises(self):
-        self.assertRaises(exceptions.NotFound,
-                          routerlib.update_lrouter,
-                          self.fake_cluster,
-                          'whatever',
-                          'foo', '9.9.9.9')
-
-    def test_delete_lrouter(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        routerlib.delete_lrouter(self.fake_cluster, lrouter['uuid'])
-        self.assertRaises(exceptions.NotFound,
-                          routerlib.get_lrouter,
-                          self.fake_cluster,
-                          lrouter['uuid'])
-
-    def test_query_lrouter_ports(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        router_port_uuids = [routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo',
-            'qp_id_%s' % k, 'port-%s' % k, True,
-            ['192.168.0.%s' % k], '00:11:22:33:44:55')['uuid']
-            for k in range(3)]
-        ports = routerlib.query_lrouter_lports(
-            self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(ports), 3)
-        for res_port in ports:
-            self.assertIn(res_port['uuid'], router_port_uuids)
-
-    def test_query_lrouter_lports_nonexistent_lrouter_raises(self):
-        self.assertRaises(
-            exceptions.NotFound, routerlib.create_router_lport,
-            self.fake_cluster, 'booo', 'pippo', 'neutron_port_id',
-            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
-
-    def test_create_and_get_lrouter_port(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
-            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
-        ports = routerlib.query_lrouter_lports(
-            self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(ports), 1)
-        res_port = ports[0]
-        port_tags = self._build_tag_dict(res_port['tags'])
-        self.assertEqual(['192.168.0.1'], res_port['ip_addresses'])
-        self.assertIn('os_tid', port_tags)
-        self.assertIn('q_port_id', port_tags)
-        self.assertEqual('pippo', port_tags['os_tid'])
-        self.assertEqual('neutron_port_id', port_tags['q_port_id'])
-
-    def test_create_lrouter_port_nonexistent_router_raises(self):
-        self.assertRaises(
-            exceptions.NotFound, routerlib.create_router_lport,
-            self.fake_cluster, 'booo', 'pippo', 'neutron_port_id',
-            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
-
-    def test_update_lrouter_port(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter_port = routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
-            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
-        routerlib.update_router_lport(
-            self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
-            'pippo', 'another_port_id', 'name', False,
-            ['192.168.0.1', '10.10.10.254'])
-
-        ports = routerlib.query_lrouter_lports(
-            self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(ports), 1)
-        res_port = ports[0]
-        port_tags = self._build_tag_dict(res_port['tags'])
-        self.assertEqual(['192.168.0.1', '10.10.10.254'],
-                         res_port['ip_addresses'])
-        self.assertEqual('False', res_port['admin_status_enabled'])
-        self.assertIn('os_tid', port_tags)
-        self.assertIn('q_port_id', port_tags)
-        self.assertEqual('pippo', port_tags['os_tid'])
-        self.assertEqual('another_port_id', port_tags['q_port_id'])
-
-    def test_update_lrouter_port_nonexistent_router_raises(self):
-        self.assertRaises(
-            exceptions.NotFound, routerlib.update_router_lport,
-            self.fake_cluster, 'boo-router', 'boo-port', 'pippo',
-            'neutron_port_id', 'name', True, ['192.168.0.1'])
-
-    def test_update_lrouter_port_nonexistent_port_raises(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        self.assertRaises(
-            exceptions.NotFound, routerlib.update_router_lport,
-            self.fake_cluster, lrouter['uuid'], 'boo-port', 'pippo',
-            'neutron_port_id', 'name', True, ['192.168.0.1'])
-
-    def test_delete_lrouter_port(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter_port = routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [],
-            '00:11:22:33:44:55')
-        ports = routerlib.query_lrouter_lports(
-            self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(ports), 1)
-        routerlib.delete_router_lport(self.fake_cluster, lrouter['uuid'],
-                                      lrouter_port['uuid'])
-        ports = routerlib.query_lrouter_lports(
-            self.fake_cluster, lrouter['uuid'])
-        self.assertFalse(len(ports))
-
-    def test_delete_lrouter_port_nonexistent_router_raises(self):
-        self.assertRaises(exceptions.NotFound,
-                          routerlib.delete_router_lport,
-                          self.fake_cluster, 'xyz', 'abc')
-
-    def test_delete_lrouter_port_nonexistent_port_raises(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        self.assertRaises(exceptions.NotFound,
-                          routerlib.delete_router_lport,
-                          self.fake_cluster, lrouter['uuid'], 'abc')
-
-    def test_delete_peer_lrouter_port(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter_port = routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [],
-            '00:11:22:33:44:55')
-
-        def fakegetport(*args, **kwargs):
-            return {'_relations': {'LogicalPortAttachment':
-                                   {'peer_port_uuid': lrouter_port['uuid']}}}
-        # mock get_port
-        with mock.patch.object(switchlib, 'get_port', new=fakegetport):
-            routerlib.delete_peer_router_lport(self.fake_cluster,
-                                               lrouter_port['uuid'],
-                                               'whatwever', 'whatever')
-
-    def test_update_lrouter_port_ips_add_only(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter_port = routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
-            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
-        routerlib.update_lrouter_port_ips(
-            self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
-            ['10.10.10.254'], [])
-        ports = routerlib.query_lrouter_lports(
-            self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(ports), 1)
-        res_port = ports[0]
-        self.assertEqual(['10.10.10.254', '192.168.0.1'],
-                         res_port['ip_addresses'])
-
-    def test_update_lrouter_port_ips_remove_only(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter_port = routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
-            'name', True, ['192.168.0.1', '10.10.10.254'],
-            '00:11:22:33:44:55')
-        routerlib.update_lrouter_port_ips(
-            self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
-            [], ['10.10.10.254'])
-        ports = routerlib.query_lrouter_lports(
-            self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(ports), 1)
-        res_port = ports[0]
-        self.assertEqual(['192.168.0.1'], res_port['ip_addresses'])
-
-    def test_update_lrouter_port_ips_add_and_remove(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter_port = routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
-            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
-        routerlib.update_lrouter_port_ips(
-            self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
-            ['10.10.10.254'], ['192.168.0.1'])
-        ports = routerlib.query_lrouter_lports(
-            self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(ports), 1)
-        res_port = ports[0]
-        self.assertEqual(['10.10.10.254'], res_port['ip_addresses'])
-
-    def test_update_lrouter_port_ips_nonexistent_router_raises(self):
-        self.assertRaises(
-            nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips,
-            self.fake_cluster, 'boo-router', 'boo-port', [], [])
-
-    def test_update_lrouter_port_ips_nsx_exception_raises(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter_port = routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
-            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
-
-        def raise_nsx_exc(*args, **kwargs):
-            raise api_exc.NsxApiException()
-
-        with mock.patch.object(nsxlib, 'do_request', new=raise_nsx_exc):
-            self.assertRaises(
-                nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips,
-                self.fake_cluster, lrouter['uuid'],
-                lrouter_port['uuid'], [], [])
-
-    def test_plug_lrouter_port_patch_attachment(self):
-        tenant_id = 'pippo'
-        transport_zones_config = [{'zone_uuid': _uuid(),
-                                   'transport_type': 'stt'}]
-        lswitch = switchlib.create_lswitch(self.fake_cluster,
-                                           _uuid(),
-                                           tenant_id, 'fake-switch',
-                                           transport_zones_config)
-        lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'],
-                                       tenant_id, 'xyz',
-                                       'name', 'device_id', True)
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           tenant_id,
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter_port = routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
-            'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66')
-        result = routerlib.plug_router_port_attachment(
-            self.fake_cluster, lrouter['uuid'],
-            lrouter_port['uuid'],
-            lport['uuid'], 'PatchAttachment')
-        self.assertEqual(lport['uuid'],
-                         result['LogicalPortAttachment']['peer_port_uuid'])
-
-    def test_plug_lrouter_port_l3_gw_attachment(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter_port = routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
-            'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66')
-        result = routerlib.plug_router_port_attachment(
-            self.fake_cluster, lrouter['uuid'],
-            lrouter_port['uuid'],
-            'gw_att', 'L3GatewayAttachment')
-        self.assertEqual(
-            'gw_att',
-            result['LogicalPortAttachment']['l3_gateway_service_uuid'])
-
-    def test_plug_lrouter_port_l3_gw_attachment_with_vlan(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter_port = routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
-            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
-        result = routerlib.plug_router_port_attachment(
-            self.fake_cluster, lrouter['uuid'],
-            lrouter_port['uuid'],
-            'gw_att', 'L3GatewayAttachment', 123)
-        self.assertEqual(
-            'gw_att',
-            result['LogicalPortAttachment']['l3_gateway_service_uuid'])
-        self.assertEqual(
-            '123',
-            result['LogicalPortAttachment']['vlan_id'])
-
-    def test_plug_lrouter_port_invalid_attachment_type_raises(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        lrouter_port = routerlib.create_router_lport(
-            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
-            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
-        self.assertRaises(nsx_exc.InvalidAttachmentType,
-                          routerlib.plug_router_port_attachment,
-                          self.fake_cluster, lrouter['uuid'],
-                          lrouter_port['uuid'], 'gw_att', 'BadType')
-
-    def _test_create_router_snat_rule(self, version):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        with mock.patch.object(self.fake_cluster.api_client,
-                               'get_version',
-                               new=lambda: version_module.Version(version)):
-            routerlib.create_lrouter_snat_rule(
-                self.fake_cluster, lrouter['uuid'],
-                '10.0.0.2', '10.0.0.2', order=200,
-                match_criteria={'source_ip_addresses': '192.168.0.24'})
-            rules = routerlib.query_nat_rules(
-                self.fake_cluster, lrouter['uuid'])
-            self.assertEqual(len(rules), 1)
-
-    def test_create_router_snat_rule_v3(self):
-        self._test_create_router_snat_rule('3.0')
-
-    def test_create_router_snat_rule_v2(self):
-        self._test_create_router_snat_rule('2.0')
-
-    def _test_create_router_dnat_rule(self, version, dest_port=None):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        with mock.patch.object(self.fake_cluster.api_client,
-                               'get_version',
-                               return_value=version_module.Version(version)):
-            routerlib.create_lrouter_dnat_rule(
-                self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200,
-                dest_port=dest_port,
-                match_criteria={'destination_ip_addresses': '10.0.0.3'})
-            rules = routerlib.query_nat_rules(
-                self.fake_cluster, lrouter['uuid'])
-            self.assertEqual(len(rules), 1)
-
-    def test_create_router_dnat_rule_v3(self):
-        self._test_create_router_dnat_rule('3.0')
-
-    def test_create_router_dnat_rule_v2(self):
-        self._test_create_router_dnat_rule('2.0')
-
-    def test_create_router_dnat_rule_v2_with_destination_port(self):
-        self._test_create_router_dnat_rule('2.0', 8080)
-
-    def test_create_router_dnat_rule_v3_with_destination_port(self):
-        self._test_create_router_dnat_rule('3.0', 8080)
-
-    def test_create_router_snat_rule_invalid_match_keys_raises(self):
-        # In this case the version does not make a difference
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-
-        with mock.patch.object(self.fake_cluster.api_client,
-                               'get_version',
-                               new=lambda: '2.0'):
-            self.assertRaises(AttributeError,
-                              routerlib.create_lrouter_snat_rule,
-                              self.fake_cluster, lrouter['uuid'],
-                              '10.0.0.2', '10.0.0.2', order=200,
-                              match_criteria={'foo': 'bar'})
-
-    def _test_create_router_nosnat_rule(self, version, expected=1):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        with mock.patch.object(self.fake_cluster.api_client,
-                               'get_version',
-                               new=lambda: version_module.Version(version)):
-            routerlib.create_lrouter_nosnat_rule(
-                self.fake_cluster, lrouter['uuid'],
-                order=100,
-                match_criteria={'destination_ip_addresses': '192.168.0.0/24'})
-            rules = routerlib.query_nat_rules(
-                self.fake_cluster, lrouter['uuid'])
-            # NoSNAT rules do not exist in V2
-            self.assertEqual(len(rules), expected)
-
-    def test_create_router_nosnat_rule_v2(self):
-        self._test_create_router_nosnat_rule('2.0', expected=0)
-
-    def test_create_router_nosnat_rule_v3(self):
-        self._test_create_router_nosnat_rule('3.0')
-
-    def _prepare_nat_rules_for_delete_tests(self):
-        lrouter = routerlib.create_lrouter(self.fake_cluster,
-                                           uuidutils.generate_uuid(),
-                                           'pippo',
-                                           'fake-lrouter',
-                                           '10.0.0.1')
-        # v2 or v3 makes no difference for this test
-        with mock.patch.object(self.fake_cluster.api_client,
-                               'get_version',
-                               new=lambda: version_module.Version('2.0')):
-            routerlib.create_lrouter_snat_rule(
-                self.fake_cluster, lrouter['uuid'],
-                '10.0.0.2', '10.0.0.2', order=220,
-                match_criteria={'source_ip_addresses': '192.168.0.0/24'})
-            routerlib.create_lrouter_snat_rule(
-                self.fake_cluster, lrouter['uuid'],
-                '10.0.0.3', '10.0.0.3', order=200,
-                match_criteria={'source_ip_addresses': '192.168.0.2/32'})
-            routerlib.create_lrouter_dnat_rule(
-                self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200,
-                match_criteria={'destination_ip_addresses': '10.0.0.3'})
-        return lrouter
-
-    def test_delete_router_nat_rules_by_match_on_destination_ip(self):
-        lrouter = self._prepare_nat_rules_for_delete_tests()
-        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(rules), 3)
-        routerlib.delete_nat_rules_by_match(
-            self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 1, 1,
-            destination_ip_addresses='10.0.0.3')
-        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(rules), 2)
-
-    def test_delete_router_nat_rules_by_match_on_source_ip(self):
-        lrouter = self._prepare_nat_rules_for_delete_tests()
-        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(rules), 3)
-        routerlib.delete_nat_rules_by_match(
-            self.fake_cluster, lrouter['uuid'], 'SourceNatRule', 1, 1,
-            source_ip_addresses='192.168.0.2/32')
-        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(rules), 2)
-
-    def test_delete_router_nat_rules_by_match_no_match_expected(self):
-        lrouter = self._prepare_nat_rules_for_delete_tests()
-        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(rules), 3)
-        routerlib.delete_nat_rules_by_match(
-            self.fake_cluster, lrouter['uuid'], 'SomeWeirdType', 0)
-        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(rules), 3)
-        routerlib.delete_nat_rules_by_match(
-            self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 0,
-            destination_ip_addresses='99.99.99.99')
-        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(rules), 3)
-
-    def test_delete_router_nat_rules_by_match_no_match_raises(self):
-        lrouter = self._prepare_nat_rules_for_delete_tests()
-        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(rules), 3)
-        self.assertRaises(
-            nsx_exc.NatRuleMismatch,
-            routerlib.delete_nat_rules_by_match,
-            self.fake_cluster, lrouter['uuid'],
-            'SomeWeirdType', 1, 1)
-
-    def test_delete_nat_rules_by_match_len_mismatch_does_not_raise(self):
-        lrouter = self._prepare_nat_rules_for_delete_tests()
-        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
-        self.assertEqual(len(rules), 3)
-        deleted_rules = routerlib.delete_nat_rules_by_match(
-            self.fake_cluster, lrouter['uuid'],
-            'DestinationNatRule',
-            max_num_expected=1, min_num_expected=1,
-            raise_on_len_mismatch=False,
-            destination_ip_addresses='99.99.99.99')
-        self.assertEqual(0, deleted_rules)
-        # add an extra rule to emulate a duplicate one
-        with mock.patch.object(self.fake_cluster.api_client,
-                               'get_version',
-                               new=lambda: version_module.Version('2.0')):
-            routerlib.create_lrouter_snat_rule(
-                self.fake_cluster, lrouter['uuid'],
-                '10.0.0.2', '10.0.0.2', order=220,
-                match_criteria={'source_ip_addresses': '192.168.0.0/24'})
-        deleted_rules_2 = routerlib.delete_nat_rules_by_match(
-            self.fake_cluster, lrouter['uuid'], 'SourceNatRule',
-            min_num_expected=1, max_num_expected=1,
-            raise_on_len_mismatch=False,
-            source_ip_addresses='192.168.0.0/24')
-        self.assertEqual(2, deleted_rules_2)
diff --git a/neutron/tests/unit/vmware/nsxlib/test_secgroup.py b/neutron/tests/unit/vmware/nsxlib/test_secgroup.py
deleted file mode 100644 (file)
index fb2574f..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright (c) 2014 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from neutron.common import exceptions
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
-from neutron.tests.unit import test_api_v2
-from neutron.tests.unit.vmware.nsxlib import base
-
-_uuid = test_api_v2._uuid
-
-
-class SecurityProfileTestCase(base.NsxlibTestCase):
-
-    def test_create_and_get_security_profile(self):
-        sec_prof = secgrouplib.create_security_profile(
-            self.fake_cluster, _uuid(), 'pippo', {'name': 'test'})
-        sec_prof_res = nsxlib.do_request(
-            secgrouplib.HTTP_GET,
-            nsxlib._build_uri_path('security-profile',
-                                   resource_id=sec_prof['uuid']),
-            cluster=self.fake_cluster)
-        self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid'])
-        # Check for builtin rules
-        self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 1)
-        self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 2)
-
-    def test_create_and_get_default_security_profile(self):
-        sec_prof = secgrouplib.create_security_profile(
-            self.fake_cluster, _uuid(), 'pippo', {'name': 'default'})
-        sec_prof_res = nsxlib.do_request(
-            secgrouplib.HTTP_GET,
-            nsxlib._build_uri_path('security-profile',
-                                   resource_id=sec_prof['uuid']),
-            cluster=self.fake_cluster)
-        self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid'])
-        # Check for builtin rules
-        self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 3)
-        self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 2)
-
-    def test_update_security_profile_raise_not_found(self):
-        self.assertRaises(exceptions.NotFound,
-                          secgrouplib.update_security_profile,
-                          self.fake_cluster,
-                          _uuid(), 'tatore_magno(the great)')
-
-    def test_update_security_profile(self):
-        tenant_id = 'foo_tenant_uuid'
-        secgroup_id = 'foo_secgroup_uuid'
-        old_sec_prof = secgrouplib.create_security_profile(
-            self.fake_cluster, tenant_id, secgroup_id,
-            {'name': 'tatore_magno'})
-        new_sec_prof = secgrouplib.update_security_profile(
-            self.fake_cluster, old_sec_prof['uuid'], 'aaron_magno')
-        self.assertEqual('aaron_magno', new_sec_prof['display_name'])
-
-    def test_update_security_profile_rules(self):
-        sec_prof = secgrouplib.create_security_profile(
-            self.fake_cluster, _uuid(), 'pippo', {'name': 'test'})
-        ingress_rule = {'ethertype': 'IPv4'}
-        egress_rule = {'ethertype': 'IPv4', 'profile_uuid': 'xyz'}
-        new_rules = {'logical_port_egress_rules': [egress_rule],
-                     'logical_port_ingress_rules': [ingress_rule]}
-        secgrouplib.update_security_group_rules(
-            self.fake_cluster, sec_prof['uuid'], new_rules)
-        sec_prof_res = nsxlib.do_request(
-            nsxlib.HTTP_GET,
-            nsxlib._build_uri_path('security-profile',
-                                   resource_id=sec_prof['uuid']),
-            cluster=self.fake_cluster)
-        self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid'])
-        # Check for builtin rules
-        self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 2)
-        self.assertIn(egress_rule,
-                      sec_prof_res['logical_port_egress_rules'])
-        self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1)
-        self.assertIn(ingress_rule,
-                      sec_prof_res['logical_port_ingress_rules'])
-
-    def test_update_security_profile_rules_noingress(self):
-        sec_prof = secgrouplib.create_security_profile(
-            self.fake_cluster, _uuid(), 'pippo', {'name': 'test'})
-        hidden_ingress_rule = {'ethertype': 'IPv4',
-                               'ip_prefix': '127.0.0.1/32'}
-        egress_rule = {'ethertype': 'IPv4', 'profile_uuid': 'xyz'}
-        new_rules = {'logical_port_egress_rules': [egress_rule],
-                     'logical_port_ingress_rules': []}
-        secgrouplib.update_security_group_rules(
-            self.fake_cluster, sec_prof['uuid'], new_rules)
-        sec_prof_res = nsxlib.do_request(
-            nsxlib.HTTP_GET,
-            nsxlib._build_uri_path('security-profile',
-                                   resource_id=sec_prof['uuid']),
-            cluster=self.fake_cluster)
-        self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid'])
-        # Check for builtin rules
-        self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 2)
-        self.assertIn(egress_rule,
-                      sec_prof_res['logical_port_egress_rules'])
-        self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1)
-        self.assertIn(hidden_ingress_rule,
-                      sec_prof_res['logical_port_ingress_rules'])
-
-    def test_update_non_existing_securityprofile_raises(self):
-        self.assertRaises(exceptions.NeutronException,
-                          secgrouplib.update_security_group_rules,
-                          self.fake_cluster, 'whatever',
-                          {'logical_port_egress_rules': [],
-                           'logical_port_ingress_rules': []})
-
-    def test_delete_security_profile(self):
-        sec_prof = secgrouplib.create_security_profile(
-            self.fake_cluster, _uuid(), 'pippo', {'name': 'test'})
-        secgrouplib.delete_security_profile(
-            self.fake_cluster, sec_prof['uuid'])
-        self.assertRaises(exceptions.NotFound,
-                          nsxlib.do_request,
-                          nsxlib.HTTP_GET,
-                          nsxlib._build_uri_path(
-                              'security-profile',
-                              resource_id=sec_prof['uuid']),
-                          cluster=self.fake_cluster)
-
-    def test_delete_non_existing_securityprofile_raises(self):
-        self.assertRaises(exceptions.NeutronException,
-                          secgrouplib.delete_security_profile,
-                          self.fake_cluster, 'whatever')
diff --git a/neutron/tests/unit/vmware/nsxlib/test_switch.py b/neutron/tests/unit/vmware/nsxlib/test_switch.py
deleted file mode 100644 (file)
index db8c5af..0000000
+++ /dev/null
@@ -1,314 +0,0 @@
-# Copyright (c) 2014 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import hashlib
-import mock
-
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware.nsxlib import switch as switchlib
-from neutron.tests.unit import test_api_v2
-from neutron.tests.unit.vmware.nsxlib import base
-
-_uuid = test_api_v2._uuid
-
-
-class LogicalSwitchesTestCase(base.NsxlibTestCase):
-
-    def test_create_and_get_lswitches_single(self):
-        tenant_id = 'pippo'
-        transport_zones_config = [{'zone_uuid': _uuid(),
-                                   'transport_type': 'stt'}]
-        lswitch = switchlib.create_lswitch(self.fake_cluster,
-                                           _uuid(),
-                                           tenant_id,
-                                           'fake-switch',
-                                           transport_zones_config)
-        res_lswitch = switchlib.get_lswitches(self.fake_cluster,
-                                              lswitch['uuid'])
-        self.assertEqual(len(res_lswitch), 1)
-        self.assertEqual(res_lswitch[0]['uuid'],
-                         lswitch['uuid'])
-
-    def test_create_and_get_lswitches_single_name_exceeds_40_chars(self):
-        tenant_id = 'pippo'
-        transport_zones_config = [{'zone_uuid': _uuid(),
-                                   'transport_type': 'stt'}]
-        lswitch = switchlib.create_lswitch(self.fake_cluster,
-                                           tenant_id,
-                                           _uuid(),
-                                           '*' * 50,
-                                           transport_zones_config)
-        res_lswitch = switchlib.get_lswitches(self.fake_cluster,
-                                              lswitch['uuid'])
-        self.assertEqual(len(res_lswitch), 1)
-        self.assertEqual(res_lswitch[0]['uuid'], lswitch['uuid'])
-        self.assertEqual(res_lswitch[0]['display_name'], '*' * 40)
-
-    def test_create_and_get_lswitches_multiple(self):
-        tenant_id = 'pippo'
-        transport_zones_config = [{'zone_uuid': _uuid(),
-                                   'transport_type': 'stt'}]
-        network_id = _uuid()
-        main_lswitch = switchlib.create_lswitch(
-            self.fake_cluster, network_id,
-            tenant_id, 'fake-switch', transport_zones_config,
-            tags=[{'scope': 'multi_lswitch', 'tag': 'True'}])
-        # Create secondary lswitch
-        second_lswitch = switchlib.create_lswitch(
-            self.fake_cluster, network_id,
-            tenant_id, 'fake-switch-2', transport_zones_config)
-        res_lswitch = switchlib.get_lswitches(self.fake_cluster,
-                                              network_id)
-        self.assertEqual(len(res_lswitch), 2)
-        switch_uuids = [ls['uuid'] for ls in res_lswitch]
-        self.assertIn(main_lswitch['uuid'], switch_uuids)
-        self.assertIn(second_lswitch['uuid'], switch_uuids)
-        for ls in res_lswitch:
-            if ls['uuid'] == main_lswitch['uuid']:
-                main_ls = ls
-            else:
-                second_ls = ls
-        main_ls_tags = self._build_tag_dict(main_ls['tags'])
-        second_ls_tags = self._build_tag_dict(second_ls['tags'])
-        self.assertIn('multi_lswitch', main_ls_tags)
-        self.assertNotIn('multi_lswitch', second_ls_tags)
-        self.assertIn('quantum_net_id', main_ls_tags)
-        self.assertIn('quantum_net_id', second_ls_tags)
-        self.assertEqual(main_ls_tags['quantum_net_id'],
-                         network_id)
-        self.assertEqual(second_ls_tags['quantum_net_id'],
-                         network_id)
-
-    def _test_update_lswitch(self, tenant_id, name, tags):
-        transport_zones_config = [{'zone_uuid': _uuid(),
-                                   'transport_type': 'stt'}]
-        lswitch = switchlib.create_lswitch(self.fake_cluster,
-                                           _uuid(),
-                                           'pippo',
-                                           'fake-switch',
-                                           transport_zones_config)
-        switchlib.update_lswitch(self.fake_cluster, lswitch['uuid'],
-                                 name, tenant_id=tenant_id, tags=tags)
-        res_lswitch = switchlib.get_lswitches(self.fake_cluster,
-                                              lswitch['uuid'])
-        self.assertEqual(len(res_lswitch), 1)
-        self.assertEqual(res_lswitch[0]['display_name'], name)
-        if not tags:
-            # no need to validate tags
-            return
-        switch_tags = self._build_tag_dict(res_lswitch[0]['tags'])
-        for tag in tags:
-            self.assertIn(tag['scope'], switch_tags)
-            self.assertEqual(tag['tag'], switch_tags[tag['scope']])
-
-    def test_update_lswitch(self):
-        self._test_update_lswitch(None, 'new-name',
-                                  [{'scope': 'new_tag', 'tag': 'xxx'}])
-
-    def test_update_lswitch_no_tags(self):
-        self._test_update_lswitch(None, 'new-name', None)
-
-    def test_update_lswitch_tenant_id(self):
-        self._test_update_lswitch('whatever', 'new-name', None)
-
-    def test_update_non_existing_lswitch_raises(self):
-        self.assertRaises(exceptions.NetworkNotFound,
-                          switchlib.update_lswitch,
-                          self.fake_cluster, 'whatever',
-                          'foo', 'bar')
-
-    def test_delete_networks(self):
-        transport_zones_config = [{'zone_uuid': _uuid(),
-                                   'transport_type': 'stt'}]
-        lswitch = switchlib.create_lswitch(self.fake_cluster,
-                                           _uuid(),
-                                           'pippo',
-                                           'fake-switch',
-                                           transport_zones_config)
-        switchlib.delete_networks(self.fake_cluster, lswitch['uuid'],
-                                  [lswitch['uuid']])
-        self.assertRaises(exceptions.NotFound,
-                          switchlib.get_lswitches,
-                          self.fake_cluster,
-                          lswitch['uuid'])
-
-    def test_delete_non_existing_lswitch_raises(self):
-        self.assertRaises(exceptions.NetworkNotFound,
-                          switchlib.delete_networks,
-                          self.fake_cluster, 'whatever', ['whatever'])
-
-
-class LogicalPortsTestCase(base.NsxlibTestCase):
-
-    def _create_switch_and_port(self, tenant_id='pippo',
-                                neutron_port_id='whatever',
-                                name='name', device_id='device_id'):
-        transport_zones_config = [{'zone_uuid': _uuid(),
-                                   'transport_type': 'stt'}]
-        lswitch = switchlib.create_lswitch(self.fake_cluster,
-                                           _uuid(), tenant_id, 'fake-switch',
-                                           transport_zones_config)
-        lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'],
-                                       tenant_id, neutron_port_id,
-                                       name, device_id, True)
-        return lswitch, lport
-
-    def test_create_and_get_port(self):
-        lswitch, lport = self._create_switch_and_port()
-        lport_res = switchlib.get_port(self.fake_cluster,
-                                       lswitch['uuid'], lport['uuid'])
-        self.assertEqual(lport['uuid'], lport_res['uuid'])
-        # Try again with relation
-        lport_res = switchlib.get_port(self.fake_cluster,
-                                       lswitch['uuid'], lport['uuid'],
-                                       relations='LogicalPortStatus')
-        self.assertEqual(lport['uuid'], lport_res['uuid'])
-
-    def test_plug_interface(self):
-        lswitch, lport = self._create_switch_and_port()
-        switchlib.plug_vif_interface(self.fake_cluster, lswitch['uuid'],
-                                     lport['uuid'], 'VifAttachment', 'fake')
-        lport_res = switchlib.get_port(self.fake_cluster,
-                                       lswitch['uuid'], lport['uuid'])
-        self.assertEqual(lport['uuid'], lport_res['uuid'])
-
-    def test_get_port_by_tag(self):
-        lswitch, lport = self._create_switch_and_port()
-        lport2 = switchlib.get_port_by_neutron_tag(self.fake_cluster,
-                                                   lswitch['uuid'],
-                                                   'whatever')
-        self.assertIsNotNone(lport2)
-        self.assertEqual(lport['uuid'], lport2['uuid'])
-
-    def test_get_port_by_tag_not_found_with_switch_id_raises_not_found(self):
-        tenant_id = 'pippo'
-        neutron_port_id = 'whatever'
-        transport_zones_config = [{'zone_uuid': _uuid(),
-                                   'transport_type': 'stt'}]
-        lswitch = switchlib.create_lswitch(
-            self.fake_cluster, tenant_id, _uuid(),
-            'fake-switch', transport_zones_config)
-        self.assertRaises(exceptions.NotFound,
-                          switchlib.get_port_by_neutron_tag,
-                          self.fake_cluster, lswitch['uuid'],
-                          neutron_port_id)
-
-    def test_get_port_by_tag_not_find_wildcard_lswitch_returns_none(self):
-        tenant_id = 'pippo'
-        neutron_port_id = 'whatever'
-        transport_zones_config = [{'zone_uuid': _uuid(),
-                                   'transport_type': 'stt'}]
-        switchlib.create_lswitch(
-            self.fake_cluster, tenant_id, _uuid(),
-            'fake-switch', transport_zones_config)
-        lport = switchlib.get_port_by_neutron_tag(
-            self.fake_cluster, '*', neutron_port_id)
-        self.assertIsNone(lport)
-
-    def test_get_port_status(self):
-        lswitch, lport = self._create_switch_and_port()
-        status = switchlib.get_port_status(
-            self.fake_cluster, lswitch['uuid'], lport['uuid'])
-        self.assertEqual(constants.PORT_STATUS_ACTIVE, status)
-
-    def test_get_port_status_non_existent_raises(self):
-        self.assertRaises(exceptions.PortNotFoundOnNetwork,
-                          switchlib.get_port_status,
-                          self.fake_cluster,
-                          'boo', 'boo')
-
-    def test_update_port(self):
-        lswitch, lport = self._create_switch_and_port()
-        switchlib.update_port(
-            self.fake_cluster, lswitch['uuid'], lport['uuid'],
-            'neutron_port_id', 'pippo2', 'new_name', 'device_id', False)
-        lport_res = switchlib.get_port(self.fake_cluster,
-                                       lswitch['uuid'], lport['uuid'])
-        self.assertEqual(lport['uuid'], lport_res['uuid'])
-        self.assertEqual('new_name', lport_res['display_name'])
-        self.assertEqual('False', lport_res['admin_status_enabled'])
-        port_tags = self._build_tag_dict(lport_res['tags'])
-        self.assertIn('os_tid', port_tags)
-        self.assertIn('q_port_id', port_tags)
-        self.assertIn('vm_id', port_tags)
-
-    def test_create_port_device_id_less_than_40_chars(self):
-        lswitch, lport = self._create_switch_and_port()
-        lport_res = switchlib.get_port(self.fake_cluster,
-                                       lswitch['uuid'], lport['uuid'])
-        port_tags = self._build_tag_dict(lport_res['tags'])
-        self.assertEqual('device_id', port_tags['vm_id'])
-
-    def test_create_port_device_id_more_than_40_chars(self):
-        dev_id = "this_is_a_very_long_device_id_with_lots_of_characters"
-        lswitch, lport = self._create_switch_and_port(device_id=dev_id)
-        lport_res = switchlib.get_port(self.fake_cluster,
-                                       lswitch['uuid'], lport['uuid'])
-        port_tags = self._build_tag_dict(lport_res['tags'])
-        self.assertNotEqual(len(dev_id), len(port_tags['vm_id']))
-
-    def test_get_ports_with_obsolete_and_new_vm_id_tag(self):
-        def obsolete(device_id, obfuscate=False):
-            return hashlib.sha1(device_id).hexdigest()
-
-        with mock.patch.object(utils, 'device_id_to_vm_id', new=obsolete):
-            dev_id1 = "short-dev-id-1"
-            _, lport1 = self._create_switch_and_port(device_id=dev_id1)
-        dev_id2 = "short-dev-id-2"
-        _, lport2 = self._create_switch_and_port(device_id=dev_id2)
-
-        lports = switchlib.get_ports(self.fake_cluster, None, [dev_id1])
-        port_tags = self._build_tag_dict(lports['whatever']['tags'])
-        self.assertNotEqual(dev_id1, port_tags['vm_id'])
-
-        lports = switchlib.get_ports(self.fake_cluster, None, [dev_id2])
-        port_tags = self._build_tag_dict(lports['whatever']['tags'])
-        self.assertEqual(dev_id2, port_tags['vm_id'])
-
-    def test_update_non_existent_port_raises(self):
-        self.assertRaises(exceptions.PortNotFoundOnNetwork,
-                          switchlib.update_port, self.fake_cluster,
-                          'boo', 'boo', 'boo', 'boo', 'boo', 'boo', False)
-
-    def test_delete_port(self):
-        lswitch, lport = self._create_switch_and_port()
-        switchlib.delete_port(self.fake_cluster,
-                              lswitch['uuid'], lport['uuid'])
-        self.assertRaises(exceptions.PortNotFoundOnNetwork,
-                          switchlib.get_port, self.fake_cluster,
-                          lswitch['uuid'], lport['uuid'])
-
-    def test_delete_non_existent_port_raises(self):
-        lswitch = self._create_switch_and_port()[0]
-        self.assertRaises(exceptions.PortNotFoundOnNetwork,
-                          switchlib.delete_port, self.fake_cluster,
-                          lswitch['uuid'], 'bad_port_uuid')
-
-    def test_query_lswitch_ports(self):
-        lswitch, lport = self._create_switch_and_port()
-        switch_port_uuids = [
-            switchlib.create_lport(
-                self.fake_cluster, lswitch['uuid'], 'pippo', 'qportid-%s' % k,
-                'port-%s' % k, 'deviceid-%s' % k, True)['uuid']
-            for k in range(2)]
-        switch_port_uuids.append(lport['uuid'])
-        ports = switchlib.query_lswitch_lports(
-            self.fake_cluster, lswitch['uuid'])
-        self.assertEqual(len(ports), 3)
-        for res_port in ports:
-            self.assertIn(res_port['uuid'], switch_port_uuids)
diff --git a/neutron/tests/unit/vmware/nsxlib/test_versioning.py b/neutron/tests/unit/vmware/nsxlib/test_versioning.py
deleted file mode 100644 (file)
index a50f942..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) 2014 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from neutron.plugins.vmware.api_client import exception
-from neutron.plugins.vmware.api_client import version as version_module
-from neutron.plugins.vmware.nsxlib import router as routerlib
-from neutron.plugins.vmware.nsxlib import versioning
-from neutron.tests import base
-
-
-class TestVersioning(base.BaseTestCase):
-
-    def test_function_handling_missing_minor(self):
-        version = version_module.Version('2.0')
-        function = versioning.get_function_by_version(
-            routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version)
-        self.assertEqual(routerlib.create_implicit_routing_lrouter,
-                         function)
-
-    def test_function_handling_with_both_major_and_minor(self):
-        version = version_module.Version('3.2')
-        function = versioning.get_function_by_version(
-            routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version)
-        self.assertEqual(routerlib.create_explicit_routing_lrouter,
-                         function)
-
-    def test_function_handling_with_newer_major(self):
-        version = version_module.Version('5.2')
-        function = versioning.get_function_by_version(
-            routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version)
-        self.assertEqual(routerlib.create_explicit_routing_lrouter,
-                         function)
-
-    def test_function_handling_with_obsolete_major(self):
-        version = version_module.Version('1.2')
-        self.assertRaises(NotImplementedError,
-                          versioning.get_function_by_version,
-                          routerlib.ROUTER_FUNC_DICT,
-                          'create_lrouter', version)
-
-    def test_function_handling_with_unknown_version(self):
-        self.assertRaises(exception.ServiceUnavailable,
-                          versioning.get_function_by_version,
-                          routerlib.ROUTER_FUNC_DICT,
-                          'create_lrouter', None)
diff --git a/neutron/tests/unit/vmware/test_agent_scheduler.py b/neutron/tests/unit/vmware/test_agent_scheduler.py
deleted file mode 100644 (file)
index e84639a..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import mock
-from oslo_config import cfg
-
-from neutron.common import constants
-from neutron.common import test_lib
-from neutron.plugins.vmware.common import sync
-from neutron.plugins.vmware.dhcp_meta import rpc
-from neutron.tests.unit.openvswitch import test_agent_scheduler as test_base
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware.apiclient import fake
-
-
-class DhcpAgentNotifierTestCase(test_base.OvsDhcpAgentNotifierTestCase):
-    plugin_str = vmware.PLUGIN_NAME
-
-    def setUp(self):
-        test_lib.test_config['config_files'] = [
-            vmware.get_fake_conf('nsx.ini.full.test')]
-
-        # mock api client
-        self.fc = fake.FakeClient(vmware.STUBS_PATH)
-        self.mock_nsx_api = mock.patch(vmware.NSXAPI_NAME, autospec=True)
-        instance = self.mock_nsx_api.start()
-        # Avoid runs of the synchronizer looping call
-        patch_sync = mock.patch.object(sync, '_start_loopingcall')
-        patch_sync.start()
-
-        # Emulate tests against NSX 2.x
-        instance.return_value.get_version.return_value = "2.999"
-        instance.return_value.request.side_effect = self.fc.fake_request
-        super(DhcpAgentNotifierTestCase, self).setUp()
-        self.addCleanup(self.fc.reset_all)
-        self.addCleanup(patch_sync.stop)
-        self.addCleanup(self.mock_nsx_api.stop)
-
-    def _test_gateway_subnet_notification(self, gateway='10.0.0.1'):
-        cfg.CONF.set_override('metadata_mode', 'dhcp_host_route', 'NSX')
-        hosts = ['hosta']
-        with mock.patch.object(rpc.LOG, 'info') as mock_log:
-            net, subnet, port = self._network_port_create(
-                hosts, gateway=gateway, owner=constants.DEVICE_OWNER_DHCP)
-            self.assertEqual(subnet['subnet']['gateway_ip'], gateway)
-            called = 1 if gateway is None else 0
-            self.assertEqual(called, mock_log.call_count)
-
-    def test_gatewayless_subnet_notification(self):
-        self._test_gateway_subnet_notification(gateway=None)
-
-    def test_subnet_with_gateway_notification(self):
-        self._test_gateway_subnet_notification()
diff --git a/neutron/tests/unit/vmware/test_dhcpmeta.py b/neutron/tests/unit/vmware/test_dhcpmeta.py
deleted file mode 100644 (file)
index 77367dd..0000000
+++ /dev/null
@@ -1,1427 +0,0 @@
-# Copyright 2013 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-from oslo_config import cfg
-
-from neutron.common import constants as n_consts
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.plugins.vmware.api_client import exception
-from neutron.plugins.vmware.common import exceptions as p_exc
-from neutron.plugins.vmware.dbexts import lsn_db
-from neutron.plugins.vmware.dhcp_meta import constants
-from neutron.plugins.vmware.dhcp_meta import lsnmanager as lsn_man
-from neutron.plugins.vmware.dhcp_meta import migration as mig_man
-from neutron.plugins.vmware.dhcp_meta import nsx
-from neutron.plugins.vmware.dhcp_meta import rpc
-from neutron.tests import base
-from neutron.tests.unit import testlib_api
-
-
-class DhcpMetadataBuilderTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(DhcpMetadataBuilderTestCase, self).setUp()
-        self.builder = mig_man.DhcpMetadataBuilder(mock.Mock(), mock.Mock())
-        self.network_id = 'foo_network_id'
-        self.subnet_id = 'foo_subnet_id'
-        self.router_id = 'foo_router_id'
-
-    def test_dhcp_agent_get_all(self):
-        expected = []
-        self.builder.plugin.list_dhcp_agents_hosting_network.return_value = (
-            {'agents': expected})
-        agents = self.builder.dhcp_agent_get_all(mock.ANY, self.network_id)
-        self.assertEqual(expected, agents)
-
-    def test_dhcp_port_get_all(self):
-        expected = []
-        self.builder.plugin.get_ports.return_value = expected
-        ports = self.builder.dhcp_port_get_all(mock.ANY, self.network_id)
-        self.assertEqual(expected, ports)
-
-    def test_router_id_get(self):
-        port = {
-            'device_id': self.router_id,
-            'network_id': self.network_id,
-            'fixed_ips': [{'subnet_id': self.subnet_id}]
-        }
-        subnet = {
-            'id': self.subnet_id,
-            'network_id': self.network_id
-        }
-        self.builder.plugin.get_ports.return_value = [port]
-        result = self.builder.router_id_get(context, subnet)
-        self.assertEqual(self.router_id, result)
-
-    def test_router_id_get_none_subnet(self):
-        self.assertIsNone(self.builder.router_id_get(mock.ANY, None))
-
-    def test_router_id_get_none_no_router(self):
-        self.builder.plugin.get_ports.return_value = []
-        subnet = {'network_id': self.network_id}
-        self.assertIsNone(self.builder.router_id_get(mock.ANY, subnet))
-
-    def test_metadata_deallocate(self):
-        self.builder.metadata_deallocate(
-            mock.ANY, self.router_id, self.subnet_id)
-        self.assertTrue(self.builder.plugin.remove_router_interface.call_count)
-
-    def test_metadata_allocate(self):
-        self.builder.metadata_allocate(
-            mock.ANY, self.router_id, self.subnet_id)
-        self.assertTrue(self.builder.plugin.add_router_interface.call_count)
-
-    def test_dhcp_deallocate(self):
-        agents = [{'id': 'foo_agent_id'}]
-        ports = [{'id': 'foo_port_id'}]
-        self.builder.dhcp_deallocate(mock.ANY, self.network_id, agents, ports)
-        self.assertTrue(
-            self.builder.plugin.remove_network_from_dhcp_agent.call_count)
-        self.assertTrue(self.builder.plugin.delete_port.call_count)
-
-    def _test_dhcp_allocate(self, subnet, expected_notify_count):
-        with mock.patch.object(mig_man.nsx, 'handle_network_dhcp_access') as f:
-            self.builder.dhcp_allocate(mock.ANY, self.network_id, subnet)
-            self.assertTrue(f.call_count)
-            self.assertEqual(expected_notify_count,
-                             self.builder.notifier.notify.call_count)
-
-    def test_dhcp_allocate(self):
-        subnet = {'network_id': self.network_id, 'id': self.subnet_id}
-        self._test_dhcp_allocate(subnet, 2)
-
-    def test_dhcp_allocate_none_subnet(self):
-        self._test_dhcp_allocate(None, 0)
-
-
-class MigrationManagerTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(MigrationManagerTestCase, self).setUp()
-        self.manager = mig_man.MigrationManager(mock.Mock(),
-                                                mock.Mock(),
-                                                mock.Mock())
-        self.network_id = 'foo_network_id'
-        self.router_id = 'foo_router_id'
-        self.subnet_id = 'foo_subnet_id'
-        self.mock_builder_p = mock.patch.object(self.manager, 'builder')
-        self.mock_builder = self.mock_builder_p.start()
-
-    def _test_validate(self, lsn_exists=False, ext_net=False, subnets=None):
-        network = {'router:external': ext_net}
-        self.manager.manager.lsn_exists.return_value = lsn_exists
-        self.manager.plugin.get_network.return_value = network
-        self.manager.plugin.get_subnets.return_value = subnets
-        result = self.manager.validate(mock.ANY, self.network_id)
-        if len(subnets):
-            self.assertEqual(subnets[0], result)
-        else:
-            self.assertIsNone(result)
-
-    def test_validate_no_subnets(self):
-        self._test_validate(subnets=[])
-
-    def test_validate_with_one_subnet(self):
-        self._test_validate(subnets=[{'cidr': '0.0.0.0/0'}])
-
-    def test_validate_raise_conflict_many_subnets(self):
-        self.assertRaises(p_exc.LsnMigrationConflict,
-                          self._test_validate,
-                          subnets=[{'id': 'sub1'}, {'id': 'sub2'}])
-
-    def test_validate_raise_conflict_lsn_exists(self):
-        self.assertRaises(p_exc.LsnMigrationConflict,
-                          self._test_validate,
-                          lsn_exists=True)
-
-    def test_validate_raise_badrequest_external_net(self):
-        self.assertRaises(n_exc.BadRequest,
-                          self._test_validate,
-                          ext_net=True)
-
-    def test_validate_raise_badrequest_metadata_net(self):
-        self.assertRaises(n_exc.BadRequest,
-                          self._test_validate,
-                          ext_net=False,
-                          subnets=[{'cidr': rpc.METADATA_SUBNET_CIDR}])
-
-    def _test_migrate(self, router, subnet, expected_calls):
-        self.mock_builder.router_id_get.return_value = router
-        self.manager.migrate(mock.ANY, self.network_id, subnet)
-        # testing the exact the order of calls is important
-        self.assertEqual(expected_calls, self.mock_builder.mock_calls)
-
-    def test_migrate(self):
-        subnet = {
-            'id': self.subnet_id,
-            'network_id': self.network_id
-        }
-        call_sequence = [
-            mock.call.router_id_get(mock.ANY, subnet),
-            mock.call.metadata_deallocate(
-                mock.ANY, self.router_id, self.subnet_id),
-            mock.call.dhcp_agent_get_all(mock.ANY, self.network_id),
-            mock.call.dhcp_port_get_all(mock.ANY, self.network_id),
-            mock.call.dhcp_deallocate(
-                mock.ANY, self.network_id, mock.ANY, mock.ANY),
-            mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet),
-            mock.call.metadata_allocate(
-                mock.ANY, self.router_id, self.subnet_id)
-        ]
-        self._test_migrate(self.router_id, subnet, call_sequence)
-
-    def test_migrate_no_router_uplink(self):
-        subnet = {
-            'id': self.subnet_id,
-            'network_id': self.network_id
-        }
-        call_sequence = [
-            mock.call.router_id_get(mock.ANY, subnet),
-            mock.call.dhcp_agent_get_all(mock.ANY, self.network_id),
-            mock.call.dhcp_port_get_all(mock.ANY, self.network_id),
-            mock.call.dhcp_deallocate(
-                mock.ANY, self.network_id, mock.ANY, mock.ANY),
-            mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet),
-        ]
-        self._test_migrate(None, subnet, call_sequence)
-
-    def test_migrate_no_subnet(self):
-        call_sequence = [
-            mock.call.router_id_get(mock.ANY, None),
-            mock.call.dhcp_allocate(mock.ANY, self.network_id, None),
-        ]
-        self._test_migrate(None, None, call_sequence)
-
-    def _test_report(self, lsn_attrs, expected):
-        self.manager.manager.lsn_port_get.return_value = lsn_attrs
-        report = self.manager.report(mock.ANY, self.network_id, self.subnet_id)
-        self.assertEqual(expected, report)
-
-    def test_report_for_lsn(self):
-        self._test_report(('foo_lsn_id', 'foo_lsn_port_id'),
-                          {'ports': ['foo_lsn_port_id'],
-                           'services': ['foo_lsn_id'], 'type': 'lsn'})
-
-    def test_report_for_lsn_without_lsn_port(self):
-        self._test_report(('foo_lsn_id', None),
-                          {'ports': [],
-                           'services': ['foo_lsn_id'], 'type': 'lsn'})
-
-    def _test_report_for_lsn_without_subnet(self, validated_subnet):
-        with mock.patch.object(self.manager.plugin, 'get_subnets',
-                               return_value=validated_subnet):
-            self.manager.manager.lsn_port_get.return_value = (
-                ('foo_lsn_id', 'foo_lsn_port_id'))
-            report = self.manager.report(context, self.network_id)
-            expected = {
-                'ports': ['foo_lsn_port_id'] if validated_subnet else [],
-                'services': ['foo_lsn_id'], 'type': 'lsn'
-            }
-            self.assertEqual(expected, report)
-
-    def test_report_for_lsn_without_subnet_subnet_found(self):
-        self._test_report_for_lsn_without_subnet([{'id': self.subnet_id}])
-
-    def test_report_for_lsn_without_subnet_subnet_not_found(self):
-        self.manager.manager.lsn_get.return_value = 'foo_lsn_id'
-        self._test_report_for_lsn_without_subnet(None)
-
-    def test_report_for_dhcp_agent(self):
-        self.manager.manager.lsn_port_get.return_value = (None, None)
-        self.mock_builder.dhcp_agent_get_all.return_value = (
-            [{'id': 'foo_agent_id'}])
-        self.mock_builder.dhcp_port_get_all.return_value = (
-            [{'id': 'foo_dhcp_port_id'}])
-        result = self.manager.report(mock.ANY, self.network_id, self.subnet_id)
-        expected = {
-            'ports': ['foo_dhcp_port_id'],
-            'services': ['foo_agent_id'],
-            'type': 'agent'
-        }
-        self.assertEqual(expected, result)
-
-
-class LsnManagerTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(LsnManagerTestCase, self).setUp()
-        self.net_id = 'foo_network_id'
-        self.sub_id = 'foo_subnet_id'
-        self.port_id = 'foo_port_id'
-        self.lsn_id = 'foo_lsn_id'
-        self.mac = 'aa:bb:cc:dd:ee:ff'
-        self.switch_id = 'foo_switch_id'
-        self.lsn_port_id = 'foo_lsn_port_id'
-        self.tenant_id = 'foo_tenant_id'
-        self.manager = lsn_man.LsnManager(mock.Mock())
-        self.context = context.get_admin_context()
-        self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api')
-        self.mock_lsn_api = self.mock_lsn_api_p.start()
-        self.mock_nsx_utils_p = mock.patch.object(lsn_man, 'nsx_utils')
-        self.mock_nsx_utils = self.mock_nsx_utils_p.start()
-        nsx.register_dhcp_opts(cfg)
-        nsx.register_metadata_opts(cfg)
-
-    def test_lsn_get(self):
-        self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id
-        expected = self.manager.lsn_get(mock.ANY, self.net_id)
-        self.mock_lsn_api.lsn_for_network_get.assert_called_once_with(
-            mock.ANY, self.net_id)
-        self.assertEqual(expected, self.lsn_id)
-
-    def _test_lsn_get_raise_not_found_with_exc(self, exc):
-        self.mock_lsn_api.lsn_for_network_get.side_effect = exc
-        self.assertRaises(p_exc.LsnNotFound,
-                          self.manager.lsn_get,
-                          mock.ANY, self.net_id)
-        self.mock_lsn_api.lsn_for_network_get.assert_called_once_with(
-            mock.ANY, self.net_id)
-
-    def test_lsn_get_raise_not_found_with_not_found(self):
-        self._test_lsn_get_raise_not_found_with_exc(n_exc.NotFound)
-
-    def test_lsn_get_raise_not_found_with_api_error(self):
-        self._test_lsn_get_raise_not_found_with_exc(exception.NsxApiException)
-
-    def _test_lsn_get_silent_raise_with_exc(self, exc):
-        self.mock_lsn_api.lsn_for_network_get.side_effect = exc
-        expected = self.manager.lsn_get(
-            mock.ANY, self.net_id, raise_on_err=False)
-        self.mock_lsn_api.lsn_for_network_get.assert_called_once_with(
-            mock.ANY, self.net_id)
-        self.assertIsNone(expected)
-
-    def test_lsn_get_silent_raise_with_not_found(self):
-        self._test_lsn_get_silent_raise_with_exc(n_exc.NotFound)
-
-    def test_lsn_get_silent_raise_with_api_error(self):
-        self._test_lsn_get_silent_raise_with_exc(exception.NsxApiException)
-
-    def test_lsn_create(self):
-        self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id
-        self.manager.lsn_create(mock.ANY, self.net_id)
-        self.mock_lsn_api.lsn_for_network_create.assert_called_once_with(
-            mock.ANY, self.net_id)
-
-    def test_lsn_create_raise_api_error(self):
-        self.mock_lsn_api.lsn_for_network_create.side_effect = (
-            exception.NsxApiException)
-        self.assertRaises(p_exc.NsxPluginException,
-                          self.manager.lsn_create,
-                          mock.ANY, self.net_id)
-        self.mock_lsn_api.lsn_for_network_create.assert_called_once_with(
-            mock.ANY, self.net_id)
-
-    def test_lsn_delete(self):
-        self.manager.lsn_delete(mock.ANY, self.lsn_id)
-        self.mock_lsn_api.lsn_delete.assert_called_once_with(
-            mock.ANY, self.lsn_id)
-
-    def _test_lsn_delete_with_exc(self, exc):
-        self.mock_lsn_api.lsn_delete.side_effect = exc
-        self.manager.lsn_delete(mock.ANY, self.lsn_id)
-        self.mock_lsn_api.lsn_delete.assert_called_once_with(
-            mock.ANY, self.lsn_id)
-
-    def test_lsn_delete_with_not_found(self):
-        self._test_lsn_delete_with_exc(n_exc.NotFound)
-
-    def test_lsn_delete_api_exception(self):
-        self._test_lsn_delete_with_exc(exception.NsxApiException)
-
-    def test_lsn_delete_by_network(self):
-        self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id
-        with mock.patch.object(self.manager, 'lsn_delete') as f:
-            self.manager.lsn_delete_by_network(mock.ANY, self.net_id)
-            self.mock_lsn_api.lsn_for_network_get.assert_called_once_with(
-                mock.ANY, self.net_id)
-            f.assert_called_once_with(mock.ANY, self.lsn_id)
-
-    def _test_lsn_delete_by_network_with_exc(self, exc):
-        self.mock_lsn_api.lsn_for_network_get.side_effect = exc
-        with mock.patch.object(lsn_man.LOG, 'warn') as l:
-            self.manager.lsn_delete_by_network(mock.ANY, self.net_id)
-            self.assertEqual(1, l.call_count)
-
-    def test_lsn_delete_by_network_with_not_found(self):
-        self._test_lsn_delete_by_network_with_exc(n_exc.NotFound)
-
-    def test_lsn_delete_by_network_with_not_api_error(self):
-        self._test_lsn_delete_by_network_with_exc(exception.NsxApiException)
-
-    def test_lsn_port_get(self):
-        self.mock_lsn_api.lsn_port_by_subnet_get.return_value = (
-            self.lsn_port_id)
-        with mock.patch.object(
-            self.manager, 'lsn_get', return_value=self.lsn_id):
-            expected = self.manager.lsn_port_get(
-                mock.ANY, self.net_id, self.sub_id)
-            self.assertEqual(expected, (self.lsn_id, self.lsn_port_id))
-
-    def test_lsn_port_get_lsn_not_found_on_raise(self):
-        with mock.patch.object(
-            self.manager, 'lsn_get',
-            side_effect=p_exc.LsnNotFound(entity='network',
-                                          entity_id=self.net_id)):
-            self.assertRaises(p_exc.LsnNotFound,
-                              self.manager.lsn_port_get,
-                              mock.ANY, self.net_id, self.sub_id)
-
-    def test_lsn_port_get_lsn_not_found_silent_raise(self):
-        with mock.patch.object(self.manager, 'lsn_get', return_value=None):
-            expected = self.manager.lsn_port_get(
-                mock.ANY, self.net_id, self.sub_id, raise_on_err=False)
-            self.assertEqual(expected, (None, None))
-
-    def test_lsn_port_get_port_not_found_on_raise(self):
-        self.mock_lsn_api.lsn_port_by_subnet_get.side_effect = n_exc.NotFound
-        with mock.patch.object(
-            self.manager, 'lsn_get', return_value=self.lsn_id):
-            self.assertRaises(p_exc.LsnPortNotFound,
-                              self.manager.lsn_port_get,
-                              mock.ANY, self.net_id, self.sub_id)
-
-    def test_lsn_port_get_port_not_found_silent_raise(self):
-        self.mock_lsn_api.lsn_port_by_subnet_get.side_effect = n_exc.NotFound
-        with mock.patch.object(
-            self.manager, 'lsn_get', return_value=self.lsn_id):
-            expected = self.manager.lsn_port_get(
-                mock.ANY, self.net_id, self.sub_id, raise_on_err=False)
-            self.assertEqual(expected, (self.lsn_id, None))
-
-    def test_lsn_port_create(self):
-        self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id
-        expected = self.manager.lsn_port_create(mock.ANY, mock.ANY, mock.ANY)
-        self.assertEqual(expected, self.lsn_port_id)
-
-    def _test_lsn_port_create_with_exc(self, exc, expected):
-        self.mock_lsn_api.lsn_port_create.side_effect = exc
-        self.assertRaises(expected,
-                          self.manager.lsn_port_create,
-                          mock.ANY, mock.ANY, mock.ANY)
-
-    def test_lsn_port_create_with_not_found(self):
-        self._test_lsn_port_create_with_exc(n_exc.NotFound, p_exc.LsnNotFound)
-
-    def test_lsn_port_create_api_exception(self):
-        self._test_lsn_port_create_with_exc(exception.NsxApiException,
-                                            p_exc.NsxPluginException)
-
-    def test_lsn_port_delete(self):
-        self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY)
-        self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count)
-
-    def _test_lsn_port_delete_with_exc(self, exc):
-        self.mock_lsn_api.lsn_port_delete.side_effect = exc
-        with mock.patch.object(lsn_man.LOG, 'warn') as l:
-            self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY)
-            self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count)
-            self.assertEqual(1, l.call_count)
-
-    def test_lsn_port_delete_with_not_found(self):
-        self._test_lsn_port_delete_with_exc(n_exc.NotFound)
-
-    def test_lsn_port_delete_api_exception(self):
-        self._test_lsn_port_delete_with_exc(exception.NsxApiException)
-
-    def _test_lsn_port_dhcp_setup(self, ret_val, sub):
-        self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id]
-        self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id
-        with mock.patch.object(
-            self.manager, 'lsn_get', return_value=self.lsn_id):
-            with mock.patch.object(lsn_man.switch_api,
-                                   'get_port_by_neutron_tag'):
-                expected = self.manager.lsn_port_dhcp_setup(
-                    mock.Mock(), mock.ANY, mock.ANY,
-                    mock.ANY, subnet_config=sub)
-                self.assertEqual(
-                    1, self.mock_lsn_api.lsn_port_create.call_count)
-                self.assertEqual(
-                    1, self.mock_lsn_api.lsn_port_plug_network.call_count)
-                self.assertEqual(expected, ret_val)
-
-    def test_lsn_port_dhcp_setup(self):
-        self._test_lsn_port_dhcp_setup((self.lsn_id, self.lsn_port_id), None)
-
-    def test_lsn_port_dhcp_setup_with_config(self):
-        with mock.patch.object(self.manager, 'lsn_port_dhcp_configure') as f:
-            self._test_lsn_port_dhcp_setup(None, mock.ANY)
-            self.assertEqual(1, f.call_count)
-
-    def test_lsn_port_dhcp_setup_with_not_found(self):
-        self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id]
-        with mock.patch.object(lsn_man.switch_api,
-                               'get_port_by_neutron_tag') as f:
-            f.side_effect = n_exc.NotFound
-            self.assertRaises(p_exc.PortConfigurationError,
-                              self.manager.lsn_port_dhcp_setup,
-                              mock.Mock(), mock.ANY, mock.ANY, mock.ANY)
-
-    def test_lsn_port_dhcp_setup_with_conflict(self):
-        self.mock_lsn_api.lsn_port_plug_network.side_effect = (
-            p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id))
-        self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id]
-        with mock.patch.object(lsn_man.switch_api, 'get_port_by_neutron_tag'):
-            with mock.patch.object(self.manager, 'lsn_port_delete') as g:
-                self.assertRaises(p_exc.PortConfigurationError,
-                                  self.manager.lsn_port_dhcp_setup,
-                                  mock.Mock(), mock.ANY, mock.ANY, mock.ANY)
-                self.assertEqual(1, g.call_count)
-
-    def _test_lsn_port_dhcp_configure_with_subnet(
-        self, expected, dns=None, gw=None, routes=None):
-        subnet = {
-            'enable_dhcp': True,
-            'dns_nameservers': dns or [],
-            'gateway_ip': gw,
-            'host_routes': routes
-        }
-        self.manager.lsn_port_dhcp_configure(mock.ANY, self.lsn_id,
-                                             self.lsn_port_id, subnet)
-        self.mock_lsn_api.lsn_port_dhcp_configure.assert_called_once_with(
-            mock.ANY, self.lsn_id, self.lsn_port_id, subnet['enable_dhcp'],
-            expected)
-
-    def test_lsn_port_dhcp_configure(self):
-        expected = {
-            'routers': '127.0.0.1',
-            'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
-            'domain_name': cfg.CONF.NSX_DHCP.domain_name
-        }
-        self._test_lsn_port_dhcp_configure_with_subnet(
-            expected, dns=[], gw='127.0.0.1', routes=[])
-
-    def test_lsn_port_dhcp_configure_gatewayless(self):
-        expected = {
-            'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
-            'domain_name': cfg.CONF.NSX_DHCP.domain_name
-        }
-        self._test_lsn_port_dhcp_configure_with_subnet(expected, gw=None)
-
-    def test_lsn_port_dhcp_configure_with_extra_dns_servers(self):
-        expected = {
-            'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
-            'domain_name_servers': '8.8.8.8,9.9.9.9',
-            'domain_name': cfg.CONF.NSX_DHCP.domain_name
-        }
-        self._test_lsn_port_dhcp_configure_with_subnet(
-            expected, dns=['8.8.8.8', '9.9.9.9'])
-
-    def test_lsn_port_dhcp_configure_with_host_routes(self):
-        expected = {
-            'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
-            'domain_name': cfg.CONF.NSX_DHCP.domain_name,
-            'classless_static_routes': '8.8.8.8,9.9.9.9'
-        }
-        self._test_lsn_port_dhcp_configure_with_subnet(
-            expected, routes=['8.8.8.8', '9.9.9.9'])
-
-    def _test_lsn_metadata_configure(self, is_enabled):
-        with mock.patch.object(self.manager, 'lsn_port_dispose') as f:
-            self.manager.plugin.get_subnet.return_value = (
-                {'network_id': self.net_id})
-            self.manager.lsn_metadata_configure(mock.ANY,
-                                                self.sub_id, is_enabled)
-            expected = {
-                'metadata_server_port': 8775,
-                'metadata_server_ip': '127.0.0.1',
-                'metadata_proxy_shared_secret': ''
-            }
-            self.mock_lsn_api.lsn_metadata_configure.assert_called_once_with(
-                mock.ANY, mock.ANY, is_enabled, expected)
-            if is_enabled:
-                self.assertEqual(
-                    1, self.mock_lsn_api.lsn_port_by_subnet_get.call_count)
-            else:
-                self.assertEqual(1, f.call_count)
-
-    def test_lsn_metadata_configure_enabled(self):
-        self._test_lsn_metadata_configure(True)
-
-    def test_lsn_metadata_configure_disabled(self):
-        self._test_lsn_metadata_configure(False)
-
-    def test_lsn_metadata_configure_not_found(self):
-        self.mock_lsn_api.lsn_metadata_configure.side_effect = (
-            p_exc.LsnNotFound(entity='lsn', entity_id=self.lsn_id))
-        self.manager.plugin.get_subnet.return_value = (
-            {'network_id': self.net_id})
-        self.assertRaises(p_exc.NsxPluginException,
-                          self.manager.lsn_metadata_configure,
-                          mock.ANY, self.sub_id, True)
-
-    def test_lsn_port_metadata_setup(self):
-        subnet = {
-            'cidr': '0.0.0.0/0',
-            'id': self.sub_id,
-            'network_id': self.net_id,
-            'tenant_id': self.tenant_id
-        }
-        expected_data = {
-            'subnet_id': subnet['id'],
-            'ip_address': subnet['cidr'],
-            'mac_address': constants.METADATA_MAC
-        }
-        self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id]
-        with mock.patch.object(lsn_man.switch_api, 'create_lport') as f:
-            with mock.patch.object(self.manager, 'lsn_port_create') as g:
-                f.return_value = {'uuid': self.port_id}
-                self.manager.lsn_port_metadata_setup(
-                    self.context, self.lsn_id, subnet)
-                (self.mock_lsn_api.lsn_port_plug_network.
-                 assert_called_once_with(mock.ANY, self.lsn_id,
-                                         mock.ANY, self.port_id))
-                g.assert_called_once_with(
-                    self.context, self.lsn_id, expected_data)
-
-    def test_lsn_port_metadata_setup_raise_not_found(self):
-        subnet = {
-            'cidr': '0.0.0.0/0',
-            'id': self.sub_id,
-            'network_id': self.net_id,
-            'tenant_id': self.tenant_id
-        }
-        self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id]
-        with mock.patch.object(lsn_man.switch_api, 'create_lport') as f:
-            f.side_effect = n_exc.NotFound
-            self.assertRaises(p_exc.PortConfigurationError,
-                              self.manager.lsn_port_metadata_setup,
-                              mock.Mock(), self.lsn_id, subnet)
-
-    def test_lsn_port_metadata_setup_raise_conflict(self):
-        subnet = {
-            'cidr': '0.0.0.0/0',
-            'id': self.sub_id,
-            'network_id': self.net_id,
-            'tenant_id': self.tenant_id
-        }
-        self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id]
-        with mock.patch.object(lsn_man.switch_api, 'create_lport') as f:
-            with mock.patch.object(lsn_man.switch_api, 'delete_port') as g:
-                f.return_value = {'uuid': self.port_id}
-                self.mock_lsn_api.lsn_port_plug_network.side_effect = (
-                    p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id))
-                self.assertRaises(p_exc.PortConfigurationError,
-                                  self.manager.lsn_port_metadata_setup,
-                                  mock.Mock(), self.lsn_id, subnet)
-                self.assertEqual(1,
-                                 self.mock_lsn_api.lsn_port_delete.call_count)
-                self.assertEqual(1, g.call_count)
-
-    def _test_lsn_port_dispose_with_values(self, lsn_id, lsn_port_id, count):
-        with mock.patch.object(self.manager,
-                               'lsn_port_get_by_mac',
-                               return_value=(lsn_id, lsn_port_id)):
-            self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac)
-            self.assertEqual(count,
-                             self.mock_lsn_api.lsn_port_delete.call_count)
-
-    def test_lsn_port_dispose(self):
-        self._test_lsn_port_dispose_with_values(
-            self.lsn_id, self.lsn_port_id, 1)
-
-    def test_lsn_port_dispose_meta_mac(self):
-        self.mac = constants.METADATA_MAC
-        with mock.patch.object(lsn_man.switch_api,
-                               'get_port_by_neutron_tag') as f:
-            with mock.patch.object(lsn_man.switch_api, 'delete_port') as g:
-                f.return_value = {'uuid': self.port_id}
-                self._test_lsn_port_dispose_with_values(
-                    self.lsn_id, self.lsn_port_id, 1)
-                f.assert_called_once_with(
-                    mock.ANY, self.net_id, constants.METADATA_PORT_ID)
-                g.assert_called_once_with(mock.ANY, self.net_id, self.port_id)
-
-    def test_lsn_port_dispose_lsn_not_found(self):
-        self._test_lsn_port_dispose_with_values(None, None, 0)
-
-    def test_lsn_port_dispose_lsn_port_not_found(self):
-        self._test_lsn_port_dispose_with_values(self.lsn_id, None, 0)
-
-    def test_lsn_port_dispose_api_error(self):
-        self.mock_lsn_api.lsn_port_delete.side_effect = (
-            exception.NsxApiException)
-        with mock.patch.object(lsn_man.LOG, 'warn') as l:
-            self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac)
-            self.assertEqual(1, l.call_count)
-
-    def test_lsn_port_host_conf(self):
-        with mock.patch.object(self.manager,
-                               'lsn_port_get',
-                               return_value=(self.lsn_id, self.lsn_port_id)):
-            f = mock.Mock()
-            self.manager._lsn_port_host_conf(mock.ANY, self.net_id,
-                                             self.sub_id, mock.ANY, f)
-            self.assertEqual(1, f.call_count)
-
-    def test_lsn_port_host_conf_lsn_port_not_found(self):
-        with mock.patch.object(
-            self.manager, 'lsn_port_get', return_value=(None, None)) as f:
-            self.manager._lsn_port_host_conf(
-                mock.ANY, self.net_id, self.sub_id, mock.ANY, mock.Mock())
-            self.assertEqual(1, f.call_count)
-
-    def _test_lsn_port_update(self, dhcp=None, meta=None):
-        self.manager.lsn_port_update(
-            mock.ANY, self.net_id, self.sub_id, dhcp, meta)
-        count = 1 if dhcp else 0
-        count = count + 1 if meta else count
-        self.assertEqual(count, (self.mock_lsn_api.
-                                 lsn_port_host_entries_update.call_count))
-
-    def test_lsn_port_update(self):
-        self._test_lsn_port_update()
-
-    def test_lsn_port_update_dhcp_meta(self):
-        self._test_lsn_port_update(mock.ANY, mock.ANY)
-
-    def test_lsn_port_update_dhcp_and_nometa(self):
-        self._test_lsn_port_update(mock.ANY, None)
-
-    def test_lsn_port_update_nodhcp_and_nmeta(self):
-        self._test_lsn_port_update(None, mock.ANY)
-
-    def test_lsn_port_update_raise_error(self):
-        self.mock_lsn_api.lsn_port_host_entries_update.side_effect = (
-            exception.NsxApiException)
-        self.assertRaises(p_exc.PortConfigurationError,
-                          self.manager.lsn_port_update,
-                          mock.ANY, mock.ANY, mock.ANY, mock.ANY)
-
-
-class PersistentLsnManagerTestCase(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        super(PersistentLsnManagerTestCase, self).setUp()
-        self.net_id = 'foo_network_id'
-        self.sub_id = 'foo_subnet_id'
-        self.port_id = 'foo_port_id'
-        self.lsn_id = 'foo_lsn_id'
-        self.mac = 'aa:bb:cc:dd:ee:ff'
-        self.lsn_port_id = 'foo_lsn_port_id'
-        self.tenant_id = 'foo_tenant_id'
-        nsx.register_dhcp_opts(cfg)
-        nsx.register_metadata_opts(cfg)
-        lsn_man.register_lsn_opts(cfg)
-        self.manager = lsn_man.PersistentLsnManager(mock.Mock())
-        self.context = context.get_admin_context()
-        self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api')
-        self.mock_lsn_api = self.mock_lsn_api_p.start()
-
-    def test_lsn_get(self):
-        lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
-        result = self.manager.lsn_get(self.context, self.net_id)
-        self.assertEqual(self.lsn_id, result)
-
-    def test_lsn_get_raise_not_found(self):
-        self.assertRaises(p_exc.LsnNotFound,
-                          self.manager.lsn_get, self.context, self.net_id)
-
-    def test_lsn_get_silent_not_found(self):
-        result = self.manager.lsn_get(
-            self.context, self.net_id, raise_on_err=False)
-        self.assertIsNone(result)
-
-    def test_lsn_get_sync_on_missing(self):
-        cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN')
-        self.manager = lsn_man.PersistentLsnManager(mock.Mock())
-        with mock.patch.object(self.manager, 'lsn_save') as f:
-            self.manager.lsn_get(self.context, self.net_id, raise_on_err=True)
-            self.assertTrue(self.mock_lsn_api.lsn_for_network_get.call_count)
-            self.assertTrue(f.call_count)
-
-    def test_lsn_save(self):
-        self.manager.lsn_save(self.context, self.net_id, self.lsn_id)
-        result = self.manager.lsn_get(self.context, self.net_id)
-        self.assertEqual(self.lsn_id, result)
-
-    def test_lsn_create(self):
-        self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id
-        with mock.patch.object(self.manager, 'lsn_save') as f:
-            result = self.manager.lsn_create(self.context, self.net_id)
-            self.assertTrue(
-                self.mock_lsn_api.lsn_for_network_create.call_count)
-            self.assertTrue(f.call_count)
-            self.assertEqual(self.lsn_id, result)
-
-    def test_lsn_create_failure(self):
-        with mock.patch.object(
-            self.manager, 'lsn_save',
-            side_effect=p_exc.NsxPluginException(err_msg='')):
-            self.assertRaises(p_exc.NsxPluginException,
-                              self.manager.lsn_create,
-                              self.context, self.net_id)
-            self.assertTrue(self.mock_lsn_api.lsn_delete.call_count)
-
-    def test_lsn_delete(self):
-        self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id
-        self.manager.lsn_create(self.context, self.net_id)
-        self.manager.lsn_delete(self.context, self.lsn_id)
-        self.assertIsNone(self.manager.lsn_get(
-            self.context, self.net_id, raise_on_err=False))
-
-    def test_lsn_delete_not_existent(self):
-        self.manager.lsn_delete(self.context, self.lsn_id)
-        self.assertTrue(self.mock_lsn_api.lsn_delete.call_count)
-
-    def test_lsn_port_get(self):
-        lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
-        lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id,
-                                    self.sub_id, self.mac, self.lsn_id)
-        res = self.manager.lsn_port_get(self.context, self.net_id, self.sub_id)
-        self.assertEqual((self.lsn_id, self.lsn_port_id), res)
-
-    def test_lsn_port_get_raise_not_found(self):
-        self.assertRaises(p_exc.LsnPortNotFound,
-                          self.manager.lsn_port_get,
-                          self.context, self.net_id, self.sub_id)
-
-    def test_lsn_port_get_silent_not_found(self):
-        result = self.manager.lsn_port_get(
-            self.context, self.net_id, self.sub_id, raise_on_err=False)
-        self.assertEqual((None, None), result)
-
-    def test_lsn_port_get_sync_on_missing(self):
-        return
-        cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN')
-        self.manager = lsn_man.PersistentLsnManager(mock.Mock())
-        self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id
-        self.mock_lsn_api.lsn_port_by_subnet_get.return_value = (
-            self.lsn_id, self.lsn_port_id)
-        with mock.patch.object(self.manager, 'lsn_save') as f:
-            with mock.patch.object(self.manager, 'lsn_port_save') as g:
-                self.manager.lsn_port_get(
-                    self.context, self.net_id, self.sub_id)
-                self.assertTrue(
-                    self.mock_lsn_api.lsn_port_by_subnet_get.call_count)
-                self.assertTrue(
-                    self.mock_lsn_api.lsn_port_info_get.call_count)
-                self.assertTrue(f.call_count)
-                self.assertTrue(g.call_count)
-
-    def test_lsn_port_get_by_mac(self):
-        lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
-        lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id,
-                                    self.sub_id, self.mac, self.lsn_id)
-        res = self.manager.lsn_port_get_by_mac(
-            self.context, self.net_id, self.mac)
-        self.assertEqual((self.lsn_id, self.lsn_port_id), res)
-
-    def test_lsn_port_get_by_mac_raise_not_found(self):
-        self.assertRaises(p_exc.LsnPortNotFound,
-                          self.manager.lsn_port_get_by_mac,
-                          self.context, self.net_id, self.sub_id)
-
-    def test_lsn_port_get_by_mac_silent_not_found(self):
-        result = self.manager.lsn_port_get_by_mac(
-            self.context, self.net_id, self.sub_id, raise_on_err=False)
-        self.assertEqual((None, None), result)
-
-    def test_lsn_port_create(self):
-        lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
-        self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id
-        subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac}
-        with mock.patch.object(self.manager, 'lsn_port_save') as f:
-            result = self.manager.lsn_port_create(
-                self.context, self.net_id, subnet)
-            self.assertTrue(
-                self.mock_lsn_api.lsn_port_create.call_count)
-            self.assertTrue(f.call_count)
-            self.assertEqual(self.lsn_port_id, result)
-
-    def test_lsn_port_create_failure(self):
-        subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac}
-        with mock.patch.object(
-            self.manager, 'lsn_port_save',
-            side_effect=p_exc.NsxPluginException(err_msg='')):
-            self.assertRaises(p_exc.NsxPluginException,
-                              self.manager.lsn_port_create,
-                              self.context, self.net_id, subnet)
-            self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count)
-
-    def test_lsn_port_delete(self):
-        lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
-        lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id,
-                                    self.sub_id, self.mac, self.lsn_id)
-        self.manager.lsn_port_delete(
-            self.context, self.lsn_id, self.lsn_port_id)
-        self.assertEqual((None, None), self.manager.lsn_port_get(
-            self.context, self.lsn_id, self.sub_id, raise_on_err=False))
-
-    def test_lsn_port_delete_not_existent(self):
-        self.manager.lsn_port_delete(
-            self.context, self.lsn_id, self.lsn_port_id)
-        self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count)
-
-    def test_lsn_port_save(self):
-        self.manager.lsn_save(self.context, self.net_id, self.lsn_id)
-        self.manager.lsn_port_save(self.context, self.lsn_port_id,
-                                   self.sub_id, self.mac, self.lsn_id)
-        result = self.manager.lsn_port_get(
-            self.context, self.net_id, self.sub_id, raise_on_err=False)
-        self.assertEqual((self.lsn_id, self.lsn_port_id), result)
-
-
-class DhcpAgentNotifyAPITestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(DhcpAgentNotifyAPITestCase, self).setUp()
-        self.notifier = nsx.DhcpAgentNotifyAPI(mock.Mock(), mock.Mock())
-        self.plugin = self.notifier.plugin
-        self.lsn_manager = self.notifier.lsn_manager
-
-    def _test_notify_port_update(
-        self, ports, expected_count, expected_args=None):
-        port = {
-            'id': 'foo_port_id',
-            'network_id': 'foo_network_id',
-            'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
-        }
-        self.notifier.plugin.get_ports.return_value = ports
-        self.notifier.notify(mock.ANY, {'port': port}, 'port.update.end')
-        self.lsn_manager.lsn_port_update.assert_has_calls(expected_args)
-
-    def test_notify_ports_update_no_ports(self):
-        self._test_notify_port_update(None, 0, [])
-        self._test_notify_port_update([], 0, [])
-
-    def test_notify_ports_update_one_port(self):
-        ports = [{
-            'fixed_ips': [{'subnet_id': 'foo_subnet_id',
-                           'ip_address': '1.2.3.4'}],
-            'device_id': 'foo_device_id',
-            'device_owner': 'foo_device_owner',
-            'mac_address': 'fa:16:3e:da:1d:46'
-        }]
-        call_args = mock.call(
-            mock.ANY, 'foo_network_id', 'foo_subnet_id',
-            dhcp=[{'ip_address': '1.2.3.4',
-                   'mac_address': 'fa:16:3e:da:1d:46'}],
-            meta=[{'instance_id': 'foo_device_id',
-                   'ip_address': '1.2.3.4'}])
-        self._test_notify_port_update(ports, 1, call_args)
-
-    def test_notify_ports_update_ports_with_empty_device_id(self):
-        ports = [{
-            'fixed_ips': [{'subnet_id': 'foo_subnet_id',
-                           'ip_address': '1.2.3.4'}],
-            'device_id': '',
-            'device_owner': 'foo_device_owner',
-            'mac_address': 'fa:16:3e:da:1d:46'
-        }]
-        call_args = mock.call(
-            mock.ANY, 'foo_network_id', 'foo_subnet_id',
-            dhcp=[{'ip_address': '1.2.3.4',
-                   'mac_address': 'fa:16:3e:da:1d:46'}],
-            meta=[]
-        )
-        self._test_notify_port_update(ports, 1, call_args)
-
-    def test_notify_ports_update_ports_with_no_fixed_ips(self):
-        ports = [{
-            'fixed_ips': [],
-            'device_id': 'foo_device_id',
-            'device_owner': 'foo_device_owner',
-            'mac_address': 'fa:16:3e:da:1d:46'
-        }]
-        call_args = mock.call(
-            mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[])
-        self._test_notify_port_update(ports, 1, call_args)
-
-    def test_notify_ports_update_ports_with_no_fixed_ips_and_no_device(self):
-        ports = [{
-            'fixed_ips': [],
-            'device_id': '',
-            'device_owner': 'foo_device_owner',
-            'mac_address': 'fa:16:3e:da:1d:46'
-        }]
-        call_args = mock.call(
-            mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[])
-        self._test_notify_port_update(ports, 0, call_args)
-
-    def test_notify_ports_update_with_special_ports(self):
-        ports = [{'fixed_ips': [],
-                  'device_id': '',
-                  'device_owner': n_consts.DEVICE_OWNER_DHCP,
-                  'mac_address': 'fa:16:3e:da:1d:46'},
-                 {'fixed_ips': [{'subnet_id': 'foo_subnet_id',
-                                 'ip_address': '1.2.3.4'}],
-                  'device_id': 'foo_device_id',
-                  'device_owner': n_consts.DEVICE_OWNER_ROUTER_GW,
-                  'mac_address': 'fa:16:3e:da:1d:46'}]
-        call_args = mock.call(
-            mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[])
-        self._test_notify_port_update(ports, 0, call_args)
-
-    def test_notify_ports_update_many_ports(self):
-        ports = [{'fixed_ips': [],
-                  'device_id': '',
-                  'device_owner': 'foo_device_owner',
-                  'mac_address': 'fa:16:3e:da:1d:46'},
-                 {'fixed_ips': [{'subnet_id': 'foo_subnet_id',
-                                 'ip_address': '1.2.3.4'}],
-                  'device_id': 'foo_device_id',
-                  'device_owner': 'foo_device_owner',
-                  'mac_address': 'fa:16:3e:da:1d:46'}]
-        call_args = mock.call(
-            mock.ANY, 'foo_network_id', 'foo_subnet_id',
-            dhcp=[{'ip_address': '1.2.3.4',
-                   'mac_address': 'fa:16:3e:da:1d:46'}],
-            meta=[{'instance_id': 'foo_device_id',
-                   'ip_address': '1.2.3.4'}])
-        self._test_notify_port_update(ports, 1, call_args)
-
-    def _test_notify_subnet_action(self, action):
-        with mock.patch.object(self.notifier, '_subnet_%s' % action) as f:
-            self.notifier._handle_subnet_dhcp_access[action] = f
-            subnet = {'subnet': mock.ANY}
-            self.notifier.notify(
-                mock.ANY, subnet, 'subnet.%s.end' % action)
-            f.assert_called_once_with(mock.ANY, subnet)
-
-    def test_notify_subnet_create(self):
-        self._test_notify_subnet_action('create')
-
-    def test_notify_subnet_update(self):
-        self._test_notify_subnet_action('update')
-
-    def test_notify_subnet_delete(self):
-        self._test_notify_subnet_action('delete')
-
-    def _test_subnet_create(self, enable_dhcp, exc=None,
-                            exc_obj=None, call_notify=True):
-        subnet = {
-            'id': 'foo_subnet_id',
-            'enable_dhcp': enable_dhcp,
-            'network_id': 'foo_network_id',
-            'tenant_id': 'foo_tenant_id',
-            'cidr': '0.0.0.0/0'
-        }
-        if exc:
-            self.plugin.create_port.side_effect = exc_obj or exc
-            self.assertRaises(exc,
-                              self.notifier.notify,
-                              mock.ANY,
-                              {'subnet': subnet},
-                              'subnet.create.end')
-            self.plugin.delete_subnet.assert_called_with(
-                mock.ANY, subnet['id'])
-        else:
-            if call_notify:
-                self.notifier.notify(
-                    mock.ANY, {'subnet': subnet}, 'subnet.create.end')
-            if enable_dhcp:
-                dhcp_port = {
-                    'name': '',
-                    'admin_state_up': True,
-                    'network_id': 'foo_network_id',
-                    'tenant_id': 'foo_tenant_id',
-                    'device_owner': n_consts.DEVICE_OWNER_DHCP,
-                    'mac_address': mock.ANY,
-                    'fixed_ips': [{'subnet_id': 'foo_subnet_id'}],
-                    'device_id': ''
-                }
-                self.plugin.create_port.assert_called_once_with(
-                    mock.ANY, {'port': dhcp_port})
-            else:
-                self.assertEqual(0, self.plugin.create_port.call_count)
-
-    def test_subnet_create_enabled_dhcp(self):
-        self._test_subnet_create(True)
-
-    def test_subnet_create_disabled_dhcp(self):
-        self._test_subnet_create(False)
-
-    def test_subnet_create_raise_port_config_error(self):
-        with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2,
-                               'delete_port') as d:
-            self._test_subnet_create(
-                True,
-                exc=n_exc.Conflict,
-                exc_obj=p_exc.PortConfigurationError(lsn_id='foo_lsn_id',
-                                                     net_id='foo_net_id',
-                                                     port_id='foo_port_id'))
-            d.assert_called_once_with(self.plugin, mock.ANY, 'foo_port_id')
-
-    def test_subnet_update(self):
-        subnet = {
-            'id': 'foo_subnet_id',
-            'network_id': 'foo_network_id',
-        }
-        self.lsn_manager.lsn_port_get.return_value = ('foo_lsn_id',
-                                                      'foo_lsn_port_id')
-        self.notifier.notify(
-            mock.ANY, {'subnet': subnet}, 'subnet.update.end')
-        self.lsn_manager.lsn_port_dhcp_configure.assert_called_once_with(
-            mock.ANY, 'foo_lsn_id', 'foo_lsn_port_id', subnet)
-
-    def test_subnet_update_raise_lsn_not_found(self):
-        subnet = {
-            'id': 'foo_subnet_id',
-            'network_id': 'foo_network_id',
-        }
-        self.lsn_manager.lsn_port_get.side_effect = (
-            p_exc.LsnNotFound(entity='network',
-                              entity_id=subnet['network_id']))
-        self.assertRaises(p_exc.LsnNotFound,
-                          self.notifier.notify,
-                          mock.ANY, {'subnet': subnet}, 'subnet.update.end')
-
-    def _test_subnet_update_lsn_port_not_found(self, dhcp_port):
-        subnet = {
-            'id': 'foo_subnet_id',
-            'enable_dhcp': True,
-            'network_id': 'foo_network_id',
-            'tenant_id': 'foo_tenant_id'
-        }
-        self.lsn_manager.lsn_port_get.side_effect = (
-            p_exc.LsnPortNotFound(lsn_id='foo_lsn_id',
-                                  entity='subnet',
-                                  entity_id=subnet['id']))
-        self.notifier.plugin.get_ports.return_value = dhcp_port
-        count = 0 if dhcp_port is None else 1
-        with mock.patch.object(nsx, 'handle_port_dhcp_access') as h:
-            self.notifier.notify(
-                mock.ANY, {'subnet': subnet}, 'subnet.update.end')
-            self.assertEqual(count, h.call_count)
-            if not dhcp_port:
-                self._test_subnet_create(enable_dhcp=True,
-                                         exc=None, call_notify=False)
-
-    def test_subnet_update_lsn_port_not_found_without_dhcp_port(self):
-        self._test_subnet_update_lsn_port_not_found(None)
-
-    def test_subnet_update_lsn_port_not_found_with_dhcp_port(self):
-        self._test_subnet_update_lsn_port_not_found([mock.ANY])
-
-    def _test_subnet_delete(self, ports=None):
-        subnet = {
-            'id': 'foo_subnet_id',
-            'network_id': 'foo_network_id',
-            'cidr': '0.0.0.0/0'
-        }
-        self.plugin.get_ports.return_value = ports
-        self.notifier.notify(mock.ANY, {'subnet': subnet}, 'subnet.delete.end')
-        filters = {
-            'network_id': [subnet['network_id']],
-            'device_owner': [n_consts.DEVICE_OWNER_DHCP]
-        }
-        self.plugin.get_ports.assert_called_once_with(
-            mock.ANY, filters=filters)
-        if ports:
-            self.plugin.delete_port.assert_called_once_with(
-                mock.ANY, ports[0]['id'])
-        else:
-            self.assertEqual(0, self.plugin.delete_port.call_count)
-
-    def test_subnet_delete_enabled_dhcp_no_ports(self):
-        self._test_subnet_delete()
-
-    def test_subnet_delete_enabled_dhcp_with_dhcp_port(self):
-        self._test_subnet_delete([{'id': 'foo_port_id'}])
-
-
-class DhcpTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(DhcpTestCase, self).setUp()
-        self.plugin = mock.Mock()
-        self.plugin.lsn_manager = mock.Mock()
-
-    def test_handle_create_network(self):
-        network = {'id': 'foo_network_id'}
-        nsx.handle_network_dhcp_access(
-            self.plugin, mock.ANY, network, 'create_network')
-        self.plugin.lsn_manager.lsn_create.assert_called_once_with(
-            mock.ANY, network['id'])
-
-    def test_handle_create_network_router_external(self):
-        network = {'id': 'foo_network_id', 'router:external': True}
-        nsx.handle_network_dhcp_access(
-            self.plugin, mock.ANY, network, 'create_network')
-        self.assertFalse(self.plugin.lsn_manager.lsn_create.call_count)
-
-    def test_handle_delete_network(self):
-        network_id = 'foo_network_id'
-        self.plugin.lsn_manager.lsn_delete_by_network.return_value = (
-            'foo_lsn_id')
-        nsx.handle_network_dhcp_access(
-            self.plugin, mock.ANY, network_id, 'delete_network')
-        self.plugin.lsn_manager.lsn_delete_by_network.assert_called_once_with(
-            mock.ANY, 'foo_network_id')
-
-    def _test_handle_create_dhcp_owner_port(self, exc=None):
-        subnet = {
-            'cidr': '0.0.0.0/0',
-            'id': 'foo_subnet_id'
-        }
-        port = {
-            'id': 'foo_port_id',
-            'device_owner': n_consts.DEVICE_OWNER_DHCP,
-            'mac_address': 'aa:bb:cc:dd:ee:ff',
-            'network_id': 'foo_network_id',
-            'fixed_ips': [{'subnet_id': subnet['id']}]
-        }
-        expected_data = {
-            'subnet_id': subnet['id'],
-            'ip_address': subnet['cidr'],
-            'mac_address': port['mac_address']
-        }
-        self.plugin.get_subnet.return_value = subnet
-        if exc is None:
-            nsx.handle_port_dhcp_access(
-                self.plugin, mock.ANY, port, 'create_port')
-            (self.plugin.lsn_manager.lsn_port_dhcp_setup.
-             assert_called_once_with(mock.ANY, port['network_id'],
-                                     port['id'], expected_data, subnet))
-        else:
-            self.plugin.lsn_manager.lsn_port_dhcp_setup.side_effect = exc
-            self.assertRaises(n_exc.NeutronException,
-                              nsx.handle_port_dhcp_access,
-                              self.plugin, mock.ANY, port, 'create_port')
-
-    def test_handle_create_dhcp_owner_port(self):
-        self._test_handle_create_dhcp_owner_port()
-
-    def test_handle_create_dhcp_owner_port_raise_port_config_error(self):
-        config_error = p_exc.PortConfigurationError(lsn_id='foo_lsn_id',
-                                                    net_id='foo_net_id',
-                                                    port_id='foo_port_id')
-        self._test_handle_create_dhcp_owner_port(exc=config_error)
-
-    def test_handle_delete_dhcp_owner_port(self):
-        port = {
-            'id': 'foo_port_id',
-            'device_owner': n_consts.DEVICE_OWNER_DHCP,
-            'network_id': 'foo_network_id',
-            'fixed_ips': [],
-            'mac_address': 'aa:bb:cc:dd:ee:ff'
-        }
-        nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, 'delete_port')
-        self.plugin.lsn_manager.lsn_port_dispose.assert_called_once_with(
-            mock.ANY, port['network_id'], port['mac_address'])
-
-    def _test_handle_user_port(self, action, handler):
-        port = {
-            'id': 'foo_port_id',
-            'device_owner': 'foo_device_owner',
-            'network_id': 'foo_network_id',
-            'mac_address': 'aa:bb:cc:dd:ee:ff',
-            'fixed_ips': [{'subnet_id': 'foo_subnet_id',
-                           'ip_address': '1.2.3.4'}]
-        }
-        expected_data = {
-            'ip_address': '1.2.3.4',
-            'mac_address': 'aa:bb:cc:dd:ee:ff'
-        }
-        self.plugin.get_subnet.return_value = {'enable_dhcp': True}
-        nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action)
-        handler.assert_called_once_with(
-            mock.ANY, port['network_id'], 'foo_subnet_id', expected_data)
-
-    def test_handle_create_user_port(self):
-        self._test_handle_user_port(
-            'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add)
-
-    def test_handle_delete_user_port(self):
-        self._test_handle_user_port(
-            'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove)
-
-    def _test_handle_user_port_disabled_dhcp(self, action, handler):
-        port = {
-            'id': 'foo_port_id',
-            'device_owner': 'foo_device_owner',
-            'network_id': 'foo_network_id',
-            'mac_address': 'aa:bb:cc:dd:ee:ff',
-            'fixed_ips': [{'subnet_id': 'foo_subnet_id',
-                           'ip_address': '1.2.3.4'}]
-        }
-        self.plugin.get_subnet.return_value = {'enable_dhcp': False}
-        nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action)
-        self.assertEqual(0, handler.call_count)
-
-    def test_handle_create_user_port_disabled_dhcp(self):
-        self._test_handle_user_port_disabled_dhcp(
-            'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add)
-
-    def test_handle_delete_user_port_disabled_dhcp(self):
-        self._test_handle_user_port_disabled_dhcp(
-            'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove)
-
-    def _test_handle_user_port_no_fixed_ips(self, action, handler):
-        port = {
-            'id': 'foo_port_id',
-            'device_owner': 'foo_device_owner',
-            'network_id': 'foo_network_id',
-            'fixed_ips': []
-        }
-        nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action)
-        self.assertEqual(0, handler.call_count)
-
-    def test_handle_create_user_port_no_fixed_ips(self):
-        self._test_handle_user_port_no_fixed_ips(
-            'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add)
-
-    def test_handle_delete_user_port_no_fixed_ips(self):
-        self._test_handle_user_port_no_fixed_ips(
-            'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove)
-
-
-class MetadataTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(MetadataTestCase, self).setUp()
-        self.plugin = mock.Mock()
-        self.plugin.lsn_manager = mock.Mock()
-
-    def _test_handle_port_metadata_access_special_owners(
-        self, owner, dev_id='foo_device_id', ips=None):
-        port = {
-            'id': 'foo_port_id',
-            'device_owner': owner,
-            'device_id': dev_id,
-            'fixed_ips': ips or []
-        }
-        nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY)
-        self.assertFalse(
-            self.plugin.lsn_manager.lsn_port_meta_host_add.call_count)
-        self.assertFalse(
-            self.plugin.lsn_manager.lsn_port_meta_host_remove.call_count)
-
-    def test_handle_port_metadata_access_external_network(self):
-        port = {
-            'id': 'foo_port_id',
-            'device_owner': 'foo_device_owner',
-            'device_id': 'foo_device_id',
-            'network_id': 'foo_network_id',
-            'fixed_ips': [{'subnet_id': 'foo_subnet'}]
-        }
-        self.plugin.get_network.return_value = {'router:external': True}
-        nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY)
-        self.assertFalse(
-            self.plugin.lsn_manager.lsn_port_meta_host_add.call_count)
-        self.assertFalse(
-            self.plugin.lsn_manager.lsn_port_meta_host_remove.call_count)
-
-    def test_handle_port_metadata_access_dhcp_port(self):
-        self._test_handle_port_metadata_access_special_owners(
-            n_consts.DEVICE_OWNER_DHCP, [{'subnet_id': 'foo_subnet'}])
-
-    def test_handle_port_metadata_access_router_port(self):
-        self._test_handle_port_metadata_access_special_owners(
-            n_consts.DEVICE_OWNER_ROUTER_INTF, [{'subnet_id': 'foo_subnet'}])
-
-    def test_handle_port_metadata_access_no_device_id(self):
-        self._test_handle_port_metadata_access_special_owners(
-            n_consts.DEVICE_OWNER_DHCP, '')
-
-    def test_handle_port_metadata_access_no_fixed_ips(self):
-        self._test_handle_port_metadata_access_special_owners(
-            'foo', 'foo', None)
-
-    def _test_handle_port_metadata_access(self, is_delete, raise_exc=False):
-        port = {
-            'id': 'foo_port_id',
-            'device_owner': 'foo_device_id',
-            'network_id': 'foo_network_id',
-            'device_id': 'foo_device_id',
-            'tenant_id': 'foo_tenant_id',
-            'fixed_ips': [
-                {'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}
-            ]
-        }
-        meta = {
-            'instance_id': port['device_id'],
-            'tenant_id': port['tenant_id'],
-            'ip_address': port['fixed_ips'][0]['ip_address']
-        }
-        self.plugin.get_network.return_value = {'router:external': False}
-        if is_delete:
-            mock_func = self.plugin.lsn_manager.lsn_port_meta_host_remove
-        else:
-            mock_func = self.plugin.lsn_manager.lsn_port_meta_host_add
-        if raise_exc:
-            mock_func.side_effect = p_exc.PortConfigurationError(
-                lsn_id='foo_lsn_id', net_id='foo_net_id', port_id=None)
-            with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2,
-                                   'delete_port') as d:
-                self.assertRaises(p_exc.PortConfigurationError,
-                                  nsx.handle_port_metadata_access,
-                                  self.plugin, mock.ANY, port,
-                                  is_delete=is_delete)
-                if not is_delete:
-                    d.assert_called_once_with(mock.ANY, mock.ANY, port['id'])
-                else:
-                    self.assertFalse(d.call_count)
-        else:
-            nsx.handle_port_metadata_access(
-                self.plugin, mock.ANY, port, is_delete=is_delete)
-        mock_func.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, meta)
-
-    def test_handle_port_metadata_access_on_delete_true(self):
-        self._test_handle_port_metadata_access(True)
-
-    def test_handle_port_metadata_access_on_delete_false(self):
-        self._test_handle_port_metadata_access(False)
-
-    def test_handle_port_metadata_access_on_delete_true_raise(self):
-        self._test_handle_port_metadata_access(True, raise_exc=True)
-
-    def test_handle_port_metadata_access_on_delete_false_raise(self):
-        self._test_handle_port_metadata_access(False, raise_exc=True)
-
-    def _test_handle_router_metadata_access(
-        self, is_port_found, raise_exc=False):
-        subnet = {
-            'id': 'foo_subnet_id',
-            'network_id': 'foo_network_id'
-        }
-        interface = {
-            'subnet_id': subnet['id'],
-            'port_id': 'foo_port_id'
-        }
-        mock_func = self.plugin.lsn_manager.lsn_metadata_configure
-        if not is_port_found:
-            self.plugin.get_port.side_effect = n_exc.NotFound
-        if raise_exc:
-            with mock.patch.object(nsx.l3_db.L3_NAT_db_mixin,
-                                   'remove_router_interface') as d:
-                mock_func.side_effect = p_exc.NsxPluginException(err_msg='')
-                self.assertRaises(p_exc.NsxPluginException,
-                                  nsx.handle_router_metadata_access,
-                                  self.plugin, mock.ANY, 'foo_router_id',
-                                  interface)
-                d.assert_called_once_with(mock.ANY, mock.ANY, 'foo_router_id',
-                                          interface)
-        else:
-            nsx.handle_router_metadata_access(
-                self.plugin, mock.ANY, 'foo_router_id', interface)
-            mock_func.assert_called_once_with(
-                mock.ANY, subnet['id'], is_port_found)
-
-    def test_handle_router_metadata_access_add_interface(self):
-        self._test_handle_router_metadata_access(True)
-
-    def test_handle_router_metadata_access_delete_interface(self):
-        self._test_handle_router_metadata_access(False)
-
-    def test_handle_router_metadata_access_raise_error_on_add(self):
-        self._test_handle_router_metadata_access(True, raise_exc=True)
-
-    def test_handle_router_metadata_access_raise_error_on_delete(self):
-        self._test_handle_router_metadata_access(True, raise_exc=False)
diff --git a/neutron/tests/unit/vmware/test_nsx_opts.py b/neutron/tests/unit/vmware/test_nsx_opts.py
deleted file mode 100644 (file)
index 0ea2b97..0000000
+++ /dev/null
@@ -1,258 +0,0 @@
-# Copyright 2013 VMware, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import fixtures
-
-import mock
-from oslo_config import cfg
-
-from neutron import manager
-from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.api_client import client
-from neutron.plugins.vmware.api_client import version
-from neutron.plugins.vmware.common import config  # noqa
-from neutron.plugins.vmware.common import exceptions
-from neutron.plugins.vmware.common import sync
-from neutron.plugins.vmware import nsx_cluster
-from neutron.plugins.vmware.nsxlib import lsn as lsnlib
-from neutron.tests import base
-from neutron.tests.unit import vmware
-
-BASE_CONF_PATH = vmware.get_fake_conf('neutron.conf.test')
-NSX_INI_PATH = vmware.get_fake_conf('nsx.ini.basic.test')
-NSX_INI_FULL_PATH = vmware.get_fake_conf('nsx.ini.full.test')
-NSX_INI_AGENTLESS_PATH = vmware.get_fake_conf('nsx.ini.agentless.test')
-NSX_INI_COMBINED_PATH = vmware.get_fake_conf('nsx.ini.combined.test')
-NVP_INI_DEPR_PATH = vmware.get_fake_conf('nvp.ini.full.test')
-
-
-class NSXClusterTest(base.BaseTestCase):
-
-    cluster_opts = {'default_tz_uuid': uuidutils.generate_uuid(),
-                    'default_l2_gw_service_uuid': uuidutils.generate_uuid(),
-                    'default_l2_gw_service_uuid': uuidutils.generate_uuid(),
-                    'nsx_user': 'foo',
-                    'nsx_password': 'bar',
-                    'http_timeout': 25,
-                    'retries': 7,
-                    'redirects': 23,
-                    'default_interface_name': 'baz',
-                    'nsx_controllers': ['1.1.1.1:443']}
-
-    def test_create_cluster(self):
-        cluster = nsx_cluster.NSXCluster(**self.cluster_opts)
-        for (k, v) in self.cluster_opts.iteritems():
-            self.assertEqual(v, getattr(cluster, k))
-
-    def test_create_cluster_default_port(self):
-        opts = self.cluster_opts.copy()
-        opts['nsx_controllers'] = ['1.1.1.1']
-        cluster = nsx_cluster.NSXCluster(**opts)
-        for (k, v) in self.cluster_opts.iteritems():
-            self.assertEqual(v, getattr(cluster, k))
-
-    def test_create_cluster_missing_required_attribute_raises(self):
-        opts = self.cluster_opts.copy()
-        opts.pop('default_tz_uuid')
-        self.assertRaises(exceptions.InvalidClusterConfiguration,
-                          nsx_cluster.NSXCluster, **opts)
-
-
-class ConfigurationTest(base.BaseTestCase):
-
-    def setUp(self):
-        super(ConfigurationTest, self).setUp()
-        self.useFixture(fixtures.MonkeyPatch(
-                        'neutron.manager.NeutronManager._instance',
-                        None))
-        # Avoid runs of the synchronizer looping call
-        patch_sync = mock.patch.object(sync, '_start_loopingcall')
-        patch_sync.start()
-        dhcp_periodic_p = mock.patch('neutron.db.agentschedulers_db.'
-                                     'DhcpAgentSchedulerDbMixin.'
-                                     'start_periodic_dhcp_agent_status_check')
-        dhcp_periodic_p.start()
-
-    def _assert_required_options(self, cluster):
-        self.assertEqual(cluster.nsx_controllers, ['fake_1:443', 'fake_2:443'])
-        self.assertEqual(cluster.default_tz_uuid, 'fake_tz_uuid')
-        self.assertEqual(cluster.nsx_user, 'foo')
-        self.assertEqual(cluster.nsx_password, 'bar')
-
-    def _assert_extra_options(self, cluster):
-        self.assertEqual(13, cluster.http_timeout)
-        self.assertEqual(12, cluster.redirects)
-        self.assertEqual(11, cluster.retries)
-        self.assertEqual('whatever', cluster.default_l2_gw_service_uuid)
-        self.assertEqual('whatever', cluster.default_l3_gw_service_uuid)
-        self.assertEqual('whatever', cluster.default_interface_name)
-
-    def test_load_plugin_with_full_options(self):
-        self.config_parse(args=['--config-file', BASE_CONF_PATH,
-                                '--config-file', NSX_INI_FULL_PATH])
-        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
-        plugin = manager.NeutronManager().get_plugin()
-        cluster = plugin.cluster
-        self._assert_required_options(cluster)
-        self._assert_extra_options(cluster)
-
-    def test_load_plugin_with_required_options_only(self):
-        self.config_parse(args=['--config-file', BASE_CONF_PATH,
-                                '--config-file', NSX_INI_PATH])
-        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
-        plugin = manager.NeutronManager().get_plugin()
-        self._assert_required_options(plugin.cluster)
-
-    def test_defaults(self):
-        self.assertEqual(5000, cfg.CONF.NSX.max_lp_per_bridged_ls)
-        self.assertEqual(256, cfg.CONF.NSX.max_lp_per_overlay_ls)
-        self.assertEqual(10, cfg.CONF.NSX.concurrent_connections)
-        self.assertEqual('access_network', cfg.CONF.NSX.metadata_mode)
-        self.assertEqual('stt', cfg.CONF.NSX.default_transport_type)
-        self.assertEqual('service', cfg.CONF.NSX.replication_mode)
-
-        self.assertIsNone(cfg.CONF.default_tz_uuid)
-        self.assertEqual('admin', cfg.CONF.nsx_user)
-        self.assertEqual('admin', cfg.CONF.nsx_password)
-        self.assertEqual(75, cfg.CONF.http_timeout)
-        self.assertEqual(2, cfg.CONF.retries)
-        self.assertEqual(2, cfg.CONF.redirects)
-        self.assertIsNone(cfg.CONF.nsx_controllers)
-        self.assertIsNone(cfg.CONF.default_l3_gw_service_uuid)
-        self.assertIsNone(cfg.CONF.default_l2_gw_service_uuid)
-        self.assertEqual('breth0', cfg.CONF.default_interface_name)
-        self.assertEqual(900, cfg.CONF.conn_idle_timeout)
-
-    def test_load_api_extensions(self):
-        self.config_parse(args=['--config-file', BASE_CONF_PATH,
-                                '--config-file', NSX_INI_FULL_PATH])
-        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
-        # Load the configuration, and initialize the plugin
-        manager.NeutronManager().get_plugin()
-        self.assertIn('extensions', cfg.CONF.api_extensions_path)
-
-    def test_agentless_extensions(self):
-        self.config_parse(args=['--config-file', BASE_CONF_PATH,
-                                '--config-file', NSX_INI_AGENTLESS_PATH])
-        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
-        self.assertEqual(config.AgentModes.AGENTLESS,
-                         cfg.CONF.NSX.agent_mode)
-        # The version returned from NSX does not really matter here
-        with mock.patch.object(client.NsxApiClient,
-                               'get_version',
-                               return_value=version.Version("9.9")):
-            with mock.patch.object(lsnlib,
-                                   'service_cluster_exists',
-                                   return_value=True):
-                plugin = manager.NeutronManager().get_plugin()
-                self.assertNotIn('agent',
-                                 plugin.supported_extension_aliases)
-                self.assertNotIn('dhcp_agent_scheduler',
-                                 plugin.supported_extension_aliases)
-                self.assertNotIn('lsn',
-                                 plugin.supported_extension_aliases)
-
-    def test_agentless_extensions_version_fail(self):
-        self.config_parse(args=['--config-file', BASE_CONF_PATH,
-                                '--config-file', NSX_INI_AGENTLESS_PATH])
-        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
-        self.assertEqual(config.AgentModes.AGENTLESS,
-                         cfg.CONF.NSX.agent_mode)
-        with mock.patch.object(client.NsxApiClient,
-                               'get_version',
-                               return_value=version.Version("3.2")):
-            self.assertRaises(exceptions.NsxPluginException,
-                              manager.NeutronManager)
-
-    def test_agentless_extensions_unmet_deps_fail(self):
-        self.config_parse(args=['--config-file', BASE_CONF_PATH,
-                                '--config-file', NSX_INI_AGENTLESS_PATH])
-        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
-        self.assertEqual(config.AgentModes.AGENTLESS,
-                         cfg.CONF.NSX.agent_mode)
-        with mock.patch.object(client.NsxApiClient,
-                               'get_version',
-                               return_value=version.Version("3.2")):
-            with mock.patch.object(lsnlib,
-                                   'service_cluster_exists',
-                                   return_value=False):
-                self.assertRaises(exceptions.NsxPluginException,
-                                  manager.NeutronManager)
-
-    def test_agent_extensions(self):
-        self.config_parse(args=['--config-file', BASE_CONF_PATH,
-                                '--config-file', NSX_INI_FULL_PATH])
-        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
-        self.assertEqual(config.AgentModes.AGENT,
-                         cfg.CONF.NSX.agent_mode)
-        plugin = manager.NeutronManager().get_plugin()
-        self.assertIn('agent',
-                      plugin.supported_extension_aliases)
-        self.assertIn('dhcp_agent_scheduler',
-                      plugin.supported_extension_aliases)
-
-    def test_combined_extensions(self):
-        self.config_parse(args=['--config-file', BASE_CONF_PATH,
-                                '--config-file', NSX_INI_COMBINED_PATH])
-        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
-        self.assertEqual(config.AgentModes.COMBINED,
-                         cfg.CONF.NSX.agent_mode)
-        with mock.patch.object(client.NsxApiClient,
-                               'get_version',
-                               return_value=version.Version("4.2")):
-            with mock.patch.object(lsnlib,
-                                   'service_cluster_exists',
-                                   return_value=True):
-                plugin = manager.NeutronManager().get_plugin()
-                self.assertIn('agent',
-                              plugin.supported_extension_aliases)
-                self.assertIn('dhcp_agent_scheduler',
-                              plugin.supported_extension_aliases)
-                self.assertIn('lsn',
-                              plugin.supported_extension_aliases)
-
-
-class OldNVPConfigurationTest(base.BaseTestCase):
-
-    def setUp(self):
-        super(OldNVPConfigurationTest, self).setUp()
-        self.useFixture(fixtures.MonkeyPatch(
-                        'neutron.manager.NeutronManager._instance',
-                        None))
-        # Avoid runs of the synchronizer looping call
-        patch_sync = mock.patch.object(sync, '_start_loopingcall')
-        patch_sync.start()
-        dhcp_periodic_p = mock.patch('neutron.db.agentschedulers_db.'
-                                     'DhcpAgentSchedulerDbMixin.'
-                                     'start_periodic_dhcp_agent_status_check')
-        dhcp_periodic_p.start()
-
-    def _assert_required_options(self, cluster):
-        self.assertEqual(cluster.nsx_controllers, ['fake_1:443', 'fake_2:443'])
-        self.assertEqual(cluster.nsx_user, 'foo')
-        self.assertEqual(cluster.nsx_password, 'bar')
-        self.assertEqual(cluster.default_tz_uuid, 'fake_tz_uuid')
-
-    def test_load_plugin_with_deprecated_options(self):
-        self.config_parse(args=['--config-file', BASE_CONF_PATH,
-                                '--config-file', NVP_INI_DEPR_PATH])
-        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
-        plugin = manager.NeutronManager().get_plugin()
-        cluster = plugin.cluster
-        # Verify old nvp_* params have been fully parsed
-        self._assert_required_options(cluster)
-        self.assertEqual(3, cluster.http_timeout)
-        self.assertEqual(2, cluster.retries)
-        self.assertEqual(2, cluster.redirects)
diff --git a/neutron/tests/unit/vmware/test_nsx_plugin.py b/neutron/tests/unit/vmware/test_nsx_plugin.py
deleted file mode 100644 (file)
index 2206d13..0000000
+++ /dev/null
@@ -1,1265 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import contextlib
-import uuid
-
-import mock
-import netaddr
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from sqlalchemy import exc as sql_exc
-import webob.exc
-
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions as ntn_exc
-import neutron.common.test_lib as test_lib
-from neutron import context
-from neutron.extensions import dvr
-from neutron.extensions import external_net
-from neutron.extensions import l3
-from neutron.extensions import l3_ext_gw_mode
-from neutron.extensions import portbindings
-from neutron.extensions import providernet as pnet
-from neutron.extensions import securitygroup as secgrp
-from neutron import manager
-from neutron.openstack.common import log
-from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.api_client import version as version_module
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import sync
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware.dbexts import db as nsx_db
-from neutron.plugins.vmware import nsxlib
-from neutron.tests.unit import _test_extension_portbindings as test_bindings
-import neutron.tests.unit.test_db_plugin as test_plugin
-import neutron.tests.unit.test_extension_ext_gw_mode as test_ext_gw_mode
-import neutron.tests.unit.test_extension_security_group as ext_sg
-import neutron.tests.unit.test_l3_plugin as test_l3_plugin
-from neutron.tests.unit import testlib_api
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware.apiclient import fake
-
-LOG = log.getLogger(__name__)
-
-
-class NsxPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
-
-    def _create_network(self, fmt, name, admin_state_up,
-                        arg_list=None, providernet_args=None, **kwargs):
-        data = {'network': {'name': name,
-                            'admin_state_up': admin_state_up,
-                            'tenant_id': self._tenant_id}}
-        # Fix to allow the router:external attribute and any other
-        # attributes containing a colon to be passed with
-        # a double underscore instead
-        kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items())
-        if external_net.EXTERNAL in kwargs:
-            arg_list = (external_net.EXTERNAL, ) + (arg_list or ())
-
-        attrs = kwargs
-        if providernet_args:
-            attrs.update(providernet_args)
-        for arg in (('admin_state_up', 'tenant_id', 'shared') +
-                    (arg_list or ())):
-            # Arg must be present and not empty
-            if kwargs.get(arg):
-                data['network'][arg] = kwargs[arg]
-        network_req = self.new_create_request('networks', data, fmt)
-        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            network_req.environ['neutron.context'] = context.Context(
-                '', kwargs['tenant_id'])
-        return network_req.get_response(self.api)
-
-    def setUp(self,
-              plugin=vmware.PLUGIN_NAME,
-              ext_mgr=None,
-              service_plugins=None):
-        test_lib.test_config['config_files'] = [
-            vmware.get_fake_conf('nsx.ini.test')]
-        # mock api client
-        self.fc = fake.FakeClient(vmware.STUBS_PATH)
-        self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True)
-        self.mock_instance = self.mock_nsx.start()
-        # Avoid runs of the synchronizer looping call
-        patch_sync = mock.patch.object(sync, '_start_loopingcall')
-        patch_sync.start()
-
-        # Emulate tests against NSX 2.x
-        self.mock_instance.return_value.get_version.return_value = (
-            version_module.Version("2.9"))
-        self.mock_instance.return_value.request.side_effect = (
-            self.fc.fake_request)
-        super(NsxPluginV2TestCase, self).setUp(plugin=plugin,
-                                               ext_mgr=ext_mgr)
-        # Newly created port's status is always 'DOWN' till NSX wires them.
-        self.port_create_status = constants.PORT_STATUS_DOWN
-        cfg.CONF.set_override('metadata_mode', None, 'NSX')
-        self.addCleanup(self.fc.reset_all)
-
-
-class TestBasicGet(test_plugin.TestBasicGet, NsxPluginV2TestCase):
-    pass
-
-
-class TestV2HTTPResponse(test_plugin.TestV2HTTPResponse, NsxPluginV2TestCase):
-    pass
-
-
-class TestPortsV2(NsxPluginV2TestCase,
-                  test_plugin.TestPortsV2,
-                  test_bindings.PortBindingsTestCase,
-                  test_bindings.PortBindingsHostTestCaseMixin,
-                  test_bindings.PortBindingsVnicTestCaseMixin):
-
-    VIF_TYPE = portbindings.VIF_TYPE_OVS
-    HAS_PORT_FILTER = True
-
-    def test_exhaust_ports_overlay_network(self):
-        cfg.CONF.set_override('max_lp_per_overlay_ls', 1, group='NSX')
-        with self.network(name='testnet',
-                          arg_list=(pnet.NETWORK_TYPE,
-                                    pnet.PHYSICAL_NETWORK,
-                                    pnet.SEGMENTATION_ID)) as net:
-            with self.subnet(network=net) as sub:
-                with self.port(subnet=sub):
-                    # creating another port should see an exception
-                    self._create_port('json', net['network']['id'], 400)
-
-    def test_exhaust_ports_bridged_network(self):
-        cfg.CONF.set_override('max_lp_per_bridged_ls', 1, group="NSX")
-        providernet_args = {pnet.NETWORK_TYPE: 'flat',
-                            pnet.PHYSICAL_NETWORK: 'tzuuid'}
-        with self.network(name='testnet',
-                          providernet_args=providernet_args,
-                          arg_list=(pnet.NETWORK_TYPE,
-                                    pnet.PHYSICAL_NETWORK,
-                                    pnet.SEGMENTATION_ID)) as net:
-            with self.subnet(network=net) as sub:
-                with self.port(subnet=sub):
-                    with self.port(subnet=sub):
-                        plugin = manager.NeutronManager.get_plugin()
-                        ls = nsxlib.switch.get_lswitches(plugin.cluster,
-                                                         net['network']['id'])
-                        self.assertEqual(len(ls), 2)
-
-    def test_update_port_delete_ip(self):
-        # This test case overrides the default because the nsx plugin
-        # implements port_security/security groups and it is not allowed
-        # to remove an ip address from a port unless the security group
-        # is first removed.
-        with self.subnet() as subnet:
-            with self.port(subnet=subnet) as port:
-                data = {'port': {'admin_state_up': False,
-                                 'fixed_ips': [],
-                                 secgrp.SECURITYGROUPS: []}}
-                req = self.new_update_request('ports',
-                                              data, port['port']['id'])
-                res = self.deserialize('json', req.get_response(self.api))
-                self.assertEqual(res['port']['admin_state_up'],
-                                 data['port']['admin_state_up'])
-                self.assertEqual(res['port']['fixed_ips'],
-                                 data['port']['fixed_ips'])
-
-    def test_create_port_name_exceeds_40_chars(self):
-        name = 'this_is_a_port_whose_name_is_longer_than_40_chars'
-        with self.port(name=name) as port:
-            # Assert the neutron name is not truncated
-            self.assertEqual(name, port['port']['name'])
-
-    def _verify_no_orphan_left(self, net_id):
-        # Verify no port exists on net
-        # ie: cleanup on db was successful
-        query_params = "network_id=%s" % net_id
-        self._test_list_resources('port', [],
-                                  query_params=query_params)
-        # Also verify no orphan port was left on nsx
-        # no port should be there at all
-        self.assertFalse(self.fc._fake_lswitch_lport_dict)
-
-    def test_create_port_nsx_error_no_orphan_left(self):
-        with mock.patch.object(nsxlib.switch, 'create_lport',
-                               side_effect=api_exc.NsxApiException):
-            with self.network() as net:
-                net_id = net['network']['id']
-                self._create_port(self.fmt, net_id,
-                                  webob.exc.HTTPInternalServerError.code)
-                self._verify_no_orphan_left(net_id)
-
-    def test_create_port_neutron_error_no_orphan_left(self):
-        with mock.patch.object(nsx_db, 'add_neutron_nsx_port_mapping',
-                               side_effect=ntn_exc.NeutronException):
-            with self.network() as net:
-                net_id = net['network']['id']
-                self._create_port(self.fmt, net_id,
-                                  webob.exc.HTTPInternalServerError.code)
-                self._verify_no_orphan_left(net_id)
-
-    def test_create_port_db_error_no_orphan_left(self):
-        db_exception = db_exc.DBError(
-            inner_exception=sql_exc.IntegrityError(mock.ANY,
-                                                   mock.ANY,
-                                                   mock.ANY))
-        with mock.patch.object(nsx_db, 'add_neutron_nsx_port_mapping',
-                               side_effect=db_exception):
-            with self.network() as net:
-                with self.port(device_owner=constants.DEVICE_OWNER_DHCP):
-                    self._verify_no_orphan_left(net['network']['id'])
-
-    def test_create_port_maintenance_returns_503(self):
-        with self.network() as net:
-            with mock.patch.object(nsxlib, 'do_request',
-                                   side_effect=nsx_exc.MaintenanceInProgress):
-                data = {'port': {'network_id': net['network']['id'],
-                                 'admin_state_up': False,
-                                 'fixed_ips': [],
-                                 'tenant_id': self._tenant_id}}
-                plugin = manager.NeutronManager.get_plugin()
-                with mock.patch.object(plugin, 'get_network',
-                                       return_value=net['network']):
-                    port_req = self.new_create_request('ports', data, self.fmt)
-                    res = port_req.get_response(self.api)
-                    self.assertEqual(webob.exc.HTTPServiceUnavailable.code,
-                                     res.status_int)
-
-
-class TestNetworksV2(test_plugin.TestNetworksV2, NsxPluginV2TestCase):
-
-    def _test_create_bridge_network(self, vlan_id=0):
-        net_type = 'vlan' if vlan_id else 'flat'
-        name = 'bridge_net'
-        expected = [('subnets', []), ('name', name), ('admin_state_up', True),
-                    ('status', 'ACTIVE'), ('shared', False),
-                    (pnet.NETWORK_TYPE, net_type),
-                    (pnet.PHYSICAL_NETWORK, 'tzuuid'),
-                    (pnet.SEGMENTATION_ID, vlan_id)]
-        providernet_args = {pnet.NETWORK_TYPE: net_type,
-                            pnet.PHYSICAL_NETWORK: 'tzuuid'}
-        if vlan_id:
-            providernet_args[pnet.SEGMENTATION_ID] = vlan_id
-        with self.network(name=name,
-                          providernet_args=providernet_args,
-                          arg_list=(pnet.NETWORK_TYPE,
-                                    pnet.PHYSICAL_NETWORK,
-                                    pnet.SEGMENTATION_ID)) as net:
-            for k, v in expected:
-                self.assertEqual(net['network'][k], v)
-
-    def test_create_bridge_network(self):
-        self._test_create_bridge_network()
-
-    def test_create_bridge_vlan_network(self):
-        self._test_create_bridge_network(vlan_id=123)
-
-    def test_create_bridge_vlan_network_outofrange_returns_400(self):
-        with testlib_api.ExpectedException(
-                webob.exc.HTTPClientError) as ctx_manager:
-            self._test_create_bridge_network(vlan_id=5000)
-        self.assertEqual(ctx_manager.exception.code, 400)
-
-    def test_list_networks_filter_by_id(self):
-        # We add this unit test to cover some logic specific to the
-        # nsx plugin
-        with contextlib.nested(self.network(name='net1'),
-                               self.network(name='net2')) as (net1, net2):
-            query_params = 'id=%s' % net1['network']['id']
-            self._test_list_resources('network', [net1],
-                                      query_params=query_params)
-            query_params += '&id=%s' % net2['network']['id']
-            self._test_list_resources('network', [net1, net2],
-                                      query_params=query_params)
-
-    def test_delete_network_after_removing_subet(self):
-        gateway_ip = '10.0.0.1'
-        cidr = '10.0.0.0/24'
-        fmt = 'json'
-        # Create new network
-        res = self._create_network(fmt=fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(fmt, res)
-        subnet = self._make_subnet(fmt, network, gateway_ip,
-                                   cidr, ip_version=4)
-        req = self.new_delete_request('subnets', subnet['subnet']['id'])
-        sub_del_res = req.get_response(self.api)
-        self.assertEqual(sub_del_res.status_int, 204)
-        req = self.new_delete_request('networks', network['network']['id'])
-        net_del_res = req.get_response(self.api)
-        self.assertEqual(net_del_res.status_int, 204)
-
-    def test_list_networks_with_shared(self):
-        with self.network(name='net1'):
-            with self.network(name='net2', shared=True):
-                req = self.new_list_request('networks')
-                res = self.deserialize('json', req.get_response(self.api))
-                self.assertEqual(len(res['networks']), 2)
-                req_2 = self.new_list_request('networks')
-                req_2.environ['neutron.context'] = context.Context('',
-                                                                   'somebody')
-                res = self.deserialize('json', req_2.get_response(self.api))
-                # tenant must see a single network
-                self.assertEqual(len(res['networks']), 1)
-
-    def test_create_network_name_exceeds_40_chars(self):
-        name = 'this_is_a_network_whose_name_is_longer_than_40_chars'
-        with self.network(name=name) as net:
-            # Assert neutron name is not truncated
-            self.assertEqual(net['network']['name'], name)
-
-    def test_create_network_maintenance_returns_503(self):
-        data = {'network': {'name': 'foo',
-                            'admin_state_up': True,
-                            'tenant_id': self._tenant_id}}
-        with mock.patch.object(nsxlib, 'do_request',
-                               side_effect=nsx_exc.MaintenanceInProgress):
-            net_req = self.new_create_request('networks', data, self.fmt)
-            res = net_req.get_response(self.api)
-            self.assertEqual(webob.exc.HTTPServiceUnavailable.code,
-                             res.status_int)
-
-    def test_update_network_with_admin_false(self):
-        data = {'network': {'admin_state_up': False}}
-        with self.network() as net:
-            plugin = manager.NeutronManager.get_plugin()
-            self.assertRaises(NotImplementedError,
-                              plugin.update_network,
-                              context.get_admin_context(),
-                              net['network']['id'], data)
-
-    def test_update_network_with_name_calls_nsx(self):
-        with mock.patch.object(
-            nsxlib.switch, 'update_lswitch') as update_lswitch_mock:
-            # don't worry about deleting this network, do not use
-            # context manager
-            ctx = context.Context(user_id=None, tenant_id='gonzalo')
-            plugin = manager.NeutronManager.get_plugin()
-            net = plugin.create_network(
-                ctx, {'network': {'name': 'xxx',
-                                  'admin_state_up': True,
-                                  'shared': False,
-                                  'port_security_enabled': True}})
-            plugin.update_network(ctx, net['id'],
-                                  {'network': {'name': 'yyy'}})
-        update_lswitch_mock.assert_called_once_with(
-            mock.ANY, mock.ANY, 'yyy')
-
-
-class SecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase):
-
-    def setUp(self):
-        test_lib.test_config['config_files'] = [
-            vmware.get_fake_conf('nsx.ini.test')]
-        # mock nsx api client
-        self.fc = fake.FakeClient(vmware.STUBS_PATH)
-        self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True)
-        instance = self.mock_nsx.start()
-        instance.return_value.login.return_value = "the_cookie"
-        # Avoid runs of the synchronizer looping call
-        patch_sync = mock.patch.object(sync, '_start_loopingcall')
-        patch_sync.start()
-
-        instance.return_value.request.side_effect = self.fc.fake_request
-        super(SecurityGroupsTestCase, self).setUp(vmware.PLUGIN_NAME)
-
-
-class TestSecurityGroup(ext_sg.TestSecurityGroups, SecurityGroupsTestCase):
-
-    def test_create_security_group_name_exceeds_40_chars(self):
-        name = 'this_is_a_secgroup_whose_name_is_longer_than_40_chars'
-        with self.security_group(name=name) as sg:
-            # Assert Neutron name is not truncated
-            self.assertEqual(sg['security_group']['name'], name)
-
-    def test_create_security_group_rule_bad_input(self):
-        name = 'foo security group'
-        description = 'foo description'
-        with self.security_group(name, description) as sg:
-            security_group_id = sg['security_group']['id']
-            protocol = 200
-            min_range = 32
-            max_range = 4343
-            rule = self._build_security_group_rule(
-                security_group_id, 'ingress', protocol,
-                min_range, max_range)
-            res = self._create_security_group_rule(self.fmt, rule)
-            self.deserialize(self.fmt, res)
-            self.assertEqual(res.status_int, 400)
-
-
-class TestL3ExtensionManager(object):
-
-    def get_resources(self):
-        # Simulate extension of L3 attribute map
-        # First apply attribute extensions
-        for key in l3.RESOURCE_ATTRIBUTE_MAP.keys():
-            l3.RESOURCE_ATTRIBUTE_MAP[key].update(
-                l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
-            l3.RESOURCE_ATTRIBUTE_MAP[key].update(
-                dvr.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
-        # Finally add l3 resources to the global attribute map
-        attributes.RESOURCE_ATTRIBUTE_MAP.update(
-            l3.RESOURCE_ATTRIBUTE_MAP)
-        return l3.L3.get_resources()
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class TestL3SecGrpExtensionManager(TestL3ExtensionManager):
-    """A fake extension manager for L3 and Security Group extensions.
-
-    Includes also NSX specific L3 attributes.
-    """
-
-    def get_resources(self):
-        resources = super(TestL3SecGrpExtensionManager,
-                          self).get_resources()
-        resources.extend(secgrp.Securitygroup.get_resources())
-        return resources
-
-
-def backup_l3_attribute_map():
-    """Return a backup of the original l3 attribute map."""
-    return dict((res, attrs.copy()) for
-                (res, attrs) in l3.RESOURCE_ATTRIBUTE_MAP.iteritems())
-
-
-def restore_l3_attribute_map(map_to_restore):
-    """Ensure changes made by fake ext mgrs are reverted."""
-    l3.RESOURCE_ATTRIBUTE_MAP = map_to_restore
-
-
-class L3NatTest(test_l3_plugin.L3BaseForIntTests, NsxPluginV2TestCase):
-
-    def _restore_l3_attribute_map(self):
-        l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk
-
-    def setUp(self, plugin=vmware.PLUGIN_NAME, ext_mgr=None,
-              service_plugins=None):
-        self._l3_attribute_map_bk = {}
-        for item in l3.RESOURCE_ATTRIBUTE_MAP:
-            self._l3_attribute_map_bk[item] = (
-                l3.RESOURCE_ATTRIBUTE_MAP[item].copy())
-        cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
-        l3_attribute_map_bk = backup_l3_attribute_map()
-        self.addCleanup(restore_l3_attribute_map, l3_attribute_map_bk)
-        ext_mgr = ext_mgr or TestL3ExtensionManager()
-        super(L3NatTest, self).setUp(
-            plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins)
-        plugin_instance = manager.NeutronManager.get_plugin()
-        self._plugin_name = "%s.%s" % (
-            plugin_instance.__module__,
-            plugin_instance.__class__.__name__)
-        self._plugin_class = plugin_instance.__class__
-
-    def _create_l3_ext_network(self, vlan_id=None):
-        name = 'l3_ext_net'
-        net_type = utils.NetworkTypes.L3_EXT
-        providernet_args = {pnet.NETWORK_TYPE: net_type,
-                            pnet.PHYSICAL_NETWORK: 'l3_gw_uuid'}
-        if vlan_id:
-            providernet_args[pnet.SEGMENTATION_ID] = vlan_id
-        return self.network(name=name,
-                            router__external=True,
-                            providernet_args=providernet_args,
-                            arg_list=(pnet.NETWORK_TYPE,
-                                      pnet.PHYSICAL_NETWORK,
-                                      pnet.SEGMENTATION_ID))
-
-    #REVISIT: remove the following skips if external IP spec support is added
-    def test_router_create_with_gwinfo_ext_ip(self):
-        raise self.skipException('External IP specification unsupported')
-
-    def test_router_create_with_gwinfo_ext_ip_non_admin(self):
-        raise self.skipException('External IP specification unsupported')
-
-    def test_router_update_gateway_with_different_external_subnet(self):
-        raise self.skipException('External IP specification unsupported')
-
-    def test_router_create_with_gwinfo_ext_ip_subnet(self):
-        raise self.skipException('External IP specification unsupported')
-
-
-class TestL3NatTestCase(L3NatTest,
-                        test_l3_plugin.L3NatDBIntTestCase,
-                        NsxPluginV2TestCase):
-
-    def _test_create_l3_ext_network(self, vlan_id=0):
-        name = 'l3_ext_net'
-        net_type = utils.NetworkTypes.L3_EXT
-        expected = [('subnets', []), ('name', name), ('admin_state_up', True),
-                    ('status', 'ACTIVE'), ('shared', False),
-                    (external_net.EXTERNAL, True),
-                    (pnet.NETWORK_TYPE, net_type),
-                    (pnet.PHYSICAL_NETWORK, 'l3_gw_uuid'),
-                    (pnet.SEGMENTATION_ID, vlan_id)]
-        with self._create_l3_ext_network(vlan_id) as net:
-            for k, v in expected:
-                self.assertEqual(net['network'][k], v)
-
-    def _nsx_validate_ext_gw(self, router_id, l3_gw_uuid, vlan_id):
-        """Verify data on fake NSX API client in order to validate
-        plugin did set them properly
-        """
-        # First find the NSX router ID
-        ctx = context.get_admin_context()
-        nsx_router_id = nsx_db.get_nsx_router_id(ctx.session, router_id)
-        ports = [port for port in self.fc._fake_lrouter_lport_dict.values()
-                 if (port['lr_uuid'] == nsx_router_id and
-                     port['att_type'] == "L3GatewayAttachment")]
-        self.assertEqual(len(ports), 1)
-        self.assertEqual(ports[0]['attachment_gwsvc_uuid'], l3_gw_uuid)
-        self.assertEqual(ports[0].get('vlan_id'), vlan_id)
-
-    def test_create_l3_ext_network_without_vlan(self):
-        self._test_create_l3_ext_network()
-
-    def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None,
-                                                       validate_ext_gw=True):
-        with self._create_l3_ext_network(vlan_id) as net:
-            with self.subnet(network=net) as s:
-                data = {'router': {'tenant_id': 'whatever'}}
-                data['router']['name'] = 'router1'
-                data['router']['external_gateway_info'] = {
-                    'network_id': s['subnet']['network_id']}
-                router_req = self.new_create_request('routers', data,
-                                                     self.fmt)
-                try:
-                    res = router_req.get_response(self.ext_api)
-                    router = self.deserialize(self.fmt, res)
-                    self.assertEqual(
-                        s['subnet']['network_id'],
-                        (router['router']['external_gateway_info']
-                         ['network_id']))
-                    if validate_ext_gw:
-                        self._nsx_validate_ext_gw(router['router']['id'],
-                                                  'l3_gw_uuid', vlan_id)
-                finally:
-                    self._delete('routers', router['router']['id'])
-
-    def test_router_create_with_gwinfo_and_l3_ext_net(self):
-        self._test_router_create_with_gwinfo_and_l3_ext_net()
-
-    def test_router_create_with_gwinfo_and_l3_ext_net_with_vlan(self):
-        self._test_router_create_with_gwinfo_and_l3_ext_net(444)
-
-    def _test_router_create_with_distributed(self, dist_input, dist_expected,
-                                             version='3.1', return_code=201):
-        self.mock_instance.return_value.get_version.return_value = (
-            version_module.Version(version))
-
-        data = {'tenant_id': 'whatever'}
-        data['name'] = 'router1'
-        data['distributed'] = dist_input
-        router_req = self.new_create_request(
-            'routers', {'router': data}, self.fmt)
-        try:
-            res = router_req.get_response(self.ext_api)
-            self.assertEqual(return_code, res.status_int)
-            if res.status_int == 201:
-                router = self.deserialize(self.fmt, res)
-                self.assertIn('distributed', router['router'])
-                self.assertEqual(dist_expected,
-                                 router['router']['distributed'])
-        finally:
-            if res.status_int == 201:
-                self._delete('routers', router['router']['id'])
-
-    def test_router_create_distributed_with_3_1(self):
-        self._test_router_create_with_distributed(True, True)
-
-    def test_router_create_distributed_with_new_nsx_versions(self):
-        with mock.patch.object(nsxlib.router, 'create_explicit_route_lrouter'):
-            self._test_router_create_with_distributed(True, True, '3.2')
-            self._test_router_create_with_distributed(True, True, '4.0')
-            self._test_router_create_with_distributed(True, True, '4.1')
-
-    def test_router_create_not_distributed(self):
-        self._test_router_create_with_distributed(False, False)
-
-    def test_router_create_distributed_unspecified(self):
-        self._test_router_create_with_distributed(None, False)
-
-    def test_router_create_distributed_returns_400(self):
-        self._test_router_create_with_distributed(True, None, '3.0', 400)
-
-    def test_router_create_on_obsolete_platform(self):
-
-        def obsolete_response(*args, **kwargs):
-            response = (nsxlib.router.
-                        _create_implicit_routing_lrouter(*args, **kwargs))
-            response.pop('distributed')
-            return response
-
-        with mock.patch.object(
-            nsxlib.router, 'create_lrouter', new=obsolete_response):
-            self._test_router_create_with_distributed(None, False, '2.2')
-
-    def _create_router_with_gw_info_for_test(self, subnet):
-        data = {'router': {'tenant_id': 'whatever',
-                           'name': 'router1',
-                           'external_gateway_info':
-                           {'network_id': subnet['subnet']['network_id']}}}
-        router_req = self.new_create_request(
-            'routers', data, self.fmt)
-        return router_req.get_response(self.ext_api)
-
-    def test_router_create_nsx_error_returns_500(self, vlan_id=None):
-        with mock.patch.object(nsxlib.router,
-                               'create_router_lport',
-                               side_effect=api_exc.NsxApiException):
-            with self._create_l3_ext_network(vlan_id) as net:
-                with self.subnet(network=net) as s:
-                    res = self._create_router_with_gw_info_for_test(s)
-                    self.assertEqual(
-                        webob.exc.HTTPInternalServerError.code,
-                        res.status_int)
-
-    def test_router_add_gateway_invalid_network_returns_404(self):
-        # NOTE(salv-orlando): This unit test has been overridden
-        # as the nsx plugin support the ext_gw_mode extension
-        # which mandates a uuid for the external network identifier
-        with self.router() as r:
-            self._add_external_gateway_to_router(
-                r['router']['id'],
-                uuidutils.generate_uuid(),
-                expected_code=webob.exc.HTTPNotFound.code)
-
-    def _verify_router_rollback(self):
-        # Check that nothing is left on DB
-        # TODO(salv-orlando): Verify whehter this is thread-safe
-        # w.r.t. sqllite and parallel testing
-        self._test_list_resources('router', [])
-        # Check that router is not in NSX
-        self.assertFalse(self.fc._fake_lrouter_dict)
-
-    def test_router_create_with_gw_info_neutron_fail_does_rollback(self):
-        # Simulate get subnet error while building list of ips with prefix
-        with mock.patch.object(self._plugin_class,
-                               '_build_ip_address_list',
-                               side_effect=ntn_exc.SubnetNotFound(
-                                   subnet_id='xxx')):
-            with self._create_l3_ext_network() as net:
-                with self.subnet(network=net) as s:
-                    res = self._create_router_with_gw_info_for_test(s)
-                    self.assertEqual(
-                        webob.exc.HTTPNotFound.code,
-                        res.status_int)
-                    self._verify_router_rollback()
-
-    def test_router_create_with_gw_info_nsx_fail_does_rollback(self):
-        # Simulate error while fetching nsx router gw port
-        with mock.patch.object(self._plugin_class,
-                               '_find_router_gw_port',
-                               side_effect=api_exc.NsxApiException):
-            with self._create_l3_ext_network() as net:
-                with self.subnet(network=net) as s:
-                    res = self._create_router_with_gw_info_for_test(s)
-                    self.assertEqual(
-                        webob.exc.HTTPInternalServerError.code,
-                        res.status_int)
-                    self._verify_router_rollback()
-
-    def _test_router_update_gateway_on_l3_ext_net(self, vlan_id=None,
-                                                  validate_ext_gw=True):
-        with self.router() as r:
-            with self.subnet() as s1:
-                with self._create_l3_ext_network(vlan_id) as net:
-                    with self.subnet(network=net) as s2:
-                        self._set_net_external(s1['subnet']['network_id'])
-                        try:
-                            self._add_external_gateway_to_router(
-                                r['router']['id'],
-                                s1['subnet']['network_id'])
-                            body = self._show('routers', r['router']['id'])
-                            net_id = (body['router']
-                                      ['external_gateway_info']['network_id'])
-                            self.assertEqual(net_id,
-                                             s1['subnet']['network_id'])
-                            # Plug network with external mapping
-                            self._set_net_external(s2['subnet']['network_id'])
-                            self._add_external_gateway_to_router(
-                                r['router']['id'],
-                                s2['subnet']['network_id'])
-                            body = self._show('routers', r['router']['id'])
-                            net_id = (body['router']
-                                      ['external_gateway_info']['network_id'])
-                            self.assertEqual(net_id,
-                                             s2['subnet']['network_id'])
-                            if validate_ext_gw:
-                                self._nsx_validate_ext_gw(
-                                    body['router']['id'],
-                                    'l3_gw_uuid', vlan_id)
-                        finally:
-                            # Cleanup
-                            self._remove_external_gateway_from_router(
-                                r['router']['id'],
-                                s2['subnet']['network_id'])
-
-    def test_router_update_gateway_on_l3_ext_net(self):
-        self._test_router_update_gateway_on_l3_ext_net()
-
-    def test_router_update_gateway_on_l3_ext_net_with_vlan(self):
-        self._test_router_update_gateway_on_l3_ext_net(444)
-
-    def test_router_list_by_tenant_id(self):
-        with contextlib.nested(self.router(tenant_id='custom'),
-                               self.router(),
-                               self.router()
-                               ) as routers:
-            self._test_list_resources('router', [routers[0]],
-                                      query_params="tenant_id=custom")
-
-    def test_create_l3_ext_network_with_vlan(self):
-        self._test_create_l3_ext_network(666)
-
-    def test_floatingip_with_assoc_fails(self):
-        self._test_floatingip_with_assoc_fails(
-            "%s.%s" % (self._plugin_name, "_update_fip_assoc"))
-
-    def test_floatingip_with_invalid_create_port(self):
-        self._test_floatingip_with_invalid_create_port(self._plugin_name)
-
-    def _metadata_setup(self):
-        cfg.CONF.set_override('metadata_mode', 'access_network', 'NSX')
-
-    def _metadata_teardown(self):
-        cfg.CONF.set_override('metadata_mode', None, 'NSX')
-
-    def test_create_router_name_exceeds_40_chars(self):
-        name = 'this_is_a_router_whose_name_is_longer_than_40_chars'
-        with self.router(name=name) as rtr:
-            # Assert Neutron name is not truncated
-            self.assertEqual(rtr['router']['name'], name)
-
-    def test_router_add_interface_subnet_with_metadata_access(self):
-        self._metadata_setup()
-        self.test_router_add_interface_subnet()
-        self._metadata_teardown()
-
-    def test_router_add_interface_port_with_metadata_access(self):
-        self._metadata_setup()
-        self.test_router_add_interface_port()
-        self._metadata_teardown()
-
-    def test_router_add_interface_dupsubnet_returns_400_with_metadata(self):
-        self._metadata_setup()
-        self.test_router_add_interface_dup_subnet1_returns_400()
-        self._metadata_teardown()
-
-    def test_router_add_interface_overlapped_cidr_returns_400_with(self):
-        self._metadata_setup()
-        self.test_router_add_interface_overlapped_cidr_returns_400()
-        self._metadata_teardown()
-
-    def test_router_remove_interface_inuse_returns_409_with_metadata(self):
-        self._metadata_setup()
-        self.test_router_remove_interface_inuse_returns_409()
-        self._metadata_teardown()
-
-    def test_router_remove_iface_wrong_sub_returns_400_with_metadata(self):
-        self._metadata_setup()
-        self.test_router_remove_interface_wrong_subnet_returns_400()
-        self._metadata_teardown()
-
-    def test_router_delete_with_metadata_access(self):
-        self._metadata_setup()
-        self.test_router_delete()
-        self._metadata_teardown()
-
-    def test_router_delete_with_port_existed_returns_409_with_metadata(self):
-        self._metadata_setup()
-        self.test_router_delete_with_port_existed_returns_409()
-        self._metadata_teardown()
-
-    def test_metadatata_network_created_with_router_interface_add(self):
-        self._metadata_setup()
-        with mock.patch.object(self._plugin_class, 'schedule_network') as f:
-            with self.router() as r:
-                with self.subnet() as s:
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  s['subnet']['id'],
-                                                  None)
-                    r_ports = self._list('ports')['ports']
-                    self.assertEqual(len(r_ports), 2)
-                    ips = []
-                    for port in r_ports:
-                        ips.extend([netaddr.IPAddress(fixed_ip['ip_address'])
-                                    for fixed_ip in port['fixed_ips']])
-                    meta_cidr = netaddr.IPNetwork('169.254.0.0/16')
-                    self.assertTrue(any([ip in meta_cidr for ip in ips]))
-                    # Needed to avoid 409
-                    self._router_interface_action('remove',
-                                                  r['router']['id'],
-                                                  s['subnet']['id'],
-                                                  None)
-            # Verify that the metadata network gets scheduled first, so that
-            # an active dhcp agent can pick it up
-            expected_meta_net = {
-                'status': 'ACTIVE',
-                'subnets': [],
-                'name': 'meta-%s' % r['router']['id'],
-                'admin_state_up': True,
-                'tenant_id': '',
-                'port_security_enabled': False,
-                'shared': False,
-                'id': mock.ANY
-            }
-            f.assert_called_once_with(mock.ANY, expected_meta_net)
-        self._metadata_teardown()
-
-    def test_metadata_network_create_rollback_on_create_subnet_failure(self):
-        self._metadata_setup()
-        with self.router() as r:
-            with self.subnet() as s:
-                # Raise a NeutronException (eg: NotFound)
-                with mock.patch.object(self._plugin_class,
-                                       'create_subnet',
-                                       side_effect=ntn_exc.NotFound):
-                    self._router_interface_action(
-                        'add', r['router']['id'], s['subnet']['id'], None)
-                # Ensure metadata network was removed
-                nets = self._list('networks')['networks']
-                self.assertEqual(len(nets), 1)
-                # Needed to avoid 409
-                self._router_interface_action('remove',
-                                              r['router']['id'],
-                                              s['subnet']['id'],
-                                              None)
-        self._metadata_teardown()
-
-    def test_metadata_network_create_rollback_on_add_rtr_iface_failure(self):
-        self._metadata_setup()
-        with self.router() as r:
-            with self.subnet() as s:
-                # Raise a NeutronException when adding metadata subnet
-                # to router
-                # save function being mocked
-                real_func = self._plugin_class.add_router_interface
-                plugin_instance = manager.NeutronManager.get_plugin()
-
-                def side_effect(*args):
-                    if args[-1]['subnet_id'] == s['subnet']['id']:
-                        # do the real thing
-                        return real_func(plugin_instance, *args)
-                    # otherwise raise
-                    raise api_exc.NsxApiException()
-
-                with mock.patch.object(self._plugin_class,
-                                       'add_router_interface',
-                                       side_effect=side_effect):
-                    self._router_interface_action(
-                        'add', r['router']['id'], s['subnet']['id'], None)
-                # Ensure metadata network was removed
-                nets = self._list('networks')['networks']
-                self.assertEqual(len(nets), 1)
-                # Needed to avoid 409
-                self._router_interface_action('remove',
-                                              r['router']['id'],
-                                              s['subnet']['id'],
-                                              None)
-        self._metadata_teardown()
-
-    def test_metadata_network_removed_with_router_interface_remove(self):
-        self._metadata_setup()
-        with self.router() as r:
-            with self.subnet() as s:
-                self._router_interface_action('add', r['router']['id'],
-                                              s['subnet']['id'], None)
-                subnets = self._list('subnets')['subnets']
-                self.assertEqual(len(subnets), 2)
-                meta_cidr = netaddr.IPNetwork('169.254.0.0/16')
-                for subnet in subnets:
-                    cidr = netaddr.IPNetwork(subnet['cidr'])
-                    if meta_cidr == cidr or meta_cidr in cidr.supernet(16):
-                        meta_sub_id = subnet['id']
-                        meta_net_id = subnet['network_id']
-                ports = self._list(
-                    'ports',
-                    query_params='network_id=%s' % meta_net_id)['ports']
-                self.assertEqual(len(ports), 1)
-                meta_port_id = ports[0]['id']
-                self._router_interface_action('remove', r['router']['id'],
-                                              s['subnet']['id'], None)
-                self._show('networks', meta_net_id,
-                           webob.exc.HTTPNotFound.code)
-                self._show('ports', meta_port_id,
-                           webob.exc.HTTPNotFound.code)
-                self._show('subnets', meta_sub_id,
-                           webob.exc.HTTPNotFound.code)
-        self._metadata_teardown()
-
-    def test_metadata_network_remove_rollback_on_failure(self):
-        self._metadata_setup()
-        with self.router() as r:
-            with self.subnet() as s:
-                self._router_interface_action('add', r['router']['id'],
-                                              s['subnet']['id'], None)
-                networks = self._list('networks')['networks']
-                for network in networks:
-                    if network['id'] != s['subnet']['network_id']:
-                        meta_net_id = network['id']
-                ports = self._list(
-                    'ports',
-                    query_params='network_id=%s' % meta_net_id)['ports']
-                meta_port_id = ports[0]['id']
-                # Raise a NeutronException when removing
-                # metadata subnet from router
-                # save function being mocked
-                real_func = self._plugin_class.remove_router_interface
-                plugin_instance = manager.NeutronManager.get_plugin()
-
-                def side_effect(*args):
-                    if args[-1].get('subnet_id') == s['subnet']['id']:
-                        # do the real thing
-                        return real_func(plugin_instance, *args)
-                    # otherwise raise
-                    raise api_exc.NsxApiException()
-
-                with mock.patch.object(self._plugin_class,
-                                       'remove_router_interface',
-                                       side_effect=side_effect):
-                    self._router_interface_action('remove', r['router']['id'],
-                                                  s['subnet']['id'], None)
-                # Metadata network and subnet should still be there
-                self._show('networks', meta_net_id,
-                           webob.exc.HTTPOk.code)
-                self._show('ports', meta_port_id,
-                           webob.exc.HTTPOk.code)
-        self._metadata_teardown()
-
-    def test_metadata_dhcp_host_route(self):
-        cfg.CONF.set_override('metadata_mode', 'dhcp_host_route', 'NSX')
-        subnets = self._list('subnets')['subnets']
-        with self.subnet() as s:
-            with self.port(subnet=s, device_id='1234',
-                           device_owner=constants.DEVICE_OWNER_DHCP) as port:
-                subnets = self._list('subnets')['subnets']
-                self.assertEqual(len(subnets), 1)
-                self.assertEqual(subnets[0]['host_routes'][0]['nexthop'],
-                                 '10.0.0.2')
-                self.assertEqual(subnets[0]['host_routes'][0]['destination'],
-                                 '169.254.169.254/32')
-            self._delete('ports', port['port']['id'])
-            subnets = self._list('subnets')['subnets']
-            # Test that route is deleted after dhcp port is removed
-            self.assertEqual(len(subnets[0]['host_routes']), 0)
-
-    def _test_floatingip_update(self, expected_status):
-        super(TestL3NatTestCase, self).test_floatingip_update(
-            expected_status)
-
-    def test_floatingip_update(self):
-        self._test_floatingip_update(constants.FLOATINGIP_STATUS_DOWN)
-
-    def test_floatingip_disassociate(self):
-        with self.port() as p:
-            private_sub = {'subnet': {'id':
-                                      p['port']['fixed_ips'][0]['subnet_id']}}
-            plugin = manager.NeutronManager.get_plugin()
-            with mock.patch.object(plugin, 'notify_routers_updated') as notify:
-                with self.floatingip_no_assoc(private_sub) as fip:
-                    port_id = p['port']['id']
-                    body = self._update('floatingips', fip['floatingip']['id'],
-                                        {'floatingip': {'port_id': port_id}})
-                    self.assertEqual(body['floatingip']['port_id'], port_id)
-                    # Floating IP status should be active
-                    self.assertEqual(constants.FLOATINGIP_STATUS_ACTIVE,
-                                     body['floatingip']['status'])
-                    # Disassociate
-                    body = self._update('floatingips', fip['floatingip']['id'],
-                                        {'floatingip': {'port_id': None}})
-                    body = self._show('floatingips', fip['floatingip']['id'])
-                    self.assertIsNone(body['floatingip']['port_id'])
-                    self.assertIsNone(body['floatingip']['fixed_ip_address'])
-                    # Floating IP status should be down
-                    self.assertEqual(constants.FLOATINGIP_STATUS_DOWN,
-                                     body['floatingip']['status'])
-
-                # check that notification was not requested
-                self.assertFalse(notify.called)
-
-    def test_create_router_maintenance_returns_503(self):
-        with self._create_l3_ext_network() as net:
-            with self.subnet(network=net) as s:
-                with mock.patch.object(
-                    nsxlib,
-                    'do_request',
-                    side_effect=nsx_exc.MaintenanceInProgress):
-                    data = {'router': {'tenant_id': 'whatever'}}
-                    data['router']['name'] = 'router1'
-                    data['router']['external_gateway_info'] = {
-                        'network_id': s['subnet']['network_id']}
-                    router_req = self.new_create_request(
-                        'routers', data, self.fmt)
-                    res = router_req.get_response(self.ext_api)
-                    self.assertEqual(webob.exc.HTTPServiceUnavailable.code,
-                                     res.status_int)
-
-    def test_router_add_interface_port_removes_security_group(self):
-        with self.router() as r:
-            with self.port() as p:
-                body = self._router_interface_action('add',
-                                                     r['router']['id'],
-                                                     None,
-                                                     p['port']['id'])
-                self.assertIn('port_id', body)
-                self.assertEqual(body['port_id'], p['port']['id'])
-
-                # fetch port and confirm no security-group on it.
-                body = self._show('ports', p['port']['id'])
-                self.assertEqual(body['port']['security_groups'], [])
-                self.assertFalse(body['port']['port_security_enabled'])
-                # clean-up
-                self._router_interface_action('remove',
-                                              r['router']['id'],
-                                              None,
-                                              p['port']['id'])
-
-
-class ExtGwModeTestCase(NsxPluginV2TestCase,
-                        test_ext_gw_mode.ExtGwModeIntTestCase):
-    pass
-
-
-class NeutronNsxOutOfSync(NsxPluginV2TestCase,
-                          test_l3_plugin.L3NatTestCaseMixin,
-                          ext_sg.SecurityGroupsTestCase):
-
-    def setUp(self):
-        l3_attribute_map_bk = backup_l3_attribute_map()
-        self.addCleanup(restore_l3_attribute_map, l3_attribute_map_bk)
-        super(NeutronNsxOutOfSync, self).setUp(
-            ext_mgr=TestL3SecGrpExtensionManager())
-
-    def test_delete_network_not_in_nsx(self):
-        res = self._create_network('json', 'net1', True)
-        net1 = self.deserialize('json', res)
-        self.fc._fake_lswitch_dict.clear()
-        req = self.new_delete_request('networks', net1['network']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, 204)
-
-    def test_show_network_not_in_nsx(self):
-        res = self._create_network('json', 'net1', True)
-        net = self.deserialize('json', res)
-        self.fc._fake_lswitch_dict.clear()
-        req = self.new_show_request('networks', net['network']['id'],
-                                    fields=['id', 'status'])
-        net = self.deserialize('json', req.get_response(self.api))
-        self.assertEqual(net['network']['status'],
-                         constants.NET_STATUS_ERROR)
-
-    def test_delete_port_not_in_nsx(self):
-        res = self._create_network('json', 'net1', True)
-        net1 = self.deserialize('json', res)
-        res = self._create_port('json', net1['network']['id'])
-        port = self.deserialize('json', res)
-        self.fc._fake_lswitch_lport_dict.clear()
-        req = self.new_delete_request('ports', port['port']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, 204)
-
-    def test_show_port_not_in_nsx(self):
-        res = self._create_network('json', 'net1', True)
-        net1 = self.deserialize('json', res)
-        res = self._create_port('json', net1['network']['id'])
-        port = self.deserialize('json', res)
-        self.fc._fake_lswitch_lport_dict.clear()
-        self.fc._fake_lswitch_lportstatus_dict.clear()
-        req = self.new_show_request('ports', port['port']['id'],
-                                    fields=['id', 'status'])
-        net = self.deserialize('json', req.get_response(self.api))
-        self.assertEqual(net['port']['status'],
-                         constants.PORT_STATUS_ERROR)
-
-    def test_create_port_on_network_not_in_nsx(self):
-        res = self._create_network('json', 'net1', True)
-        net1 = self.deserialize('json', res)
-        self.fc._fake_lswitch_dict.clear()
-        res = self._create_port('json', net1['network']['id'])
-        port = self.deserialize('json', res)
-        self.assertEqual(port['port']['status'], constants.PORT_STATUS_ERROR)
-
-    def test_update_port_not_in_nsx(self):
-        res = self._create_network('json', 'net1', True)
-        net1 = self.deserialize('json', res)
-        res = self._create_port('json', net1['network']['id'])
-        port = self.deserialize('json', res)
-        self.fc._fake_lswitch_lport_dict.clear()
-        data = {'port': {'name': 'error_port'}}
-        req = self.new_update_request('ports', data, port['port']['id'])
-        port = self.deserialize('json', req.get_response(self.api))
-        self.assertEqual(port['port']['status'], constants.PORT_STATUS_ERROR)
-        self.assertEqual(port['port']['name'], 'error_port')
-
-    def test_delete_port_and_network_not_in_nsx(self):
-        res = self._create_network('json', 'net1', True)
-        net1 = self.deserialize('json', res)
-        res = self._create_port('json', net1['network']['id'])
-        port = self.deserialize('json', res)
-        self.fc._fake_lswitch_dict.clear()
-        self.fc._fake_lswitch_lport_dict.clear()
-        req = self.new_delete_request('ports', port['port']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, 204)
-        req = self.new_delete_request('networks', net1['network']['id'])
-        res = req.get_response(self.api)
-        self.assertEqual(res.status_int, 204)
-
-    def test_delete_router_not_in_nsx(self):
-        res = self._create_router('json', 'tenant')
-        router = self.deserialize('json', res)
-        self.fc._fake_lrouter_dict.clear()
-        req = self.new_delete_request('routers', router['router']['id'])
-        res = req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, 204)
-
-    def test_show_router_not_in_nsx(self):
-        res = self._create_router('json', 'tenant')
-        router = self.deserialize('json', res)
-        self.fc._fake_lrouter_dict.clear()
-        req = self.new_show_request('routers', router['router']['id'],
-                                    fields=['id', 'status'])
-        router = self.deserialize('json', req.get_response(self.ext_api))
-        self.assertEqual(router['router']['status'],
-                         constants.NET_STATUS_ERROR)
-
-    def _create_network_and_subnet(self, cidr, external=False):
-        net_res = self._create_network('json', 'ext_net', True)
-        net = self.deserialize('json', net_res)
-        net_id = net['network']['id']
-        if external:
-            self._update('networks', net_id,
-                         {'network': {external_net.EXTERNAL: True}})
-        sub_res = self._create_subnet('json', net_id, cidr)
-        sub = self.deserialize('json', sub_res)
-        return net_id, sub['subnet']['id']
-
-    def test_clear_gateway_nat_rule_not_in_nsx(self):
-        # Create external network and subnet
-        ext_net_id = self._create_network_and_subnet('1.1.1.0/24', True)[0]
-        # Create internal network and subnet
-        int_sub_id = self._create_network_and_subnet('10.0.0.0/24')[1]
-        res = self._create_router('json', 'tenant')
-        router = self.deserialize('json', res)
-        # Add interface to router (needed to generate NAT rule)
-        req = self.new_action_request(
-            'routers',
-            {'subnet_id': int_sub_id},
-            router['router']['id'],
-            "add_router_interface")
-        res = req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, 200)
-        # Set gateway for router
-        req = self.new_update_request(
-            'routers',
-            {'router': {'external_gateway_info':
-                        {'network_id': ext_net_id}}},
-            router['router']['id'])
-        res = req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, 200)
-        # Delete NAT rule from NSX, clear gateway
-        # and verify operation still succeeds
-        self.fc._fake_lrouter_nat_dict.clear()
-        req = self.new_update_request(
-            'routers',
-            {'router': {'external_gateway_info': {}}},
-            router['router']['id'])
-        res = req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, 200)
-
-    def _test_remove_router_interface_nsx_out_of_sync(self, unsync_action):
-        # Create external network and subnet
-        ext_net_id = self._create_network_and_subnet('1.1.1.0/24', True)[0]
-        # Create internal network and subnet
-        int_sub_id = self._create_network_and_subnet('10.0.0.0/24')[1]
-        res = self._create_router('json', 'tenant')
-        router = self.deserialize('json', res)
-        # Set gateway and add interface to router (needed to generate NAT rule)
-        req = self.new_update_request(
-            'routers',
-            {'router': {'external_gateway_info':
-                        {'network_id': ext_net_id}}},
-            router['router']['id'])
-        res = req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, 200)
-        req = self.new_action_request(
-            'routers',
-            {'subnet_id': int_sub_id},
-            router['router']['id'],
-            "add_router_interface")
-        res = req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, 200)
-        unsync_action()
-        req = self.new_action_request(
-            'routers',
-            {'subnet_id': int_sub_id},
-            router['router']['id'],
-            "remove_router_interface")
-        res = req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, 200)
-
-    def test_remove_router_interface_not_in_nsx(self):
-
-        def unsync_action():
-            self.fc._fake_lrouter_dict.clear()
-            self.fc._fake_lrouter_nat_dict.clear()
-
-        self._test_remove_router_interface_nsx_out_of_sync(unsync_action)
-
-    def test_remove_router_interface_nat_rule_not_in_nsx(self):
-        self._test_remove_router_interface_nsx_out_of_sync(
-            self.fc._fake_lrouter_nat_dict.clear)
-
-    def test_remove_router_interface_duplicate_nat_rules_in_nsx(self):
-
-        def unsync_action():
-            # duplicate every entry in the nat rule dict
-            for (_rule_id, rule) in self.fc._fake_lrouter_nat_dict.items():
-                self.fc._fake_lrouter_nat_dict[uuid.uuid4()] = rule
-
-        self._test_remove_router_interface_nsx_out_of_sync(unsync_action)
-
-    def test_update_router_not_in_nsx(self):
-        res = self._create_router('json', 'tenant')
-        router = self.deserialize('json', res)
-        self.fc._fake_lrouter_dict.clear()
-        req = self.new_update_request(
-            'routers',
-            {'router': {'name': 'goo'}},
-            router['router']['id'])
-        res = req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, 500)
-        req = self.new_show_request('routers', router['router']['id'])
-        router = self.deserialize('json', req.get_response(self.ext_api))
-        self.assertEqual(router['router']['status'],
-                         constants.NET_STATUS_ERROR)
-
-    def test_delete_security_group_not_in_nsx(self):
-        res = self._create_security_group('json', 'name', 'desc')
-        sec_group = self.deserialize('json', res)
-        self.fc._fake_securityprofile_dict.clear()
-        req = self.new_delete_request(
-            'security-groups',
-            sec_group['security_group']['id'])
-        res = req.get_response(self.ext_api)
-        self.assertEqual(res.status_int, 204)
diff --git a/neutron/tests/unit/vmware/test_nsx_sync.py b/neutron/tests/unit/vmware/test_nsx_sync.py
deleted file mode 100644 (file)
index 5388f13..0000000
+++ /dev/null
@@ -1,722 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import contextlib
-import time
-
-import mock
-from oslo_config import cfg
-from oslo_serialization import jsonutils
-
-from neutron.api.v2 import attributes as attr
-from neutron.common import constants
-from neutron.common import exceptions as n_exc
-from neutron import context
-from neutron.extensions import l3
-from neutron.openstack.common import log
-from neutron.plugins.vmware.api_client import client
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.api_client import version
-from neutron.plugins.vmware.common import sync
-from neutron.plugins.vmware.dbexts import db
-from neutron.plugins.vmware import nsx_cluster as cluster
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware import plugin
-from neutron.tests import base
-from neutron.tests.unit import test_api_v2
-from neutron.tests.unit import testlib_api
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware.apiclient import fake
-
-LOG = log.getLogger(__name__)
-
-_uuid = test_api_v2._uuid
-LSWITCHES = [{'uuid': _uuid(), 'name': 'ls-1'},
-             {'uuid': _uuid(), 'name': 'ls-2'}]
-LSWITCHPORTS = [{'uuid': _uuid(), 'name': 'lp-1'},
-                {'uuid': _uuid(), 'name': 'lp-2'}]
-LROUTERS = [{'uuid': _uuid(), 'name': 'lr-1'},
-            {'uuid': _uuid(), 'name': 'lr-2'}]
-
-
-class CacheTestCase(base.BaseTestCase):
-    """Test suite providing coverage for the Cache class."""
-
-    def setUp(self):
-        self.nsx_cache = sync.NsxCache()
-        for lswitch in LSWITCHES:
-            self.nsx_cache._uuid_dict_mappings[lswitch['uuid']] = (
-                self.nsx_cache._lswitches)
-            self.nsx_cache._lswitches[lswitch['uuid']] = (
-                {'data': lswitch,
-                 'hash': hash(jsonutils.dumps(lswitch))})
-        for lswitchport in LSWITCHPORTS:
-            self.nsx_cache._uuid_dict_mappings[lswitchport['uuid']] = (
-                self.nsx_cache._lswitchports)
-            self.nsx_cache._lswitchports[lswitchport['uuid']] = (
-                {'data': lswitchport,
-                 'hash': hash(jsonutils.dumps(lswitchport))})
-        for lrouter in LROUTERS:
-            self.nsx_cache._uuid_dict_mappings[lrouter['uuid']] = (
-                self.nsx_cache._lrouters)
-            self.nsx_cache._lrouters[lrouter['uuid']] = (
-                {'data': lrouter,
-                 'hash': hash(jsonutils.dumps(lrouter))})
-        super(CacheTestCase, self).setUp()
-
-    def test_get_lswitches(self):
-        ls_uuids = self.nsx_cache.get_lswitches()
-        self.assertEqual(set(ls_uuids),
-                         set([ls['uuid'] for ls in LSWITCHES]))
-
-    def test_get_lswitchports(self):
-        lp_uuids = self.nsx_cache.get_lswitchports()
-        self.assertEqual(set(lp_uuids),
-                         set([lp['uuid'] for lp in LSWITCHPORTS]))
-
-    def test_get_lrouters(self):
-        lr_uuids = self.nsx_cache.get_lrouters()
-        self.assertEqual(set(lr_uuids),
-                         set([lr['uuid'] for lr in LROUTERS]))
-
-    def test_get_lswitches_changed_only(self):
-        ls_uuids = self.nsx_cache.get_lswitches(changed_only=True)
-        self.assertEqual(0, len(ls_uuids))
-
-    def test_get_lswitchports_changed_only(self):
-        lp_uuids = self.nsx_cache.get_lswitchports(changed_only=True)
-        self.assertEqual(0, len(lp_uuids))
-
-    def test_get_lrouters_changed_only(self):
-        lr_uuids = self.nsx_cache.get_lrouters(changed_only=True)
-        self.assertEqual(0, len(lr_uuids))
-
-    def _verify_update(self, new_resource, changed=True, hit=True):
-        cached_resource = self.nsx_cache[new_resource['uuid']]
-        self.assertEqual(new_resource, cached_resource['data'])
-        self.assertEqual(hit, cached_resource.get('hit', False))
-        self.assertEqual(changed,
-                         cached_resource.get('changed', False))
-
-    def test_update_lswitch_new_item(self):
-        new_switch_uuid = _uuid()
-        new_switch = {'uuid': new_switch_uuid, 'name': 'new_switch'}
-        self.nsx_cache.update_lswitch(new_switch)
-        self.assertIn(new_switch_uuid, self.nsx_cache._lswitches.keys())
-        self._verify_update(new_switch)
-
-    def test_update_lswitch_existing_item(self):
-        switch = LSWITCHES[0]
-        switch['name'] = 'new_name'
-        self.nsx_cache.update_lswitch(switch)
-        self.assertIn(switch['uuid'], self.nsx_cache._lswitches.keys())
-        self._verify_update(switch)
-
-    def test_update_lswitchport_new_item(self):
-        new_switchport_uuid = _uuid()
-        new_switchport = {'uuid': new_switchport_uuid,
-                          'name': 'new_switchport'}
-        self.nsx_cache.update_lswitchport(new_switchport)
-        self.assertIn(new_switchport_uuid,
-                      self.nsx_cache._lswitchports.keys())
-        self._verify_update(new_switchport)
-
-    def test_update_lswitchport_existing_item(self):
-        switchport = LSWITCHPORTS[0]
-        switchport['name'] = 'new_name'
-        self.nsx_cache.update_lswitchport(switchport)
-        self.assertIn(switchport['uuid'],
-                      self.nsx_cache._lswitchports.keys())
-        self._verify_update(switchport)
-
-    def test_update_lrouter_new_item(self):
-        new_router_uuid = _uuid()
-        new_router = {'uuid': new_router_uuid,
-                      'name': 'new_router'}
-        self.nsx_cache.update_lrouter(new_router)
-        self.assertIn(new_router_uuid,
-                      self.nsx_cache._lrouters.keys())
-        self._verify_update(new_router)
-
-    def test_update_lrouter_existing_item(self):
-        router = LROUTERS[0]
-        router['name'] = 'new_name'
-        self.nsx_cache.update_lrouter(router)
-        self.assertIn(router['uuid'],
-                      self.nsx_cache._lrouters.keys())
-        self._verify_update(router)
-
-    def test_process_updates_initial(self):
-        # Clear cache content to simulate first-time filling
-        self.nsx_cache._lswitches.clear()
-        self.nsx_cache._lswitchports.clear()
-        self.nsx_cache._lrouters.clear()
-        self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS)
-        for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
-            self._verify_update(resource)
-
-    def test_process_updates_no_change(self):
-        self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS)
-        for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
-            self._verify_update(resource, changed=False)
-
-    def test_process_updates_with_changes(self):
-        LSWITCHES[0]['name'] = 'altered'
-        self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS)
-        for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
-            changed = (True if resource['uuid'] == LSWITCHES[0]['uuid']
-                       else False)
-            self._verify_update(resource, changed=changed)
-
-    def _test_process_updates_with_removals(self):
-        lswitches = LSWITCHES[:]
-        lswitch = lswitches.pop()
-        self.nsx_cache.process_updates(lswitches, LROUTERS, LSWITCHPORTS)
-        for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
-            hit = (False if resource['uuid'] == lswitch['uuid']
-                   else True)
-            self._verify_update(resource, changed=False, hit=hit)
-        return (lswitch, lswitches)
-
-    def test_process_updates_with_removals(self):
-        self._test_process_updates_with_removals()
-
-    def test_process_updates_cleanup_after_delete(self):
-        deleted_lswitch, lswitches = self._test_process_updates_with_removals()
-        self.nsx_cache.process_deletes()
-        self.nsx_cache.process_updates(lswitches, LROUTERS, LSWITCHPORTS)
-        self.assertNotIn(deleted_lswitch['uuid'], self.nsx_cache._lswitches)
-
-    def test_update_resource_does_not_cleanup_deleted_resources(self):
-        deleted_lswitch, lswitches = self._test_process_updates_with_removals()
-        self.nsx_cache.process_deletes()
-        self.nsx_cache.update_lswitch(deleted_lswitch)
-        self.assertIn(deleted_lswitch['uuid'], self.nsx_cache._lswitches)
-
-    def _verify_delete(self, resource, deleted=True, hit=True):
-        cached_resource = self.nsx_cache[resource['uuid']]
-        data_field = 'data_bk' if deleted else 'data'
-        self.assertEqual(resource, cached_resource[data_field])
-        self.assertEqual(hit, cached_resource.get('hit', False))
-        self.assertEqual(deleted,
-                         cached_resource.get('changed', False))
-
-    def _set_hit(self, resources, uuid_to_delete=None):
-        for resource in resources:
-            if resource['data']['uuid'] != uuid_to_delete:
-                resource['hit'] = True
-
-    def test_process_deletes_no_change(self):
-        # Mark all resources as hit
-        self._set_hit(self.nsx_cache._lswitches.values())
-        self._set_hit(self.nsx_cache._lswitchports.values())
-        self._set_hit(self.nsx_cache._lrouters.values())
-        self.nsx_cache.process_deletes()
-        for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
-            self._verify_delete(resource, hit=False, deleted=False)
-
-    def test_process_deletes_with_removals(self):
-        # Mark all resources but one as hit
-        uuid_to_delete = LSWITCHPORTS[0]['uuid']
-        self._set_hit(self.nsx_cache._lswitches.values(),
-                      uuid_to_delete)
-        self._set_hit(self.nsx_cache._lswitchports.values(),
-                      uuid_to_delete)
-        self._set_hit(self.nsx_cache._lrouters.values(),
-                      uuid_to_delete)
-        self.nsx_cache.process_deletes()
-        for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
-            deleted = resource['uuid'] == uuid_to_delete
-            self._verify_delete(resource, hit=False, deleted=deleted)
-
-
-class SyncLoopingCallTestCase(base.BaseTestCase):
-
-    def test_looping_calls(self):
-        # Avoid runs of the synchronization process - just start
-        # the looping call
-        with mock.patch.object(
-            sync.NsxSynchronizer, '_synchronize_state', return_value=0.01):
-            synchronizer = sync.NsxSynchronizer(mock.ANY, mock.ANY,
-                                                100, 0, 0)
-            time.sleep(0.03)
-            # stop looping call before asserting
-            synchronizer._sync_looping_call.stop()
-            # Just verify the looping call has been called, trying
-            # to assess the exact number of calls would be unreliable
-            self.assertTrue(synchronizer._synchronize_state.call_count)
-
-
-class SyncTestCase(testlib_api.SqlTestCase):
-
-    def setUp(self):
-        # mock api client
-        self.fc = fake.FakeClient(vmware.STUBS_PATH)
-        mock_api = mock.patch(vmware.NSXAPI_NAME, autospec=True)
-        # Avoid runs of the synchronizer looping call
-        # These unit tests will excplicitly invoke synchronization
-        patch_sync = mock.patch.object(sync, '_start_loopingcall')
-        self.mock_api = mock_api.start()
-        patch_sync.start()
-        self.mock_api.return_value.login.return_value = "the_cookie"
-        # Emulate tests against NSX 3.x
-        self.mock_api.return_value.get_version.return_value = (
-            version.Version("3.1"))
-
-        self.mock_api.return_value.request.side_effect = self.fc.fake_request
-        self.fake_cluster = cluster.NSXCluster(
-            name='fake-cluster', nsx_controllers=['1.1.1.1:999'],
-            default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar')
-        self.fake_cluster.api_client = client.NsxApiClient(
-            ('1.1.1.1', '999', True),
-            self.fake_cluster.nsx_user, self.fake_cluster.nsx_password,
-            http_timeout=self.fake_cluster.http_timeout,
-            retries=self.fake_cluster.retries,
-            redirects=self.fake_cluster.redirects)
-        # Instantiate Neutron plugin
-        # and setup needed config variables
-        args = ['--config-file', vmware.get_fake_conf('neutron.conf.test'),
-                '--config-file', vmware.get_fake_conf('nsx.ini.test')]
-        self.config_parse(args=args)
-        cfg.CONF.set_override('allow_overlapping_ips', True)
-        dhcp_periodic_p = mock.patch('neutron.db.agentschedulers_db.'
-                                     'DhcpAgentSchedulerDbMixin.'
-                                     'start_periodic_dhcp_agent_status_check')
-        dhcp_periodic_p.start()
-        self._plugin = plugin.NsxPlugin()
-        # Mock neutron manager plugin load functions to speed up tests
-        mock_nm_get_plugin = mock.patch('neutron.manager.NeutronManager.'
-                                        'get_plugin')
-        mock_nm_get_service_plugins = mock.patch(
-            'neutron.manager.NeutronManager.get_service_plugins')
-        self.mock_nm_get_plugin = mock_nm_get_plugin.start()
-        self.mock_nm_get_plugin.return_value = self._plugin
-        mock_nm_get_service_plugins.start()
-        super(SyncTestCase, self).setUp()
-        self.addCleanup(self.fc.reset_all)
-
-    @contextlib.contextmanager
-    def _populate_data(self, ctx, net_size=2, port_size=2, router_size=2):
-
-        def network(idx):
-            return {'network': {'name': 'net-%s' % idx,
-                                'admin_state_up': True,
-                                'shared': False,
-                                'port_security_enabled': True,
-                                'tenant_id': 'foo'}}
-
-        def subnet(idx, net_id):
-            return {'subnet':
-                    {'cidr': '10.10.%s.0/24' % idx,
-                     'name': 'sub-%s' % idx,
-                     'gateway_ip': attr.ATTR_NOT_SPECIFIED,
-                     'allocation_pools': attr.ATTR_NOT_SPECIFIED,
-                     'ip_version': 4,
-                     'dns_nameservers': attr.ATTR_NOT_SPECIFIED,
-                     'host_routes': attr.ATTR_NOT_SPECIFIED,
-                     'enable_dhcp': True,
-                     'network_id': net_id,
-                     'tenant_id': 'foo'}}
-
-        def port(idx, net_id):
-            return {'port': {'network_id': net_id,
-                             'name': 'port-%s' % idx,
-                             'admin_state_up': True,
-                             'device_id': 'miao',
-                             'device_owner': 'bau',
-                             'fixed_ips': attr.ATTR_NOT_SPECIFIED,
-                             'mac_address': attr.ATTR_NOT_SPECIFIED,
-                             'tenant_id': 'foo'}}
-
-        def router(idx):
-            # Use random uuids as names
-            return {'router': {'name': 'rtr-%s' % idx,
-                               'admin_state_up': True,
-                               'tenant_id': 'foo'}}
-
-        networks = []
-        ports = []
-        routers = []
-        for i in range(net_size):
-            net = self._plugin.create_network(ctx, network(i))
-            networks.append(net)
-            self._plugin.create_subnet(ctx, subnet(i, net['id']))
-            for j in range(port_size):
-                ports.append(self._plugin.create_port(
-                    ctx, port("%s-%s" % (i, j), net['id'])))
-        for i in range(router_size):
-            routers.append(self._plugin.create_router(ctx, router(i)))
-        # Do not return anything as the user does need the actual
-        # data created
-        yield
-
-        # Remove everything
-        for router in routers:
-            self._plugin.delete_router(ctx, router['id'])
-        for port in ports:
-            self._plugin.delete_port(ctx, port['id'])
-        # This will remove networks and subnets
-        for network in networks:
-            self._plugin.delete_network(ctx, network['id'])
-
-    def _get_tag_dict(self, tags):
-        return dict((tag['scope'], tag['tag']) for tag in tags)
-
-    def _test_sync(self, exp_net_status,
-                   exp_port_status, exp_router_status,
-                   action_callback=None, sp=None):
-        ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
-        neutron_net_id = self._get_tag_dict(
-            self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
-        lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0]
-        neutron_port_id = self._get_tag_dict(
-            self.fc._fake_lswitch_lport_dict[lp_uuid]['tags'])['q_port_id']
-        lr_uuid = self.fc._fake_lrouter_dict.keys()[0]
-        neutron_rtr_id = self._get_tag_dict(
-            self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
-        if action_callback:
-            action_callback(ls_uuid, lp_uuid, lr_uuid)
-        # Make chunk big enough to read everything
-        if not sp:
-            sp = sync.SyncParameters(100)
-        self._plugin._synchronizer._synchronize_state(sp)
-        # Verify element is in expected status
-        # TODO(salv-orlando): Verify status for all elements
-        ctx = context.get_admin_context()
-        neutron_net = self._plugin.get_network(ctx, neutron_net_id)
-        neutron_port = self._plugin.get_port(ctx, neutron_port_id)
-        neutron_rtr = self._plugin.get_router(ctx, neutron_rtr_id)
-        self.assertEqual(exp_net_status, neutron_net['status'])
-        self.assertEqual(exp_port_status, neutron_port['status'])
-        self.assertEqual(exp_router_status, neutron_rtr['status'])
-
-    def _action_callback_status_down(self, ls_uuid, lp_uuid, lr_uuid):
-        self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
-        self.fc._fake_lswitch_lport_dict[lp_uuid]['status'] = 'false'
-        self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
-
-    def test_initial_sync(self):
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            self._test_sync(
-                constants.NET_STATUS_ACTIVE,
-                constants.PORT_STATUS_ACTIVE,
-                constants.NET_STATUS_ACTIVE)
-
-    def test_initial_sync_with_resources_down(self):
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            self._test_sync(
-                constants.NET_STATUS_DOWN, constants.PORT_STATUS_DOWN,
-                constants.NET_STATUS_DOWN, self._action_callback_status_down)
-
-    def test_resync_with_resources_down(self):
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            sp = sync.SyncParameters(100)
-            self._plugin._synchronizer._synchronize_state(sp)
-            # Ensure the synchronizer performs a resync
-            sp.init_sync_performed = True
-            self._test_sync(
-                constants.NET_STATUS_DOWN, constants.PORT_STATUS_DOWN,
-                constants.NET_STATUS_DOWN, self._action_callback_status_down,
-                sp=sp)
-
-    def _action_callback_del_resource(self, ls_uuid, lp_uuid, lr_uuid):
-        del self.fc._fake_lswitch_dict[ls_uuid]
-        del self.fc._fake_lswitch_lport_dict[lp_uuid]
-        del self.fc._fake_lrouter_dict[lr_uuid]
-
-    def test_initial_sync_with_resources_removed(self):
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            self._test_sync(
-                constants.NET_STATUS_ERROR, constants.PORT_STATUS_ERROR,
-                constants.NET_STATUS_ERROR, self._action_callback_del_resource)
-
-    def test_resync_with_resources_removed(self):
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            sp = sync.SyncParameters(100)
-            self._plugin._synchronizer._synchronize_state(sp)
-            # Ensure the synchronizer performs a resync
-            sp.init_sync_performed = True
-            self._test_sync(
-                constants.NET_STATUS_ERROR, constants.PORT_STATUS_ERROR,
-                constants.NET_STATUS_ERROR, self._action_callback_del_resource,
-                sp=sp)
-
-    def _test_sync_with_chunk_larger_maxpagesize(
-        self, net_size, port_size, router_size, chunk_size, exp_calls):
-        ctx = context.get_admin_context()
-        real_func = nsxlib.get_single_query_page
-        sp = sync.SyncParameters(chunk_size)
-        with self._populate_data(ctx, net_size=net_size,
-                                 port_size=port_size,
-                                 router_size=router_size):
-            with mock.patch.object(sync, 'MAX_PAGE_SIZE', 15):
-                # The following mock is just for counting calls,
-                # but we will still run the actual function
-                with mock.patch.object(
-                    nsxlib, 'get_single_query_page',
-                    side_effect=real_func) as mock_get_page:
-                    self._test_sync(
-                        constants.NET_STATUS_ACTIVE,
-                        constants.PORT_STATUS_ACTIVE,
-                        constants.NET_STATUS_ACTIVE,
-                        sp=sp)
-            # As each resource type does not exceed the maximum page size,
-            # the method should be called once for each resource type
-            self.assertEqual(exp_calls, mock_get_page.call_count)
-
-    def test_sync_chunk_larger_maxpagesize_no_multiple_requests(self):
-        # total resource size = 20
-        # total size for each resource does not exceed max page size (15)
-        self._test_sync_with_chunk_larger_maxpagesize(
-            net_size=5, port_size=2, router_size=5,
-            chunk_size=20, exp_calls=3)
-
-    def test_sync_chunk_larger_maxpagesize_triggers_multiple_requests(self):
-        # total resource size = 48
-        # total size for each resource does exceed max page size (15)
-        self._test_sync_with_chunk_larger_maxpagesize(
-            net_size=16, port_size=1, router_size=16,
-            chunk_size=48, exp_calls=6)
-
-    def test_sync_multi_chunk(self):
-        # The fake NSX API client cannot be used for this test
-        ctx = context.get_admin_context()
-        # Generate 4 networks, 1 port per network, and 4 routers
-        with self._populate_data(ctx, net_size=4, port_size=1, router_size=4):
-            fake_lswitches = jsonutils.loads(
-                self.fc.handle_get('/ws.v1/lswitch'))['results']
-            fake_lrouters = jsonutils.loads(
-                self.fc.handle_get('/ws.v1/lrouter'))['results']
-            fake_lswitchports = jsonutils.loads(
-                self.fc.handle_get('/ws.v1/lswitch/*/lport'))['results']
-            return_values = [
-                # Chunk 0 - lswitches
-                (fake_lswitches, None, 4),
-                # Chunk 0 - lrouters
-                (fake_lrouters[:2], 'xxx', 4),
-                # Chunk 0 - lports (size only)
-                ([], 'start', 4),
-                # Chunk 1 - lrouters (2 more) (lswitches are skipped)
-                (fake_lrouters[2:], None, None),
-                # Chunk 1 - lports
-                (fake_lswitchports, None, 4)]
-
-            def fake_fetch_data(*args, **kwargs):
-                return return_values.pop(0)
-
-            # 2 Chunks, with 6 resources each.
-            # 1st chunk lswitches and lrouters
-            # 2nd chunk lrouters and lports
-            # Mock _fetch_data
-            with mock.patch.object(
-                self._plugin._synchronizer, '_fetch_data',
-                side_effect=fake_fetch_data):
-                sp = sync.SyncParameters(6)
-
-                def do_chunk(chunk_idx, ls_cursor, lr_cursor, lp_cursor):
-                    self._plugin._synchronizer._synchronize_state(sp)
-                    self.assertEqual(chunk_idx, sp.current_chunk)
-                    self.assertEqual(ls_cursor, sp.ls_cursor)
-                    self.assertEqual(lr_cursor, sp.lr_cursor)
-                    self.assertEqual(lp_cursor, sp.lp_cursor)
-
-                # check 1st chunk
-                do_chunk(1, None, 'xxx', 'start')
-                # check 2nd chunk
-                do_chunk(0, None, None, None)
-                # Chunk size should have stayed the same
-                self.assertEqual(sp.chunk_size, 6)
-
-    def test_synchronize_network(self):
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            # Put a network down to verify synchronization
-            ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
-            q_net_id = self._get_tag_dict(
-                self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
-            self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
-            q_net_data = self._plugin._get_network(ctx, q_net_id)
-            self._plugin._synchronizer.synchronize_network(ctx, q_net_data)
-            # Reload from db
-            q_nets = self._plugin.get_networks(ctx)
-            for q_net in q_nets:
-                if q_net['id'] == q_net_id:
-                    exp_status = constants.NET_STATUS_DOWN
-                else:
-                    exp_status = constants.NET_STATUS_ACTIVE
-                self.assertEqual(exp_status, q_net['status'])
-
-    def test_synchronize_network_not_found_in_db_no_raise(self):
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            # Put a network down to verify synchronization
-            ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
-            q_net_id = self._get_tag_dict(
-                self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
-            self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
-            q_net_data = self._plugin._get_network(ctx, q_net_id)
-            with mock.patch.object(self._plugin,
-                                   '_get_network') as _get_network:
-                _get_network.side_effect = n_exc.NetworkNotFound(
-                    net_id=q_net_data['id'])
-                self._plugin._synchronizer.synchronize_network(ctx, q_net_data)
-
-    def test_synchronize_network_on_get(self):
-        cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC')
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            # Put a network down to verify punctual synchronization
-            ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
-            q_net_id = self._get_tag_dict(
-                self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
-            self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
-            q_net_data = self._plugin.get_network(ctx, q_net_id)
-            self.assertEqual(constants.NET_STATUS_DOWN, q_net_data['status'])
-
-    def test_synchronize_port_not_found_in_db_no_raise(self):
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            # Put a port down to verify synchronization
-            lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0]
-            lport = self.fc._fake_lswitch_lport_dict[lp_uuid]
-            q_port_id = self._get_tag_dict(lport['tags'])['q_port_id']
-            lport['status'] = 'true'
-            q_port_data = self._plugin._get_port(ctx, q_port_id)
-            with mock.patch.object(self._plugin,
-                                   '_get_port') as _get_port:
-                _get_port.side_effect = n_exc.PortNotFound(
-                    port_id=q_port_data['id'])
-                self._plugin._synchronizer.synchronize_port(ctx, q_port_data)
-
-    def test_synchronize_port(self):
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            # Put a port down to verify synchronization
-            lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0]
-            lport = self.fc._fake_lswitch_lport_dict[lp_uuid]
-            q_port_id = self._get_tag_dict(lport['tags'])['q_port_id']
-            lport['status'] = 'true'
-            q_port_data = self._plugin._get_port(ctx, q_port_id)
-            self._plugin._synchronizer.synchronize_port(ctx, q_port_data)
-            # Reload from db
-            q_ports = self._plugin.get_ports(ctx)
-            for q_port in q_ports:
-                if q_port['id'] == q_port_id:
-                    exp_status = constants.PORT_STATUS_ACTIVE
-                else:
-                    exp_status = constants.PORT_STATUS_DOWN
-                self.assertEqual(exp_status, q_port['status'])
-
-    def test_synchronize_port_on_get(self):
-        cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC')
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            # Put a port down to verify punctual synchronization
-            lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0]
-            lport = self.fc._fake_lswitch_lport_dict[lp_uuid]
-            q_port_id = self._get_tag_dict(lport['tags'])['q_port_id']
-            lport['status'] = 'false'
-            q_port_data = self._plugin.get_port(ctx, q_port_id)
-            self.assertEqual(constants.PORT_STATUS_DOWN,
-                             q_port_data['status'])
-
-    def test_synchronize_routernot_found_in_db_no_raise(self):
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            # Put a router down to verify synchronization
-            lr_uuid = self.fc._fake_lrouter_dict.keys()[0]
-            q_rtr_id = self._get_tag_dict(
-                self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
-            self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
-            q_rtr_data = self._plugin._get_router(ctx, q_rtr_id)
-            with mock.patch.object(self._plugin,
-                                   '_get_router') as _get_router:
-                _get_router.side_effect = l3.RouterNotFound(
-                    router_id=q_rtr_data['id'])
-                self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data)
-
-    def test_synchronize_router(self):
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            # Put a router down to verify synchronization
-            lr_uuid = self.fc._fake_lrouter_dict.keys()[0]
-            q_rtr_id = self._get_tag_dict(
-                self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
-            self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
-            q_rtr_data = self._plugin._get_router(ctx, q_rtr_id)
-            self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data)
-            # Reload from db
-            q_routers = self._plugin.get_routers(ctx)
-            for q_rtr in q_routers:
-                if q_rtr['id'] == q_rtr_id:
-                    exp_status = constants.NET_STATUS_DOWN
-                else:
-                    exp_status = constants.NET_STATUS_ACTIVE
-                self.assertEqual(exp_status, q_rtr['status'])
-
-    def test_synchronize_router_nsx_mapping_not_found(self):
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            # Put a router down to verify synchronization
-            lr_uuid = self.fc._fake_lrouter_dict.keys()[0]
-            q_rtr_id = self._get_tag_dict(
-                self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
-            self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
-            q_rtr_data = self._plugin._get_router(ctx, q_rtr_id)
-
-            # delete router mapping from db.
-            db.delete_neutron_nsx_router_mapping(ctx.session, q_rtr_id)
-            # pop router from fake nsx client
-            router_data = self.fc._fake_lrouter_dict.pop(lr_uuid)
-
-            self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data)
-            # Reload from db
-            q_routers = self._plugin.get_routers(ctx)
-            for q_rtr in q_routers:
-                if q_rtr['id'] == q_rtr_id:
-                    exp_status = constants.NET_STATUS_ERROR
-                else:
-                    exp_status = constants.NET_STATUS_ACTIVE
-                self.assertEqual(exp_status, q_rtr['status'])
-            # put the router database since we don't handle missing
-            # router data in the fake nsx api_client
-            self.fc._fake_lrouter_dict[lr_uuid] = router_data
-
-    def test_synchronize_router_on_get(self):
-        cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC')
-        ctx = context.get_admin_context()
-        with self._populate_data(ctx):
-            # Put a router down to verify punctual synchronization
-            lr_uuid = self.fc._fake_lrouter_dict.keys()[0]
-            q_rtr_id = self._get_tag_dict(
-                self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
-            self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
-            q_rtr_data = self._plugin.get_router(ctx, q_rtr_id)
-            self.assertEqual(constants.NET_STATUS_DOWN, q_rtr_data['status'])
-
-    def test_sync_nsx_failure_backoff(self):
-        self.mock_api.return_value.request.side_effect = api_exc.RequestTimeout
-        # chunk size won't matter here
-        sp = sync.SyncParameters(999)
-        for i in range(10):
-            self.assertEqual(
-                min(64, 2 ** i),
-                self._plugin._synchronizer._synchronize_state(sp))
diff --git a/neutron/tests/unit/vmware/test_nsx_utils.py b/neutron/tests/unit/vmware/test_nsx_utils.py
deleted file mode 100644 (file)
index b75bb23..0000000
+++ /dev/null
@@ -1,379 +0,0 @@
-# Copyright (c) 2013 VMware.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-from neutron.db import api as db_api
-from neutron.extensions import multiprovidernet as mpnet
-from neutron.extensions import providernet as pnet
-from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import nsx_utils
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware.dbexts import nsx_models
-from neutron.plugins.vmware import nsxlib
-from neutron.tests import base
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware.nsxlib import base as nsx_base
-
-
-class NsxUtilsTestCase(base.BaseTestCase):
-
-    def _mock_port_mapping_db_calls(self, ret_value):
-        # Mock relevant db calls
-        # This will allow for avoiding setting up the plugin
-        # for creating db entries
-        mock.patch(vmware.nsx_method('get_nsx_switch_and_port_id',
-                                     module_name='dbexts.db'),
-                   return_value=ret_value).start()
-        mock.patch(vmware.nsx_method('add_neutron_nsx_port_mapping',
-                                     module_name='dbexts.db')).start()
-        mock.patch(vmware.nsx_method('delete_neutron_nsx_port_mapping',
-                                     module_name='dbexts.db')).start()
-
-    def _mock_network_mapping_db_calls(self, ret_value):
-        # Mock relevant db calls
-        # This will allow for avoiding setting up the plugin
-        # for creating db entries
-        mock.patch(vmware.nsx_method('get_nsx_switch_ids',
-                                     module_name='dbexts.db'),
-                   return_value=ret_value).start()
-        mock.patch(vmware.nsx_method('add_neutron_nsx_network_mapping',
-                                     module_name='dbexts.db')).start()
-
-    def _mock_router_mapping_db_calls(self, ret_value):
-        # Mock relevant db calls
-        # This will allow for avoiding setting up the plugin
-        # for creating db entries
-        mock.patch(vmware.nsx_method('get_nsx_router_id',
-                                     module_name='dbexts.db'),
-                   return_value=ret_value).start()
-        mock.patch(vmware.nsx_method('add_neutron_nsx_router_mapping',
-                                     module_name='dbexts.db')).start()
-
-    def _verify_get_nsx_switch_and_port_id(self, exp_ls_uuid, exp_lp_uuid):
-        # The nsxlib and db calls are mocked, therefore the cluster
-        # and the neutron_port_id parameters can be set to None
-        ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id(
-            db_api.get_session(), None, None)
-        self.assertEqual(exp_ls_uuid, ls_uuid)
-        self.assertEqual(exp_lp_uuid, lp_uuid)
-
-    def _verify_get_nsx_switch_ids(self, exp_ls_uuids):
-        # The nsxlib and db calls are mocked, therefore the cluster
-        # and the neutron_router_id parameters can be set to None
-        ls_uuids = nsx_utils.get_nsx_switch_ids(
-            db_api.get_session(), None, None)
-        for ls_uuid in ls_uuids or []:
-            self.assertIn(ls_uuid, exp_ls_uuids)
-            exp_ls_uuids.remove(ls_uuid)
-        self.assertFalse(exp_ls_uuids)
-
-    def _verify_get_nsx_router_id(self, exp_lr_uuid):
-        # The nsxlib and db calls are  mocked, therefore the cluster
-        # and the neutron_router_id parameters can be set to None
-        lr_uuid = nsx_utils.get_nsx_router_id(db_api.get_session(), None, None)
-        self.assertEqual(exp_lr_uuid, lr_uuid)
-
-    def test_get_nsx_switch_and_port_id_from_db_mappings(self):
-        # This test is representative of the 'standard' case in which both the
-        # switch and the port mappings were stored in the neutron db
-        exp_ls_uuid = uuidutils.generate_uuid()
-        exp_lp_uuid = uuidutils.generate_uuid()
-        ret_value = exp_ls_uuid, exp_lp_uuid
-        self._mock_port_mapping_db_calls(ret_value)
-        self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid)
-
-    def test_get_nsx_switch_and_port_id_only_port_db_mapping(self):
-        # This test is representative of the case in which a port with a nsx
-        # db mapping in the havana db was upgraded to icehouse
-        exp_ls_uuid = uuidutils.generate_uuid()
-        exp_lp_uuid = uuidutils.generate_uuid()
-        ret_value = None, exp_lp_uuid
-        self._mock_port_mapping_db_calls(ret_value)
-        with mock.patch(vmware.nsx_method('query_lswitch_lports',
-                                          module_name='nsxlib.switch'),
-                        return_value=[{'uuid': exp_lp_uuid,
-                                       '_relations': {
-                                           'LogicalSwitchConfig': {
-                                               'uuid': exp_ls_uuid}
-                                       }}]):
-            self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid)
-
-    def test_get_nsx_switch_and_port_id_no_db_mapping(self):
-        # This test is representative of the case where db mappings where not
-        # found for a given port identifier
-        exp_ls_uuid = uuidutils.generate_uuid()
-        exp_lp_uuid = uuidutils.generate_uuid()
-        ret_value = None, None
-        self._mock_port_mapping_db_calls(ret_value)
-        with mock.patch(vmware.nsx_method('query_lswitch_lports',
-                                          module_name='nsxlib.switch'),
-                        return_value=[{'uuid': exp_lp_uuid,
-                                       '_relations': {
-                                           'LogicalSwitchConfig': {
-                                               'uuid': exp_ls_uuid}
-                                       }}]):
-            self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid)
-
-    def test_get_nsx_switch_and_port_id_no_mappings_returns_none(self):
-        # This test verifies that the function return (None, None) if the
-        # mappings are not found both in the db and the backend
-        ret_value = None, None
-        self._mock_port_mapping_db_calls(ret_value)
-        with mock.patch(vmware.nsx_method('query_lswitch_lports',
-                                          module_name='nsxlib.switch'),
-                        return_value=[]):
-            self._verify_get_nsx_switch_and_port_id(None, None)
-
-    def test_get_nsx_switch_ids_from_db_mappings(self):
-        # This test is representative of the 'standard' case in which the
-        # lswitch mappings were stored in the neutron db
-        exp_ls_uuids = [uuidutils.generate_uuid()]
-        self._mock_network_mapping_db_calls(exp_ls_uuids)
-        self._verify_get_nsx_switch_ids(exp_ls_uuids)
-
-    def test_get_nsx_switch_ids_no_db_mapping(self):
-        # This test is representative of the case where db mappings where not
-        # found for a given network identifier
-        exp_ls_uuids = [uuidutils.generate_uuid()]
-        self._mock_network_mapping_db_calls(None)
-        with mock.patch(vmware.nsx_method('get_lswitches',
-                                          module_name='nsxlib.switch'),
-                        return_value=[{'uuid': uuid}
-                                      for uuid in exp_ls_uuids]):
-            self._verify_get_nsx_switch_ids(exp_ls_uuids)
-
-    def test_get_nsx_switch_ids_no_mapping_returns_None(self):
-        # This test verifies that the function returns None if the mappings
-        # are not found both in the db and in the backend
-        self._mock_network_mapping_db_calls(None)
-        with mock.patch(vmware.nsx_method('get_lswitches',
-                                          module_name='nsxlib.switch'),
-                        return_value=[]):
-            self._verify_get_nsx_switch_ids(None)
-
-    def test_get_nsx_router_id_from_db_mappings(self):
-        # This test is representative of the 'standard' case in which the
-        # router mapping was stored in the neutron db
-        exp_lr_uuid = uuidutils.generate_uuid()
-        self._mock_router_mapping_db_calls(exp_lr_uuid)
-        self._verify_get_nsx_router_id(exp_lr_uuid)
-
-    def test_get_nsx_router_id_no_db_mapping(self):
-        # This test is representative of the case where db mappings where not
-        # found for a given port identifier
-        exp_lr_uuid = uuidutils.generate_uuid()
-        self._mock_router_mapping_db_calls(None)
-        with mock.patch(vmware.nsx_method('query_lrouters',
-                                          module_name='nsxlib.router'),
-                        return_value=[{'uuid': exp_lr_uuid}]):
-            self._verify_get_nsx_router_id(exp_lr_uuid)
-
-    def test_get_nsx_router_id_no_mapping_returns_None(self):
-        # This test verifies that the function returns None if the mapping
-        # are not found both in the db and in the backend
-        self._mock_router_mapping_db_calls(None)
-        with mock.patch(vmware.nsx_method('query_lrouters',
-                                          module_name='nsxlib.router'),
-                        return_value=[]):
-            self._verify_get_nsx_router_id(None)
-
-    def test_check_and_truncate_name_with_none(self):
-        name = None
-        result = utils.check_and_truncate(name)
-        self.assertEqual('', result)
-
-    def test_check_and_truncate_name_with_short_name(self):
-        name = 'foo_port_name'
-        result = utils.check_and_truncate(name)
-        self.assertEqual(name, result)
-
-    def test_check_and_truncate_name_long_name(self):
-        name = 'this_is_a_port_whose_name_is_longer_than_40_chars'
-        result = utils.check_and_truncate(name)
-        self.assertEqual(len(result), utils.MAX_DISPLAY_NAME_LEN)
-
-    def test_build_uri_path_plain(self):
-        result = nsxlib._build_uri_path('RESOURCE')
-        self.assertEqual("%s/%s" % (nsxlib.URI_PREFIX, 'RESOURCE'), result)
-
-    def test_build_uri_path_with_field(self):
-        result = nsxlib._build_uri_path('RESOURCE', fields='uuid')
-        expected = "%s/%s?fields=uuid" % (nsxlib.URI_PREFIX, 'RESOURCE')
-        self.assertEqual(expected, result)
-
-    def test_build_uri_path_with_filters(self):
-        filters = {"tag": 'foo', "tag_scope": "scope_foo"}
-        result = nsxlib._build_uri_path('RESOURCE', filters=filters)
-        expected = (
-            "%s/%s?tag=foo&tag_scope=scope_foo" %
-            (nsxlib.URI_PREFIX, 'RESOURCE'))
-        self.assertEqual(expected, result)
-
-    def test_build_uri_path_with_resource_id(self):
-        res = 'RESOURCE'
-        res_id = 'resource_id'
-        result = nsxlib._build_uri_path(res, resource_id=res_id)
-        expected = "%s/%s/%s" % (nsxlib.URI_PREFIX, res, res_id)
-        self.assertEqual(expected, result)
-
-    def test_build_uri_path_with_parent_and_resource_id(self):
-        parent_res = 'RESOURCE_PARENT'
-        child_res = 'RESOURCE_CHILD'
-        res = '%s/%s' % (child_res, parent_res)
-        par_id = 'parent_resource_id'
-        res_id = 'resource_id'
-        result = nsxlib._build_uri_path(
-            res, parent_resource_id=par_id, resource_id=res_id)
-        expected = ("%s/%s/%s/%s/%s" %
-                    (nsxlib.URI_PREFIX, parent_res, par_id, child_res, res_id))
-        self.assertEqual(expected, result)
-
-    def test_build_uri_path_with_attachment(self):
-        parent_res = 'RESOURCE_PARENT'
-        child_res = 'RESOURCE_CHILD'
-        res = '%s/%s' % (child_res, parent_res)
-        par_id = 'parent_resource_id'
-        res_id = 'resource_id'
-        result = nsxlib._build_uri_path(res, parent_resource_id=par_id,
-                                        resource_id=res_id, is_attachment=True)
-        expected = ("%s/%s/%s/%s/%s/%s" %
-                    (nsxlib.URI_PREFIX, parent_res,
-                     par_id, child_res, res_id, 'attachment'))
-        self.assertEqual(expected, result)
-
-    def test_build_uri_path_with_extra_action(self):
-        parent_res = 'RESOURCE_PARENT'
-        child_res = 'RESOURCE_CHILD'
-        res = '%s/%s' % (child_res, parent_res)
-        par_id = 'parent_resource_id'
-        res_id = 'resource_id'
-        result = nsxlib._build_uri_path(res, parent_resource_id=par_id,
-                                        resource_id=res_id, extra_action='doh')
-        expected = ("%s/%s/%s/%s/%s/%s" %
-                    (nsxlib.URI_PREFIX, parent_res,
-                     par_id, child_res, res_id, 'doh'))
-        self.assertEqual(expected, result)
-
-    def _mock_sec_group_mapping_db_calls(self, ret_value):
-        mock.patch(vmware.nsx_method('get_nsx_security_group_id',
-                                     module_name='dbexts.db'),
-                   return_value=ret_value).start()
-        mock.patch(vmware.nsx_method('add_neutron_nsx_security_group_mapping',
-                                     module_name='dbexts.db')).start()
-
-    def _verify_get_nsx_sec_profile_id(self, exp_sec_prof_uuid):
-        # The nsxlib and db calls are  mocked, therefore the cluster
-        # and the neutron_id parameters can be set to None
-        sec_prof_uuid = nsx_utils.get_nsx_security_group_id(
-            db_api.get_session(), None, None)
-        self.assertEqual(exp_sec_prof_uuid, sec_prof_uuid)
-
-    def test_get_nsx_sec_profile_id_from_db_mappings(self):
-        # This test is representative of the 'standard' case in which the
-        # security group mapping was stored in the neutron db
-        exp_sec_prof_uuid = uuidutils.generate_uuid()
-        self._mock_sec_group_mapping_db_calls(exp_sec_prof_uuid)
-        self._verify_get_nsx_sec_profile_id(exp_sec_prof_uuid)
-
-    def test_get_nsx_sec_profile_id_no_db_mapping(self):
-        # This test is representative of the case where db mappings where not
-        # found for a given security profile identifier
-        exp_sec_prof_uuid = uuidutils.generate_uuid()
-        self._mock_sec_group_mapping_db_calls(None)
-        with mock.patch(vmware.nsx_method('query_security_profiles',
-                                          module_name='nsxlib.secgroup'),
-                        return_value=[{'uuid': exp_sec_prof_uuid}]):
-            self._verify_get_nsx_sec_profile_id(exp_sec_prof_uuid)
-
-    def test_get_nsx_sec_profile_id_no_mapping_returns_None(self):
-        # This test verifies that the function returns None if the mapping
-        # are not found both in the db and in the backend
-        self._mock_sec_group_mapping_db_calls(None)
-        with mock.patch(vmware.nsx_method('query_security_profiles',
-                                          module_name='nsxlib.secgroup'),
-                        return_value=[]):
-            self._verify_get_nsx_sec_profile_id(None)
-
-    def test_convert_to_nsx_transport_zones_no_multiprovider(self):
-        test_net = {'id': 'whatever'}
-        results = nsx_utils.convert_to_nsx_transport_zones(
-            'meh_zone_uuid', test_net,
-            default_transport_type='meh_transport_type')
-        self.assertEqual(1, len(results))
-        result = results[0]
-        self.assertEqual('meh_zone_uuid', result['zone_uuid'])
-        self.assertEqual('meh_transport_type', result['transport_type'])
-
-    def _verify_nsx_transport_zones(self, results):
-        self.assertEqual(2, len(results))
-        result_1 = results[0]
-        self.assertEqual(utils.NetworkTypes.BRIDGE,
-                         result_1['transport_type'])
-        self.assertEqual([{'transport': 66}],
-                         result_1['binding_config']['vlan_translation'])
-        self.assertEqual('whatever_tz_1', result_1['zone_uuid'])
-        result_2 = results[1]
-        self.assertEqual(utils.NetworkTypes.STT,
-                         result_2['transport_type'])
-        self.assertNotIn('binding_config', result_2)
-        self.assertEqual('whatever_tz_2', result_2['zone_uuid'])
-
-    def test_convert_to_nsx_transport_zones_with_bindings(self):
-        binding_1 = nsx_models.TzNetworkBinding(
-            'whatever',
-            utils.NetworkTypes.VLAN,
-            'whatever_tz_1',
-            66)
-        binding_2 = nsx_models.TzNetworkBinding(
-            'whatever',
-            utils.NetworkTypes.STT,
-            'whatever_tz_2',
-            None)
-        results = nsx_utils.convert_to_nsx_transport_zones(
-            'meh_zone_uuid', None, bindings=[binding_1, binding_2])
-        self._verify_nsx_transport_zones(results)
-
-    def test_convert_to_nsx_transport_zones_with_multiprovider(self):
-        segments = [
-            {pnet.NETWORK_TYPE: utils.NetworkTypes.VLAN,
-             pnet.PHYSICAL_NETWORK: 'whatever_tz_1',
-             pnet.SEGMENTATION_ID: 66},
-            {pnet.NETWORK_TYPE: utils.NetworkTypes.STT,
-             pnet.PHYSICAL_NETWORK: 'whatever_tz_2'},
-        ]
-        results = nsx_utils.convert_to_nsx_transport_zones(
-            'meh_zone_uuid', {'id': 'whatever_net', mpnet.SEGMENTS: segments})
-        self._verify_nsx_transport_zones(results)
-
-
-class ClusterManagementTestCase(nsx_base.NsxlibTestCase):
-
-    def test_cluster_in_readonly_mode(self):
-        with mock.patch.object(self.fake_cluster.api_client,
-                               'request',
-                               side_effect=api_exc.ReadOnlyMode):
-            self.assertRaises(nsx_exc.MaintenanceInProgress,
-                              nsxlib.do_request, cluster=self.fake_cluster)
-
-    def test_cluster_method_not_implemented(self):
-        self.assertRaises(api_exc.NsxApiException,
-                          nsxlib.do_request,
-                          nsxlib.HTTP_GET,
-                          nsxlib._build_uri_path('MY_FAKE_RESOURCE',
-                                                 resource_id='foo'),
-                          cluster=self.fake_cluster)
diff --git a/neutron/tests/unit/vmware/vshield/__init__.py b/neutron/tests/unit/vmware/vshield/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/neutron/tests/unit/vmware/vshield/fake_vcns.py b/neutron/tests/unit/vmware/vshield/fake_vcns.py
deleted file mode 100644 (file)
index db64e2f..0000000
+++ /dev/null
@@ -1,602 +0,0 @@
-# Copyright 2013 VMware, Inc
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-from oslo_serialization import jsonutils
-
-from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.vshield.common import exceptions
-
-
-class FakeVcns(object):
-
-    errors = {
-        303: exceptions.ResourceRedirect,
-        400: exceptions.RequestBad,
-        403: exceptions.Forbidden,
-        404: exceptions.ResourceNotFound,
-        415: exceptions.MediaTypeUnsupport,
-        503: exceptions.ServiceUnavailable
-    }
-
-    def __init__(self, unique_router_name=True):
-        self._jobs = {}
-        self._job_idx = 0
-        self._edges = {}
-        self._edge_idx = 0
-        self._lswitches = {}
-        self._unique_router_name = unique_router_name
-        self._fake_nsx_api = None
-        self.fake_firewall_dict = {}
-        self.temp_firewall = {
-            "firewallRules": {
-                "firewallRules": []
-            }
-        }
-        self.fake_ipsecvpn_dict = {}
-        self.temp_ipsecvpn = {
-            'featureType': "ipsec_4.0",
-            'enabled': True,
-            'sites': {'sites': []}}
-        self._fake_virtualservers_dict = {}
-        self._fake_pools_dict = {}
-        self._fake_monitors_dict = {}
-        self._fake_app_profiles_dict = {}
-        self._fake_loadbalancer_config = {}
-
-    def set_fake_nsx_api(self, fake_nsx_api):
-        self._fake_nsx_api = fake_nsx_api
-
-    def _validate_edge_name(self, name):
-        for edge_id, edge in self._edges.iteritems():
-            if edge['name'] == name:
-                return False
-        return True
-
-    def deploy_edge(self, request):
-        if (self._unique_router_name and
-            not self._validate_edge_name(request['name'])):
-            header = {
-                'status': 400
-            }
-            msg = ('Edge name should be unique for tenant. Edge %s '
-                   'already exists for default tenant.') % request['name']
-            response = {
-                'details': msg,
-                'errorCode': 10085,
-                'rootCauseString': None,
-                'moduleName': 'vShield Edge',
-                'errorData': None
-            }
-            return (header, jsonutils.dumps(response))
-
-        self._job_idx = self._job_idx + 1
-        job_id = "jobdata-%d" % self._job_idx
-        self._edge_idx = self._edge_idx + 1
-        edge_id = "edge-%d" % self._edge_idx
-        self._jobs[job_id] = edge_id
-        self._edges[edge_id] = {
-            'name': request['name'],
-            'request': request,
-            'nat_rules': None,
-            'nat_rule_id': 0
-        }
-        header = {
-            'status': 200,
-            'location': 'https://host/api/4.0/jobs/%s' % job_id
-        }
-        response = ''
-        return (header, response)
-
-    def get_edge_id(self, job_id):
-        if job_id not in self._jobs:
-            raise Exception(_("Job %s does not nexist") % job_id)
-
-        header = {
-            'status': 200
-        }
-        response = {
-            'edgeId': self._jobs[job_id]
-        }
-        return (header, response)
-
-    def get_edge_deploy_status(self, edge_id):
-        if edge_id not in self._edges:
-            raise Exception(_("Edge %s does not exist") % edge_id)
-        header = {
-            'status': 200,
-        }
-        response = {
-            'systemStatus': 'good'
-        }
-        return (header, response)
-
-    def delete_edge(self, edge_id):
-        if edge_id not in self._edges:
-            raise Exception(_("Edge %s does not exist") % edge_id)
-        del self._edges[edge_id]
-        header = {
-            'status': 200
-        }
-        response = ''
-        return (header, response)
-
-    def update_interface(self, edge_id, vnic):
-        header = {
-            'status': 200
-        }
-        response = ''
-        return (header, response)
-
-    def get_nat_config(self, edge_id):
-        if edge_id not in self._edges:
-            raise Exception(_("Edge %s does not exist") % edge_id)
-        edge = self._edges[edge_id]
-        rules = edge['nat_rules']
-        if rules is None:
-            rules = {
-                'rules': {
-                    'natRulesDtos': []
-                },
-                'version': 1
-            }
-        header = {
-            'status': 200
-        }
-        rules['version'] = 1
-        return (header, rules)
-
-    def update_nat_config(self, edge_id, nat):
-        if edge_id not in self._edges:
-            raise Exception(_("Edge %s does not exist") % edge_id)
-        edge = self._edges[edge_id]
-        max_rule_id = edge['nat_rule_id']
-        rules = copy.deepcopy(nat)
-        for rule in rules['rules']['natRulesDtos']:
-            rule_id = rule.get('ruleId', 0)
-            if rule_id > max_rule_id:
-                max_rule_id = rule_id
-        for rule in rules['rules']['natRulesDtos']:
-            if 'ruleId' not in rule:
-                max_rule_id = max_rule_id + 1
-                rule['ruleId'] = max_rule_id
-        edge['nat_rules'] = rules
-        edge['nat_rule_id'] = max_rule_id
-        header = {
-            'status': 200
-        }
-        response = ''
-        return (header, response)
-
-    def delete_nat_rule(self, edge_id, rule_id):
-        if edge_id not in self._edges:
-            raise Exception(_("Edge %s does not exist") % edge_id)
-
-        edge = self._edges[edge_id]
-        rules = edge['nat_rules']
-        rule_to_delete = None
-        for rule in rules['rules']['natRulesDtos']:
-            if rule_id == rule['ruleId']:
-                rule_to_delete = rule
-                break
-        if rule_to_delete is None:
-            raise Exception(_("Rule id %d doest not exist") % rule_id)
-
-        rules['rules']['natRulesDtos'].remove(rule_to_delete)
-
-        header = {
-            'status': 200
-        }
-        response = ''
-        return (header, response)
-
-    def get_edge_status(self, edge_id):
-        if edge_id not in self._edges:
-            raise Exception(_("Edge %s does not exist") % edge_id)
-
-        header = {
-            'status': 200
-        }
-        response = {
-            'edgeStatus': 'GREEN'
-        }
-        return (header, response)
-
-    def get_edges(self):
-        header = {
-            'status': 200
-        }
-        edges = []
-        for edge_id in self._edges:
-            edges.append({
-                'id': edge_id,
-                'edgeStatus': 'GREEN'
-            })
-        response = {
-            'edgePage': {
-                'data': edges
-            }
-        }
-        return (header, response)
-
-    def update_routes(self, edge_id, routes):
-        header = {
-            'status': 200
-        }
-        response = ''
-        return (header, response)
-
-    def create_lswitch(self, lsconfig):
-        # The lswitch is created via VCNS API so the fake nsx_api will not
-        # see it. Added to fake nsx_api here.
-        if self._fake_nsx_api:
-            lswitch = \
-                self._fake_nsx_api._add_lswitch(jsonutils.dumps(lsconfig))
-        else:
-            lswitch = lsconfig
-            lswitch['uuid'] = uuidutils.generate_uuid()
-        self._lswitches[lswitch['uuid']] = lswitch
-        header = {
-            'status': 200
-        }
-        lswitch['_href'] = '/api/ws.v1/lswitch/%s' % lswitch['uuid']
-        return (header, lswitch)
-
-    def delete_lswitch(self, id):
-        if id not in self._lswitches:
-            raise Exception(_("Lswitch %s does not exist") % id)
-        del self._lswitches[id]
-        if self._fake_nsx_api:
-            # TODO(fank): fix the hack
-            del self._fake_nsx_api._fake_lswitch_dict[id]
-        header = {
-            'status': 200
-        }
-        response = ''
-        return (header, response)
-
-    def update_firewall(self, edge_id, fw_req):
-        self.fake_firewall_dict[edge_id] = fw_req
-        rules = self.fake_firewall_dict[edge_id][
-            'firewallRules']['firewallRules']
-        index = 10
-        for rule in rules:
-            rule['ruleId'] = index
-            index += 10
-        header = {'status': 204}
-        response = ""
-        return self.return_helper(header, response)
-
-    def delete_firewall(self, edge_id):
-        header = {'status': 404}
-        if edge_id in self.fake_firewall_dict:
-            header = {'status': 204}
-            del self.fake_firewall_dict[edge_id]
-        response = ""
-        return self.return_helper(header, response)
-
-    def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req):
-        if edge_id not in self.fake_firewall_dict:
-            raise Exception(_("Edge %s does not exist") % edge_id)
-        header = {'status': 404}
-        rules = self.fake_firewall_dict[edge_id][
-            'firewallRules']['firewallRules']
-        for rule in rules:
-            if rule['ruleId'] == int(vcns_rule_id):
-                header['status'] = 204
-                rule.update(fwr_req)
-                break
-        response = ""
-        return self.return_helper(header, response)
-
-    def delete_firewall_rule(self, edge_id, vcns_rule_id):
-        if edge_id not in self.fake_firewall_dict:
-            raise Exception(_("Edge %s does not exist") % edge_id)
-        header = {'status': 404}
-        rules = self.fake_firewall_dict[edge_id][
-            'firewallRules']['firewallRules']
-        for index in range(len(rules)):
-            if rules[index]['ruleId'] == int(vcns_rule_id):
-                header['status'] = 204
-                del rules[index]
-                break
-        response = ""
-        return self.return_helper(header, response)
-
-    def add_firewall_rule_above(self, edge_id, ref_vcns_rule_id, fwr_req):
-        if edge_id not in self.fake_firewall_dict:
-            raise Exception(_("Edge %s does not exist") % edge_id)
-        header = {'status': 404}
-        rules = self.fake_firewall_dict[edge_id][
-            'firewallRules']['firewallRules']
-        pre = 0
-        for index in range(len(rules)):
-            if rules[index]['ruleId'] == int(ref_vcns_rule_id):
-                rules.insert(index, fwr_req)
-                rules[index]['ruleId'] = (int(ref_vcns_rule_id) + pre) / 2
-                header = {
-                    'status': 204,
-                    'location': "https://host/api/4.0/edges/edge_id/firewall"
-                                "/config/rules/%s" % rules[index]['ruleId']}
-                break
-            pre = int(rules[index]['ruleId'])
-        response = ""
-        return self.return_helper(header, response)
-
-    def add_firewall_rule(self, edge_id, fwr_req):
-        if edge_id not in self.fake_firewall_dict:
-            self.fake_firewall_dict[edge_id] = self.temp_firewall
-        rules = self.fake_firewall_dict[edge_id][
-            'firewallRules']['firewallRules']
-        rules.append(fwr_req)
-        index = len(rules)
-        rules[index - 1]['ruleId'] = index * 10
-        header = {
-            'status': 204,
-            'location': "https://host/api/4.0/edges/edge_id/firewall"
-                        "/config/rules/%s" % rules[index - 1]['ruleId']}
-        response = ""
-        return self.return_helper(header, response)
-
-    def get_firewall(self, edge_id):
-        if edge_id not in self.fake_firewall_dict:
-            self.fake_firewall_dict[edge_id] = self.temp_firewall
-        header = {'status': 204}
-        response = self.fake_firewall_dict[edge_id]
-        return self.return_helper(header, response)
-
-    def get_firewall_rule(self, edge_id, vcns_rule_id):
-        if edge_id not in self.fake_firewall_dict:
-            raise Exception(_("Edge %s does not exist") % edge_id)
-        header = {'status': 404}
-        response = ""
-        rules = self.fake_firewall_dict[edge_id][
-            'firewallRules']['firewallRules']
-        for rule in rules:
-            if rule['ruleId'] == int(vcns_rule_id):
-                header['status'] = 204
-                response = rule
-                break
-        return self.return_helper(header, response)
-
-    def is_name_unique(self, objs_dict, name):
-        return name not in [obj_dict['name']
-                            for obj_dict in objs_dict.values()]
-
-    def create_vip(self, edge_id, vip_new):
-        header = {'status': 403}
-        response = ""
-        if not self._fake_virtualservers_dict.get(edge_id):
-            self._fake_virtualservers_dict[edge_id] = {}
-        if not self.is_name_unique(self._fake_virtualservers_dict[edge_id],
-                                   vip_new['name']):
-            return self.return_helper(header, response)
-        vip_vseid = uuidutils.generate_uuid()
-        self._fake_virtualservers_dict[edge_id][vip_vseid] = vip_new
-        header = {
-            'status': 204,
-            'location': "https://host/api/4.0/edges/edge_id"
-                        "/loadbalancer/config/%s" % vip_vseid}
-        return self.return_helper(header, response)
-
-    def get_vip(self, edge_id, vip_vseid):
-        header = {'status': 404}
-        response = ""
-        if not self._fake_virtualservers_dict.get(edge_id) or (
-            not self._fake_virtualservers_dict[edge_id].get(vip_vseid)):
-            return self.return_helper(header, response)
-        header = {'status': 204}
-        response = self._fake_virtualservers_dict[edge_id][vip_vseid]
-        return self.return_helper(header, response)
-
-    def update_vip(self, edge_id, vip_vseid, vip_new):
-        header = {'status': 404}
-        response = ""
-        if not self._fake_virtualservers_dict.get(edge_id) or (
-            not self._fake_virtualservers_dict[edge_id].get(vip_vseid)):
-            return self.return_helper(header, response)
-        header = {'status': 204}
-        self._fake_virtualservers_dict[edge_id][vip_vseid].update(
-            vip_new)
-        return self.return_helper(header, response)
-
-    def delete_vip(self, edge_id, vip_vseid):
-        header = {'status': 404}
-        response = ""
-        if not self._fake_virtualservers_dict.get(edge_id) or (
-            not self._fake_virtualservers_dict[edge_id].get(vip_vseid)):
-            return self.return_helper(header, response)
-        header = {'status': 204}
-        del self._fake_virtualservers_dict[edge_id][vip_vseid]
-        return self.return_helper(header, response)
-
-    def create_pool(self, edge_id, pool_new):
-        header = {'status': 403}
-        response = ""
-        if not self._fake_pools_dict.get(edge_id):
-            self._fake_pools_dict[edge_id] = {}
-        if not self.is_name_unique(self._fake_pools_dict[edge_id],
-                                   pool_new['name']):
-            return self.return_helper(header, response)
-        pool_vseid = uuidutils.generate_uuid()
-        self._fake_pools_dict[edge_id][pool_vseid] = pool_new
-        header = {
-            'status': 204,
-            'location': "https://host/api/4.0/edges/edge_id"
-                        "/loadbalancer/config/%s" % pool_vseid}
-        return self.return_helper(header, response)
-
-    def get_pool(self, edge_id, pool_vseid):
-        header = {'status': 404}
-        response = ""
-        if not self._fake_pools_dict.get(edge_id) or (
-            not self._fake_pools_dict[edge_id].get(pool_vseid)):
-            return self.return_helper(header, response)
-        header = {'status': 204}
-        response = self._fake_pools_dict[edge_id][pool_vseid]
-        return self.return_helper(header, response)
-
-    def update_pool(self, edge_id, pool_vseid, pool_new):
-        header = {'status': 404}
-        response = ""
-        if not self._fake_pools_dict.get(edge_id) or (
-            not self._fake_pools_dict[edge_id].get(pool_vseid)):
-            return self.return_helper(header, response)
-        header = {'status': 204}
-        self._fake_pools_dict[edge_id][pool_vseid].update(
-            pool_new)
-        return self.return_helper(header, response)
-
-    def delete_pool(self, edge_id, pool_vseid):
-        header = {'status': 404}
-        response = ""
-        if not self._fake_pools_dict.get(edge_id) or (
-            not self._fake_pools_dict[edge_id].get(pool_vseid)):
-            return self.return_helper(header, response)
-        header = {'status': 204}
-        del self._fake_pools_dict[edge_id][pool_vseid]
-        return self.return_helper(header, response)
-
-    def create_health_monitor(self, edge_id, monitor_new):
-        if not self._fake_monitors_dict.get(edge_id):
-            self._fake_monitors_dict[edge_id] = {}
-        monitor_vseid = uuidutils.generate_uuid()
-        self._fake_monitors_dict[edge_id][monitor_vseid] = monitor_new
-        header = {
-            'status': 204,
-            'location': "https://host/api/4.0/edges/edge_id"
-                        "/loadbalancer/config/%s" % monitor_vseid}
-        response = ""
-        return self.return_helper(header, response)
-
-    def get_health_monitor(self, edge_id, monitor_vseid):
-        header = {'status': 404}
-        response = ""
-        if not self._fake_monitors_dict.get(edge_id) or (
-            not self._fake_monitors_dict[edge_id].get(monitor_vseid)):
-            return self.return_helper(header, response)
-        header = {'status': 204}
-        response = self._fake_monitors_dict[edge_id][monitor_vseid]
-        return self.return_helper(header, response)
-
-    def update_health_monitor(self, edge_id, monitor_vseid, monitor_new):
-        header = {'status': 404}
-        response = ""
-        if not self._fake_monitors_dict.get(edge_id) or (
-            not self._fake_monitors_dict[edge_id].get(monitor_vseid)):
-            return self.return_helper(header, response)
-        header = {'status': 204}
-        self._fake_monitors_dict[edge_id][monitor_vseid].update(
-            monitor_new)
-        return self.return_helper(header, response)
-
-    def delete_health_monitor(self, edge_id, monitor_vseid):
-        header = {'status': 404}
-        response = ""
-        if not self._fake_monitors_dict.get(edge_id) or (
-            not self._fake_monitors_dict[edge_id].get(monitor_vseid)):
-            return self.return_helper(header, response)
-        header = {'status': 204}
-        del self._fake_monitors_dict[edge_id][monitor_vseid]
-        return self.return_helper(header, response)
-
-    def create_app_profile(self, edge_id, app_profile):
-        if not self._fake_app_profiles_dict.get(edge_id):
-            self._fake_app_profiles_dict[edge_id] = {}
-        app_profileid = uuidutils.generate_uuid()
-        self._fake_app_profiles_dict[edge_id][app_profileid] = app_profile
-        header = {
-            'status': 204,
-            'location': "https://host/api/4.0/edges/edge_id"
-                        "/loadbalancer/config/%s" % app_profileid}
-        response = ""
-        return self.return_helper(header, response)
-
-    def update_app_profile(self, edge_id, app_profileid, app_profile):
-        header = {'status': 404}
-        response = ""
-        if not self._fake_app_profiles_dict.get(edge_id) or (
-            not self._fake_app_profiles_dict[edge_id].get(app_profileid)):
-            return self.return_helper(header, response)
-        header = {'status': 204}
-        self._fake_app_profiles_dict[edge_id][app_profileid].update(
-            app_profile)
-        return self.return_helper(header, response)
-
-    def delete_app_profile(self, edge_id, app_profileid):
-        header = {'status': 404}
-        response = ""
-        if not self._fake_app_profiles_dict.get(edge_id) or (
-            not self._fake_app_profiles_dict[edge_id].get(app_profileid)):
-            return self.return_helper(header, response)
-        header = {'status': 204}
-        del self._fake_app_profiles_dict[edge_id][app_profileid]
-        return self.return_helper(header, response)
-
-    def get_loadbalancer_config(self, edge_id):
-        header = {'status': 204}
-        response = {'config': False}
-        if self._fake_loadbalancer_config[edge_id]:
-            response['config'] = self._fake_loadbalancer_config[edge_id]
-        return self.return_helper(header, response)
-
-    def update_ipsec_config(self, edge_id, ipsec_config):
-        self.fake_ipsecvpn_dict[edge_id] = ipsec_config
-        header = {'status': 204}
-        response = ""
-        return self.return_helper(header, response)
-
-    def delete_ipsec_config(self, edge_id):
-        header = {'status': 404}
-        if edge_id in self.fake_ipsecvpn_dict:
-            header = {'status': 204}
-            del self.fake_ipsecvpn_dict[edge_id]
-        response = ""
-        return self.return_helper(header, response)
-
-    def get_ipsec_config(self, edge_id):
-        if edge_id not in self.fake_ipsecvpn_dict:
-            self.fake_ipsecvpn_dict[edge_id] = self.temp_ipsecvpn
-        header = {'status': 204}
-        response = self.fake_ipsecvpn_dict[edge_id]
-        return self.return_helper(header, response)
-
-    def enable_service_loadbalancer(self, edge_id, config):
-        header = {'status': 204}
-        response = ""
-        self._fake_loadbalancer_config[edge_id] = True
-        return self.return_helper(header, response)
-
-    def return_helper(self, header, response):
-        status = int(header['status'])
-        if 200 <= status <= 300:
-            return (header, response)
-        if status in self.errors:
-            cls = self.errors[status]
-        else:
-            cls = exceptions.VcnsApiException
-        raise cls(
-            status=status, header=header, uri='fake_url', response=response)
-
-    def reset_all(self):
-        self._jobs.clear()
-        self._edges.clear()
-        self._lswitches.clear()
-        self.fake_firewall_dict = {}
-        self._fake_virtualservers_dict = {}
-        self._fake_pools_dict = {}
-        self._fake_monitors_dict = {}
-        self._fake_app_profiles_dict = {}
-        self._fake_loadbalancer_config = {}
diff --git a/neutron/tests/unit/vmware/vshield/test_vcns_driver.py b/neutron/tests/unit/vmware/vshield/test_vcns_driver.py
deleted file mode 100644 (file)
index c045171..0000000
+++ /dev/null
@@ -1,587 +0,0 @@
-# Copyright 2013 OpenStack Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from eventlet import greenthread
-import mock
-
-from neutron.plugins.vmware.vshield.common import constants as vcns_const
-from neutron.plugins.vmware.vshield.tasks import constants as ts_const
-from neutron.plugins.vmware.vshield.tasks import tasks as ts
-from neutron.plugins.vmware.vshield import vcns_driver
-from neutron.tests import base
-from neutron.tests.unit import vmware
-from neutron.tests.unit.vmware.vshield import fake_vcns
-
-VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test")
-
-ts.TaskManager.set_default_interval(100)
-
-
-class VcnsDriverTaskManagerTestCase(base.BaseTestCase):
-
-    def setUp(self):
-        super(VcnsDriverTaskManagerTestCase, self).setUp()
-        self.manager = ts.TaskManager()
-        self.manager.start(100)
-
-    def tearDown(self):
-        self.manager.stop()
-        # Task manager should not leave running threads around
-        # if _thread is None it means it was killed in stop()
-        self.assertIsNone(self.manager._thread)
-        super(VcnsDriverTaskManagerTestCase, self).tearDown()
-
-    def _test_task_manager_task_process_state(self, sync_exec=False):
-        def _task_failed(task, reason):
-            task.userdata['result'] = False
-            task.userdata['error'] = reason
-
-        def _check_state(task, exp_state):
-            if not task.userdata.get('result', True):
-                return False
-
-            state = task.userdata['state']
-            if state != exp_state:
-                msg = "state %d expect %d" % (
-                    state, exp_state)
-                _task_failed(task, msg)
-                return False
-
-            task.userdata['state'] = state + 1
-            return True
-
-        def _exec(task):
-            if not _check_state(task, 1):
-                return ts_const.TaskStatus.ERROR
-
-            if task.userdata['sync_exec']:
-                return ts_const.TaskStatus.COMPLETED
-            else:
-                return ts_const.TaskStatus.PENDING
-
-        def _status(task):
-            if task.userdata['sync_exec']:
-                _task_failed(task, "_status callback triggered")
-
-            state = task.userdata['state']
-            if state == 3:
-                _check_state(task, 3)
-                return ts_const.TaskStatus.PENDING
-            else:
-                _check_state(task, 4)
-                return ts_const.TaskStatus.COMPLETED
-
-        def _result(task):
-            if task.userdata['sync_exec']:
-                exp_state = 3
-            else:
-                exp_state = 5
-
-            _check_state(task, exp_state)
-
-        def _start_monitor(task):
-            _check_state(task, 0)
-
-        def _executed_monitor(task):
-            _check_state(task, 2)
-
-        def _result_monitor(task):
-            if task.userdata['sync_exec']:
-                exp_state = 4
-            else:
-                exp_state = 6
-
-            if _check_state(task, exp_state):
-                task.userdata['result'] = True
-            else:
-                task.userdata['result'] = False
-
-        userdata = {
-            'state': 0,
-            'sync_exec': sync_exec
-        }
-        task = ts.Task('name', 'res', _exec, _status, _result, userdata)
-        task.add_start_monitor(_start_monitor)
-        task.add_executed_monitor(_executed_monitor)
-        task.add_result_monitor(_result_monitor)
-
-        self.manager.add(task)
-
-        task.wait(ts_const.TaskState.RESULT)
-
-        self.assertTrue(userdata['result'])
-
-    def test_task_manager_task_sync_exec_process_state(self):
-        self._test_task_manager_task_process_state(sync_exec=True)
-
-    def test_task_manager_task_async_exec_process_state(self):
-        self._test_task_manager_task_process_state(sync_exec=False)
-
-    def test_task_manager_task_ordered_process(self):
-        def _task_failed(task, reason):
-            task.userdata['result'] = False
-            task.userdata['error'] = reason
-
-        def _exec(task):
-            task.userdata['executed'] = True
-            return ts_const.TaskStatus.PENDING
-
-        def _status(task):
-            return ts_const.TaskStatus.COMPLETED
-
-        def _result(task):
-            next_task = task.userdata.get('next')
-            if next_task:
-                if next_task.userdata.get('executed'):
-                    _task_failed(next_task, "executed premature")
-            if task.userdata.get('result', True):
-                task.userdata['result'] = True
-
-        tasks = []
-        prev = None
-        last_task = None
-        for i in range(5):
-            name = "name-%d" % i
-            task = ts.Task(name, 'res', _exec, _status, _result, {})
-            tasks.append(task)
-            if prev:
-                prev.userdata['next'] = task
-            prev = task
-            last_task = task
-
-        for task in tasks:
-            self.manager.add(task)
-
-        last_task.wait(ts_const.TaskState.RESULT)
-
-        for task in tasks:
-            self.assertTrue(task.userdata['result'])
-
-    def test_task_manager_task_parallel_process(self):
-        tasks = []
-
-        def _exec(task):
-            task.userdata['executed'] = True
-            return ts_const.TaskStatus.PENDING
-
-        def _status(task):
-            for t in tasks:
-                if not t.userdata.get('executed'):
-                    t.userdata['resut'] = False
-            return ts_const.TaskStatus.COMPLETED
-
-        def _result(task):
-            if (task.userdata.get('result') is None and
-                task.status == ts_const.TaskStatus.COMPLETED):
-                task.userdata['result'] = True
-            else:
-                task.userdata['result'] = False
-
-        for i in range(5):
-            name = "name-%d" % i
-            res = 'resource-%d' % i
-            task = ts.Task(name, res, _exec, _status, _result, {})
-            tasks.append(task)
-            self.manager.add(task)
-
-        for task in tasks:
-            task.wait(ts_const.TaskState.RESULT)
-            self.assertTrue(task.userdata['result'])
-
-    def _test_task_manager_stop(self, exec_wait=False, result_wait=False,
-                                stop_wait=0):
-        def _exec(task):
-            if exec_wait:
-                greenthread.sleep(0.01)
-            return ts_const.TaskStatus.PENDING
-
-        def _status(task):
-            greenthread.sleep(0.01)
-            return ts_const.TaskStatus.PENDING
-
-        def _result(task):
-            if result_wait:
-                greenthread.sleep(0)
-            pass
-
-        manager = ts.TaskManager().start(100)
-        manager.stop()
-        # Task manager should not leave running threads around
-        # if _thread is None it means it was killed in stop()
-        self.assertIsNone(manager._thread)
-        manager.start(100)
-
-        alltasks = {}
-        for i in range(100):
-            res = 'res-%d' % i
-            tasks = []
-            for i in range(100):
-                task = ts.Task('name', res, _exec, _status, _result)
-                manager.add(task)
-                tasks.append(task)
-            alltasks[res] = tasks
-
-        greenthread.sleep(stop_wait)
-        manager.stop()
-        # Task manager should not leave running threads around
-        # if _thread is None it means it was killed in stop()
-        self.assertIsNone(manager._thread)
-
-        for res, tasks in alltasks.iteritems():
-            for task in tasks:
-                self.assertEqual(task.status, ts_const.TaskStatus.ABORT)
-
-    def test_task_manager_stop_1(self):
-        self._test_task_manager_stop(True, True, 0)
-
-    def test_task_manager_stop_2(self):
-        self._test_task_manager_stop(True, True, 1)
-
-    def test_task_manager_stop_3(self):
-        self._test_task_manager_stop(False, False, 0)
-
-    def test_task_manager_stop_4(self):
-        self._test_task_manager_stop(False, False, 1)
-
-    def test_task_pending_task(self):
-        def _exec(task):
-            task.userdata['executing'] = True
-            while not task.userdata['tested']:
-                greenthread.sleep(0)
-            task.userdata['executing'] = False
-            return ts_const.TaskStatus.COMPLETED
-
-        userdata = {
-            'executing': False,
-            'tested': False
-        }
-        manager = ts.TaskManager().start(100)
-        task = ts.Task('name', 'res', _exec, userdata=userdata)
-        manager.add(task)
-
-        while not userdata['executing']:
-            greenthread.sleep(0)
-        self.assertTrue(manager.has_pending_task())
-
-        userdata['tested'] = True
-        while userdata['executing']:
-            greenthread.sleep(0)
-        self.assertFalse(manager.has_pending_task())
-
-
-class VcnsDriverTestCase(base.BaseTestCase):
-
-    def vcns_patch(self):
-        instance = self.mock_vcns.start()
-        instance.return_value.deploy_edge.side_effect = self.fc.deploy_edge
-        instance.return_value.get_edge_id.side_effect = self.fc.get_edge_id
-        instance.return_value.get_edge_deploy_status.side_effect = (
-            self.fc.get_edge_deploy_status)
-        instance.return_value.delete_edge.side_effect = self.fc.delete_edge
-        instance.return_value.update_interface.side_effect = (
-            self.fc.update_interface)
-        instance.return_value.get_nat_config.side_effect = (
-            self.fc.get_nat_config)
-        instance.return_value.update_nat_config.side_effect = (
-            self.fc.update_nat_config)
-        instance.return_value.delete_nat_rule.side_effect = (
-            self.fc.delete_nat_rule)
-        instance.return_value.get_edge_status.side_effect = (
-            self.fc.get_edge_status)
-        instance.return_value.get_edges.side_effect = self.fc.get_edges
-        instance.return_value.update_routes.side_effect = (
-            self.fc.update_routes)
-        instance.return_value.create_lswitch.side_effect = (
-            self.fc.create_lswitch)
-        instance.return_value.delete_lswitch.side_effect = (
-            self.fc.delete_lswitch)
-
-    def setUp(self):
-        super(VcnsDriverTestCase, self).setUp()
-
-        self.config_parse(args=['--config-file', VCNS_CONFIG_FILE])
-
-        self.fc = fake_vcns.FakeVcns()
-        self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True)
-        self.vcns_patch()
-
-        self.addCleanup(self.fc.reset_all)
-
-        self.vcns_driver = vcns_driver.VcnsDriver(self)
-
-        self.edge_id = None
-        self.result = None
-
-    def tearDown(self):
-        self.vcns_driver.task_manager.stop()
-        # Task manager should not leave running threads around
-        # if _thread is None it means it was killed in stop()
-        self.assertIsNone(self.vcns_driver.task_manager._thread)
-        super(VcnsDriverTestCase, self).tearDown()
-
-    def _deploy_edge(self):
-        task = self.vcns_driver.deploy_edge(
-            'router-id', 'myedge', 'internal-network', {}, wait_for_exec=True)
-        self.assertEqual(self.edge_id, 'edge-1')
-        task.wait(ts_const.TaskState.RESULT)
-        return task
-
-    def edge_deploy_started(self, task):
-        self.edge_id = task.userdata['edge_id']
-
-    def edge_deploy_result(self, task):
-        if task.status == ts_const.TaskStatus.COMPLETED:
-            task.userdata['jobdata']['edge_deploy_result'] = True
-
-    def edge_delete_result(self, task):
-        if task.status == ts_const.TaskStatus.COMPLETED:
-            task.userdata['jobdata']['edge_delete_result'] = True
-
-    def snat_create_result(self, task):
-        if task.status == ts_const.TaskStatus.COMPLETED:
-            task.userdata['jobdata']['snat_create_result'] = True
-
-    def snat_delete_result(self, task):
-        if task.status == ts_const.TaskStatus.COMPLETED:
-            task.userdata['jobdata']['snat_delete_result'] = True
-
-    def dnat_create_result(self, task):
-        if task.status == ts_const.TaskStatus.COMPLETED:
-            task.userdata['jobdata']['dnat_create_result'] = True
-
-    def dnat_delete_result(self, task):
-        if task.status == ts_const.TaskStatus.COMPLETED:
-            task.userdata['jobdata']['dnat_delete_result'] = True
-
-    def nat_update_result(self, task):
-        if task.status == ts_const.TaskStatus.COMPLETED:
-            task.userdata['jobdata']['nat_update_result'] = True
-
-    def routes_update_result(self, task):
-        if task.status == ts_const.TaskStatus.COMPLETED:
-            task.userdata['jobdata']['routes_update_result'] = True
-
-    def interface_update_result(self, task):
-        if task.status == ts_const.TaskStatus.COMPLETED:
-            task.userdata['jobdata']['interface_update_result'] = True
-
-    def test_deploy_edge(self):
-        jobdata = {}
-        task = self.vcns_driver.deploy_edge(
-            'router-id', 'myedge', 'internal-network', jobdata=jobdata,
-            wait_for_exec=True)
-        self.assertEqual(self.edge_id, 'edge-1')
-        task.wait(ts_const.TaskState.RESULT)
-        self.assertEqual(task.status, ts_const.TaskStatus.COMPLETED)
-        self.assertTrue(jobdata.get('edge_deploy_result'))
-
-    def test_deploy_edge_fail(self):
-        task1 = self.vcns_driver.deploy_edge(
-            'router-1', 'myedge', 'internal-network', {}, wait_for_exec=True)
-        task2 = self.vcns_driver.deploy_edge(
-            'router-2', 'myedge', 'internal-network', {}, wait_for_exec=True)
-        task1.wait(ts_const.TaskState.RESULT)
-        task2.wait(ts_const.TaskState.RESULT)
-        self.assertEqual(task2.status, ts_const.TaskStatus.ERROR)
-
-    def test_get_edge_status(self):
-        self._deploy_edge()
-        status = self.vcns_driver.get_edge_status(self.edge_id)
-        self.assertEqual(status, vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE)
-
-    def test_get_edges(self):
-        self._deploy_edge()
-        edges = self.vcns_driver.get_edges_statuses()
-        found = False
-        for edge_id, status in edges.iteritems():
-            if edge_id == self.edge_id:
-                found = True
-                break
-        self.assertTrue(found)
-
-    def _create_nat_rule(self, edge_id, action, org, translated):
-        jobdata = {}
-        if action == 'snat':
-            task = self.vcns_driver.create_snat_rule(
-                'router-id', edge_id, org, translated, jobdata=jobdata)
-            key = 'snat_create_result'
-        else:
-            task = self.vcns_driver.create_dnat_rule(
-                'router-id', edge_id, org, translated, jobdata=jobdata)
-            key = 'dnat_create_result'
-        task.wait(ts_const.TaskState.RESULT)
-        self.assertTrue(jobdata.get(key))
-
-    def _delete_nat_rule(self, edge_id, action, addr):
-        jobdata = {}
-        if action == 'snat':
-            task = self.vcns_driver.delete_snat_rule(
-                'router-id', edge_id, addr, jobdata=jobdata)
-            key = 'snat_delete_result'
-        else:
-            task = self.vcns_driver.delete_dnat_rule(
-                'router-id', edge_id, addr, jobdata=jobdata)
-            key = 'dnat_delete_result'
-        task.wait(ts_const.TaskState.RESULT)
-        self.assertTrue(jobdata.get(key))
-
-    def _test_create_nat_rule(self, action):
-        self._deploy_edge()
-        addr = '192.168.1.1'
-        translated = '10.0.0.1'
-        self._create_nat_rule(self.edge_id, action, addr, translated)
-
-        natcfg = self.vcns_driver.get_nat_config(self.edge_id)
-        for rule in natcfg['rules']['natRulesDtos']:
-            if (rule['originalAddress'] == addr and
-                rule['translatedAddress'] == translated and
-                rule['action'] == action):
-                break
-        else:
-            self.assertTrue(False)
-
-    def _test_delete_nat_rule(self, action):
-        self._deploy_edge()
-        addr = '192.168.1.1'
-        translated = '10.0.0.1'
-        self._create_nat_rule(self.edge_id, action, addr, translated)
-        if action == 'snat':
-            self._delete_nat_rule(self.edge_id, action, addr)
-        else:
-            self._delete_nat_rule(self.edge_id, action, translated)
-        natcfg = self.vcns_driver.get_nat_config(self.edge_id)
-        for rule in natcfg['rules']['natRulesDtos']:
-            if (rule['originalAddress'] == addr and
-                rule['translatedAddress'] == translated and
-                rule['action'] == action):
-                self.assertTrue(False)
-                break
-
-    def test_create_snat_rule(self):
-        self._test_create_nat_rule('snat')
-
-    def test_delete_snat_rule(self):
-        self._test_delete_nat_rule('snat')
-
-    def test_create_dnat_rule(self):
-        self._test_create_nat_rule('dnat')
-
-    def test_delete_dnat_rule(self):
-        self._test_delete_nat_rule('dnat')
-
-    def test_update_nat_rules(self):
-        self._deploy_edge()
-        jobdata = {}
-        snats = [{
-            'src': '192.168.1.0/24',
-            'translated': '10.0.0.1'
-        }, {
-            'src': '192.168.2.0/24',
-            'translated': '10.0.0.2'
-        }, {
-            'src': '192.168.3.0/24',
-            'translated': '10.0.0.3'
-        }
-        ]
-        dnats = [{
-            'dst': '100.0.0.4',
-            'translated': '192.168.1.1'
-        }, {
-            'dst': '100.0.0.5',
-            'translated': '192.168.2.1'
-        }
-        ]
-        task = self.vcns_driver.update_nat_rules(
-            'router-id', self.edge_id, snats, dnats, jobdata=jobdata)
-        task.wait(ts_const.TaskState.RESULT)
-        self.assertTrue(jobdata.get('nat_update_result'))
-
-        natcfg = self.vcns_driver.get_nat_config(self.edge_id)
-        rules = natcfg['rules']['natRulesDtos']
-        self.assertEqual(len(rules), 2 * len(dnats) + len(snats))
-        self.natEquals(rules[0], dnats[0])
-        self.natEquals(rules[1], self.snat_for_dnat(dnats[0]))
-        self.natEquals(rules[2], dnats[1])
-        self.natEquals(rules[3], self.snat_for_dnat(dnats[1]))
-        self.natEquals(rules[4], snats[0])
-        self.natEquals(rules[5], snats[1])
-        self.natEquals(rules[6], snats[2])
-
-    def snat_for_dnat(self, dnat):
-        return {
-            'src': dnat['translated'],
-            'translated': dnat['dst']
-        }
-
-    def natEquals(self, rule, exp):
-        addr = exp.get('src')
-        if not addr:
-            addr = exp.get('dst')
-
-        self.assertEqual(rule['originalAddress'], addr)
-        self.assertEqual(rule['translatedAddress'], exp['translated'])
-
-    def test_update_routes(self):
-        self._deploy_edge()
-        jobdata = {}
-        routes = [{
-            'cidr': '192.168.1.0/24',
-            'nexthop': '169.254.2.1'
-        }, {
-            'cidr': '192.168.2.0/24',
-            'nexthop': '169.254.2.1'
-        }, {
-            'cidr': '192.168.3.0/24',
-            'nexthop': '169.254.2.1'
-        }
-        ]
-        task = self.vcns_driver.update_routes(
-            'router-id', self.edge_id, '10.0.0.1', routes, jobdata=jobdata)
-        task.wait(ts_const.TaskState.RESULT)
-        self.assertTrue(jobdata.get('routes_update_result'))
-
-    def test_update_interface(self):
-        self._deploy_edge()
-        jobdata = {}
-        task = self.vcns_driver.update_interface(
-            'router-id', self.edge_id, vcns_const.EXTERNAL_VNIC_INDEX,
-            'network-id', address='100.0.0.3', netmask='255.255.255.0',
-            jobdata=jobdata)
-        task.wait(ts_const.TaskState.RESULT)
-        self.assertTrue(jobdata.get('interface_update_result'))
-
-    def test_delete_edge(self):
-        self._deploy_edge()
-        jobdata = {}
-        task = self.vcns_driver.delete_edge(
-            'router-id', self.edge_id, jobdata=jobdata)
-        task.wait(ts_const.TaskState.RESULT)
-        self.assertTrue(jobdata.get('edge_delete_result'))
-
-    def test_create_lswitch(self):
-        tz_config = [{
-            'transport_zone_uuid': 'tz-uuid'
-        }]
-        lswitch = self.vcns_driver.create_lswitch('lswitch', tz_config)
-        self.assertEqual(lswitch['display_name'], 'lswitch')
-        self.assertEqual(lswitch['type'], 'LogicalSwitchConfig')
-        self.assertIn('uuid', lswitch)
-
-    def test_delete_lswitch(self):
-        tz_config = {
-            'transport_zone_uuid': 'tz-uuid'
-        }
-        lswitch = self.vcns_driver.create_lswitch('lswitch', tz_config)
-        self.vcns_driver.delete_lswitch(lswitch['uuid'])
index ac846309ef05ddc2695043567956786b8a4939d9..74615a70ac6d91103744b2d6bf75e2f6a4d505b8 100644 (file)
--- a/setup.cfg
+++ b/setup.cfg
@@ -90,7 +90,6 @@ setup-hooks =
 [entry_points]
 console_scripts =
     neutron-cisco-cfg-agent = neutron.plugins.cisco.cfg_agent.cfg_agent:main
-    neutron-check-nsx-config = neutron.plugins.vmware.check_nsx_config:main
     neutron-db-manage = neutron.db.migration.cli:main
     neutron-debug = neutron.debug.shell:main
     neutron-dhcp-agent = neutron.cmd.eventlet.agents.dhcp:main
@@ -103,7 +102,6 @@ console_scripts =
     neutron-nec-agent = neutron.plugins.nec.agent.nec_neutron_agent:main
     neutron-netns-cleanup = neutron.cmd.netns_cleanup:main
     neutron-ns-metadata-proxy = neutron.cmd.eventlet.agents.metadata_proxy:main
-    neutron-nsx-manage = neutron.plugins.vmware.shell:main
     neutron-nvsd-agent = neutron.plugins.oneconvergence.agent.nvsd_neutron_agent:main
     neutron-openvswitch-agent = neutron.plugins.openvswitch.agent.ovs_neutron_agent:main
     neutron-ovs-cleanup = neutron.cmd.ovs_cleanup:main
@@ -128,7 +126,7 @@ neutron.core_plugins =
     metaplugin = neutron.plugins.metaplugin.meta_neutron_plugin:MetaPluginV2
     oneconvergence = neutron.plugins.oneconvergence.plugin:OneConvergencePluginV2
     plumgrid = neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin:NeutronPluginPLUMgridV2
-    vmware = neutron.plugins.vmware.plugin:NsxPlugin
+    vmware = neutron.plugins.vmware.plugin:NsxMhPlugin
 neutron.service_plugins =
     dummy = neutron.tests.unit.dummy_plugin:DummyServicePlugin
     router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin