]> review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
Update the Nicira NVP plugin to support the v2 Quantum API
authorAaron Rosen <arosen@nicira.com>
Mon, 6 Aug 2012 23:04:55 +0000 (16:04 -0700)
committerAaron Rosen <arosen@nicira.com>
Wed, 15 Aug 2012 06:02:00 +0000 (02:02 -0400)
blueprint: quantum-nvp-plugin-v2

Change-Id: I848ad7b7b99a24e19ea28e65b7d88261c21eac3a

30 files changed:
etc/quantum/plugins/nicira/nvp.ini
quantum/plugins/nicira/nicira_nvp_plugin/NvpApiClient.py
quantum/plugins/nicira/nicira_nvp_plugin/QuantumPlugin.py
quantum/plugins/nicira/nicira_nvp_plugin/__init__.py
quantum/plugins/nicira/nicira_nvp_plugin/api_client/__init__.py
quantum/plugins/nicira/nicira_nvp_plugin/api_client/client.py
quantum/plugins/nicira/nicira_nvp_plugin/api_client/client_eventlet.py
quantum/plugins/nicira/nicira_nvp_plugin/api_client/common.py
quantum/plugins/nicira/nicira_nvp_plugin/api_client/request.py
quantum/plugins/nicira/nicira_nvp_plugin/api_client/request_eventlet.py
quantum/plugins/nicira/nicira_nvp_plugin/cli.py [deleted file]
quantum/plugins/nicira/nicira_nvp_plugin/common/__init__.py [new file with mode: 0644]
quantum/plugins/nicira/nicira_nvp_plugin/common/config.py [new file with mode: 0644]
quantum/plugins/nicira/nicira_nvp_plugin/nvp_plugin_version.py [moved from quantum/plugins/nicira/nicira_nvp_plugin/tests/test_check.py with 50% similarity]
quantum/plugins/nicira/nicira_nvp_plugin/nvplib.py
quantum/plugins/nicira/nicira_nvp_plugin/run_tests.py [new file with mode: 0755]
quantum/plugins/nicira/nicira_nvp_plugin/tests/__init__.py
quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_get_lport.json [new file with mode: 0644]
quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_get_lport_status.json [new file with mode: 0644]
quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_get_lswitch.json [new file with mode: 0644]
quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_nvpapiclient.py [new file with mode: 0644]
quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_post_lport.json [new file with mode: 0644]
quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_post_lswitch.json [new file with mode: 0644]
quantum/plugins/nicira/nicira_nvp_plugin/tests/nvp.ini.test [new file with mode: 0644]
quantum/plugins/nicira/nicira_nvp_plugin/tests/test_config.py [deleted file]
quantum/plugins/nicira/nicira_nvp_plugin/tests/test_network.py [deleted file]
quantum/plugins/nicira/nicira_nvp_plugin/tests/test_nvp_api_common.py
quantum/plugins/nicira/nicira_nvp_plugin/tests/test_nvp_api_request.py
quantum/plugins/nicira/nicira_nvp_plugin/tests/test_nvp_api_request_eventlet.py
quantum/plugins/nicira/nicira_nvp_plugin/tests/test_port.py [deleted file]

index f3e484fa19a9b2e7150fec652e784f35d0b938b0..bf4cdb328753bad5de028b0e2968a18b211bc5c7 100644 (file)
@@ -1,36 +1,59 @@
-# Example configuration:
-# [NVP]
-# DEFAULT_TZ_UUID = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
-# NVP_CONTROLLER_CONNECTIONS = NVP_CONN_1 NVP_CONN_2 NVP_CONN_3
-# NVP_CONN_1=10.0.1.2:443:admin:password:30:10:2:2
-# NVP_CONN_2=10.0.1.3:443:admin:password:30:10:2:2
-# NVP_CONN_3=10.0.1.4:443:admin:password:30:10:2:2
 [DEFAULT]
-# No default config for now.
+
+[DATABASE]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# sql_connection = mysql://root:quantum@127.0.0.1:3306/nvp_quantum
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main quantum server. (Leave it as is if the database runs on this host.)
+sql_connection = sqlite://
+# Number of database reconnection retries in the event connectivity is lost.
+# Setting it to -1 implies an infinite retry count.
+# sql_max_retries = 10
+# Database reconnection interval in seconds - in event connectivity is lost
+reconnect_interval = 2
+
 [NVP]
-# This is the uuid of the default NVP Transport zone that will be used for
-# creating isolated "Quantum" networks.  The transport zone needs to be
-# created in NVP before starting Quantum with the plugin.
-DEFAULT_TZ_UUID = <insert default tz uuid>
-# This parameter is a space separated list of NVP_CONTROLLER_CONNECTIONS.
-NVP_CONTROLLER_CONNECTIONS = <space separated names of controller connections>
-# This parameter describes a connection to a single NVP controller.
+# The number of logical ports to create per bridged logical switch
+# max_lp_per_bridged_ls = 64
+# Time from when a connection pool is switched to another controller
+# during failure.
+# failover_time = 5
+# Number of connections to each controller node.
+# concurrent_connections = 3
+
+#[CLUSTER:example]
+# This is the uuid of the default NVP Transport zone that will be used for
+# creating tunneled isolated "Quantum" networks.  It needs to be created in
+# NVP before starting Quantum with the nvp plugin.
+# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
+
+# Nova "zone" that maps to this NVP cluster.  This should map to the
+# node_availability_zone in your nova.conf for each nova cluster.  Each nova
+# cluster should have a unique node_availability_zone set.
+# nova_zone_id = zone1 # (Optional)
+
+# UUID of the cluster in NVP.  This can be retrieved from NVP management
+# console "admin" section.
+# nvp_cluster_uuid = 615be8e4-82e9-4fd2-b4b3-fd141e51a5a7 # (Optional)
+
+# This parameter describes a connection to a single NVP controller. Format:
+# <ip>:<port>:<user>:<pw>:<req_timeout>:<http_timeout>:<retries>:<redirects>
 # <ip> is the ip address of the controller
 # <port> is the port of the controller (default NVP port is 443)
 # <user> is the user name for this controller
-# <pass> is the user password.
-# <request_timeout>: The total time limit on all operations for a controller
+# <pw> is the user password.
+# <req_timeout>: The total time limit on all operations for a controller
 #   request (including retries, redirects from unresponsive controllers).
 #   Default is 30.
 # <http_timeout>: How long to wait before aborting an unresponsive controller
-#   (and allow for retries to another controller).
+#   (and allow for retries to another controller in the cluster).
 #   Default is 10.
 # <retries>: the maximum number of times to retry a particular request
 #   Default is 2.
 # <redirects>: the maximum number of times to follow a redirect response from a server.
 #   Default is 2.
-# There must be at least one NVP_CONTROLLER_CONNECTION per system.
-#
-# Here is an example:
-# NVP_CONTROLLER_CONNECTION_1=10.0.0.1:443:admin:password:30:10:2:2
-<connection name>=<ip>:<port>:<user>:<pass>:<api_call_timeout>:<http_timeout>:<retries>:<redirects>
+# There must be at least one nvp_controller_connection per system or per cluster.
+# nvp_controller_connection=10.0.1.2:443:admin:admin:30:10:2:2
+# nvp_controller_connection=10.0.1.3:443:admin:admin:30:10:2:2
+# nvp_controller_connection=10.0.1.4:443:admin:admin:30:10:2:2
index dd1387e6cd0fbe0004fc85ce4b9ecfc16e6e568f..1d21f56cdbb8c82739ebd3252ad4064fdff4bc18 100644 (file)
@@ -1,4 +1,5 @@
-# Copyright 2012 Nicira Networks, Inc.
+# Copyright 2012 Nicira, Inc.
+# All Rights Reserved
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    License for the specific language governing permissions and limitations
 #    under the License.
 #
-#@author: Somik Behera, Nicira Networks, Inc.
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# @author: Somik Behera, Nicira Networks, Inc.
 
 import httplib  # basic HTTP library for HTTPS connections
 import logging
-
-
-from quantum.plugins.nicira.nicira_nvp_plugin.api_client.client_eventlet \
-    import NvpApiClientEventlet
-from quantum.plugins.nicira.nicira_nvp_plugin.api_client.request_eventlet \
-    import NvpGenericRequestEventlet
-
+from quantum.plugins.nicira.nicira_nvp_plugin.api_client import (
+    client_eventlet, request_eventlet)
 
 LOG = logging.getLogger("NVPApiHelper")
 LOG.setLevel(logging.INFO)
 
 
-class NVPApiHelper(NvpApiClientEventlet):
+class NVPApiHelper(client_eventlet.NvpApiClientEventlet):
     '''
     Helper class to do basic login, cookie management, and provide base
     method to send HTTP requests.
@@ -51,13 +49,14 @@ class NVPApiHelper(NvpApiClientEventlet):
             from unresponsive controllers, etc) should finish within this
             timeout.
         :param http_timeout: how long to wait before aborting an
-            unresponsive controller
+            unresponsive controller (and allow for retries to another
+            controller in the cluster)
         :param retries: the number of concurrent connections.
         :param redirects: the number of concurrent connections.
         :param failover_time: minimum time between controller failover and new
             connections allowed.
         '''
-        NvpApiClientEventlet.__init__(
+        client_eventlet.NvpApiClientEventlet.__init__(
             self, api_providers, user, password, concurrent_connections,
             failover_time=failover_time)
 
@@ -85,12 +84,12 @@ class NVPApiHelper(NvpApiClientEventlet):
         if password:
             self._password = password
 
-        return NvpApiClientEventlet.login(self)
+        return client_eventlet.NvpApiClientEventlet.login(self)
 
     def request(self, method, url, body="", content_type="application/json"):
         '''Issues request to controller.'''
 
-        g = NvpGenericRequestEventlet(
+        g = request_eventlet.NvpGenericRequestEventlet(
             self, method, url, body, content_type, auto_login=True,
             request_timeout=self._request_timeout,
             http_timeout=self._http_timeout,
@@ -127,9 +126,8 @@ class NVPApiHelper(NvpApiClientEventlet):
         # Continue processing for non-error condition.
         if (status != httplib.OK and status != httplib.CREATED
                 and status != httplib.NO_CONTENT):
-            LOG.error(
-                "%s to %s, unexpected response code: %d (content = '%s')" %
-                (method, url, response.status, response.body))
+            LOG.error("%s to %s, unexpected response code: %d (content = '%s')"
+                      % (method, url, response.status, response.body))
             return None
 
         return response.body
@@ -149,16 +147,17 @@ class NVPApiHelper(NvpApiClientEventlet):
     def zero(self):
         raise NvpApiException()
 
-    error_codes = {
-        404: fourZeroFour,
-        409: fourZeroNine,
-        503: fiveZeroThree,
-        403: fourZeroThree,
-        301: zero,
-        307: zero,
-        400: zero,
-        500: zero,
-    }
+    # TODO(del): ensure error_codes are handled/raised appropriately
+    # in api_client.
+    error_codes = {404: fourZeroFour,
+                   409: fourZeroNine,
+                   503: fiveZeroThree,
+                   403: fourZeroThree,
+                   301: zero,
+                   307: zero,
+                   400: zero,
+                   500: zero}
 
 
 class NvpApiException(Exception):
index 894cd9036ddf32cc9cace0aa850056c80bd23bbd..c16612a2a28a84e50286f10c6e9975e6055711ce 100644 (file)
@@ -1,4 +1,5 @@
-# Copyright 2012 Nicira Networks, Inc.
+# Copyright 2012 Nicira, Inc.
+# All Rights Reserved
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    License for the specific language governing permissions and limitations
 #    under the License.
 #
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
 # @author: Somik Behera, Nicira Networks, Inc.
 # @author: Brad Hall, Nicira Networks, Inc.
+# @author: Aaron Rosen, Nicira Networks, Inc.
+
 
 import ConfigParser
+import json
+import hashlib
 import logging
+import netaddr
 import os
 import sys
+import traceback
+import urllib
+import uuid
 
+
+from common import config
+from quantum.plugins.nicira.nicira_nvp_plugin.api_client import client_eventlet
 import NvpApiClient
 import nvplib
+from nvp_plugin_version import PLUGIN_VERSION
 
+from quantum.api.v2 import attributes
 from quantum.common import exceptions as exception
-from quantum.plugins.nicira.nicira_nvp_plugin.api_client.client_eventlet \
-    import (
-        DEFAULT_CONCURRENT_CONNECTIONS,
-        DEFAULT_FAILOVER_TIME,
-    )
-from quantum.plugins.nicira.nicira_nvp_plugin.api_client.request_eventlet \
-    import (
-        DEFAULT_REQUEST_TIMEOUT,
-        DEFAULT_HTTP_TIMEOUT,
-        DEFAULT_RETRIES,
-        DEFAULT_REDIRECTS,
-    )
-
-
-LOG = logging.getLogger("QuantumPlugin")
+from quantum.db import api as db
+from quantum.db import db_base_plugin_v2
+from quantum.db import models_v2
+from quantum.openstack.common import cfg
 
 
 CONFIG_FILE = "nvp.ini"
@@ -46,106 +51,56 @@ CONFIG_FILE_PATHS = []
 if os.environ.get('QUANTUM_HOME', None):
     CONFIG_FILE_PATHS.append('%s/etc' % os.environ['QUANTUM_HOME'])
 CONFIG_FILE_PATHS.append("/etc/quantum/plugins/nicira")
-CONFIG_KEYS = ["DEFAULT_TZ_UUID", "NVP_CONTROLLER_IP", "PORT", "USER",
-               "PASSWORD"]
+LOG = logging.getLogger("QuantumPlugin")
 
 
-def initConfig(cfile=None):
-    config = ConfigParser.ConfigParser()
-    if cfile is None:
-        if os.path.exists(CONFIG_FILE):
-            cfile = CONFIG_FILE
-        else:
-            cfile = find_config(os.path.abspath(os.path.dirname(__file__)))
-
-    if cfile is None:
-        raise Exception("Configuration file \"%s\" doesn't exist" % (cfile))
-    LOG.info("Using configuration file: %s" % cfile)
-    config.read(cfile)
-    LOG.debug("Config: %s" % config)
-    return config
-
-
-def find_config(basepath):
-    LOG.info("Looking for %s in %s" % (CONFIG_FILE, basepath))
-    for root, dirs, files in os.walk(basepath, followlinks=True):
-        if CONFIG_FILE in files:
-            return os.path.join(root, CONFIG_FILE)
-    for alternate_path in CONFIG_FILE_PATHS:
-        p = os.path.join(alternate_path, CONFIG_FILE)
-        if os.path.exists(p):
-            return p
-    return None
-
-
-def parse_config(config):
-    """Backwards compatible parsing.
-
-    :param config: ConfigParser object initilized with nvp.ini.
-    :returns: A tuple consisting of a control cluster object and a
-        plugin_config variable.
-    raises: In general, system exceptions are not caught but are propagated
-        up to the user. Config parsing is still very lightweight.
-        At some point, error handling needs to be significantly
-        enhanced to provide user friendly error messages, clean program
-        exists, rather than exceptions propagated to the user.
+def parse_config():
+    """Parse the supplied plugin configuration.
+
+    :returns: A tuple (db_options, nvp_options, clusters_options) where
+        'db_options' holds database connection parameters, 'nvp_options'
+        holds plugin-wide NVP parameters, and 'clusters_options' is a list
+        of per-cluster configuration dictionaries.
     """
-    # Extract plugin config parameters.
-    try:
-        failover_time = config.get('NVP', 'failover_time')
-    except ConfigParser.NoOptionError, e:
-        failover_time = str(DEFAULT_FAILOVER_TIME)
-
-    try:
-        concurrent_connections = config.get('NVP', 'concurrent_connections')
-    except ConfigParser.NoOptionError, e:
-        concurrent_connections = str(DEFAULT_CONCURRENT_CONNECTIONS)
-
-    plugin_config = {
-        'failover_time': failover_time,
-        'concurrent_connections': concurrent_connections,
-    }
-    LOG.info('parse_config(): plugin_config == "%s"' % plugin_config)
-
-    cluster = NVPCluster('cluster1')
-
-    # Extract connection information.
-    try:
-        defined_connections = config.get('NVP', 'NVP_CONTROLLER_CONNECTIONS')
-
-        for conn_key in defined_connections.split():
-            args = [config.get('NVP', 'DEFAULT_TZ_UUID')]
-            args.extend(config.get('NVP', conn_key).split(':'))
-            try:
-                cluster.add_controller(*args)
-            except Exception, e:
-                LOG.fatal('Invalid connection parameters: %s' % str(e))
-                sys.exit(1)
-
-        return cluster, plugin_config
-    except Exception, e:
-        LOG.info('No new style connections defined: %s' % e)
-
-        # Old style controller specification.
-        args = [config.get('NVP', k) for k in CONFIG_KEYS]
-        try:
-            cluster.add_controller(*args)
-        except Exception, e:
-            LOG.fatal('Invalid connection parameters.')
-            sys.exit(1)
-
-    return cluster, plugin_config
+    db_options = {"sql_connection": cfg.CONF.DATABASE.sql_connection}
+    db_options.update({'base': models_v2.model_base.BASEV2})
+    sql_max_retries = cfg.CONF.DATABASE.sql_max_retries
+    db_options.update({"sql_max_retries": sql_max_retries})
+    reconnect_interval = cfg.CONF.DATABASE.reconnect_interval
+    db_options.update({"reconnect_interval": reconnect_interval})
+    nvp_options = {'max_lp_per_bridged_ls': cfg.CONF.NVP.max_lp_per_bridged_ls}
+    nvp_options.update({'failover_time': cfg.CONF.NVP.failover_time})
+    nvp_options.update({'concurrent_connections':
+                        cfg.CONF.NVP.concurrent_connections})
+
+    nvp_conf = config.ClusterConfigOptions(cfg.CONF)
+    cluster_names = config.register_cluster_groups(nvp_conf)
+    nvp_conf.log_opt_values(LOG, logging.DEBUG)
+
+    clusters_options = []
+    for cluster_name in cluster_names:
+        clusters_options.append(
+            {'name': cluster_name,
+             'default_tz_uuid':
+             nvp_conf[cluster_name].default_tz_uuid,
+             'nvp_cluster_uuid':
+             nvp_conf[cluster_name].nvp_cluster_uuid,
+             'nova_zone_id':
+             nvp_conf[cluster_name].nova_zone_id,
+             'nvp_controller_connection':
+             nvp_conf[cluster_name].nvp_controller_connection, })
+    LOG.debug("cluster options:%s", clusters_options)
+    return db_options, nvp_options, clusters_options
 
 
 class NVPCluster(object):
-    """Encapsulates controller connection and api_client.
+    """Encapsulates controller connection and api_client for a cluster.
 
-    Initialized within parse_config().
-    Accessed within the NvpPlugin class.
+    Accessed within the NvpPluginV2 class.
 
     Each element in the self.controllers list is a dictionary that
     contains the following keys:
-        ip, port, user, password, default_tz_uuid
+        ip, port, user, password, default_tz_uuid, uuid, zone
 
     There may be some redundancy here, but that has been done to provide
     future flexibility.
@@ -165,10 +120,9 @@ class NVPCluster(object):
         ss.append('] }')
         return ''.join(ss)
 
-    def add_controller(self, default_tz_uuid, ip, port, user, password,
-                       request_timeout=DEFAULT_REQUEST_TIMEOUT,
-                       http_timeout=DEFAULT_HTTP_TIMEOUT,
-                       retries=DEFAULT_RETRIES, redirects=DEFAULT_REDIRECTS):
+    def add_controller(self, ip, port, user, password, request_timeout,
+                       http_timeout, retries, redirects,
+                       default_tz_uuid, uuid=None, zone=None):
         """Add a new set of controller parameters.
 
         :param ip: IP address of controller.
@@ -181,12 +135,16 @@ class NVPCluster(object):
         :param redirects: maximum number of server redirect responses to
             follow.
         :param default_tz_uuid: default transport zone uuid.
+        :param uuid: UUID of this cluster (used in MDI configs).
+        :param zone: Zone of this cluster (used in MDI configs).
         """
 
-        keys = ['ip', 'port', 'user', 'password', 'default_tz_uuid']
+        keys = [
+            'ip', 'user', 'password', 'default_tz_uuid', 'uuid', 'zone']
         controller_dict = dict([(k, locals()[k]) for k in keys])
 
-        int_keys = ['request_timeout', 'http_timeout', 'retries', 'redirects']
+        int_keys = [
+            'port', 'request_timeout', 'http_timeout', 'retries', 'redirects']
         for k in int_keys:
             controller_dict[k] = int(locals()[k])
 
@@ -239,279 +197,615 @@ class NVPCluster(object):
     def default_tz_uuid(self):
         return self.controllers[0]['default_tz_uuid']
 
+    @property
+    def zone(self):
+        return self.controllers[0]['zone']
+
+    @property
+    def uuid(self):
+        return self.controllers[0]['uuid']
 
-class NvpPlugin(object):
+
+class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
     """
-    NvpPlugin is a Quantum plugin that provides L2 Virtual Network
+    NvpPluginV2 is a Quantum plugin that provides L2 Virtual Network
     functionality using NVP.
     """
-    supported_extension_aliases = ["portstats"]
 
-    def __init__(self, configfile=None, loglevel=None, cli=False):
+    def __init__(self, loglevel=None):
         if loglevel:
             logging.basicConfig(level=loglevel)
             nvplib.LOG.setLevel(loglevel)
             NvpApiClient.LOG.setLevel(loglevel)
 
-        config = initConfig(configfile)
-        self.controller, self.plugin_config = parse_config(config)
-        c = self.controller
-        api_providers = [(x['ip'], x['port'], True) for x in c.controllers]
-
-        c.api_client = NvpApiClient.NVPApiHelper(
-            api_providers, c.user, c.password,
-            request_timeout=c.request_timeout, http_timeout=c.http_timeout,
-            retries=c.retries, redirects=c.redirects,
-            failover_time=int(self.plugin_config['failover_time']),
-            concurrent_connections=int(
-                self.plugin_config['concurrent_connections']))
+        self.db_opts, self.nvp_opts, self.clusters_opts = parse_config()
+        self.clusters = []
+        for c_opts in self.clusters_opts:
+            # Password is guaranteed to be the same across all controllers
+            # in the same NVP cluster.
+            cluster = NVPCluster(c_opts['name'])
+            for controller_connection in c_opts['nvp_controller_connection']:
+                args = controller_connection.split(':')
+                try:
+                    args.extend([c_opts['default_tz_uuid'],
+                                 c_opts['nvp_cluster_uuid'],
+                                 c_opts['nova_zone_id']])
+                    cluster.add_controller(*args)
+                except Exception:
+                    LOG.exception("Invalid connection parameters for "
+                                  "controller %s in cluster %s",
+                                  controller_connection,
+                                  c_opts['name'])
+                    raise
+
+            api_providers = [(x['ip'], x['port'], True)
+                             for x in cluster.controllers]
+            cluster.api_client = NvpApiClient.NVPApiHelper(
+                api_providers, cluster.user, cluster.password,
+                request_timeout=cluster.request_timeout,
+                http_timeout=cluster.http_timeout,
+                retries=cluster.retries,
+                redirects=cluster.redirects,
+                failover_time=self.nvp_opts['failover_time'],
+                concurrent_connections=self.nvp_opts['concurrent_connections'])
+
+            # TODO(salvatore-orlando): do login at first request,
+            # not when plugin, is instantiated
+            cluster.api_client.login()
+
+            # TODO(pjb): What if the cluster isn't reachable this
+            # instant?  It isn't good to fall back to invalid cluster
+            # strings.
+            # Default for future-versions
+            self.clusters.append(cluster)
+
+        # Connect and configure ovs_quantum db
+        options = {
+            'sql_connection': self.db_opts['sql_connection'],
+            'sql_max_retries': self.db_opts['sql_max_retries'],
+            'reconnect_interval': self.db_opts['reconnect_interval'],
+            'base': models_v2.model_base.BASEV2,
+        }
+        db.configure_db(options)
 
-        c.api_client.login()
+    @property
+    def cluster(self):
+        if len(self.clusters):
+            return self.clusters[0]
+        return None
 
-        # For testing..
-        self.api_client = self.controller.api_client
+    def clear_state(self):
+        nvplib.clear_state(self.clusters[0])
 
     def get_all_networks(self, tenant_id, **kwargs):
-        """
-        Returns a dictionary containing all <network_uuid, network_name> for
-        the specified tenant.
-
-        :returns: a list of mapping sequences with the following signature:
-                     [{'net-id': uuid that uniquely identifies
-                                      the particular quantum network,
-                        'net-name': a human-readable name associated
-                                      with network referenced by net-id
-                      },
-                       ....
-                       {'net-id': uuid that uniquely identifies the
-                                       particular quantum network,
-                        'net-name': a human-readable name associated
-                                       with network referenced by net-id
-                      }
-                   ]
-        :raises: None
-        """
-        networks = nvplib.get_all_networks(self.controller, tenant_id, [])
-        LOG.debug("get_all_networks() completed for tenant %s: %s" %
-                  (tenant_id, networks))
+        networks = []
+        for c in self.clusters:
+            networks.extend(nvplib.get_all_networks(c, tenant_id, networks))
+        LOG.debug("get_all_networks() completed for tenant %s: %s" % (
+            tenant_id, networks))
         return networks
 
-    def create_network(self, tenant_id, net_name, **kwargs):
+    def create_network(self, context, network):
         """
-        Creates a new Virtual Network, and assigns it a symbolic name.
         :returns: a sequence of mappings with the following signature:
-                    {'net-id': uuid that uniquely identifies the
-                                     particular quantum network,
-                     'net-name': a human-readable name associated
-                                    with network referenced by net-id
+                    {'id': UUID representing the network.
+                     'name': Human-readable name identifying the network.
+                     'tenant_id': Owner of network. only admin user
+                                  can specify a tenant_id other than its own.
+                     'admin_state_up': Sets admin state of network. if down,
+                                       network does not forward packets.
+                     'status': Indicates whether network is currently
+                               operational (limit values to "ACTIVE", "DOWN",
+                               "BUILD", and "ERROR").
+                     'subnets': Subnets associated with this network. Plan
+                                to allow fully specified subnets as part of
+                                network create.
                    }
-        :raises:
+        :raises: exception.NotImplementedError
         """
-        kwargs["controller"] = self.controller
-        return nvplib.create_network(tenant_id, net_name, **kwargs)
-
-    def create_custom_network(self, tenant_id, net_name, transport_zone,
-                              controller):
-        return self.create_network(tenant_id, net_name,
-                                   network_type="custom",
-                                   transport_zone=transport_zone,
-                                   controller=controller)
-
-    def delete_network(self, tenant_id, netw_id):
+        # FIXME(arosen) implement admin_state_up = False in NVP
+        if network['network']['admin_state_up'] is False:
+            LOG.warning("Network with admin_state_up=False are not yet "
+                        "supported by this plugin. Ignoring setting for "
+                        "network %s",
+                        network['network'].get('name', '<unknown>'))
+
+        tenant_id = self._get_tenant_id_for_create(context, network)
+        # TODO(salvatore-orlando): if the network is shared this should be
+        # probably stored into the lswitch with a tag
+        # TODO(salvatore-orlando): Important - provider networks support
+        # (might require a bridged TZ)
+        net = nvplib.create_network(network['network']['tenant_id'],
+                                    network['network']['name'],
+                                    clusters=self.clusters)
+
+        network['network']['id'] = net['net-id']
+        return super(NvpPluginV2, self).create_network(context, network)
+
+    def delete_network(self, context, id):
         """
         Deletes the network with the specified network identifier
         belonging to the specified tenant.
 
-        :returns: a sequence of mappings with the following signature:
-                    {'net-id': uuid that uniquely identifies the
-                                 particular quantum network
-                   }
+        :returns: None
         :raises: exception.NetworkInUse
         :raises: exception.NetworkNotFound
         """
-        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
-            raise exception.NetworkNotFound(net_id=netw_id)
-        nvplib.delete_network(self.controller, netw_id)
 
-        LOG.debug("delete_network() completed for tenant: %s" % tenant_id)
-        return {'net-id': netw_id}
+        super(NvpPluginV2, self).delete_network(context, id)
+        pairs = self._get_lswitch_cluster_pairs(id, context.tenant_id)
+        for (cluster, switches) in pairs:
+            nvplib.delete_networks(cluster, id, switches)
 
-    def get_network_details(self, tenant_id, netw_id):
+        LOG.debug("delete_network() completed for tenant: %s" %
+                  context.tenant_id)
+
+    def _get_lswitch_cluster_pairs(self, netw_id, tenant_id):
+        """Figure out the set of lswitches on each cluster that maps to this
+           network id"""
+        pairs = []
+        for c in self.clusters:
+            lswitches = []
+            try:
+                ls = nvplib.get_network(c, netw_id)
+                lswitches.append(ls['uuid'])
+            except exception.NetworkNotFound:
+                continue
+            pairs.append((c, lswitches))
+        if len(pairs) == 0:
+            raise exception.NetworkNotFound(net_id=netw_id)
+        LOG.debug("Returning pairs for network: %s" % (pairs))
+        return pairs
+
+    def get_network(self, context, id, fields=None, verbose=None):
         """
-        Retrieves a list of all the remote vifs that
-        are attached to the network.
+        Retrieves all attributes of the network, NOT including
+        the ports of that network.
 
         :returns: a sequence of mappings with the following signature:
-                    {'net-id': uuid that uniquely identifies the
-                                particular quantum network
-                     'net-name': a human-readable name associated
-                                 with network referenced by net-id
-                     'net-ifaces': ['vif1_on_network_uuid',
-                                    'vif2_on_network_uuid',...,'vifn_uuid']
+                    {'id': UUID representing the network.
+                     'name': Human-readable name identifying the network.
+                     'tenant_id': Owner of network. only admin user
+                                  can specify a tenant_id other than its own.
+                     'admin_state_up': Sets admin state of network. if down,
+                                       network does not forward packets.
+                     'status': Indicates whether network is currently
+                               operational (limit values to "ACTIVE", "DOWN",
+                               "BUILD", and "ERROR").
+                     'subnets': Subnets associated with this network. Plan
+                                to allow fully specified subnets as part of
+                                network create.
                    }
+
         :raises: exception.NetworkNotFound
         :raises: exception.QuantumException
         """
-        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
-            raise exception.NetworkNotFound(net_id=netw_id)
-        result = None
-        remote_vifs = []
-        switch = netw_id
-        lports = nvplib.query_ports(self.controller, switch,
-                                    relations="LogicalPortAttachment")
-
-        for port in lports:
-            relation = port["_relations"]
-            vic = relation["LogicalPortAttachment"]
-            if "vif_uuid" in vic:
-                remote_vifs.append(vic["vif_uuid"])
-
-        if not result:
-            result = nvplib.get_network(self.controller, switch)
-
-        d = {
-            "net-id": netw_id,
-            "net-ifaces": remote_vifs,
-            "net-name": result["display_name"],
-            "net-op-status": "UP",
-        }
-        LOG.debug("get_network_details() completed for tenant %s: %s" %
-                  (tenant_id, d))
+        result = {}
+        lswitch_query = "&uuid=%s" % id
+        # always look for the tenant_id in the resource itself rather than
+        # the context, as with shared networks context.tenant_id and
+        # network['tenant_id'] might differ on GETs
+        # goto to the plugin DB and fecth the network
+        network = self._get_network(context, id, verbose)
+        # TODO(salvatore-orlando): verify whether the query on os_tid is
+        # redundant or not.
+        if context.is_admin is False:
+            tenant_query = ("&tag=%s&tag_scope=os_tid"
+                            % network['tenant_id'])
+        else:
+            tenant_query = ""
+        # Then fetch the correspondiong logical switch in NVP as well
+        # TODO(salvatore-orlando): verify whether the step on NVP
+        # can be completely avoided
+        lswitch_url_path = (
+            "/ws.v1/lswitch?"
+            "fields=uuid,display_name%s%s"
+            % (tenant_query, lswitch_query))
+        try:
+            for c in self.clusters:
+                lswitch_results = nvplib.get_all_query_pages(
+                    lswitch_url_path, c)
+                if lswitch_results:
+                    result['lswitch-display-name'] = (
+                        lswitch_results[0]['display_name'])
+                    break
+        except Exception:
+            LOG.error("Unable to get switches: %s" % traceback.format_exc())
+            raise exception.QuantumException()
+
+        if 'lswitch-display-name' not in result:
+            raise exception.NetworkNotFound(net_id=id)
+
+        d = {'id': id,
+             'name': result['lswitch-display-name'],
+             'tenant_id': network['tenant_id'],
+             'admin_state_up': True,
+             'status': 'ACTIVE',
+             'shared': network['shared'],
+             'subnets': []}
+
+        LOG.debug("get_network() completed for tenant %s: %s" % (
+                  context.tenant_id, d))
         return d
 
-    def update_network(self, tenant_id, netw_id, **kwargs):
+    def get_networks(self, context, filters=None, fields=None, verbose=None):
+        """
+        Retrieves all attributes of the network, NOT including
+        the ports of that network.
+
+        :returns: a sequence of mappings with the following signature:
+                    {'id': UUID representing the network.
+                     'name': Human-readable name identifying the network.
+                     'tenant_id': Owner of network. only admin user
+                                  can specify a tenant_id other than its own.
+                     'admin_state_up': Sets admin state of network. if down,
+                                       network does not forward packets.
+                     'status': Indicates whether network is currently
+                               operational (limit values to "ACTIVE", "DOWN",
+                               "BUILD", and "ERROR"?
+                     'subnets': Subnets associated with this network. Plan
+                                to allow fully specified subnets as part of
+                                network create.
+                   }
+
+        :raises: exception.NetworkNotFound
+        :raises: exception.QuantumException
+        """
+        result = {}
+        nvp_lswitches = []
+        quantum_lswitches = (
+            super(NvpPluginV2, self).get_networks(context, filters))
+
+        if context.is_admin and not filters.get("tenant_id"):
+            tenant_filter = ""
+        elif filters.get("tenant_id"):
+            tenant_filter = ""
+            for tenant in filters.get("tenant_id"):
+                tenant_filter += "&tag=%s&tag_scope=os_tid" % tenant
+        else:
+            tenant_filter = "&tag=%s&tag_scope=os_tid" % context.tenant_id
+
+        lswitch_filters = "uuid,display_name,fabric_status"
+        lswitch_url_path = (
+            "/ws.v1/lswitch?fields=%s&relations=LogicalSwitchStatus%s"
+            % (lswitch_filters, tenant_filter))
+        try:
+            for c in self.clusters:
+                res = nvplib.get_all_query_pages(
+                    lswitch_url_path, c)
+
+                nvp_lswitches.extend(res)
+        except Exception:
+            LOG.error("Unable to get switches: %s" % traceback.format_exc())
+            raise exception.QuantumException()
+
+        # TODO (Aaron) This can be optimized
+        if filters.get("id"):
+            filtered_lswitches = []
+            for nvp_lswitch in nvp_lswitches:
+                for id in filters.get("id"):
+                    if id == nvp_lswitch['uuid']:
+                        filtered_lswitches.append(nvp_lswitch)
+            nvp_lswitches = filtered_lswitches
+
+        for quantum_lswitch in quantum_lswitches:
+            Found = False
+            for nvp_lswitch in nvp_lswitches:
+                if nvp_lswitch["uuid"] == quantum_lswitch["id"]:
+                    if (nvp_lswitch["_relations"]["LogicalSwitchStatus"]
+                            ["fabric_status"]):
+                        quantum_lswitch["status"] = "ACTIVE"
+                    else:
+                        quantum_lswitch["status"] = "DOWN"
+                    quantum_lswitch["name"] = nvp_lswitch["display_name"]
+                    nvp_lswitches.remove(nvp_lswitch)
+                    Found = True
+                    break
+
+            if not Found:
+                raise Exception("Quantum and NVP Databases are out of Sync!")
+        # do not make the case in which switches are found in NVP
+        # but not in Quantum catastrophic.
+        if len(nvp_lswitches):
+            LOG.warning("Found %s logical switches not bound "
+                        "to Quantum networks. Quantum and NVP are "
+                        "potentially out of sync", len(nvp_lswitches))
+
+        LOG.debug("get_networks() completed for tenant %s" % context.tenant_id)
+
+        if fields:
+            ret_fields = []
+            for quantum_lswitch in quantum_lswitches:
+                row = {}
+                for field in fields:
+                    row[field] = quantum_lswitch[field]
+                ret_fields.append(row)
+            return ret_fields
+
+        return quantum_lswitches
+
+    def update_network(self, context, id, network):
         """
         Updates the properties of a particular Virtual Network.
 
-        :returns: a sequence of mappings representing the new network
-                    attributes, with the following signature:
-                    {'net-id': uuid that uniquely identifies the
-                                 particular quantum network
-                     'net-name': the new human-readable name
-                                  associated with network referenced by net-id
+        :returns: a sequence of mappings with the following signature:
+        {'id': UUID representing the network.
+         'name': Human-readable name identifying the network.
+         'tenant_id': Owner of network. only admin user
+                      can specify a tenant_id other than its own.
+        'admin_state_up': Sets admin state of network. if down,
+                          network does not forward packets.
+        'status': Indicates whether network is currently
+                  operational (limit values to "ACTIVE", "DOWN",
+                               "BUILD", and "ERROR"?
+        'subnets': Subnets associated with this network. Plan
+                   to allow fully specified subnets as part of
+                   network create.
                    }
+
         :raises: exception.NetworkNotFound
+        :raises: exception.NoImplementedError
         """
-        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
-            raise exception.NetworkNotFound(net_id=netw_id)
-        result = nvplib.update_network(self.controller, netw_id, **kwargs)
-        LOG.debug("update_network() completed for tenant: %s" % tenant_id)
-        return {
-            'net-id': netw_id,
-            'net-name': result["display_name"],
-            'net-op-status': "UP",
-        }
 
-    def get_all_ports(self, tenant_id, netw_id, **kwargs):
+        if network["network"].get("admin_state_up"):
+            if network['network']["admin_state_up"] is False:
+                raise exception.NotImplementedError("admin_state_up=False "
+                                                    "networks are not "
+                                                    "supported.")
+        params = {}
+        params["network"] = network["network"]
+        pairs = self._get_lswitch_cluster_pairs(id, context.tenant_id)
+
+        #Only field to update in NVP is name
+        if network['network'].get("name"):
+            for (cluster, switches) in pairs:
+                for switch in switches:
+                    result = nvplib.update_network(cluster, switch, **params)
+
+        LOG.debug("update_network() completed for tenant: %s" %
+                  context.tenant_id)
+        return super(NvpPluginV2, self).update_network(context, id, network)
+
+    def get_ports(self, context, filters=None, fields=None, verbose=None):
         """
-        Retrieves all port identifiers belonging to the
-        specified Virtual Network.
+        Returns all ports from given tenant
+
+        :returns: a sequence of mappings with the following signature:
+        {'id': UUID representing the network.
+         'name': Human-readable name identifying the network.
+         'tenant_id': Owner of network. only admin user
+                      can specify a tenant_id other than its own.
+        'admin_state_up': Sets admin state of network. if down,
+                          network does not forward packets.
+        'status': Indicates whether network is currently
+                  operational (limit values to "ACTIVE", "DOWN",
+                               "BUILD", and "ERROR"?
+        'subnets': Subnets associated with this network. Plan
+                   to allow fully specified subnets as part of
+                   network create.
+                   }
 
-        :returns: a list of mapping sequences with the following signature:
-                     [{'port-id': uuid representing a particular port
-                                    on the specified quantum network
-                      },
-                       ....
-                       {'port-id': uuid representing a particular port
-                                     on the specified quantum network
-                      }
-                     ]
         :raises: exception.NetworkNotFound
         """
-        ids = []
-        filters = kwargs.get("filter_opts") or {}
-        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
-            raise exception.NetworkNotFound(net_id=netw_id)
-        LOG.debug("Getting logical ports on lswitch: %s" % netw_id)
-        lports = nvplib.query_ports(self.controller, netw_id, fields="uuid",
-                                    filters=filters)
-        for port in lports:
-            ids.append({"port-id": port["uuid"]})
-
-        # Delete from the filter so that Quantum doesn't attempt to filter on
-        # this too
-        if filters and "attachment" in filters:
-            del filters["attachment"]
-
-        LOG.debug("get_all_ports() completed for tenant: %s" % tenant_id)
-        LOG.debug("returning port listing:")
-        LOG.debug(ids)
-        return ids
-
-    def create_port(self, tenant_id, netw_id, port_init_state=None, **params):
+        quantum_lports = super(NvpPluginV2, self).get_ports(context, filters)
+        vm_filter = ""
+        tenant_filter = ""
+        # This is used when calling delete_network. Quantum checks to see if
+        # the network has any ports.
+        if filters.get("network_id"):
+            # FIXME (Aaron) If we get more than one network_id this won't work
+            lswitch = filters["network_id"][0]
+        else:
+            lswitch = "*"
+
+        if filters.get("device_id"):
+            for vm_id in filters.get("device_id"):
+                vm_filter = ("%stag_scope=vm_id&tag=%s&" % (vm_filter,
+                             hashlib.sha1(vm_id).hexdigest()))
+        else:
+            vm_id = ""
+
+        if filters.get("tenant_id"):
+            for tenant in filters.get("tenant_id"):
+                tenant_filter = ("%stag_scope=os_tid&tag=%s&" %
+                                 (tenant_filter, tenant))
+
+        nvp_lports = {}
+
+        lport_fields_str = ("tags,admin_status_enabled,display_name,"
+                            "fabric_status_up")
+        try:
+            for c in self.clusters:
+                lport_query_path = (
+                    "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
+                    "&relations=LogicalPortStatus" %
+                    (lswitch, lport_fields_str, vm_filter, tenant_filter))
+
+                ports = nvplib.get_all_query_pages(lport_query_path, c)
+                if ports:
+                    for port in ports:
+                        for tag in port["tags"]:
+                            if tag["scope"] == "q_port_id":
+                                nvp_lports[tag["tag"]] = port
+
+        except Exception:
+            LOG.error("Unable to get ports: %s" % traceback.format_exc())
+            raise exception.QuantumException()
+
+        lports = []
+        for quantum_lport in quantum_lports:
+            try:
+                quantum_lport["admin_state_up"] = (
+                    nvp_lports[quantum_lport["id"]]["admin_status_enabled"])
+
+                quantum_lport["name"] = (
+                    nvp_lports[quantum_lport["id"]]["display_name"])
+
+                if (nvp_lports[quantum_lport["id"]]
+                        ["_relations"]
+                        ["LogicalPortStatus"]
+                        ["fabric_status_up"]):
+                    quantum_lport["status"] = "ACTIVE"
+                else:
+                    quantum_lport["status"] = "DOWN"
+
+                del nvp_lports[quantum_lport["id"]]
+                lports.append(quantum_lport)
+            except KeyError:
+                raise Exception("Quantum and NVP Databases are out of Sync!")
+        # do not make the case in which ports are found in NVP
+        # but not in Quantum catastrophic.
+        if len(nvp_lports):
+            LOG.warning("Found %s logical ports not bound "
+                        "to Quantum ports. Quantum and NVP are "
+                        "potentially out of sync", len(nvp_lports))
+
+        if fields:
+            ret_fields = []
+            for lport in lports:
+                row = {}
+                for field in fields:
+                    row[field] = lport[field]
+                ret_fields.append(row)
+            return ret_fields
+        return lports
+
+    def create_port(self, context, port):
         """
         Creates a port on the specified Virtual Network.
+        Returns:
+
+        {"id": uuid represeting the port.
+         "network_id": uuid of network.
+         "tenant_id": tenant_id
+         "mac_address": mac address to use on this port.
+         "admin_state_up": Sets admin state of port. if down, port
+                           does not forward packets.
+         "status": dicates whether port is currently operational
+                   (limit values to "ACTIVE", "DOWN", "BUILD", and
+                   "ERROR"?)
+         "fixed_ips": list of subnet ID's and IP addresses to be used on
+                      this port
+         "device_id": identifies the device (e.g., virtual server) using
+                      this port.
+        }
 
-        :returns: a mapping sequence with the following signature:
-                    {'port-id': uuid representing the created port
-                                   on specified quantum network
-                   }
         :raises: exception.NetworkNotFound
         :raises: exception.StateInvalid
         """
-        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
-            raise exception.NetworkNotFound(net_id=netw_id)
-        params["controller"] = self.controller
-        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
-            raise exception.NetworkNotFound(net_id=netw_id)
-        result = nvplib.create_port(tenant_id, netw_id, port_init_state,
-                                    **params)
-        d = {
-            "port-id": result["uuid"],
-            "port-op-status": result["port-op-status"],
-        }
-        LOG.debug("create_port() completed for tenant %s: %s" % (tenant_id, d))
-        return d
 
-    def update_port(self, tenant_id, netw_id, portw_id, **params):
+        # Set admin_state_up False since not created in NVP set
+        port["port"]["admin_state_up"] = False
+
+        # First we allocate port in quantum database
+        try:
+            quantum_db = super(NvpPluginV2, self).create_port(context, port)
+        except Exception as e:
+            raise e
+
+        # Update fields obtained from quantum db
+        port["port"].update(quantum_db)
+
+        # We want port to be up in NVP
+        port["port"]["admin_state_up"] = True
+        params = {}
+        params["max_lp_per_bridged_ls"] = \
+            self.nvp_opts["max_lp_per_bridged_ls"]
+        params["port"] = port["port"]
+        params["clusters"] = self.clusters
+        tenant_id = self._get_tenant_id_for_create(context, port["port"])
+
+        try:
+            port["port"], nvp_port_id = nvplib.create_port(tenant_id,
+                                                           **params)
+            nvplib.plug_interface(self.clusters, port["port"]["network_id"],
+                                  nvp_port_id, "VifAttachment",
+                                  port["port"]["id"])
+        except Exception as e:
+            # failed to create port in NVP delete port from quantum_db
+            super(NvpPluginV2, self).delete_port(context, port["port"]["id"])
+            raise e
+
+        d = {"port-id": port["port"]["id"],
+             "port-op-status": port["port"]["status"]}
+
+        LOG.debug("create_port() completed for tenant %s: %s" %
+                  (tenant_id, d))
+
+        # update port with admin_state_up True
+        port_update = {"port": {"admin_state_up": True}}
+        return super(NvpPluginV2, self).update_port(context,
+                                                    port["port"]["id"],
+                                                    port_update)
+
+    def update_port(self, context, id, port):
         """
         Updates the properties of a specific port on the
         specified Virtual Network.
+        Returns:
+
+        {"id": uuid represeting the port.
+         "network_id": uuid of network.
+         "tenant_id": tenant_id
+         "mac_address": mac address to use on this port.
+         "admin_state_up": sets admin state of port. if down, port
+                           does not forward packets.
+         "status": dicates whether port is currently operational
+                   (limit values to "ACTIVE", "DOWN", "BUILD", and
+                   "ERROR"?)
+        "fixed_ips": list of subnet ID's and IP addresses to be used on
+                     this port
+        "device_id": identifies the device (e.g., virtual server) using
+                     this port.
+        }
 
-        :returns: a mapping sequence with the following signature:
-                    {'port-id': uuid representing the
-                                 updated port on specified quantum network
-                     'port-state': update port state (UP or DOWN)
-                   }
         :raises: exception.StateInvalid
         :raises: exception.PortNotFound
         """
-        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
-            raise exception.NetworkNotFound(net_id=netw_id)
+        params = {}
+
+        quantum_db = super(NvpPluginV2, self).get_port(context, id)
+
+        port_nvp, cluster = (
+            nvplib.get_port_by_quantum_tag(self.clusters,
+                                           quantum_db["network_id"], id))
+
         LOG.debug("Update port request: %s" % (params))
-        params["controller"] = self.controller
-        result = nvplib.update_port(netw_id, portw_id, **params)
-        LOG.debug("update_port() completed for tenant: %s" % tenant_id)
-        port = {
-            'port-id': portw_id,
-            'port-state': result["admin_status_enabled"],
-            'port-op-status': result["port-op-status"],
-        }
-        LOG.debug("returning updated port %s: " % port)
-        return port
 
-    def delete_port(self, tenant_id, netw_id, portw_id):
+        params["cluster"] = cluster
+        params["port"] = port["port"]
+        result = nvplib.update_port(quantum_db["network_id"],
+                                    port_nvp["uuid"], **params)
+        LOG.debug("update_port() completed for tenant: %s" % context.tenant_id)
+
+        return super(NvpPluginV2, self).update_port(context, id, port)
+
+    def delete_port(self, context, id):
         """
         Deletes a port on a specified Virtual Network,
         if the port contains a remote interface attachment,
         the remote interface is first un-plugged and then the port
         is deleted.
 
-        :returns: a mapping sequence with the following signature:
-                    {'port-id': uuid representing the deleted port
-                                 on specified quantum network
-                   }
+        :returns: None
         :raises: exception.PortInUse
         :raises: exception.PortNotFound
         :raises: exception.NetworkNotFound
         """
-        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
-            raise exception.NetworkNotFound(net_id=netw_id)
-        nvplib.delete_port(self.controller, netw_id, portw_id)
-        LOG.debug("delete_port() completed for tenant: %s" % tenant_id)
-        return {"port-id": portw_id}
 
-    def get_port_details(self, tenant_id, netw_id, portw_id):
+        port, cluster = nvplib.get_port_by_quantum_tag(self.clusters,
+                                                       '*', id)
+        if port is None:
+            raise exception.PortNotFound(port_id=id)
+        # TODO(bgh): if this is a bridged network and the lswitch we just got
+        # back will have zero ports after the delete we should garbage collect
+        # the lswitch.
+        nvplib.delete_port(cluster, port)
+
+        LOG.debug("delete_port() completed for tenant: %s" % context.tenant_id)
+        return  super(NvpPluginV2, self).delete_port(context, id)
+
+    def get_port(self, context, id, fields=None, verbose=None):
         """
         This method allows the user to retrieve a remote interface
         that is attached to this particular port.
@@ -519,89 +813,31 @@ class NvpPlugin(object):
         :returns: a mapping sequence with the following signature:
                     {'port-id': uuid representing the port on
                                  specified quantum network
-                     'net-id': uuid representing the particular
-                                quantum network
                      'attachment': uuid of the virtual interface
                                    bound to the port, None otherwise
+                     'port-op-status': operational status of the port
+                     'port-state': admin status of the port
                     }
         :raises: exception.PortNotFound
         :raises: exception.NetworkNotFound
         """
-        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
-            raise exception.NetworkNotFound(net_id=netw_id)
-        port = nvplib.get_port(self.controller, netw_id, portw_id,
-                               "LogicalPortAttachment")
-        state = "ACTIVE" if port["admin_status_enabled"] else "DOWN"
-        op_status = nvplib.get_port_status(self.controller, netw_id, portw_id)
-
-        relation = port["_relations"]
-        attach_type = relation["LogicalPortAttachment"]["type"]
-
-        vif_uuid = "None"
-        if attach_type == "VifAttachment":
-            vif_uuid = relation["LogicalPortAttachment"]["vif_uuid"]
-
-        d = {
-            "port-id": portw_id, "attachment": vif_uuid,
-            "net-id": netw_id, "port-state": state,
-            "port-op-status": op_status,
-        }
-        LOG.debug("Port details for tenant %s: %s" % (tenant_id, d))
-        return d
-
-    def plug_interface(self, tenant_id, netw_id, portw_id,
-                       remote_interface_id):
-        """
-        Attaches a remote interface to the specified port on the
-        specified Virtual Network.
-
-        :returns: None
-        :raises: exception.NetworkNotFound
-        :raises: exception.PortNotFound
-        :raises: exception.AlreadyAttached
-                    (? should the network automatically unplug/replug)
-        """
-        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
-            raise exception.NetworkNotFound(net_id=netw_id)
-        result = nvplib.plug_interface(self.controller, netw_id,
-                                       portw_id, "VifAttachment",
-                                       attachment=remote_interface_id)
-        LOG.debug("plug_interface() completed for %s: %s" %
-                  (tenant_id, result))
 
-    def unplug_interface(self, tenant_id, netw_id, portw_id):
-        """
-        Detaches a remote interface from the specified port on the
-        specified Virtual Network.
+        quantum_db = super(NvpPluginV2, self).get_port(context, id, fields,
+                                                       verbose)
 
-        :returns: None
-        :raises: exception.NetworkNotFound
-        :raises: exception.PortNotFound
-        """
-        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
-            raise exception.NetworkNotFound(net_id=netw_id)
-        result = nvplib.unplug_interface(self.controller, netw_id, portw_id)
+        port, cluster = (
+            nvplib.get_port_by_quantum_tag(self.clusters,
+                                           quantum_db["network_id"], id))
 
-        LOG.debug("unplug_interface() completed for tenant %s: %s" %
-                  (tenant_id, result))
+        quantum_db["admin_state_up"] = port["admin_status_enabled"]
+        if port["_relations"]["LogicalPortStatus"]["fabric_status_up"]:
+            quantum_db["status"] = "ACTIVE"
+        else:
+            quantum_db["status"] = "DOWN"
 
-    def get_port_stats(self, tenant_id, network_id, port_id):
-        """
-        Returns port statistics for a given port.
-
-        {
-          "rx_packets": 0,
-          "rx_bytes": 0,
-          "tx_errors": 0,
-          "rx_errors": 0,
-          "tx_bytes": 0,
-          "tx_packets": 0
-        }
+        LOG.debug("Port details for tenant %s: %s" %
+                  (context.tenant_id, quantum_db))
+        return quantum_db
 
-        :returns: dict() of stats
-        :raises: exception.NetworkNotFound
-        :raises: exception.PortNotFound
-        """
-        if not nvplib.check_tenant(self.controller, network_id, tenant_id):
-            raise exception.NetworkNotFound(net_id=network_id)
-        return nvplib.get_port_stats(self.controller, network_id, port_id)
    def get_plugin_version(self):
        """Return the NVP plugin version string (module-level constant)."""
        return PLUGIN_VERSION
index 0697d2ebcd9b79d865388676a9640e2b3aa886c8..f76d4a9610e2c7bd901dd3ff35b9ce76808b87ff 100644 (file)
@@ -1,4 +1,5 @@
-# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
+# Copyright 2012 Nicira Networks, Inc.
+# All Rights Reserved
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -11,3 +12,5 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..f76d4a9610e2c7bd901dd3ff35b9ce76808b87ff 100644 (file)
@@ -0,0 +1,16 @@
+# Copyright 2012 Nicira Networks, Inc.
+# All Rights Reserved
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
index 545387c58c1feb88b4eed7684f8f23d271178fe1..e3cc9d1d0d86f3515382fcc26cd99e84135e5f4a 100644 (file)
@@ -1,4 +1,5 @@
-# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
+# Copyright 2009-2012 Nicira Networks, Inc.
+# All Rights Reserved
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -12,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 #
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
 # Author: David Lapsley <dlapsley@nicira.com>, Nicira Networks, Inc.
 
 from abc import ABCMeta
@@ -28,8 +31,6 @@ class NvpApiClient(object):
 
     __metaclass__ = ABCMeta
 
-    # Default connection timeout for a controller.  After CONN_IDLE_TIMEOUT
-    # seconds the client attempt to reconnect.
     CONN_IDLE_TIMEOUT = 60 * 15
 
     @abstractmethod
index 6a71c49cf3fdf242830cb6e8aac934f902ade1c5..c8ba5f7034d8bb88dd71667bd5df0b42b3fd2cf8 100644 (file)
@@ -1,4 +1,5 @@
-# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
+# Copyright 2009-2012 Nicira Networks, Inc.
+# All Rights Reserved
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
 
+import client
+import eventlet
 import httplib
 import logging
+import request_eventlet
 import time
 
-import eventlet
-
-import quantum.plugins.nicira.nicira_nvp_plugin.api_client.client as client
-from quantum.plugins.nicira.nicira_nvp_plugin.api_client.common import (
-    _conn_str,
-)
-import quantum.plugins.nicira.nicira_nvp_plugin.api_client.request_eventlet
+from common import _conn_str
 
+eventlet.monkey_patch()
 
 logging.basicConfig(level=logging.INFO)
-LOG = logging.getLogger('nvp_api_client')
-
+lg = logging.getLogger('nvp_api_client')
 
 # Default parameters.
 DEFAULT_FAILOVER_TIME = 5
 DEFAULT_CONCURRENT_CONNECTIONS = 3
 DEFAULT_CONNECT_TIMEOUT = 5
+GENERATION_ID_TIMEOUT = -1  # if set to -1 then disabled
 
 
 class NvpApiClientEventlet(object):
-    """Eventlet-based implementation of NvpApiClient ABC."""
+    '''Eventlet-based implementation of NvpApiClient ABC.'''
 
     CONN_IDLE_TIMEOUT = 60 * 15
 
@@ -44,17 +46,24 @@ class NvpApiClientEventlet(object):
                  concurrent_connections=DEFAULT_CONCURRENT_CONNECTIONS,
                  use_https=True,
                  connect_timeout=DEFAULT_CONNECT_TIMEOUT,
-                 failover_time=DEFAULT_FAILOVER_TIME):
-        """Constructor
-
-        Args:
-            api_providers: a list of tuples of the form: (host, port, is_ssl).
-            user: login username.
-            password: login password.
-            concurrent_connections: total number of concurrent connections.
-            use_https: whether or not to use https for requests.
-            connect_timeout: connection timeout in seconds.
-        """
+                 failover_time=DEFAULT_FAILOVER_TIME,
+                 nvp_gen_timeout=GENERATION_ID_TIMEOUT):
+        '''Constructor
+
+        :param api_providers: a list of tuples of the form: (host, port,
+            is_ssl).
+        :param user: login username.
+        :param password: login password.
+        :param concurrent_connections: total number of concurrent connections.
+        :param use_https: whether or not to use https for requests.
+        :param connect_timeout: connection timeout in seconds.
+        :param failover_time: time from when a connection pool is switched to
+            the next connection released via acquire_connection().
+        :param nvp_gen_timeout: controls how long the generation id is kept.
+            If set to -1 the generation id never times out.
+        '''
+        if not api_providers:
+            api_providers = []
         self._api_providers = set([tuple(p) for p in api_providers])
         self._user = user
         self._password = password
@@ -62,22 +71,27 @@ class NvpApiClientEventlet(object):
         self._use_https = use_https
         self._connect_timeout = connect_timeout
         self._failover_time = failover_time
-
-        # Connection pool is a queue. Head of the queue is the
-        # connection pool with the highest priority.
-        self._conn_pool = eventlet.queue.Queue()
-        for host, port, is_ssl in self._api_providers:
-            provider_conn_pool = eventlet.queue.Queue()
+        self._nvp_config_gen = None
+        self._nvp_config_gen_ts = None
+        self._nvp_gen_timeout = nvp_gen_timeout
+
+        # Connection pool is a list of queues.
+        self._conn_pool = list()
+        conn_pool_idx = 0
+        for host, port, is_ssl in api_providers:
+            provider_conn_pool = eventlet.queue.Queue(
+                maxsize=concurrent_connections)
             for i in range(concurrent_connections):
                 # All connections in a provider_conn_pool have the
                 # same priority (they connect to the same server).
                 conn = self._create_connection(host, port, is_ssl)
-                conn.conn_pool = provider_conn_pool
+                conn.idx = conn_pool_idx
                 provider_conn_pool.put(conn)
 
-            self._conn_pool.put(provider_conn_pool)
+            self._conn_pool.append(provider_conn_pool)
+            conn_pool_idx += 1
 
-        self._active_conn_pool = self._conn_pool.get()
+        self._active_conn_pool_idx = 0
 
         self._cookie = None
         self._need_login = True
@@ -106,81 +120,123 @@ class NvpApiClientEventlet(object):
     def password(self):
         return self._password
 
+    @property
+    def nvp_config_gen(self):
+        # If nvp_gen_timeout is not -1 then:
+        # Maintain a timestamp along with the generation ID.  Hold onto the
+        # ID long enough to be useful and block on sequential requests but
+        # not long enough to persist when Onix db is cleared, which resets
+        # the generation ID, causing the DAL to block indefinitely with some
+        # number that's higher than the cluster's value.
+        if self._nvp_gen_timeout != -1:
+            ts = self._nvp_config_gen_ts
+            if ts is not None:
+                if (time.time() - ts) > self._nvp_gen_timeout:
+                    return None
+        return self._nvp_config_gen
+
+    @nvp_config_gen.setter
+    def nvp_config_gen(self, value):
+        if self._nvp_config_gen != value:
+            if self._nvp_gen_timeout != -1:
+                self._nvp_config_gen_ts = time.time()
+        self._nvp_config_gen = value
+
     @property
     def auth_cookie(self):
         return self._cookie
 
-    def acquire_connection(self):
-        """Check out an available HTTPConnection instance.
+    def acquire_connection(self, rid=-1):
+        '''Check out an available HTTPConnection instance.
 
         Blocks until a connection is available.
 
-        Returns: An available HTTPConnection instance or None if no
+        :param rid: request id passed in from request eventlet.
+        :returns: An available HTTPConnection instance or None if no
                  api_providers are configured.
-        """
+        '''
         if not self._api_providers:
+            lg.warn("[%d] no API providers currently available." % rid)
             return None
 
         # The sleep time is to give controllers time to become consistent after
         # there has been a change in the controller used as the api_provider.
         now = time.time()
         if now < getattr(self, '_issue_conn_barrier', now):
-            LOG.info("acquire_connection() waiting for timer to expire.")
+            lg.warn("[%d] Waiting for failover timer to expire." % rid)
             time.sleep(self._issue_conn_barrier - now)
 
-        if self._active_conn_pool.empty():
-            LOG.debug("Waiting to acquire an API client connection")
+        # Print out a warning if all connections are in use.
+        if self._conn_pool[self._active_conn_pool_idx].empty():
+            lg.debug("[%d] Waiting to acquire client connection." % rid)
+
+        # Try to acquire a connection (block in get() until connection
+        # available or timeout occurs).
+        active_conn_pool_idx = self._active_conn_pool_idx
+        conn = self._conn_pool[active_conn_pool_idx].get()
 
-        # get() call is blocking.
-        conn = self._active_conn_pool.get()
+        if active_conn_pool_idx != self._active_conn_pool_idx:
+            # active_conn_pool became inactive while we were waiting.
+            # Put connection back on old pool and try again.
+            lg.warn("[%d] Active pool expired while waiting for connection: %s"
+                    % (rid, _conn_str(conn)))
+            self._conn_pool[active_conn_pool_idx].put(conn)
+            return self.acquire_connection(rid=rid)
+
+        # Check if the connection has been idle too long.
         now = time.time()
         if getattr(conn, 'last_used', now) < now - self.CONN_IDLE_TIMEOUT:
-            LOG.info("Connection %s idle for %0.2f seconds; reconnecting." %
-                     (_conn_str(conn), now - conn.last_used))
+            lg.info("[%d] Connection %s idle for %0.2f seconds; reconnecting."
+                    % (rid, _conn_str(conn), now - conn.last_used))
             conn = self._create_connection(*self._conn_params(conn))
 
             # Stash conn pool so conn knows where to go when it releases.
-            conn.conn_pool = self._active_conn_pool
+            conn.idx = self._active_conn_pool_idx
 
         conn.last_used = now
-        LOG.debug("API client connection %s acquired" % _conn_str(conn))
+        qsize = self._conn_pool[self._active_conn_pool_idx].qsize()
+        lg.debug("[%d] Acquired connection %s. %d connection(s) available."
+                 % (rid, _conn_str(conn), qsize))
         return conn
 
-    def release_connection(self, http_conn, bad_state=False):
-        """Mark HTTPConnection instance as available for check-out.
+    def release_connection(self, http_conn, bad_state=False, rid=-1):
+        '''Mark HTTPConnection instance as available for check-out.
 
-        Args:
-            http_conn: An HTTPConnection instance obtained from this
-                instance.
-            bad_state: True if http_conn is known to be in a bad state
+        :param http_conn: An HTTPConnection instance obtained from this
+            instance.
+        :param bad_state: True if http_conn is known to be in a bad state
                 (e.g. connection fault.)
-        """
+        :param rid: request id passed in from request eventlet.
+        '''
         if self._conn_params(http_conn) not in self._api_providers:
-            LOG.debug(("Released connection '%s' is no longer an API provider "
-                       "for the cluster") % _conn_str(http_conn))
+            lg.warn("[%d] Released connection '%s' is not an API provider "
+                    "for the cluster" % (rid, _conn_str(http_conn)))
             return
 
         # Retrieve "home" connection pool.
-        conn_pool = http_conn.conn_pool
+        conn_pool_idx = http_conn.idx
+        conn_pool = self._conn_pool[conn_pool_idx]
         if bad_state:
-            # reconnect
-            LOG.info("API connection fault, reconnecting to %s" %
-                     _conn_str(http_conn))
+            # Reconnect to provider.
+            lg.warn("[%d] Connection returned in bad state, reconnecting to %s"
+                    % (rid, _conn_str(http_conn)))
             http_conn = self._create_connection(*self._conn_params(http_conn))
-            http_conn.conn_pool = conn_pool
-            conn_pool.put(http_conn)
-
-            if self._active_conn_pool == http_conn.conn_pool:
-                # Get next connection from the connection pool and make it
-                # active.
-                LOG.info("API connection fault changing active_conn_pool.")
-                self._conn_pool.put(self._active_conn_pool)
-                self._active_conn_pool = self._conn_pool.get()
+            http_conn.idx = conn_pool_idx
+
+            if self._active_conn_pool_idx == http_conn.idx:
+                # This pool is no longer in a good state. Switch to next pool.
+                self._active_conn_pool_idx += 1
+                self._active_conn_pool_idx %= len(self._conn_pool)
+                lg.warn("[%d] Switched active_conn_pool from %d to %d."
+                        % (rid, http_conn.idx, self._active_conn_pool_idx))
+
+                # No connections to the new provider allowed until after this
+                # timer has expired (allow time for synchronization).
                 self._issue_conn_barrier = time.time() + self._failover_time
-        else:
-            conn_pool.put(http_conn)
 
-        LOG.debug("API client connection %s released" % _conn_str(http_conn))
+        conn_pool.put(http_conn)
+        lg.debug("[%d] Released connection %s. %d connection(s) available."
+                 % (rid, _conn_str(http_conn), conn_pool.qsize()))
 
     @property
     def need_login(self):
@@ -191,20 +247,19 @@ class NvpApiClientEventlet(object):
         self._need_login = val
 
     def wait_for_login(self):
+        '''Block until a login has occurred for the current API provider.'''
         if self._need_login:
             if self._doing_login_sem.acquire(blocking=False):
                 self.login()
                 self._doing_login_sem.release()
             else:
-                LOG.debug("Waiting for auth to complete")
+                lg.debug("Waiting for auth to complete")
                 self._doing_login_sem.acquire()
                 self._doing_login_sem.release()
         return self._cookie
 
     def login(self):
-        """Issue login request and update authentication cookie."""
-        request_eventlet = (quantum.plugins.nicira.nicira_nvp_plugin.
-                            api_client.request_eventlet)
+        '''Issue login request and update authentication cookie.'''
         g = request_eventlet.NvpLoginRequestEventlet(
             self, self._user, self._password)
         g.start()
@@ -212,16 +267,17 @@ class NvpApiClientEventlet(object):
 
         if ret:
             if isinstance(ret, Exception):
-                LOG.error('NvpApiClient: login error "%s"' % ret)
+                lg.error('NvpApiClient: login error "%s"' % ret)
                 raise ret
 
             self._cookie = None
             cookie = ret.getheader("Set-Cookie")
             if cookie:
-                LOG.debug("Saving new authentication cookie '%s'" % cookie)
+                lg.debug("Saving new authentication cookie '%s'" % cookie)
                 self._cookie = cookie
                 self._need_login = False
 
+        # TODO: also handle the case where ret is an error (Exception).
         if not ret:
             return None
 
index 33c6503d1ec67aee412bfdacae5ad830a73423cd..cdf94174de611ae2c6b9a461ea3663a84100a99e 100644 (file)
@@ -1,4 +1,5 @@
-# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
+# Copyright 2009-2012 Nicira Networks, Inc.
+# All Rights Reserved
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
 
 import httplib
-
 import mock
 
 
index a09468a2358becc91e49ff22a992ab3e0abd03ea..fb18b5a158da8d42c4d81f61a4ef05c1ee874f7b 100644 (file)
@@ -1,4 +1,5 @@
-# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
+# Copyright 2009-2012 Nicira Networks, Inc.
+# All Rights Reserved
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -11,6 +12,9 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
 
 from abc import ABCMeta
 from abc import abstractmethod
index 6fcb9323024280f19ff54228b0157c6ba790ada7..f05ca92cf9481f6bd53a076486379da39d6dc63c 100644 (file)
@@ -1,4 +1,5 @@
-# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
+# Copyright 2009-2012 Nicira Networks, Inc.
+# All Rights Reserved
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
 
+import copy
+import eventlet
 import httplib
+import json
 import logging
-import time
 import urllib
 import urlparse
+import request
+import time
 
-import eventlet
+import client_eventlet
+from common import _conn_str
 from eventlet import timeout
 
-from quantum.openstack.common import jsonutils
-import quantum.plugins.nicira.nicira_nvp_plugin.api_client.client_eventlet
-from quantum.plugins.nicira.nicira_nvp_plugin.api_client.common import (
-    _conn_str,
-)
-import quantum.plugins.nicira.nicira_nvp_plugin.api_client.request as request
-
+eventlet.monkey_patch()
 
 logging.basicConfig(level=logging.INFO)
-LOG = logging.getLogger("nvp_api_request")
-
-
-USER_AGENT = "NVP gevent client/1.0"
+lg = logging.getLogger("nvp_api_request")
+USER_AGENT = "NVP eventlet client/1.0"
 
 # Default parameters.
 DEFAULT_REQUEST_TIMEOUT = 30
 DEFAULT_HTTP_TIMEOUT = 10
 DEFAULT_RETRIES = 2
 DEFAULT_REDIRECTS = 2
-API_REQUEST_POOL_SIZE = 10000
+DEFAULT_API_REQUEST_POOL_SIZE = 1000
+DEFAULT_MAXIMUM_REQUEST_ID = 4294967295
 
 
 class NvpApiRequestEventlet:
@@ -50,6 +52,7 @@ class NvpApiRequestEventlet:
     (e.g. those used by the Quantum NVP Plugin).
     '''
 
+    # List of allowed status codes.
     ALLOWED_STATUS_CODES = [
         httplib.OK,
         httplib.CREATED,
@@ -62,11 +65,23 @@ class NvpApiRequestEventlet:
         httplib.NOT_FOUND,
         httplib.CONFLICT,
         httplib.INTERNAL_SERVER_ERROR,
-        httplib.SERVICE_UNAVAILABLE,
+        httplib.SERVICE_UNAVAILABLE
     ]
 
+    # Maximum number of green threads present in the system at one time.
+    API_REQUEST_POOL_SIZE = DEFAULT_API_REQUEST_POOL_SIZE
+
+    # Pool of green threads. One green thread is allocated per incoming
+    # request. Incoming requests will block when the pool is empty.
     API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE)
 
+    # A unique id is assigned to each incoming request. When the current
+    # request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0.
+    MAXIMUM_REQUEST_ID = DEFAULT_MAXIMUM_REQUEST_ID
+
+    # The request id for the next incoming request.
+    CURRENT_REQUEST_ID = 0
+
     def __init__(self, nvp_api_client, url, method="GET", body=None,
                  headers=None,
                  request_timeout=DEFAULT_REQUEST_TIMEOUT,
@@ -74,7 +89,7 @@ class NvpApiRequestEventlet:
                  auto_login=True,
                  redirects=DEFAULT_REDIRECTS,
                  http_timeout=DEFAULT_HTTP_TIMEOUT):
-
+        '''Constructor.'''
         self._api_client = nvp_api_client
         self._url = url
         self._method = method
@@ -93,27 +108,45 @@ class NvpApiRequestEventlet:
 
         self._green_thread = None
 
+        # Retrieve and store this instance's unique request id.
+        self._request_id = NvpApiRequestEventlet.CURRENT_REQUEST_ID
+
+        # Update the class variable that tracks request id.
+        # Request IDs wrap around at MAXIMUM_REQUEST_ID
+        next_request_id = self._request_id + 1
+        next_request_id %= NvpApiRequestEventlet.MAXIMUM_REQUEST_ID
+        NvpApiRequestEventlet.CURRENT_REQUEST_ID = next_request_id
+
     @classmethod
     def _spawn(cls, func, *args, **kwargs):
+        '''Allocate a green thread from the class pool.'''
         return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs)
 
     def spawn(self, func, *args, **kwargs):
+        '''Spawn a new green thread with the supplied function and args.'''
         return self.__class__._spawn(func, *args, **kwargs)
 
+    def _rid(self):
+        '''Return current request id.'''
+        return self._request_id
+
     @classmethod
     def joinall(cls):
+        '''Wait for all outstanding requests to complete.'''
         return cls.API_REQUEST_POOL.waitall()
 
     def join(self):
+        '''Wait for instance green thread to complete.'''
         if self._green_thread is not None:
             return self._green_thread.wait()
-        LOG.error('Joining on invalid green thread')
         return Exception('Joining an invalid green thread')
 
     def start(self):
+        '''Start request processing.'''
         self._green_thread = self.spawn(self._run)
 
     def copy(self):
+        '''Return a copy of this request instance.'''
         return NvpApiRequestEventlet(
             self._api_client, self._url, self._method, self._body,
             self._headers, self._request_timeout, self._retries,
@@ -121,32 +154,42 @@ class NvpApiRequestEventlet:
 
     @property
     def request_error(self):
+        '''Return any errors associated with this instance.'''
         return self._request_error
 
     def _run(self):
+        '''Method executed within green thread.'''
         if self._request_timeout:
             # No timeout exception escapes the with block.
             with timeout.Timeout(self._request_timeout, False):
                 return self._handle_request()
 
-            LOG.info('Request timeout handling request.')
+            lg.info('[%d] Request timeout.' % self._rid())
             self._request_error = Exception('Request timeout')
             return None
         else:
             return self._handle_request()
 
     def _request_str(self, conn, url):
+        '''Return string representation of connection.'''
         return "%s %s/%s" % (self._method, _conn_str(conn), url)
 
     def _issue_request(self):
-        conn = self._api_client.acquire_connection()
+        '''Issue a request to a provider.'''
+        conn = self._api_client.acquire_connection(rid=self._rid())
         if conn is None:
             error = Exception("No API connections available")
             self._request_error = error
             return error
 
+        # Preserve the acquired connection as conn may be over-written by
+        # redirects below.
+        acquired_conn = conn
+
         url = self._url
-        LOG.info("Issuing request '%s'" % self._request_str(conn, url))
+        lg.debug("[%d] Issuing - request '%s'" %
+                 (self._rid(),
+                 self._request_str(conn, url)))
         issued_time = time.time()
         is_conn_error = False
         try:
@@ -161,66 +204,92 @@ class NvpApiRequestEventlet:
                 elif conn.sock.gettimeout() != self._http_timeout:
                     conn.sock.settimeout(self._http_timeout)
 
+                headers = copy.copy(self._headers)
+                gen = self._api_client.nvp_config_gen
+                if gen:
+                    headers["X-Nvp-Wait-For-Config-Generation"] = gen
+                    lg.debug("Setting %s request header: %s" %
+                             ('X-Nvp-Wait-For-Config-Generation', gen))
                 try:
-                    conn.request(self._method, url, self._body, self._headers)
-                except Exception, e:
-                    LOG.info('_issue_request: conn.request() exception: %s' %
-                             e)
+                    conn.request(self._method, url, self._body, headers)
+                except Exception as e:
+                    lg.warn('[%d] Exception issuing request: %s' %
+                            (self._rid(), e))
                     raise e
 
                 response = conn.getresponse()
                 response.body = response.read()
                 response.headers = response.getheaders()
-                LOG.info("Request '%s' complete: %s (%0.2f seconds)"
-                         % (self._request_str(conn, url), response.status,
-                            time.time() - issued_time))
+                lg.debug("[%d] Completed request '%s': %s (%0.2f seconds)"
+                         % (self._rid(), self._request_str(conn, url),
+                            response.status, time.time() - issued_time))
+
+                new_gen = response.getheader('X-Nvp-Config-Generation', None)
+                if new_gen:
+                    lg.debug("Reading %s response header: %s" %
+                             ('X-Nvp-config-Generation', new_gen))
+                    if (self._api_client.nvp_config_gen is None or
+                            self._api_client.nvp_config_gen < int(new_gen)):
+                        self._api_client.nvp_config_gen = int(new_gen)
+
                 if response.status not in [httplib.MOVED_PERMANENTLY,
                                            httplib.TEMPORARY_REDIRECT]:
                     break
                 elif redirects >= self._redirects:
-                    LOG.warn("Maximum redirects exceeded, aborting request")
+                    lg.info("[%d] Maximum redirects exceeded, aborting request"
+                            % self._rid())
                     break
                 redirects += 1
+
+                # In the following call, conn is replaced by the connection
+                # specified in the redirect response from the server.
                 conn, url = self._redirect_params(conn, response.headers)
                 if url is None:
                     response.status = httplib.INTERNAL_SERVER_ERROR
                     break
-                LOG.info("Redirecting request to: %s" %
-                         self._request_str(conn, url))
-
-            # If we receive any of these responses, then our server did not
-            # process our request and may be in an errored state. Raise an
-            # exception, which will cause the the conn to be released with
-            # is_conn_error == True which puts the conn on the back of the
-            # client's priority queue.
+                lg.info("[%d] Redirecting request to: %s" %
+                        (self._rid(), self._request_str(conn, url)))
+
+            # FIX for #9415. If we receive any of these responses, then
+            # our server did not process our request and may be in an
+            # errored state. Raise an exception, which will cause
+            # the conn to be released with is_conn_error == True
+            # which puts the conn on the back of the client's priority
+            # queue.
             if response.status >= 500:
-                LOG.warn("API Request '%s %s' received: %s" %
-                         (self._method, self._url, response.status))
+                lg.warn("[%d] Request '%s %s' received: %s"
+                        % (self._rid(), self._method, self._url,
+                           response.status))
                 raise Exception('Server error return: %s' %
                                 response.status)
             return response
-        except Exception, e:
+        except Exception as e:
             if isinstance(e, httplib.BadStatusLine):
                 msg = "Invalid server response"
             else:
                 msg = unicode(e)
-            LOG.warn("Request '%s' failed: %s (%0.2f seconds)"
-                     % (self._request_str(conn, url), msg,
-                        time.time() - issued_time))
+            lg.warn("[%d] Failed request '%s': %s (%0.2f seconds)"
+                    % (self._rid(), self._request_str(conn, url), msg,
+                       time.time() - issued_time))
             self._request_error = e
             is_conn_error = True
             return e
         finally:
-            self._api_client.release_connection(conn, is_conn_error)
+            # Make sure we release the original connection provided by the
+            # acquire_connection() call above.
+            self._api_client.release_connection(acquired_conn, is_conn_error,
+                                                rid=self._rid())
 
     def _redirect_params(self, conn, headers):
+        '''Process redirect params from a server response.'''
         url = None
         for name, value in headers:
             if name.lower() == "location":
                 url = value
                 break
         if not url:
-            LOG.warn("Received redirect status without location header field")
+            lg.warn("[%d] Received redirect status without location header"
+                    " field" % self._rid())
             return (conn, None)
         # Accept location with the following format:
         # 1. /path, redirect to same node
@@ -236,18 +305,17 @@ class NvpApiRequestEventlet:
                     url = result.path
                 return (conn, url)      # case 1
             else:
-                LOG.warn("Received invalid redirect location: %s" % url)
+                lg.warn("[%d] Received invalid redirect location: %s" %
+                        (self._rid(), url))
                 return (conn, None)     # case 3
         elif result.scheme not in ["http", "https"] or not result.hostname:
-            LOG.warn("Received malformed redirect location: %s" % url)
+            lg.warn("[%d] Received malformed redirect location: %s" %
+                    (self._rid(), url))
             return (conn, None)         # case 3
         # case 2, redirect location includes a scheme
         # so setup a new connection and authenticate
         use_https = result.scheme == "https"
         api_providers = [(result.hostname, result.port, use_https)]
-        client_eventlet = (
-            quantum.plugins.nicira.nicira_nvp_plugin.api_client.client_eventlet
-        )
         api_client = client_eventlet.NvpApiClientEventlet(
             api_providers, self._api_client.user, self._api_client.password,
             use_https=use_https)
@@ -256,7 +324,7 @@ class NvpApiRequestEventlet:
             self._headers["Cookie"] = api_client.auth_cookie
         else:
             self._headers["Cookie"] = ""
-        conn = api_client.acquire_connection()
+        conn = api_client.acquire_connection(rid=self._rid())
         if result.query:
             url = "%s?%s" % (result.path, result.query)
         else:
@@ -264,6 +332,7 @@ class NvpApiRequestEventlet:
         return (conn, url)
 
     def _handle_request(self):
+        '''First level request handling.'''
         attempt = 0
         response = None
         while response is None and attempt <= self._retries:
@@ -272,34 +341,35 @@ class NvpApiRequestEventlet:
             if self._auto_login and self._api_client.need_login:
                 self._api_client.wait_for_login()
 
-            if self._api_client.auth_cookie and "Cookie" not in self._headers:
+            if self._api_client.auth_cookie:
                 self._headers["Cookie"] = self._api_client.auth_cookie
 
             req = self.spawn(self._issue_request).wait()
             # automatically raises any exceptions returned.
-            LOG.debug('req: %s' % type(req))
-
             if isinstance(req, httplib.HTTPResponse):
-                if ((req.status == httplib.UNAUTHORIZED
-                     or req.status == httplib.FORBIDDEN)):
+                if (req.status == httplib.UNAUTHORIZED
+                        or req.status == httplib.FORBIDDEN):
                     self._api_client.need_login = True
                     if attempt <= self._retries:
                         continue
                     # else fall through to return the error code
 
-                LOG.debug("API Request '%s %s' complete: %s" %
-                          (self._method, self._url, req.status))
+                lg.debug("[%d] Completed request '%s %s': %s"
+                         % (self._rid(), self._method, self._url, req.status))
                 self._request_error = None
                 response = req
             else:
-                LOG.info('_handle_request: caught an error - %s' % req)
+                lg.info('[%d] Error while handling request: %s' % (self._rid(),
+                                                                   req))
                 self._request_error = req
+                response = None
 
-        LOG.debug('_handle_request: response - %s' % response)
         return response
 
 
 class NvpLoginRequestEventlet(NvpApiRequestEventlet):
+    '''Process a login request.'''
+
     def __init__(self, nvp_client, user, password):
         headers = {"Content-Type": "application/x-www-form-urlencoded"}
         body = urllib.urlencode({"username": user, "password": password})
@@ -314,6 +384,8 @@ class NvpLoginRequestEventlet(NvpApiRequestEventlet):
 
 
 class NvpGetApiProvidersRequestEventlet(NvpApiRequestEventlet):
+    '''Get a list of API providers.'''
+
     def __init__(self, nvp_client):
         url = "/ws.v1/control-cluster/node?fields=roles"
         NvpApiRequestEventlet.__init__(
@@ -332,7 +404,7 @@ class NvpGetApiProvidersRequestEventlet(NvpApiRequestEventlet):
         try:
             if self.successful():
                 ret = []
-                body = jsonutils.loads(self.value.body)
+                body = json.loads(self.value.body)
                 for node in body.get('results', []):
                     for role in node.get('roles', []):
                         if role.get('role') == 'api_provider':
@@ -340,13 +412,15 @@ class NvpGetApiProvidersRequestEventlet(NvpApiRequestEventlet):
                             if addr:
                                 ret.append(_provider_from_listen_addr(addr))
                 return ret
-        except Exception, e:
-            LOG.warn("Failed to parse API provider: %s" % e)
+        except Exception as e:
+            lg.warn("[%d] Failed to parse API provider: %s" % (self._rid(), e))
             # intentionally fall through
         return None
 
 
 class NvpGenericRequestEventlet(NvpApiRequestEventlet):
+    '''Handle a generic request.'''
+
     def __init__(self, nvp_client, method, url, body, content_type,
                  auto_login=False,
                  request_timeout=DEFAULT_REQUEST_TIMEOUT,
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/cli.py b/quantum/plugins/nicira/nicira_nvp_plugin/cli.py
deleted file mode 100644 (file)
index 2ce0434..0000000
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import logging
-from optparse import OptionParser
-import os
-import sys
-
-from quantum.plugins.nicira.nicira_nvp_plugin import nvplib
-from quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin import (
-    NvpPlugin as QuantumManager,
-)
-
-
-logging.basicConfig(level=logging.INFO)
-LOG = logging.getLogger('nvp-plugin-cli')
-
-
-def print_help():
-    """Help for CLI"""
-    print "\nNVP Plugin Commands:"
-    for key in COMMANDS.keys():
-        print ("    %s %s" %
-              (key, " ".join(["<%s>" % y for y in COMMANDS[key]["args"]])))
-
-
-def build_args(cmd, cmdargs, arglist):
-    """Building the list of args for a particular CLI"""
-    args = []
-    orig_arglist = arglist[:]
-    try:
-        for cmdarg in cmdargs:
-            args.append(arglist[0])
-            del arglist[0]
-    except:
-        LOG.error("Not enough arguments for \"%s\" (expected: %d, got: %d)" % (
-                  cmd, len(cmdargs), len(orig_arglist)))
-        print ("Usage:\n    %s %s" %
-              (cmd, " ".join(["<%s>" % y for y in COMMANDS[cmd]["args"]])))
-        sys.exit()
-    if len(arglist) > 0:
-        LOG.error("Too many arguments for \"%s\" (expected: %d, got: %d)" % (
-                  cmd, len(cmdargs), len(orig_arglist)))
-        print ("Usage:\n    %s %s" %
-              (cmd, " ".join(["<%s>" % y for y in COMMANDS[cmd]["args"]])))
-        sys.exit()
-    return args
-
-
-def check_config(manager):
-    """A series of checks to make sure the plugin is correctly configured."""
-    checks = [{"function": nvplib.check_default_transport_zone,
-               "desc": "Transport zone check:"}]
-    any_failed = False
-    for c in checks:
-        result, msg = "PASS", ""
-        try:
-            c["function"]()
-        except Exception, e:
-            any_failed = True
-            result = "FAIL"
-            msg = "(%s)" % str(e)
-        print "%s %s%s" % (c["desc"], result, msg)
-    sys.exit({False: 0, True: 1}[any_failed])
-
-
-COMMANDS = {
-    "check_config": {
-        "need_login": True,
-        "func": check_config,
-        "args": []
-    },
-}
-
-
-def main():
-    usagestr = "Usage: %prog [OPTIONS] <command> [args]"
-    PARSER = OptionParser(usage=usagestr)
-    PARSER.add_option("-v", "--verbose", dest="verbose",
-                      action="store_true", default=False,
-                      help="turn on verbose logging")
-    PARSER.add_option("-c", "--configfile", dest="configfile", type="string",
-                      default="/etc/quantum/plugins/nvp/nvp.ini",
-                      help="nvp plugin config file path (nvp.ini)")
-    options, args = PARSER.parse_args()
-
-    loglevel = logging.INFO
-    if options.verbose:
-        loglevel = logging.DEBUG
-
-    LOG.setLevel(loglevel)
-
-    if len(args) < 1:
-        PARSER.print_help()
-        print_help()
-        sys.exit(1)
-
-    CMD = args[0]
-    if CMD not in COMMANDS.keys():
-        LOG.error("Unknown command: %s" % CMD)
-        print_help()
-        sys.exit(1)
-
-    args = build_args(CMD, COMMANDS[CMD]["args"], args[1:])
-
-    LOG.debug("Executing command \"%s\" with args: %s" % (CMD, args))
-
-    manager = None
-    if COMMANDS[CMD]["need_login"] is True:
-        if not os.path.exists(options.configfile):
-            LOG.error("NVP plugin configuration file \"%s\" doesn't exist!" %
-                      options.configfile)
-            sys.exit(1)
-        manager = QuantumManager(options.configfile, loglevel, cli=True)
-
-    COMMANDS[CMD]["func"](manager, *args)
-
-    sys.exit(0)
-
-if __name__ == "__main__":
-    main()
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/common/__init__.py b/quantum/plugins/nicira/nicira_nvp_plugin/common/__init__.py
new file mode 100644 (file)
index 0000000..5d93ae4
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright 2012 Nicira Networks, Inc.
+# All Rights Reserved
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/common/config.py b/quantum/plugins/nicira/nicira_nvp_plugin/common/config.py
new file mode 100644 (file)
index 0000000..da2e8d8
--- /dev/null
@@ -0,0 +1,132 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Nicira, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from quantum.openstack.common import cfg
+
+
+database_opts = [
+    cfg.StrOpt('sql_connection', default='sqlite://'),
+    cfg.IntOpt('sql_max_retries', default=-1),
+    cfg.IntOpt('reconnect_interval', default=2),
+]
+
+nvp_opts = [
+    cfg.IntOpt('max_lp_per_bridged_ls', default=64),
+    cfg.IntOpt('concurrent_connections', default=5),
+    cfg.IntOpt('failover_time', default=240)
+]
+
+cluster_opts = [
+    cfg.StrOpt('default_tz_uuid'),
+    cfg.StrOpt('nvp_cluster_uuid'),
+    cfg.StrOpt('nova_zone_id'),
+    cfg.MultiStrOpt('nvp_controller_connection')
+]
+
+cfg.CONF.register_opts(database_opts, "DATABASE")
+cfg.CONF.register_opts(nvp_opts, "NVP")
+
+
+class ClusterConfigOptions(cfg.CommonConfigOpts):
+    # Wraps an existing CommonConfigOpts instance and re-registers its
+    # options, adding support for mapping a registered group name to a
+    # differently-named section in the config file (e.g. a cluster name
+    # mapped to a "CLUSTER:<name>" section).
+    # NOTE(review): this relies on private cfg internals (_config_opts,
+    # _cparser, _oparser, _do_get, _cli_values) — fragile across
+    # oslo.config upgrades; verify on version bumps.
+
+    def __init__(self, config_options):
+        super(ClusterConfigOptions, self).__init__()
+        # group name -> config-file section name overrides
+        self._group_mappings = {}
+        # Borrow the already-parsed state from the wrapped instance so no
+        # re-parse of config files or CLI args is needed.
+        self._config_opts = config_options._config_opts
+        self._cparser = config_options._cparser
+        self._oparser = config_options._oparser
+        self.register_cli_opts(self._config_opts)
+
+    def _do_get(self, name, group=None):
+        """Look up an option value.
+
+        :param name: the opt name (or 'dest', more precisely)
+        :param group: an OptGroup
+        :returns: the option value, or a GroupAttr object
+        :raises: NoSuchOptError, NoSuchGroupError, ConfigFileValueError,
+                 TemplateSubstitutionError
+        """
+        if group is None and name in self._groups:
+            return self.GroupAttr(self, self._get_group(name))
+        info = self._get_opt_info(name, group)
+        # sorted() yields the keys alphabetically: 'default', 'opt',
+        # 'override' — the unpack order below depends on that.
+        default, opt, override = [info[k] for k in sorted(info.keys())]
+        if override is not None:
+            return override
+
+        values = []
+        if self._cparser is not None:
+            section = group.name if group is not None else 'DEFAULT'
+            # Check whether the group name maps to a different section in
+            # the config file; otherwise leave the section name unchanged.
+
+            section = self._group_mappings.get(section, section)
+            try:
+                value = opt._get_from_config_parser(self._cparser, section)
+            except KeyError:
+                # Option absent from this section: fall through to CLI /
+                # defaults below.
+                pass
+            except ValueError as ve:
+                raise cfg.ConfigFileValueError(str(ve))
+            else:
+                if not opt.multi:
+                    # No need to continue since the last value wins
+                    return value[-1]
+                values.extend(value)
+
+        # CLI values for grouped opts are stored as '<group>_<name>'.
+        name = name if group is None else group.name + '_' + name
+        value = self._cli_values.get(name)
+        if value is not None:
+            if not opt.multi:
+                return value
+
+            # Multi-valued opt: CLI values take precedence, file values
+            # are appended after them.
+            return value + values
+
+        if values:
+            return values
+
+        if default is not None:
+            return default
+
+        return opt.default
+
+    def register_opts(self, opts, group_internal_name=None, group=None):
+        """Register multiple option schemas at once.
+
+        :param group_internal_name: optional config-file section name the
+            given group should be read from instead of its own name.
+        """
+        if group_internal_name:
+            self._group_mappings[group] = group_internal_name
+        for opt in opts:
+            self.register_opt(opt, group, clear_cache=False)
+
+
+def _retrieve_extra_groups(conf, key=None, delimiter=':'):
+    """retrieve configuration groups not listed above."""
+    results = []
+    for parsed_file in cfg.CONF._cparser.parsed:
+        for parsed_item in parsed_file.keys():
+            if not parsed_item in cfg.CONF:
+                items = key and parsed_item.split(delimiter)
+                if not key or key == items[0]:
+                    results.append(parsed_item)
+    return results
+
+
+def register_cluster_groups(conf):
+    """retrieve configuration groups for nvp clusters."""
+    cluster_names = []
+    cluster_tags = _retrieve_extra_groups(conf, "CLUSTER")
+    for tag in cluster_tags:
+        cluster_name = tag.split(':')[1]
+        conf.register_opts(cluster_opts, tag, cluster_name)
+        cluster_names.append(cluster_name)
+    return cluster_names
similarity index 50%
rename from quantum/plugins/nicira/nicira_nvp_plugin/tests/test_check.py
rename to quantum/plugins/nicira/nicira_nvp_plugin/nvp_plugin_version.py
index e49927ed65c59a1223573c971e05566344b3418e..97d929595305ed236b9376ed12f6dcc23c7b0f59 100644 (file)
@@ -1,4 +1,5 @@
 # Copyright 2012 Nicira Networks, Inc.
+# All Rights Reserved
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    License for the specific language governing permissions and limitations
 #    under the License.
 #
-# @author: Brad Hall, Nicira Networks, Inc.
-
-import logging
-import unittest
-
-from quantum.plugins.nicira.nicira_nvp_plugin import nvplib
-from quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin import NvpPlugin
-
-
-logging.basicConfig(level=logging.DEBUG)
-LOG = logging.getLogger("test_check")
-
-
-class NvpTests(unittest.TestCase):
-    def setUp(self):
-        self.quantum = NvpPlugin()
-
-    def tearDown(self):
-        pass
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
 
-    # These nvplib functions will throw an exception if the check fails
-    def test_check_default_transport_zone(self):
-        nvplib.check_default_transport_zone(self.quantum.controller)
+# This will get updated at build time.  Version 0 indicates developer build.
+PLUGIN_VERSION = "0"
index fae21a11bc8f5f7d2ce7dd08a1bd9d6473fab3d4..ce5d07fb7bff62042242e61ed2501b194e316f61 100644 (file)
@@ -1,4 +1,5 @@
 # Copyright 2012 Nicira Networks, Inc.
+# All Rights Reserved
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    License for the specific language governing permissions and limitations
 #    under the License.
 #
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
 # @author: Brad Hall, Nicira Networks, Inc.
+# @author: Dave Lapsley, Nicira Networks, Inc.
+# @author: Aaron Rosen, Nicira Networks, Inc.
+
 
+# TODO(bgh): We should break this into separate files.  It will just keep
+# growing as we add more features :)
+
+from copy import copy
+import functools
+import json
+import hashlib
 import logging
+import random
+import re
+import uuid
 
-from quantum.common import exceptions as exception
-from quantum.openstack.common import jsonutils
-from quantum.plugins.nicira.nicira_nvp_plugin import NvpApiClient
+from eventlet import semaphore
+import NvpApiClient
 
+# FIXME(danwent): I'd like this file to get to the point where it has
+# no quantum-specific logic in it
+from quantum.common import exceptions as exception
 
-LOG = logging.getLogger("nvplib")
-LOG.setLevel(logging.INFO)
+LOCAL_LOGGING = False
+if LOCAL_LOGGING:
+    from logging.handlers import SysLogHandler
+    FORMAT = ("|%(levelname)s|%(filename)s|%(funcName)s|%(lineno)s"
+              "|%(message)s")
+    LOG = logging.getLogger(__name__)
+    formatter = logging.Formatter(FORMAT)
+    syslog = SysLogHandler(address="/dev/log")
+    syslog.setFormatter(formatter)
+    LOG.addHandler(syslog)
+    LOG.setLevel(logging.DEBUG)
+else:
+    LOG = logging.getLogger("nvplib")
+    LOG.setLevel(logging.INFO)
+
+# TODO(bgh): it would be more efficient to use a bitmap
+taken_context_ids = []
+
+_net_type_cache = {}  # cache of {net_id: network_type}
+# XXX Only cache default for now
+_lqueue_cache = {}
+
+
+def get_cluster_version(cluster):
+    """Return major/minor version #"""
+    # Get control-cluster nodes
+    uri = "/ws.v1/control-cluster/node?_page_length=1&fields=uuid"
+    try:
+        res = do_single_request("GET", uri, cluster=cluster)
+        res = json.loads(res)
+    except NvpApiClient.NvpApiException:
+        raise exception.QuantumException()
+    if res["result_count"] == 0:
+        return None
+    node_uuid = res["results"][0]["uuid"]
+    # Get control-cluster node status.  It's unsupported to have controllers
+    # running different version so we just need the first node version.
+    uri = "/ws.v1/control-cluster/node/%s/status" % node_uuid
+    try:
+        res = do_single_request("GET", uri, cluster=cluster)
+        res = json.loads(res)
+    except NvpApiClient.NvpApiException:
+        raise exception.QuantumException()
+    version_parts = res["version"].split(".")
+    version = "%s.%s" % tuple(version_parts[:2])
+    LOG.info("NVP controller cluster version: %s" % version)
+    return version
+
+
+def get_all_query_pages(path, c):
+    need_more_results = True
+    result_list = []
+    page_cursor = None
+    query_marker = "&" if (path.find("?") != -1) else "?"
+    while need_more_results:
+        page_cursor_str = (
+            "_page_cursor=%s" % page_cursor if page_cursor else "")
+        res = do_single_request("GET", "%s%s%s" %
+                                (path, query_marker, page_cursor_str),
+                                cluster=c)
+        body = json.loads(res)
+        page_cursor = body.get('page_cursor')
+        if not page_cursor:
+            need_more_results = False
+        result_list.extend(body['results'])
+    return result_list
 
 
 def do_single_request(*args, **kwargs):
-    """Issue a request to a specified controller if specified via kwargs
-       (controller=<controller>)."""
-    controller = kwargs["controller"]
-    LOG.debug("Issuing request to controller: %s" % controller.name)
-    return controller.api_client.request(*args)
-
-
-def check_default_transport_zone(c):
-    """Make sure the default transport zone specified in the config exists"""
-    msg = []
-    # This will throw an exception on failure and that's ok since it will
-    # just propogate to the cli.
-    resp = do_single_request(
-        "GET",
-        "/ws.v1/transport-zone?uuid=%s" % c.default_tz_uuid,
-        controller=c)
-    result = jsonutils.loads(resp)
-    if int(result["result_count"]) == 0:
-        msg.append("Unable to find zone \"%s\" for controller \"%s\"" %
-                   (c.default_tz_uuid, c.name))
-    if len(msg) > 0:
-        raise Exception(' '.join(msg))
-
-
-def check_tenant(controller, net_id, tenant_id):
-    """Return true if the tenant "owns" this network"""
-    net = get_network(controller, net_id)
-    for t in net["tags"]:
-        if t["scope"] == "os_tid" and t["tag"] == tenant_id:
-            return True
-    return False
+    """Issue a request to a specified cluster if specified via kwargs
+       (cluster=<cluster>)."""
+    cluster = kwargs["cluster"]
+    return cluster.api_client.request(*args)
+
+
+def do_multi_request(*args, **kwargs):
+    """Issue a request to all clusters"""
+    results = []
+    clusters = kwargs["clusters"]
+    for x in clusters:
+        LOG.debug("Issuing request to cluster: %s" % x.name)
+        rv = x.api_client.request(*args)
+        results.append(rv)
+    return results
+
 
 # -------------------------------------------------------------------
 # Network functions
 # -------------------------------------------------------------------
+def find_port_and_cluster(clusters, port_id):
+    """Return (url, cluster_id) of port or (None, None) if port does not exist.
+    """
+    for c in clusters:
+        query = "/ws.v1/lswitch/*/lport?uuid=%s&fields=*" % port_id
+        LOG.debug("Looking for lswitch with port id \"%s\" on: %s"
+                  % (port_id, c))
+        try:
+            res = do_single_request('GET', query, cluster=c)
+        except Exception as e:
+            LOG.error("get_port_cluster_and_url, exception: %s" % str(e))
+            continue
+        res = json.loads(res)
+        if len(res["results"]) == 1:
+            return (res["results"][0], c)
+    return (None, None)
 
 
-def get_network(controller, net_id):
+def find_lswitch_by_portid(clusters, port_id):
+    port, cluster = find_port_and_cluster(clusters, port_id)
+    if port and cluster:
+        href = port["_href"].split('/')
+        return (href[3], cluster)
+    return (None, None)
+
+
+def get_network(cluster, net_id):
     path = "/ws.v1/lswitch/%s" % net_id
     try:
-        resp_obj = do_single_request("GET", path, controller=controller)
-        network = jsonutils.loads(resp_obj)
-    except NvpApiClient.ResourceNotFound as e:
+        resp_obj = do_single_request("GET", path, cluster=cluster)
+        network = json.loads(resp_obj)
+        LOG.warning("### nw:%s", network)
+    except NvpApiClient.ResourceNotFound:
         raise exception.NetworkNotFound(net_id=net_id)
-    except NvpApiClient.NvpApiException as e:
+    except NvpApiClient.NvpApiException:
         raise exception.QuantumException()
     LOG.debug("Got network \"%s\": %s" % (net_id, network))
     return network
 
 
-def create_lswitch(controller, lswitch_obj):
-    LOG.debug("Creating lswitch: %s" % lswitch_obj)
+def create_lswitch(cluster, lswitch_obj):
+    LOG.info("Creating lswitch: %s" % lswitch_obj)
     # Warn if no tenant is specified
     found = "os_tid" in [x["scope"] for x in lswitch_obj["tags"]]
     if not found:
-        LOG.warn("No tenant-id tag specified in logical switch: %s" %
-                 lswitch_obj)
+        LOG.warn("No tenant-id tag specified in logical switch: %s" % (
+            lswitch_obj))
     uri = "/ws.v1/lswitch"
     try:
         resp_obj = do_single_request("POST", uri,
-                                     jsonutils.dumps(lswitch_obj),
-                                     controller=controller)
-    except NvpApiClient.NvpApiException as e:
+                                     json.dumps(lswitch_obj),
+                                     cluster=cluster)
+    except NvpApiClient.NvpApiException:
         raise exception.QuantumException()
 
-    r = jsonutils.loads(resp_obj)
+    r = json.loads(resp_obj)
     d = {}
-    d["net-id"] = r["uuid"]
-    d["net-name"] = r["display_name"]
+    d["net-id"] = r['uuid']
+    d["net-name"] = r['display_name']
     LOG.debug("Created logical switch: %s" % d["net-id"])
     return d
 
 
-def update_network(controller, network, **kwargs):
-    uri = "/ws.v1/lswitch/" + network
+def update_network(cluster, switch, **params):
+    uri = "/ws.v1/lswitch/" + switch
     lswitch_obj = {}
-    if "name" in kwargs:
-        lswitch_obj["display_name"] = kwargs["name"]
+    if params["network"]["name"]:
+        lswitch_obj["display_name"] = params["network"]["name"]
     try:
-        resp_obj = do_single_request("PUT",
-                                     uri,
-                                     jsonutils.dumps(lswitch_obj),
-                                     controller=controller)
+        resp_obj = do_single_request("PUT", uri, json.dumps(lswitch_obj),
+                                     cluster=cluster)
     except NvpApiClient.ResourceNotFound as e:
         LOG.error("Network not found, Error: %s" % str(e))
         raise exception.NetworkNotFound(net_id=network)
     except NvpApiClient.NvpApiException as e:
         raise exception.QuantumException()
 
-    obj = jsonutils.loads(resp_obj)
+    obj = json.loads(resp_obj)
     return obj
 
 
-def get_all_networks(controller, tenant_id, networks):
-    """Append the quantum network uuids we can find in the given controller to
+def get_all_networks(cluster, tenant_id, networks):
+    """Append the quantum network uuids we can find in the given cluster to
        "networks"
        """
     uri = "/ws.v1/lswitch?fields=*&tag=%s&tag_scope=os_tid" % tenant_id
     try:
-        resp_obj = do_single_request("GET", uri, controller=controller)
-    except NvpApiClient.NvpApiException as e:
+        resp_obj = do_single_request("GET", uri, cluster=cluster)
+    except NvpApiClient.NvpApiException:
         raise exception.QuantumException()
     if not resp_obj:
         return []
-    lswitches = jsonutils.loads(resp_obj)["results"]
-    for lswitch in lswitches:
-        net_id = lswitch["uuid"]
-        if net_id not in [x["net-id"] for x in networks]:
-            networks.append({"net-id": net_id,
-                             "net-name": lswitch["display_name"]})
-    return networks
+    lswitches = json.loads(resp_obj)["results"]
+    networks_result = copy(networks)
+    return networks_result
 
 
-def query_networks(controller, tenant_id, fields="*", tags=None):
+def query_networks(cluster, tenant_id, fields="*", tags=None):
     uri = "/ws.v1/lswitch?fields=%s" % fields
     if tags:
         for t in tags:
             uri += "&tag=%s&tag_scope=%s" % (t[0], t[1])
     try:
-        resp_obj = do_single_request("GET", uri, controller=controller)
-    except NvpApiClient.NvpApiException as e:
+        resp_obj = do_single_request("GET", uri, cluster=cluster)
+    except NvpApiClient.NvpApiException:
         raise exception.QuantumException()
     if not resp_obj:
         return []
-    lswitches = jsonutils.loads(resp_obj)["results"]
-    nets = [{'net-id': lswitch["uuid"],
-             'net-name': lswitch["display_name"]}
+    lswitches = json.loads(resp_obj)["results"]
+    nets = [{'net-id': lswitch["uuid"], 'net-name': lswitch["display_name"]}
             for lswitch in lswitches]
     return nets
 
 
-def delete_network(controller, network):
-    delete_networks(controller, [network])
+def delete_network(cluster, net_id, lswitch_id):
+    delete_networks(cluster, net_id, [lswitch_id])
 
 
-def delete_networks(controller, networks):
-    for network in networks:
-        path = "/ws.v1/lswitch/%s" % network
+def delete_networks(cluster, net_id, lswitch_ids):
+    if net_id in _net_type_cache:
+        del _net_type_cache[net_id]
+    for ls_id in lswitch_ids:
+        path = "/ws.v1/lswitch/%s" % ls_id
 
         try:
-            do_single_request("DELETE", path, controller=controller)
+            do_single_request("DELETE", path, cluster=cluster)
         except NvpApiClient.ResourceNotFound as e:
             LOG.error("Network not found, Error: %s" % str(e))
-            raise exception.NetworkNotFound(net_id=network)
+            raise exception.NetworkNotFound(net_id=ls_id)
         except NvpApiClient.NvpApiException as e:
             raise exception.QuantumException()
 
 
 def create_network(tenant_id, net_name, **kwargs):
-    controller = kwargs["controller"]
+    clusters = kwargs["clusters"]
+    # Default to the primary cluster
+    cluster = clusters[0]
 
     transport_zone = kwargs.get("transport_zone",
-                                controller.default_tz_uuid)
-    transport_type = kwargs.get("transport_type", "gre")
-    lswitch_obj = {
-        "display_name": net_name,
-        "transport_zones": [{
-            "zone_uuid": transport_zone,
-            "transport_type": transport_type,
-        }],
-        "tags": [{"tag": tenant_id, "scope": "os_tid"}],
-    }
-
-    net = create_lswitch(controller, lswitch_obj)
+                                cluster.default_tz_uuid)
+    transport_type = kwargs.get("transport_type", "stt")
+    lswitch_obj = {"display_name": net_name,
+                   "transport_zones": [
+                   {"zone_uuid": transport_zone,
+                    "transport_type": transport_type}
+                   ],
+                   "tags": [{"tag": tenant_id, "scope": "os_tid"}]}
+
+    net = create_lswitch(cluster, lswitch_obj)
     net['net-op-status'] = "UP"
     return net
 
-#---------------------------------------------------------------------
-# Port functions
-#---------------------------------------------------------------------
 
-
-def get_port_stats(controller, network_id, port_id):
-    try:
-        do_single_request("GET", "/ws.v1/lswitch/%s" % (network_id),
-                          controller=controller)
-    except NvpApiClient.ResourceNotFound as e:
-        LOG.error("Network not found, Error: %s" % str(e))
-        raise exception.NetworkNotFound(net_id=network_id)
-    try:
-        path = "/ws.v1/lswitch/%s/lport/%s/statistic" % (network_id, port_id)
-        resp = do_single_request("GET", path, controller=controller)
-        stats = jsonutils.loads(resp)
-    except NvpApiClient.ResourceNotFound as e:
-        LOG.error("Port not found, Error: %s" % str(e))
-        raise exception.PortNotFound(port_id=port_id, net_id=network_id)
-    except NvpApiClient.NvpApiException as e:
-        raise exception.QuantumException()
-    LOG.debug("Returning stats for port \"%s\" on \"%s\": %s" % (port_id,
-                                                                 network_id,
-                                                                 stats))
-    return stats
-
-
-def check_port_state(state):
-    if state not in ["ACTIVE", "DOWN"]:
-        LOG.error("Invalid port state (ACTIVE and DOWN are valid states): %s" %
-                  state)
-        raise exception.StateInvalid(port_state=state)
-
-
-def query_ports(controller, network, relations=None, fields="*", filters=None):
+def query_ports(cluster, network, relations=None, fields="*", filters=None):
     uri = "/ws.v1/lswitch/" + network + "/lport?"
     if relations:
         uri += "relations=%s" % relations
@@ -235,44 +289,75 @@ def query_ports(controller, network, relations=None, fields="*", filters=None):
     if filters and "attachment" in filters:
         uri += "&attachment_vif_uuid=%s" % filters["attachment"]
     try:
-        resp_obj = do_single_request("GET", uri, controller=controller)
+        resp_obj = do_single_request("GET", uri, cluster=cluster)
     except NvpApiClient.ResourceNotFound as e:
         LOG.error("Network not found, Error: %s" % str(e))
         raise exception.NetworkNotFound(net_id=network)
     except NvpApiClient.NvpApiException as e:
         raise exception.QuantumException()
-    return jsonutils.loads(resp_obj)["results"]
+    return json.loads(resp_obj)["results"]
 
 
-def delete_port(controller, network, port):
-    uri = "/ws.v1/lswitch/" + network + "/lport/" + port
+def delete_port(cluster, port):
     try:
-        do_single_request("DELETE", uri, controller=controller)
+        do_single_request("DELETE", port['_href'], cluster=cluster)
     except NvpApiClient.ResourceNotFound as e:
         LOG.error("Port or Network not found, Error: %s" % str(e))
-        raise exception.PortNotFound(port_id=port, net_id=network)
+        raise exception.PortNotFound(port_id=port['uuid'])
     except NvpApiClient.NvpApiException as e:
         raise exception.QuantumException()
 
 
-def delete_all_ports(controller, ls_uuid):
-    res = do_single_request("GET", "/ws.v1/lswitch/%s/lport?fields=uuid" %
-                            ls_uuid, controller=controller)
-    res = jsonutils.loads(res)
-    for r in res["results"]:
-        do_single_request(
-            "DELETE",
-            "/ws.v1/lswitch/%s/lport/%s" % (ls_uuid, r["uuid"]),
-            controller=controller)
+def get_port_by_quantum_tag(clusters, lswitch, quantum_tag):
+    """Return (url, cluster_id) of port or raises ResourceNotFound
+    """
+    query = ("/ws.v1/lswitch/%s/lport?fields=admin_status_enabled,"
+             "fabric_status_up,uuid&tag=%s&tag_scope=q_port_id"
+             "&relations=LogicalPortStatus" % (lswitch, quantum_tag))
+
+    LOG.debug("Looking for port with q_tag \"%s\" on: %s"
+              % (quantum_tag, lswitch))
+    for c in clusters:
+        try:
+            res_obj = do_single_request('GET', query, cluster=c)
+        except Exception as e:
+            continue
+        res = json.loads(res_obj)
+        if len(res["results"]) == 1:
+            return (res["results"][0], c)
+
+    LOG.error("Port or Network not found, Error: %s" % str(e))
+    raise exception.PortNotFound(port_id=quantum_tag, net_id=lswitch)
+
+
+def get_port_by_display_name(clusters, lswitch, display_name):
+    """Return (url, cluster_id) of port or raises ResourceNotFound
+    """
+    query = ("/ws.v1/lswitch/%s/lport?display_name=%s&fields=*" %
+             (lswitch, display_name))
+    LOG.debug("Looking for port with display_name \"%s\" on: %s"
+              % (display_name, lswitch))
+    for c in clusters:
+        try:
+            res_obj = do_single_request('GET', query, cluster=c)
+        except Exception as e:
+            continue
+        res = json.loads(res_obj)
+        if len(res["results"]) == 1:
+            return (res["results"][0], c)
+
+    LOG.error("Port or Network not found, Error: %s" % str(e))
+    raise exception.PortNotFound(port_id=display_name, net_id=lswitch)
 
 
-def get_port(controller, network, port, relations=None):
+def get_port(cluster, network, port, relations=None):
+    LOG.info("get_port() %s %s" % (network, port))
     uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
     if relations:
         uri += "relations=%s" % relations
     try:
-        resp_obj = do_single_request("GET", uri, controller=controller)
-        port = jsonutils.loads(resp_obj)
+        resp_obj = do_single_request("GET", uri, cluster=cluster)
+        port = json.loads(resp_obj)
     except NvpApiClient.ResourceNotFound as e:
         LOG.error("Port or Network not found, Error: %s" % str(e))
         raise exception.PortNotFound(port_id=port, net_id=network)
@@ -281,130 +366,75 @@ def get_port(controller, network, port, relations=None):
     return port
 
 
-def plug_interface(controller, network, port, type, attachment=None):
-    uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "/attachment"
-
-    lport_obj = {}
-    if attachment:
-        lport_obj["vif_uuid"] = attachment
-
-    lport_obj["type"] = type
-    try:
-        resp_obj = do_single_request("PUT",
-                                     uri,
-                                     jsonutils.dumps(lport_obj),
-                                     controller=controller)
-    except NvpApiClient.ResourceNotFound as e:
-        LOG.error("Port or Network not found, Error: %s" % str(e))
-        raise exception.PortNotFound(port_id=port, net_id=network)
-    except NvpApiClient.Conflict as e:
-        LOG.error("Conflict while making attachment to port, "
-                  "Error: %s" % str(e))
-        raise exception.AlreadyAttached(att_id=attachment,
-                                        port_id=port,
-                                        net_id=network,
-                                        att_port_id="UNKNOWN")
-    except NvpApiClient.NvpApiException as e:
-        raise exception.QuantumException()
-
-    result = jsonutils.dumps(resp_obj)
-    return result
-
-
-def unplug_interface(controller, network, port):
-    uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "/attachment"
-    lport_obj = {"type": "NoAttachment"}
-    try:
-        resp_obj = do_single_request("PUT",
-                                     uri,
-                                     jsonutils.dumps(lport_obj),
-                                     controller=controller)
-    except NvpApiClient.ResourceNotFound as e:
-        LOG.error("Port or Network not found, Error: %s" % str(e))
-        raise exception.PortNotFound(port_id=port, net_id=network)
-    except NvpApiClient.NvpApiException as e:
-        raise exception.QuantumException()
-    return jsonutils.loads(resp_obj)
-
-
def update_port(network, port_id, **params):
    """Update the name and/or admin status of a logical port on NVP.

    :param network: UUID of the logical switch owning the port.
    :param port_id: UUID of the logical port on the NVP backend.
    :param params: must contain 'cluster' (target cluster object) and
        'port' (quantum port dict; 'admin_state_up' and 'name' are the
        only keys consulted, both optional).
    :returns: the updated port dict from NVP, with 'port-op-status' added.
    :raises exception.PortNotFound: if the port or switch does not exist.
    :raises exception.QuantumException: on any other NVP API error.
    """
    cluster = params["cluster"]
    lport_obj = {}

    admin_state_up = params['port'].get('admin_state_up')
    name = params["port"].get("name")
    # Bug fix: a plain truthiness test ignored an explicit False value,
    # making it impossible to administratively disable a port.  Only skip
    # the field when the caller did not supply it at all.
    if admin_state_up is not None:
        lport_obj["admin_status_enabled"] = admin_state_up
    if name:
        lport_obj["display_name"] = name

    uri = "/ws.v1/lswitch/" + network + "/lport/" + port_id
    try:
        resp_obj = do_single_request("PUT", uri, json.dumps(lport_obj),
                                     cluster=cluster)
    except NvpApiClient.ResourceNotFound as e:
        LOG.error("Port or Network not found, Error: %s" % str(e))
        raise exception.PortNotFound(port_id=port_id, net_id=network)
    except NvpApiClient.NvpApiException as e:
        raise exception.QuantumException()

    obj = json.loads(resp_obj)
    obj["port-op-status"] = get_port_status(cluster, network, obj["uuid"])
    return obj
 
 
def create_port(tenant, **params):
    """Create a logical port on the primary NVP cluster.

    :param tenant: tenant id, stored on the port as an 'os_tid' tag.
    :param params: must contain 'clusters' (the first entry is used as the
        primary cluster) and 'port' (quantum port dict with 'network_id',
        'device_id', 'admin_state_up', 'name' and 'id').
    :returns: tuple (updated quantum port dict, NVP lport uuid).
    :raises exception.NetworkNotFound: if the logical switch is missing.
    :raises exception.QuantumException: on any other NVP API error.
    """
    # Bug fix: leftover debug 'print' statements removed; use the module
    # logger instead of writing to stdout in production.
    LOG.debug("create_port() %s" % params)
    clusters = params["clusters"]
    dest_cluster = clusters[0]  # primary cluster

    ls_uuid = params["port"]["network_id"]
    # device_id can be longer than 40 so we rehash it
    device_id = hashlib.sha1(params["port"]["device_id"]).hexdigest()
    lport_obj = dict(
        admin_status_enabled=params["port"]["admin_state_up"],
        display_name=params["port"]["name"],
        tags=[dict(scope='os_tid', tag=tenant),
              dict(scope='q_port_id', tag=params["port"]["id"]),
              dict(scope='vm_id', tag=device_id)]
    )
    path = "/ws.v1/lswitch/" + ls_uuid + "/lport"

    try:
        resp_obj = do_single_request("POST", path, json.dumps(lport_obj),
                                     cluster=dest_cluster)
    except NvpApiClient.ResourceNotFound as e:
        LOG.error("Network not found, Error: %s" % str(e))
        raise exception.NetworkNotFound(net_id=params["port"]["network_id"])
    except NvpApiClient.NvpApiException as e:
        raise exception.QuantumException()

    result = json.loads(resp_obj)
    result['port-op-status'] = get_port_status(dest_cluster, ls_uuid,
                                               result['uuid'])

    # Reflect the backend's view of the port back into the quantum dict.
    params["port"].update({"admin_state_up": result["admin_status_enabled"],
                           "status": result["port-op-status"]})
    return (params["port"], result['uuid'])
 
 
-def get_port_status(controller, lswitch_id, port_id):
+def get_port_status(cluster, lswitch_id, port_id):
     """Retrieve the operational status of the port"""
-    # Make sure the network exists first
     try:
-        do_single_request("GET", "/ws.v1/lswitch/%s" % (lswitch_id),
-                          controller=controller)
-    except NvpApiClient.ResourceNotFound as e:
-        LOG.error("Network not found, Error: %s" % str(e))
-        raise exception.NetworkNotFound(net_id=lswitch_id)
-    except NvpApiClient.NvpApiException as e:
-        raise exception.QuantumException()
-    try:
-        r = do_single_request(
-            "GET",
-            "/ws.v1/lswitch/%s/lport/%s/status" % (lswitch_id, port_id),
-            controller=controller)
-        r = jsonutils.loads(r)
+        r = do_single_request("GET",
+                              "/ws.v1/lswitch/%s/lport/%s/status" %
+                              (lswitch_id, port_id), cluster=cluster)
+        r = json.loads(r)
     except NvpApiClient.ResourceNotFound as e:
         LOG.error("Port not found, Error: %s" % str(e))
         raise exception.PortNotFound(port_id=port_id, net_id=lswitch_id)
@@ -414,3 +444,32 @@ def get_port_status(controller, lswitch_id, port_id):
         return "UP"
     else:
         return "DOWN"
+
+
def plug_interface(clusters, lswitch_id, port, type, attachment=None):
    """Attach (or re-type the attachment of) a logical port on NVP.

    :param clusters: iterable of clusters; the first is the primary one.
    :param lswitch_id: UUID of the logical switch owning the port.
    :param port: UUID of the logical port.
    :param type: NVP attachment type (e.g. VifAttachment, NoAttachment).
    :param attachment: optional vif uuid to attach.
    :returns: the attachment response decoded from JSON.
    :raises exception.PortNotFound: if the port or switch does not exist.
    :raises exception.AlreadyAttached: on an attachment conflict.
    :raises exception.QuantumException: on any other NVP API error.
    """
    dest_cluster = clusters[0]  # primary cluster
    uri = "/ws.v1/lswitch/" + lswitch_id + "/lport/" + port + "/attachment"

    lport_obj = {}
    if attachment:
        lport_obj["vif_uuid"] = attachment

    lport_obj["type"] = type
    try:
        resp_obj = do_single_request("PUT", uri, json.dumps(lport_obj),
                                     cluster=dest_cluster)
    except NvpApiClient.ResourceNotFound as e:
        LOG.error("Port or Network not found, Error: %s" % str(e))
        raise exception.PortNotFound(port_id=port, net_id=lswitch_id)
    except NvpApiClient.Conflict as e:
        LOG.error("Conflict while making attachment to port, "
                  "Error: %s" % str(e))
        raise exception.AlreadyAttached(att_id=attachment,
                                        port_id=port,
                                        net_id=lswitch_id,
                                        att_port_id="UNKNOWN")
    except NvpApiClient.NvpApiException as e:
        raise exception.QuantumException()

    # Bug fix: resp_obj is already a JSON-encoded string returned by the
    # API; json.dumps() would merely wrap it in quotes.  Decode it instead,
    # matching how every other function in this module handles responses.
    result = json.loads(resp_obj)
    return result
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/run_tests.py b/quantum/plugins/nicira/nicira_nvp_plugin/run_tests.py
new file mode 100755 (executable)
index 0000000..bf15b09
--- /dev/null
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack, LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+
+"""Unittest runner for Nicira NVP plugin
+
+This file should be run from the top dir in the quantum directory
+
+To run all tests::
+    PLUGIN_DIR=quantum/plugins/nicira ./run_tests.sh
+"""
+
+import os
+import sys
+
+import mock
+from nose import config
+from nose import core
+
+CONFIG_FILE_OPT = "--config-file"
+NICIRA_PATH = "quantum/plugins/nicira/nicira_nvp_plugin"
+
+sys.path.append(os.getcwd())
+sys.path.append(os.path.dirname(__file__))
+sys.path.append(os.path.abspath(NICIRA_PATH))
+
+from quantum.common.test_lib import run_tests, test_config
+from quantum.openstack.common import cfg
+import quantum.tests.unit
+from quantum import version
+
+from tests import fake_nvpapiclient
+
if __name__ == '__main__':
    # Run the NVP plugin unit tests twice: once against the common quantum
    # unit-test tree and once against the plugin's own test directory,
    # mocking out the NVP API client unless a real backend config is given.
    exit_status = False
    do_mock = False
    # remove the value
    test_config['config_files'] = []

    # if a single test case was specified,
    # we should only invoke the tests once
    invoke_once = len(sys.argv) > 1
    # this will allow us to pass --config-file to run_tests.sh for
    # running the unit tests against a real backend
    # if --config-file has been specified, remove it from sys.argv
    # otherwise nose will complain
    while CONFIG_FILE_OPT in sys.argv:
        test_config['config_files'].append(
            sys.argv.pop(sys.argv.index(CONFIG_FILE_OPT) + 1))
        # and the option itself
        sys.argv.remove(CONFIG_FILE_OPT)

    # if no config file available, inject one for fake backend tests
    if not test_config.get('config_files'):
        do_mock = True
        test_config['config_files'] = [os.path.abspath('%s/tests/nvp.ini.test'
                                                       % NICIRA_PATH)]

    test_config['plugin_name_v2'] = "QuantumPlugin.NvpPluginV2"
    cwd = os.getcwd()
    c = config.Config(stream=sys.stdout,
                      env=os.environ,
                      verbosity=3,
                      includeExe=True,
                      traverseNamespace=True,
                      plugins=core.DefaultPluginManager())
    c.configureWhere(quantum.tests.unit.__path__)

    # patch nvpapi client if not running against "real" back end
    if do_mock:
        fc = fake_nvpapiclient.FakeClient(os.path.abspath('%s/tests'
                                                          % NICIRA_PATH))
        mock_nvpapi = mock.patch('NvpApiClient.NVPApiHelper', autospec=True)
        instance = mock_nvpapi.start()
        instance.return_value.login.return_value = "the_cookie"

        def _fake_request(*args, **kwargs):
            return fc.fake_request(*args, **kwargs)

        instance.return_value.request.side_effect = _fake_request

    exit_status = run_tests(c)
    if invoke_once:
        # Bug fix: this used to be sys.exit(0), which reported success to
        # the caller even when the selected test case had failed.
        sys.exit(exit_status)

    os.chdir(cwd)

    working_dir = os.path.abspath(NICIRA_PATH)
    c = config.Config(stream=sys.stdout,
                      env=os.environ,
                      verbosity=3,
                      workingDir=working_dir)
    exit_status = exit_status or run_tests(c)

    # restore original nvpapi client (probably pleonastic here)
    if do_mock:
        mock_nvpapi.stop()
    sys.exit(exit_status)
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..a82a6019e992775d860f0dae44db7d8676cebcd3 100644 (file)
@@ -0,0 +1,17 @@
+# Copyright 2012 Nicira Networks, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+#@author: Brad Hall, Nicira Networks, Inc.
+#@author: Dave Lapsley, Nicira Networks, Inc.
+#@author: Aaron Rosen, Nicira Networks, Inc.
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_get_lport.json b/quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_get_lport.json
new file mode 100644 (file)
index 0000000..f9c29c3
--- /dev/null
@@ -0,0 +1,18 @@
+{"display_name": "%(uuid)s",
+   "_relations":
+   {"LogicalPortStatus":
+      {"type": "LogicalSwitchPortStatus",
+       "fabric_status_up": false,
+       "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s/status",
+       "_schema": "/ws.v1/schema/LogicalSwitchPortStatus"}
+   },
+ "tags":
+   [{"scope": "q_port_id", "tag": "%(quantum_port_id)s"},
+    {"scope": "vm_id", "tag": "%(quantum_device_id)s"},
+    {"scope": "os_tid", "tag": "%(tenant_id)s"}],
+ "uuid": "%(uuid)s",
+ "admin_status_enabled": true,
+ "type": "LogicalSwitchPortConfig",
+ "_schema": "/ws.v1/schema/LogicalSwitchPortConfig",
+ "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s"
+ }
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_get_lport_status.json b/quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_get_lport_status.json
new file mode 100644 (file)
index 0000000..4836d36
--- /dev/null
@@ -0,0 +1,22 @@
+{"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s",
+ "lswitch":
+    {"display_name": "%(ls_name)s",
+     "uuid": "%(ls_uuid)s",
+     "tags": [
+        {"scope": "os_tid",
+         "tag": "%(ls_tenant_id)s"}
+     ],
+     "type": "LogicalSwitchConfig",
+     "_schema": "/ws.v1/schema/LogicalSwitchConfig",
+     "port_isolation_enabled": false,
+     "transport_zones": [
+        {"zone_uuid": "%(ls_zone_uuid)s",
+         "transport_type": "stt"}
+     ],
+     "_href": "/ws.v1/lswitch/%(ls_uuid)s"},
+ "link_status_up": false,
+ "_schema": "/ws.v1/schema/LogicalSwitchPortStatus",
+ "admin_status_up": true,
+ "fabric_status_up": false,
+ "type": "LogicalSwitchPortStatus"
+}
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_get_lswitch.json b/quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_get_lswitch.json
new file mode 100644 (file)
index 0000000..e53a15e
--- /dev/null
@@ -0,0 +1,10 @@
+{"display_name": "%(display_name)s",
+ "_href": "/ws.v1/lswitch/%(uuid)s",
+ "_schema": "/ws.v1/schema/LogicalSwitchConfig",
+ "_relations": {"LogicalSwitchStatus":
+     {"fabric_status": true,
+      "type": "LogicalSwitchStatus",
+      "_href": "/ws.v1/lswitch/%(uuid)s/status",
+      "_schema": "/ws.v1/schema/LogicalSwitchStatus"}},
+ "type": "LogicalSwitchConfig",
+ "uuid": "%(uuid)s"}
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_nvpapiclient.py b/quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_nvpapiclient.py
new file mode 100644 (file)
index 0000000..9b79254
--- /dev/null
@@ -0,0 +1,234 @@
+# Copyright 2012 Nicira Networks, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+import json
+import logging
+import uuid
+import urlparse
+
+LOG = logging.getLogger("fake_nvpapiclient")
+LOG.setLevel(logging.DEBUG)
+
+
class FakeClient:
    """In-memory fake of the NVP API client used by the unit tests.

    Requests are dispatched by HTTP method (see fake_request) to the
    handle_* methods, which operate on the class-level dictionaries below
    and render their responses from the fake_*.json template files under
    fake_files_path using '%'-style string substitution.
    """

    # Template files used to render GET responses, keyed by resource type.
    FAKE_GET_RESPONSES = {
        "lswitch": "fake_get_lswitch.json",
        "lport": "fake_get_lport.json",
        "lportstatus": "fake_get_lport_status.json"
    }

    # Template files used to render POST (create) responses.
    FAKE_POST_RESPONSES = {
        "lswitch": "fake_post_lswitch.json",
        "lport": "fake_post_lport.json"
    }

    # PUT (update) responses reuse the POST templates.
    FAKE_PUT_RESPONSES = {
        "lswitch": "fake_post_lswitch.json",
        "lport": "fake_post_lport.json"
    }

    # NOTE(review): class-level mutable dicts -- this fake backend state is
    # shared by every FakeClient instance and persists across tests unless
    # explicitly cleared; confirm that is intended.
    _fake_lswitch_dict = {}
    _fake_lport_dict = {}
    _fake_lportstatus_dict = {}

    def __init__(self, fake_files_path):
        # Directory holding the fake_*.json response templates.
        self.fake_files_path = fake_files_path

    def _get_tag(self, resource, scope):
        """Return the tag value with the given scope, or False if absent."""
        tags = [tag['tag'] for tag in resource['tags']
                if tag['scope'] == scope]
        return len(tags) > 0 and tags[0]

    def _get_filters(self, querystring):
        """Parse a query string into (tag_filter, attr_filter).

        At most one of the two is populated: a tag/tag_scope pair takes
        precedence over a uuid attribute filter.
        """
        if not querystring:
            return (None, None)
        params = urlparse.parse_qs(querystring)
        tag_filter = None
        attr_filter = None
        if 'tag' in params and 'tag_scope' in params:
            tag_filter = {'scope': params['tag_scope'][0],
                          'tag': params['tag'][0]}
        elif 'uuid' in params:
            attr_filter = {'uuid': params['uuid'][0]}
        return (tag_filter, attr_filter)

    def _add_lswitch(self, body):
        """Register a new fake logical switch and return its dict."""
        fake_lswitch = json.loads(body)
        fake_lswitch['uuid'] = str(uuid.uuid4())
        self._fake_lswitch_dict[fake_lswitch['uuid']] = fake_lswitch
        # put the tenant_id and the zone_uuid in the main dict
        # for simplifying templating
        zone_uuid = fake_lswitch['transport_zones'][0]['zone_uuid']
        fake_lswitch['zone_uuid'] = zone_uuid
        fake_lswitch['tenant_id'] = self._get_tag(fake_lswitch, 'os_tid')
        return fake_lswitch

    def _add_lport(self, body, ls_uuid):
        """Register a new fake logical port on switch ls_uuid.

        Also builds the matching entry in _fake_lportstatus_dict, merging
        in the parent switch attributes the status template substitutes.
        """
        fake_lport = json.loads(body)
        fake_lport['uuid'] = str(uuid.uuid4())
        # put the tenant_id and the ls_uuid in the main dict
        # for simplifying templating
        fake_lport['ls_uuid'] = ls_uuid
        fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
        fake_lport['quantum_port_id'] = self._get_tag(fake_lport,
                                                      'q_port_id')
        fake_lport['quantum_device_id'] = self._get_tag(fake_lport, 'vm_id')
        self._fake_lport_dict[fake_lport['uuid']] = fake_lport

        fake_lswitch = self._fake_lswitch_dict[ls_uuid]
        fake_lport_status = fake_lport.copy()
        fake_lport_status['ls_tenant_id'] = fake_lswitch['tenant_id']
        fake_lport_status['ls_uuid'] = fake_lswitch['uuid']
        fake_lport_status['ls_name'] = fake_lswitch['display_name']
        fake_lport_status['ls_zone_uuid'] = fake_lswitch['zone_uuid']
        self._fake_lportstatus_dict[fake_lport['uuid']] = fake_lport_status
        return fake_lport

    def _get_resource_type(self, path):
        """Derive (resource_type, switch_uuid, port_uuid) from a URI path.

        The and/or chains yield False rather than a value when a component
        is absent (e.g. no port uuid on a collection URI).
        """
        uri_split = path.split('/')
        resource_type = ('status' in uri_split and
                         'lport' in uri_split and 'lportstatus'
                         or 'lport' in uri_split and 'lport'
                         or 'lswitch' in uri_split and 'lswitch')
        switch_uuid = ('lswitch' in uri_split and
                       len(uri_split) > 3 and uri_split[3])
        port_uuid = ('lport' in uri_split and
                     len(uri_split) > 5 and uri_split[5])
        return (resource_type, switch_uuid, port_uuid)

    def _list(self, resource_type, response_file,
              switch_uuid=None, query=None):
        """Render a collection response for resource_type, applying any
        lswitch/tag/attribute filters derived from the query string."""
        (tag_filter, attr_filter) = self._get_filters(query)

        with open("%s/%s" % (self.fake_files_path, response_file)) as f:
            response_template = f.read()
            res_dict = getattr(self, '_fake_%s_dict' % resource_type)
            # "*" means any switch: drop the filter entirely
            if switch_uuid == "*":
                switch_uuid = None

            def _attr_match(res_uuid):
                # True when every requested attribute matches the resource.
                if not attr_filter:
                    return True
                item = res_dict[res_uuid]
                for (attr, value) in attr_filter.iteritems():
                    if item.get(attr) != value:
                        return False
                return True

            def _tag_match(res_uuid):
                # True when the resource carries the requested scope/tag pair.
                if not tag_filter:
                    return True
                return any([x['scope'] == tag_filter['scope'] and
                            x['tag'] == tag_filter['tag']
                            for x in res_dict[res_uuid]['tags']])

            def _lswitch_match(res_uuid):
                # True when the resource belongs to the requested switch.
                if (not switch_uuid or
                        res_dict[res_uuid].get('ls_uuid') == switch_uuid):
                    return True
                return False

            items = [json.loads(response_template % res_dict[res_uuid])
                     for res_uuid in res_dict
                     if (_lswitch_match(res_uuid) and
                         _tag_match(res_uuid) and
                         _attr_match(res_uuid))]

            return json.dumps({'results': items,
                               'result_count': len(items)})

    def _show(self, resource_type, response_file,
              switch_uuid, port_uuid=None):
        """Render a single-resource response, or raise if it is unknown."""
        target_uuid = port_uuid or switch_uuid
        with open("%s/%s" % (self.fake_files_path, response_file)) as f:
            response_template = f.read()
            res_dict = getattr(self, '_fake_%s_dict' % resource_type)
            items = [json.loads(response_template % res_dict[res_uuid])
                     for res_uuid in res_dict if res_uuid == target_uuid]
            if items:
                return json.dumps(items[0])
            raise Exception("show: resource %s:%s not found" %
                            (resource_type, target_uuid))

    def handle_get(self, url):
        """Serve a fake GET: show a single resource or list a collection."""
        #TODO(salvatore-orlando): handle field selection
        parsedurl = urlparse.urlparse(url)
        (res_type, s_uuid, p_uuid) = self._get_resource_type(parsedurl.path)
        response_file = self.FAKE_GET_RESPONSES.get(res_type)
        if not response_file:
            raise Exception("resource not found")
        if res_type == 'lport':
            if p_uuid:
                return self._show(res_type, response_file, s_uuid, p_uuid)
            else:
                return self._list(res_type, response_file, s_uuid,
                                  query=parsedurl.query)
        elif res_type == 'lportstatus':
            return self._show(res_type, response_file, s_uuid, p_uuid)
        elif res_type == 'lswitch':
            if s_uuid:
                return self._show(res_type, response_file, s_uuid)
            else:
                return self._list(res_type, response_file,
                                  query=parsedurl.query)
        else:
            raise Exception("unknown resource:%s" % res_type)

    def handle_post(self, url, body):
        """Serve a fake POST: create the resource and render its template."""
        parsedurl = urlparse.urlparse(url)
        (res_type, s_uuid, _p) = self._get_resource_type(parsedurl.path)
        response_file = self.FAKE_POST_RESPONSES.get(res_type)
        if not response_file:
            raise Exception("resource not found")
        with open("%s/%s" % (self.fake_files_path, response_file)) as f:
            response_template = f.read()
            add_resource = getattr(self, '_add_%s' % res_type)
            args = [body]
            if s_uuid:
                # lport creation also needs the parent switch uuid
                args.append(s_uuid)
            response = response_template % add_resource(*args)
            return response

    def handle_put(self, url, body):
        """Serve a fake PUT: merge the body into the stored resource and
        render the updated resource through the response template."""
        parsedurl = urlparse.urlparse(url)
        (res_type, s_uuid, p_uuid) = self._get_resource_type(parsedurl.path)
        target_uuid = p_uuid or s_uuid
        response_file = self.FAKE_PUT_RESPONSES.get(res_type)
        if not response_file:
            raise Exception("resource not found")
        with open("%s/%s" % (self.fake_files_path, response_file)) as f:
            response_template = f.read()
            res_dict = getattr(self, '_fake_%s_dict' % res_type)
            resource = res_dict[target_uuid]
            resource.update(json.loads(body))
            response = response_template % resource
            return response

    def handle_delete(self, url):
        """Serve a fake DELETE: drop the resource from its dict."""
        parsedurl = urlparse.urlparse(url)
        (res_type, s_uuid, p_uuid) = self._get_resource_type(parsedurl.path)
        target_uuid = p_uuid or s_uuid
        # NOTE(review): the PUT response map is consulted here purely as an
        # existence check for the resource type; no body is rendered.
        response_file = self.FAKE_PUT_RESPONSES.get(res_type)
        if not response_file:
            raise Exception("resource not found")
        res_dict = getattr(self, '_fake_%s_dict' % res_type)
        del res_dict[target_uuid]
        return ""

    def fake_request(self, *args, **kwargs):
        """Dispatch on the HTTP method (first positional argument) to the
        matching handle_* method; kwargs are accepted but ignored."""
        method = args[0]
        handler = getattr(self, "handle_%s" % method.lower())
        return handler(*args[1:])
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_post_lport.json b/quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_post_lport.json
new file mode 100644 (file)
index 0000000..458adfb
--- /dev/null
@@ -0,0 +1,17 @@
+{
+ "display_name": "%(uuid)s",
+ "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s",
+ "security_profiles": [],
+ "tags":
+    [{"scope": "q_port_id", "tag": "%(quantum_port_id)s"},
+     {"scope": "vm_id", "tag": "%(quantum_device_id)s"},
+     {"scope": "os_tid", "tag": "%(tenant_id)s"}],
+ "portno": 1,
+ "queue_uuid": null,
+ "_schema": "/ws.v1/schema/LogicalSwitchPortConfig",
+ "mirror_targets": [],
+ "allowed_address_pairs": [],
+ "admin_status_enabled": true,
+ "type": "LogicalSwitchPortConfig",
+ "uuid": "%(uuid)s"
+}
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_post_lswitch.json b/quantum/plugins/nicira/nicira_nvp_plugin/tests/fake_post_lswitch.json
new file mode 100644 (file)
index 0000000..7d8f9e3
--- /dev/null
@@ -0,0 +1,12 @@
+{
+   "display_name": "%(display_name)s",
+   "uuid": "%(uuid)s",
+   "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}],
+   "type": "LogicalSwitchConfig",
+   "_schema": "/ws.v1/schema/LogicalSwitchConfig",
+   "port_isolation_enabled": false,
+   "transport_zones": [
+      {"zone_uuid": "%(zone_uuid)s",
+      "transport_type": "stt"}],
+   "_href": "/ws.v1/lswitch/%(uuid)s"
+}
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/tests/nvp.ini.test b/quantum/plugins/nicira/nicira_nvp_plugin/tests/nvp.ini.test
new file mode 100644 (file)
index 0000000..8167078
--- /dev/null
@@ -0,0 +1,10 @@
+[DEFAULT]
+
+[DATABASE]
+sql_connection = sqlite://
+
+[CLUSTER:fake]
+default_tz_uuid = fake_tz_uuid
+nova_zone_id = whatever
+nvp_cluster_uuid = fake_cluster_uuid
+nvp_controller_connection=fake:443:admin:admin:30:10:2:2
\ No newline at end of file
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/tests/test_config.py b/quantum/plugins/nicira/nicira_nvp_plugin/tests/test_config.py
deleted file mode 100644 (file)
index a811aae..0000000
+++ /dev/null
@@ -1,241 +0,0 @@
-# Copyright 2012 Nicira Networks, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import ConfigParser
-import StringIO
-import unittest
-
-from quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin import (
-    NVPCluster,
-    parse_config,
-)
-
-
-class ConfigParserTest(unittest.TestCase):
-    def setUp(self):
-        pass
-
-    def tearDown(self):
-        pass
-
-    def test_nvp_config_000(self):
-        nvpc = NVPCluster('cluster1')
-        for f in [
-            (
-                'default_tz_id1', 'ip1', 'port1', 'user1', 'passwd1', 42, 43,
-                44, 45),
-            (
-                'default_tz_id1', 'ip2', 'port2', 'user2', 'passwd2', 42, 43,
-                44, 45),
-            (
-                'default_tz_id1', 'ip3', 'port3', 'user3', 'passwd3', 42, 43,
-                44, 45),
-        ]:
-            nvpc.add_controller(*f)
-
-        self.assertTrue(nvpc.name == 'cluster1')
-        self.assertTrue(len(nvpc.controllers) == 3)
-
-    def test_old_config_parser_old_style(self):
-        config = StringIO.StringIO("""
-[DEFAULT]
-[NVP]
-DEFAULT_TZ_UUID = <default uuid>
-NVP_CONTROLLER_IP = <controller ip>
-PORT = <port>
-USER = <user>
-PASSWORD = <pass>
-""")
-        cp = ConfigParser.ConfigParser()
-        cp.readfp(config)
-        cluster1, plugin_config = parse_config(cp)
-
-        self.assertTrue(cluster1.name == 'cluster1')
-        self.assertTrue(
-            cluster1.controllers[0]['default_tz_uuid'] == '<default uuid>')
-        self.assertTrue(
-            cluster1.controllers[0]['port'] == '<port>')
-        self.assertTrue(
-            cluster1.controllers[0]['user'] == '<user>')
-        self.assertTrue(
-            cluster1.controllers[0]['password'] == '<pass>')
-        self.assertTrue(
-            cluster1.controllers[0]['request_timeout'] == 30)
-        self.assertTrue(
-            cluster1.controllers[0]['http_timeout'] == 10)
-        self.assertTrue(
-            cluster1.controllers[0]['retries'] == 2)
-        self.assertTrue(
-            cluster1.controllers[0]['redirects'] == 2)
-
-    def test_old_config_parser_new_style(self):
-        config = StringIO.StringIO("""
-[DEFAULT]
-[NVP]
-DEFAULT_TZ_UUID = <default uuid>
-NVP_CONTROLLER_CONNECTIONS = CONNECTION1
-CONNECTION1 = 10.0.0.1:4242:admin:admin:42:43:44:45
-""")
-        cp = ConfigParser.ConfigParser()
-        cp.readfp(config)
-        cluster1, plugin_config = parse_config(cp)
-
-        self.assertTrue(cluster1.name == 'cluster1')
-        self.assertTrue(
-            cluster1.controllers[0]['default_tz_uuid'] == '<default uuid>')
-        self.assertTrue(
-            cluster1.controllers[0]['port'] == '4242')
-        self.assertTrue(
-            cluster1.controllers[0]['user'] == 'admin')
-        self.assertTrue(
-            cluster1.controllers[0]['password'] == 'admin')
-        self.assertTrue(
-            cluster1.controllers[0]['request_timeout'] == 42)
-        self.assertTrue(
-            cluster1.controllers[0]['http_timeout'] == 43)
-        self.assertTrue(
-            cluster1.controllers[0]['retries'] == 44)
-        self.assertTrue(
-            cluster1.controllers[0]['redirects'] == 45)
-
-    def test_old_config_parser_both_styles(self):
-        config = StringIO.StringIO("""
-[DEFAULT]
-[NVP]
-NVP_CONTROLLER_IP = <controller ip>
-PORT = <port>
-USER = <user>
-PASSWORD = <pass>
-DEFAULT_TZ_UUID = <default uuid>
-NVP_CONTROLLER_CONNECTIONS = CONNECTION1
-CONNECTION1 = 10.0.0.1:4242:admin:admin:42:43:44:45
-""")
-        cp = ConfigParser.ConfigParser()
-        cp.readfp(config)
-        cluster1, plugin_config = parse_config(cp)
-
-        self.assertTrue(cluster1.name == 'cluster1')
-        self.assertTrue(
-            cluster1.controllers[0]['default_tz_uuid'] == '<default uuid>')
-        self.assertTrue(
-            cluster1.controllers[0]['port'] == '4242')
-        self.assertTrue(
-            cluster1.controllers[0]['user'] == 'admin')
-        self.assertTrue(
-            cluster1.controllers[0]['password'] == 'admin')
-        self.assertTrue(
-            cluster1.controllers[0]['request_timeout'] == 42)
-        self.assertTrue(
-            cluster1.controllers[0]['http_timeout'] == 43)
-        self.assertTrue(
-            cluster1.controllers[0]['retries'] == 44)
-        self.assertTrue(
-            cluster1.controllers[0]['redirects'] == 45)
-
-    def test_old_config_parser_both_styles(self):
-        config = StringIO.StringIO("""
-[DEFAULT]
-[NVP]
-NVP_CONTROLLER_IP = <controller ip>
-PORT = <port>
-USER = <user>
-PASSWORD = <pass>
-DEFAULT_TZ_UUID = <default uuid>
-NVP_CONTROLLER_CONNECTIONS = CONNECTION1
-CONNECTION1 = 10.0.0.1:4242:admin:admin:42:43:44:45
-""")
-        cp = ConfigParser.ConfigParser()
-        cp.readfp(config)
-        cluster1, plugin_config = parse_config(cp)
-
-        self.assertTrue(cluster1.name == 'cluster1')
-        self.assertTrue(
-            cluster1.controllers[0]['default_tz_uuid'] == '<default uuid>')
-        self.assertTrue(
-            cluster1.controllers[0]['port'] == '4242')
-        self.assertTrue(
-            cluster1.controllers[0]['user'] == 'admin')
-        self.assertTrue(
-            cluster1.controllers[0]['password'] == 'admin')
-        self.assertTrue(
-            cluster1.controllers[0]['request_timeout'] == 42)
-        self.assertTrue(
-            cluster1.controllers[0]['http_timeout'] == 43)
-        self.assertTrue(
-            cluster1.controllers[0]['retries'] == 44)
-        self.assertTrue(
-            cluster1.controllers[0]['redirects'] == 45)
-
-    def test_failover_time(self):
-        config = StringIO.StringIO("""
-[DEFAULT]
-[NVP]
-DEFAULT_TZ_UUID = <default uuid>
-NVP_CONTROLLER_IP = <controller ip>
-PORT = 443
-USER = admin
-PASSWORD = admin
-FAILOVER_TIME = 10
-""")
-        cp = ConfigParser.ConfigParser()
-        cp.readfp(config)
-        cluster1, plugin_config = parse_config(cp)
-        self.assertTrue(plugin_config['failover_time'] == '10')
-
-    def test_failover_time_new_style(self):
-        config = StringIO.StringIO("""
-[DEFAULT]
-[NVP]
-DEFAULT_TZ_UUID = <default uuid>
-NVP_CONTROLLER_CONNECTIONS = CONNECTION1
-CONNECTION1 = 10.0.0.1:4242:admin:admin:42:43:44:45
-FAILOVER_TIME = 10
-""")
-        cp = ConfigParser.ConfigParser()
-        cp.readfp(config)
-        cluster1, plugin_config = parse_config(cp)
-        self.assertTrue(plugin_config['failover_time'] == '10')
-
-    def test_concurrent_connections_time(self):
-        config = StringIO.StringIO("""
-[DEFAULT]
-[NVP]
-DEFAULT_TZ_UUID = <default uuid>
-NVP_CONTROLLER_IP = <controller ip>
-PORT = 443
-USER = admin
-PASSWORD = admin
-CONCURRENT_CONNECTIONS = 5
-""")
-        cp = ConfigParser.ConfigParser()
-        cp.readfp(config)
-        cluster1, plugin_config = parse_config(cp)
-        self.assertTrue(plugin_config['concurrent_connections'] == '5')
-
-    def test_concurrent_connections_time_new_style(self):
-        config = StringIO.StringIO("""
-[DEFAULT]
-[NVP]
-DEFAULT_TZ_UUID = <default uuid>
-NVP_CONTROLLER_CONNECTIONS = CONNECTION1
-CONNECTION1 = 10.0.0.1:4242:admin:admin:42:43:44:45
-CONCURRENT_CONNECTIONS = 5
-""")
-        cp = ConfigParser.ConfigParser()
-        cp.readfp(config)
-        cluster1, plugin_config = parse_config(cp)
-        self.assertTrue(plugin_config['concurrent_connections'] == '5')
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/tests/test_network.py b/quantum/plugins/nicira/nicira_nvp_plugin/tests/test_network.py
deleted file mode 100644 (file)
index f29e105..0000000
+++ /dev/null
@@ -1,201 +0,0 @@
-# Copyright 2012 Nicira Networks, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-# @author: Somik Behera, Nicira Networks, Inc.
-# @author: Brad Hall, Nicira Networks, Inc.
-
-import logging
-import os
-import unittest
-
-from quantum.common import exceptions as exception
-from quantum.openstack.common import jsonutils
-from quantum.plugins.nicira.nicira_nvp_plugin import (
-    NvpApiClient,
-    nvplib,
-)
-from quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin import NvpPlugin
-
-
-logging.basicConfig(level=logging.DEBUG)
-LOG = logging.getLogger("test_network")
-
-
-class NvpTests(unittest.TestCase):
-    def setUp(self):
-        self.quantum = NvpPlugin()
-        self.BRIDGE_TZ_UUID = self._create_tz("bridge")
-        self.DEFAULT_TZ_UUID = self._create_tz("default")
-
-        self.nets = []
-        self.ports = []
-
-    def tearDown(self):
-        self._delete_tz(self.BRIDGE_TZ_UUID)
-        self._delete_tz(self.DEFAULT_TZ_UUID)
-
-        for tenant, net, port in self.ports:
-            self.quantum.delete_port(tenant, net, port)
-        for tenant, net in self.nets:
-            self.quantum.delete_network(tenant, net)
-
-    def _create_tz(self, name):
-        post_uri = "/ws.v1/transport-zone"
-        body = {"display_name": name,
-                "tags": [{"tag": "plugin-test"}]}
-        try:
-            resp_obj = self.quantum.api_client.request("POST", post_uri,
-                                                       jsonutils.dumps(body))
-        except NvpApiClient.NvpApiException as e:
-            print("Unknown API Error: %s" % str(e))
-            raise exception.QuantumException()
-        return jsonutils.loads(resp_obj)["uuid"]
-
-    def _delete_tz(self, uuid):
-        post_uri = "/ws.v1/transport-zone/%s" % uuid
-        try:
-            resp_obj = self.quantum.api_client.request("DELETE", post_uri)
-        except NvpApiClient.NvpApiException as e:
-            LOG.error("Unknown API Error: %s" % str(e))
-            raise exception.QuantumException()
-
-    def test_create_multi_networks(self):
-
-        resp = self.quantum.create_custom_network(
-            "quantum-test-tenant", "quantum-Private-TenantA",
-            self.BRIDGE_TZ_UUID, self.quantum.controller)
-        resp1 = self.quantum.create_network("quantum-test-tenant",
-                                            "quantum-Private-TenantB")
-        resp2 = self.quantum.create_network("quantum-test-tenant",
-                                            "quantum-Private-TenantC")
-        resp3 = self.quantum.create_network("quantum-test-tenant",
-                                            "quantum-Private-TenantD")
-        net_id = resp["net-id"]
-
-        resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                        "ACTIVE")
-        port_id1 = resp["port-id"]
-        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                             port_id1)
-        old_vic = resp["attachment"]
-        self.assertTrue(old_vic == "None")
-
-        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id1,
-                                    "nova-instance-test-%s" % os.getpid())
-        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                             port_id1)
-        new_vic = resp["attachment"]
-        self.assertTrue(old_vic != new_vic)
-
-        resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                        "ACTIVE")
-        port_id2 = resp["port-id"]
-        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                             port_id2)
-        old_vic2 = resp["attachment"]
-        self.assertTrue(old_vic2 == "None")
-
-        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id2,
-                                    "nova-instance-test2-%s" % os.getpid())
-        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                             port_id2)
-        new_vic = resp["attachment"]
-        self.assertTrue(old_vic2 != new_vic)
-
-        resp = self.quantum.get_all_ports("quantum-test-tenant", net_id)
-
-        resp = self.quantum.get_network_details("quantum-test-tenant", net_id)
-
-        resp = self.quantum.get_all_networks("quantum-test-tenant")
-
-        resp = self.quantum.delete_port("quantum-test-tenant", net_id,
-                                        port_id1)
-        resp = self.quantum.delete_port("quantum-test-tenant", net_id,
-                                        port_id2)
-        self.quantum.delete_network("quantum-test-tenant", net_id)
-        self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
-        self.quantum.delete_network("quantum-test-tenant", resp2["net-id"])
-        self.quantum.delete_network("quantum-test-tenant", resp3["net-id"])
-
-    def test_update_network(self):
-        resp = self.quantum.create_network("quantum-test-tenant",
-                                           "quantum-Private-TenantA")
-        net_id = resp["net-id"]
-        try:
-            resp = self.quantum.update_network("quantum-test-tenant", net_id,
-                                               name="new-name")
-        except exception.NetworkNotFound:
-            self.assertTrue(False)
-
-        self.assertTrue(resp["net-name"] == "new-name")
-
-    def test_negative_delete_networks(self):
-        try:
-            self.quantum.delete_network("quantum-test-tenant", "xxx-no-net-id")
-        except exception.NetworkNotFound:
-            self.assertTrue(True)
-
-    def test_negative_get_network_details(self):
-        try:
-            self.quantum.get_network_details("quantum-test-tenant",
-                                             "xxx-no-net-id")
-        except exception.NetworkNotFound:
-            self.assertTrue(True)
-
-    def test_negative_update_network(self):
-        try:
-            self.quantum.update_network("quantum-test-tenant", "xxx-no-net-id",
-                                        name="new-name")
-        except exception.NetworkNotFound:
-            self.assertTrue(True)
-
-    def test_get_all_networks(self):
-        networks = self.quantum.get_all_networks("quantum-test-tenant")
-        num_nets = len(networks)
-
-        # Make sure we only get back networks with the specified tenant_id
-        unique_tid = "tenant-%s" % os.getpid()
-        # Add a network that we shouldn't get back
-        resp = self.quantum.create_custom_network(
-            "another_tid", "another_tid_network",
-            self.BRIDGE_TZ_UUID, self.quantum.controller)
-        net_id = resp["net-id"]
-        self.nets.append(("another_tid", net_id))
-        # Add 3 networks that we should get back
-        for i in [1, 2, 3]:
-            resp = self.quantum.create_custom_network(
-                unique_tid, "net-%s" % str(i),
-                self.BRIDGE_TZ_UUID, self.quantum.controller)
-            net_id = resp["net-id"]
-            self.nets.append((unique_tid, net_id))
-        networks = self.quantum.get_all_networks(unique_tid)
-        self.assertTrue(len(networks) == 3)
-
-    def test_delete_nonexistent_network(self):
-        try:
-            nvplib.delete_network(self.quantum.controller,
-                                  "my-non-existent-network")
-        except exception.NetworkNotFound:
-            return
-        # shouldn't be reached
-        self.assertTrue(False)
-
-    def test_query_networks(self):
-        resp = self.quantum.create_custom_network(
-            "quantum-test-tenant", "quantum-Private-TenantA",
-            self.BRIDGE_TZ_UUID, self.quantum.controller)
-        net_id = resp["net-id"]
-        self.nets.append(("quantum-test-tenant", net_id))
-        nets = nvplib.query_networks(self.quantum.controller,
-                                     "quantum-test-tenant")
index c358c3fc3179f86466775baf2f906b071c4a8a62..ce332dbfd8f1ae9184a2157e85bf733843e3edea 100644 (file)
@@ -1,20 +1,16 @@
-# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
+# Copyright (C) 2009-2011 Nicira Networks, Inc. All Rights Reserved.
 #
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
+# This software is provided only under the terms and conditions of a written
+# license agreement with Nicira. If no such agreement applies to you, you are
+# not authorized to use this software. Contact Nicira to obtain an appropriate
+# license: www.nicira.com.
 
+# System
 import httplib
-import unittest2 as unittest
+import unittest
 
+# Third party
+# Local
 import quantum.plugins.nicira.nicira_nvp_plugin.api_client.common as naco
 
 
@@ -35,5 +31,5 @@ class NvpApiCommonTest(unittest.TestCase):
         self.assertTrue(
             naco._conn_str(conn) == 'http://localhost:4242')
 
-        with self.assertRaises(TypeError):
-            naco._conn_str('not an httplib.HTTPSConnection')
+        self.assertRaises(TypeError, naco._conn_str,
+                          ('not an httplib.HTTPSConnection'))
index 3fe7ee2fbe0fc2c3c3d53d74b93d3c32c6e449f0..c0d3ab1391547c3befce2f346e48fc6277940f40 100644 (file)
@@ -1,27 +1,18 @@
-# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
+# Copyright (C) 2009-2011 Nicira Networks, Inc. All Rights Reserved.
 #
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
+# This software is provided only under the terms and conditions of a written
+# license agreement with Nicira. If no such agreement applies to you, you are
+# not authorized to use this software. Contact Nicira to obtain an appropriate
+# license: www.nicira.com.
 
+import eventlet
+eventlet.monkey_patch()
 import logging
 import unittest
-
-from eventlet.green import urllib2
-
+import urllib2
 
 logging.basicConfig(level=logging.DEBUG)
-LOG = logging.getLogger("test_nvp_api_request")
-
+lg = logging.getLogger("test_nvp_api_request")
 
 REQUEST_TIMEOUT = 1
 
index d9d4ce7218fa1b7c08ce6243250845c34e98fdc0..f06aabe35dfa4dd0b12d8e2bade867a906008cbc 100644 (file)
@@ -211,8 +211,8 @@ class NvpApiRequestEventletTest(unittest.TestCase):
         self.assertTrue(retval is None)
 
     def test_redirect_params_setup_https_with_cooki(self):
-        with patch('nicira_nvp_plugin.api_client.client_eventlet'
-                   '.NvpApiClientEventlet') as mock:
+        with patch('quantum.plugins.nicira.nicira_nvp_plugin.api_client.'
+                   'client_eventlet.NvpApiClientEventlet') as mock:
             api_client = mock.return_value
             api_client.wait_for_login.return_value = None
             api_client.auth_cookie = 'mycookie'
@@ -226,8 +226,8 @@ class NvpApiRequestEventletTest(unittest.TestCase):
             self.assertTrue(api_client.acquire_connection.called)
 
     def test_redirect_params_setup_htttps_and_query(self):
-        with patch('nicira_nvp_plugin.api_client.client_eventlet'
-                   '.NvpApiClientEventlet') as mock:
+        with patch('quantum.plugins.nicira.nicira_nvp_plugin.api_client.'
+                   'client_eventlet.NvpApiClientEventlet') as mock:
             api_client = mock.return_value
             api_client.wait_for_login.return_value = None
             api_client.auth_cookie = 'mycookie'
@@ -241,8 +241,8 @@ class NvpApiRequestEventletTest(unittest.TestCase):
             self.assertTrue(api_client.acquire_connection.called)
 
     def test_redirect_params_setup_https_connection_no_cookie(self):
-        with patch('nicira_nvp_plugin.api_client.client_eventlet'
-                   '.NvpApiClientEventlet') as mock:
+        with patch('quantum.plugins.nicira.nicira_nvp_plugin.api_client.'
+                   'client_eventlet.NvpApiClientEventlet') as mock:
             api_client = mock.return_value
             api_client.wait_for_login.return_value = None
             api_client.auth_cookie = None
@@ -256,8 +256,8 @@ class NvpApiRequestEventletTest(unittest.TestCase):
             self.assertTrue(api_client.acquire_connection.called)
 
     def test_redirect_params_setup_https_and_query_no_cookie(self):
-        with patch('nicira_nvp_plugin.api_client.client_eventlet'
-                   '.NvpApiClientEventlet') as mock:
+        with patch('quantum.plugins.nicira.nicira_nvp_plugin.api_client.'
+                   'client_eventlet.NvpApiClientEventlet') as mock:
             api_client = mock.return_value
             api_client.wait_for_login.return_value = None
             api_client.auth_cookie = None
@@ -270,8 +270,8 @@ class NvpApiRequestEventletTest(unittest.TestCase):
             self.assertTrue(api_client.acquire_connection.called)
 
     def test_redirect_params_path_only_with_query(self):
-        with patch('nicira_nvp_plugin.api_client.client_eventlet'
-                   '.NvpApiClientEventlet') as mock:
+        with patch('quantum.plugins.nicira.nicira_nvp_plugin.api_client.'
+                   'client_eventlet.NvpApiClientEventlet') as mock:
             api_client = mock.return_value
             api_client.wait_for_login.return_value = None
             api_client.auth_cookie = None
diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/tests/test_port.py b/quantum/plugins/nicira/nicira_nvp_plugin/tests/test_port.py
deleted file mode 100644 (file)
index 722dd13..0000000
+++ /dev/null
@@ -1,521 +0,0 @@
-# Copyright 2012 Nicira Networks, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-# @author: Somik Behera, Nicira Networks, Inc.
-
-import logging
-import os
-import unittest
-
-from quantum.common import exceptions as exception
-from quantum.openstack.common import jsonutils
-from quantum.plugins.nicira.nicira_nvp_plugin import (
-    NvpApiClient,
-    nvplib,
-)
-from quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin import NvpPlugin
-
-
-logging.basicConfig(level=logging.DEBUG)
-LOG = logging.getLogger("test_port")
-
-
-class NvpTests(unittest.TestCase):
-    def setUp(self):
-        self.quantum = NvpPlugin()
-        self.BRIDGE_TZ_UUID = self._create_tz("bridge")
-        self.networks = []
-        self.ports = []
-        self.transport_nodes = []
-        self.cis_uuids = []
-
-    def tearDown(self):
-        self._delete_tz(self.BRIDGE_TZ_UUID)
-
-        for (net_id, p) in self.ports:
-            self.quantum.unplug_interface("quantum-test-tenant", net_id, p)
-            self.quantum.delete_port("quantum-test-tenant", net_id, p)
-        for n in self.networks:
-            self.quantum.delete_network("quantum-test-tenant", n)
-        for t in self.transport_nodes:
-            nvplib.do_single_request("DELETE", "/ws.v1/transport-node/%s" % t,
-                                     controller=self.quantum.controller)
-        for c in self.cis_uuids:
-            nvplib.do_single_request(
-                "DELETE",
-                "/ws.v1/cluster-interconnect-service/%s" % c,
-                controller=self.quantum.controller)
-
-    def _create_tz(self, name):
-        post_uri = "/ws.v1/transport-zone"
-        body = {"display_name": name, "tags": [{"tag": "plugin-test"}]}
-        try:
-            resp_obj = self.quantum.api_client.request("POST",
-                                                       post_uri,
-                                                       jsonutils.dumps(body))
-        except NvpApiClient.NvpApiException as e:
-            LOG.error("Unknown API Error: %s" % str(e))
-            raise exception.QuantumException()
-        return jsonutils.loads(resp_obj)["uuid"]
-
-    def _delete_tz(self, uuid):
-        post_uri = "/ws.v1/transport-zone/%s" % uuid
-        try:
-            resp_obj = self.quantum.api_client.request("DELETE", post_uri)
-        except NvpApiClient.NvpApiException as e:
-            LOG.error("Unknown API Error: %s" % str(e))
-            raise exception.QuantumException()
-
-    def test_create_and_delete_lots_of_ports(self):
-        resp = self.quantum.create_custom_network(
-            "quantum-test-tenant", "quantum-Private-TenantA",
-            self.BRIDGE_TZ_UUID, self.quantum.controller)
-        net_id = resp["net-id"]
-
-        nports = 250
-
-        ids = []
-        for i in xrange(0, nports):
-            resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                            "ACTIVE")
-            port_id = resp["port-id"]
-            ids.append(port_id)
-
-        # Test that we get the correct number of ports back
-        ports = self.quantum.get_all_ports("quantum-test-tenant", net_id)
-        self.assertTrue(len(ports) == nports)
-
-        # Verify that each lswitch has matching tags
-        net = nvplib.get_network(self.quantum.controller, net_id)
-        tags = []
-        net_tags = [t["tag"] for t in net["tags"]]
-        if len(tags) == 0:
-            tags = net_tags
-        else:
-            for t in net_tags:
-                self.assertTrue(t in tags)
-
-        for port_id in ids:
-            resp = self.quantum.delete_port("quantum-test-tenant", net_id,
-                                            port_id)
-            try:
-                self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                              port_id)
-            except exception.PortNotFound:
-                continue
-            # Shouldn't be reached
-            self.assertFalse(True)
-
-        self.quantum.delete_network("quantum-test-tenant", net_id)
-
-    def test_create_and_delete_port(self):
-        resp = self.quantum.create_custom_network(
-            "quantum-test-tenant", "quantum-Private-TenantA",
-            self.BRIDGE_TZ_UUID, self.quantum.controller)
-        net_id = resp["net-id"]
-
-        resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                        "ACTIVE")
-        port_id = resp["port-id"]
-        resp = self.quantum.delete_port("quantum-test-tenant", net_id, port_id)
-        self.quantum.delete_network("quantum-test-tenant", net_id)
-
-    def test_create_and_delete_port_with_portsec(self):
-        resp = self.quantum.create_custom_network(
-            "quantum-test-tenant", "quantum-Private-TenantA",
-            self.BRIDGE_TZ_UUID, self.quantum.controller)
-        net_id = resp["net-id"]
-
-        params = {}
-        params["NICIRA:allowed_address_pairs"] = [
-            {
-                "ip_address": "172.168.17.5",
-                "mac_address": "10:9a:dd:61:4e:89",
-            },
-            {
-                "ip_address": "172.168.17.6",
-                "mac_address": "10:9a:dd:61:4e:88",
-            },
-        ]
-        resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                        "ACTIVE", **params)
-        port_id = resp["port-id"]
-        resp = self.quantum.delete_port("quantum-test-tenant", net_id, port_id)
-        self.quantum.delete_network("quantum-test-tenant", net_id)
-        self.assertTrue(True)
-
-    def test_create_update_and_delete_port(self):
-        resp = self.quantum.create_custom_network(
-            "quantum-test-tenant", "quantum-Private-TenantA",
-            self.BRIDGE_TZ_UUID, self.quantum.controller)
-        net_id = resp["net-id"]
-
-        resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                        "ACTIVE")
-        port_id = resp["port-id"]
-        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                             port_id)
-        resp = self.quantum.delete_port("quantum-test-tenant", net_id,
-                                        port_id)
-        self.quantum.delete_network("quantum-test-tenant",
-                                    net_id)
-        self.assertTrue(True)
-
-    def test_create_plug_unplug_iface(self):
-        resp = self.quantum.create_custom_network(
-            "quantum-test-tenant", "quantum-Private-TenantA",
-            self.BRIDGE_TZ_UUID, self.quantum.controller)
-        net_id = resp["net-id"]
-
-        resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                        "ACTIVE")
-        port_id = resp["port-id"]
-        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                             port_id)
-        old_vic = resp["attachment"]
-        self.assertTrue(old_vic == "None")
-        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id,
-                                    "nova-instance-test-%s" % os.getpid())
-        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                             port_id)
-        new_vic = resp["attachment"]
-
-        self.assertTrue(old_vic != new_vic)
-        self.quantum.unplug_interface("quantum-test-tenant", net_id, port_id)
-        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                             port_id)
-        new_vic = resp["attachment"]
-        self.assertTrue(old_vic == new_vic)
-        resp = self.quantum.delete_port("quantum-test-tenant", net_id, port_id)
-        self.quantum.delete_network("quantum-test-tenant", net_id)
-        self.assertTrue(True)
-
-    def test_create_multi_port_attachment(self):
-        resp = self.quantum.create_custom_network("quantum-test-tenant",
-                                                  "quantum-Private-TenantA",
-                                                  self.BRIDGE_TZ_UUID,
-                                                  self.quantum.controller)
-        net_id = resp["net-id"]
-
-        resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                        "ACTIVE")
-        port_id1 = resp["port-id"]
-        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                             port_id1)
-        old_vic = resp["attachment"]
-        self.assertTrue(old_vic == "None")
-
-        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id1,
-                                    "nova-instance-test-%s" % os.getpid())
-        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                             port_id1)
-        new_vic = resp["attachment"]
-        self.assertTrue(old_vic != new_vic)
-
-        resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                        "ACTIVE")
-        port_id2 = resp["port-id"]
-        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                             port_id2)
-        old_vic2 = resp["attachment"]
-        self.assertTrue(old_vic2 == "None")
-
-        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id2,
-                                    "nova-instance-test2-%s" % os.getpid())
-        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
-                                             port_id2)
-        new_vic = resp["attachment"]
-        self.assertTrue(old_vic2 != new_vic)
-
-        resp = self.quantum.get_all_ports("quantum-test-tenant", net_id)
-
-        resp = self.quantum.get_network_details("quantum-test-tenant", net_id)
-
-        resp = self.quantum.delete_port("quantum-test-tenant", net_id,
-                                        port_id1)
-        resp = self.quantum.delete_port("quantum-test-tenant", net_id,
-                                        port_id2)
-        self.quantum.delete_network("quantum-test-tenant", net_id)
-        self.assertTrue(True)
-
-    def test_negative_get_all_ports(self):
-        try:
-            self.quantum.get_all_ports("quantum-test-tenant", "xxx-no-net-id")
-        except exception.NetworkNotFound:
-            self.assertTrue(True)
-            return
-
-        self.assertTrue(False)
-
-    def test_negative_create_port1(self):
-        try:
-            self.quantum.create_port("quantum-test-tenant", "xxx-no-net-id",
-                                     "ACTIVE")
-        except exception.NetworkNotFound:
-            self.assertTrue(True)
-            return
-
-        self.assertTrue(False)
-
-    def test_negative_create_port2(self):
-        resp1 = self.quantum.create_network("quantum-test-tenant",
-                                            "quantum-Private-TenantB")
-        try:
-            self.quantum.create_port("quantum-test-tenant", resp1["net-id"],
-                                     "INVALID")
-        except exception.StateInvalid:
-            self.assertTrue(True)
-            self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
-            return
-
-        self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
-        self.assertTrue(False)
-
-    def test_negative_update_port1(self):
-        resp1 = self.quantum.create_network("quantum-test-tenant",
-                                            "quantum-Private-TenantB")
-        try:
-            self.quantum.update_port("quantum-test-tenant", resp1["net-id"],
-                                     "port_id_fake", state="ACTIVE")
-        except exception.PortNotFound:
-            self.assertTrue(True)
-            self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
-            return
-
-        self.assertTrue(False)
-
-    def test_negative_update_port2(self):
-        resp1 = self.quantum.create_network("quantum-test-tenant",
-                                            "quantum-Private-TenantB")
-        try:
-            self.quantum.update_port("quantum-test-tenant", resp1["net-id"],
-                                     "port_id_fake", state="INVALID")
-        except exception.StateInvalid:
-            self.assertTrue(True)
-            self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
-            return
-
-        self.assertTrue(False)
-
-    def test_negative_update_port3(self):
-        resp1 = self.quantum.create_network("quantum-test-tenant",
-                                            "quantum-Private-TenantB")
-        try:
-            self.quantum.update_port("quantum-test-tenant", resp1["net-id"],
-                                     "port_id_fake", state="ACTIVE")
-        except exception.PortNotFound:
-            self.assertTrue(True)
-            self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
-            return
-
-        self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
-        self.assertTrue(False)
-
-    def test_negative_delete_port1(self):
-        resp1 = self.quantum.create_network("quantum-test-tenant",
-                                            "quantum-Private-TenantB")
-        try:
-            self.quantum.delete_port("quantum-test-tenant", resp1["net-id"],
-                                     "port_id_fake")
-        except exception.PortNotFound:
-            self.assertTrue(True)
-            self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
-            return
-
-        self.assertTrue(False)
-
-    def test_negative_delete_port2(self):
-        resp1 = self.quantum.create_network("quantum-test-tenant",
-                                            "quantum-Private-TenantB")
-        try:
-            self.quantum.delete_port("quantum-test-tenant", resp1["net-id"],
-                                     "port_id_fake")
-        except exception.PortNotFound:
-            self.assertTrue(True)
-            self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
-            return
-
-        self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
-        self.assertTrue(False)
-
-    def test_negative_get_port_details(self):
-        resp1 = self.quantum.create_network("quantum-test-tenant",
-                                            "quantum-Private-TenantB")
-        try:
-            self.quantum.get_port_details("quantum-test-tenant",
-                                          resp1["net-id"],
-                                          "port_id_fake")
-        except exception.PortNotFound:
-            self.assertTrue(True)
-            self.quantum.delete_network("quantum-test-tenant",
-                                        resp1["net-id"])
-            return
-
-        self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
-        self.assertTrue(False)
-
-    def test_negative_plug_interface(self):
-        resp1 = self.quantum.create_network("quantum-test-tenant",
-                                            "quantum-Private-TenantB")
-        try:
-            self.quantum.plug_interface("quantum-test-tenant",
-                                        resp1["net-id"],
-                                        "port_id_fake", "iface_id_fake")
-        except exception.PortNotFound:
-            self.assertTrue(True)
-            self.quantum.delete_network("quantum-test-tenant",
-                                        resp1["net-id"])
-            return
-
-        self.assertTrue(False)
-
-    def test_negative_unplug_interface(self):
-        resp1 = self.quantum.create_network("quantum-test-tenant",
-                                            "quantum-Private-TenantB")
-        try:
-            self.quantum.unplug_interface("quantum-test-tenant",
-                                          resp1["net-id"], "port_id_fake")
-        except exception.PortNotFound:
-            self.assertTrue(True)
-            self.quantum.delete_network("quantum-test-tenant",
-                                        resp1["net-id"])
-            return
-
-        self.assertTrue(False)
-
-    def test_get_port_status_invalid_lswitch(self):
-        try:
-            nvplib.get_port_status(self.quantum.controller,
-                                   "invalid-lswitch",
-                                   "invalid-port")
-        except exception.NetworkNotFound:
-            return
-        # Shouldn't be reached
-        self.assertTrue(False)
-
-    def test_get_port_status_invalid_port(self):
-        resp = self.quantum.create_custom_network("quantum-test-tenant",
-                                                  "quantum-Private-TenantA",
-                                                  self.BRIDGE_TZ_UUID,
-                                                  self.quantum.controller)
-        net_id = resp["net-id"]
-        self.networks.append(net_id)
-
-        try:
-            nvplib.get_port_status(self.quantum.controller, net_id,
-                                   "invalid-port")
-        except exception.PortNotFound:
-            return
-        # Shouldn't be reached
-        self.assertTrue(False)
-
-    def test_get_port_status_returns_the_right_stuff(self):
-        resp = self.quantum.create_custom_network("quantum-test-tenant",
-                                                  "quantum-Private-TenantA",
-                                                  self.BRIDGE_TZ_UUID,
-                                                  self.quantum.controller)
-        net_id = resp["net-id"]
-        self.networks.append(net_id)
-        resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                        "ACTIVE")
-        port_id = resp["port-id"]
-        self.ports.append((net_id, port_id))
-        res = nvplib.get_port_status(self.quantum.controller, net_id, port_id)
-        self.assertTrue(res in ['UP', 'DOWN', 'PROVISIONING'])
-
-    def test_get_port_stats_invalid_lswitch(self):
-        try:
-            nvplib.get_port_stats(self.quantum.controller,
-                                  "invalid-lswitch",
-                                  "invalid-port")
-        except exception.NetworkNotFound:
-            return
-        # Shouldn't be reached
-        self.assertTrue(False)
-
-    def test_get_port_stats_invalid_port(self):
-        resp = self.quantum.create_custom_network("quantum-test-tenant",
-                                                  "quantum-Private-TenantA",
-                                                  self.BRIDGE_TZ_UUID,
-                                                  self.quantum.controller)
-        net_id = resp["net-id"]
-        self.networks.append(net_id)
-
-        try:
-            nvplib.get_port_stats(self.quantum.controller, net_id,
-                                  "invalid-port")
-        except exception.PortNotFound:
-            return
-        # Shouldn't be reached
-        self.assertTrue(False)
-
-    def test_get_port_stats_returns_the_right_stuff(self):
-        resp = self.quantum.create_custom_network("quantum-test-tenant",
-                                                  "quantum-Private-TenantA",
-                                                  self.BRIDGE_TZ_UUID,
-                                                  self.quantum.controller)
-        net_id = resp["net-id"]
-        self.networks.append(net_id)
-        resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                        "ACTIVE")
-        port_id = resp["port-id"]
-        self.ports.append((net_id, port_id))
-        res = nvplib.get_port_stats(self.quantum.controller, net_id, port_id)
-        self.assertTrue("tx_errors" in res)
-        self.assertTrue("tx_bytes" in res)
-        self.assertTrue("tx_packets" in res)
-        self.assertTrue("rx_errors" in res)
-        self.assertTrue("rx_bytes" in res)
-        self.assertTrue("rx_packets" in res)
-
-    def test_port_filters_by_attachment(self):
-        resp = self.quantum.create_custom_network("quantum-test-tenant",
-                                                  "quantum-Private-TenantA",
-                                                  self.BRIDGE_TZ_UUID,
-                                                  self.quantum.controller)
-        net_id = resp["net-id"]
-        self.networks.append(net_id)
-
-        resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                        "ACTIVE")
-        port_id = resp["port-id"]
-        port_id1 = port_id
-        self.ports.append((net_id, port_id))
-        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id,
-                                    "attachment1")
-
-        resp = self.quantum.create_port("quantum-test-tenant", net_id,
-                                        "ACTIVE")
-        port_id = resp["port-id"]
-        port_id2 = port_id
-        self.ports.append((net_id, port_id))
-        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id,
-                                    "attachment2")
-
-        # Make sure we get all the ports that we created back
-        ports = self.quantum.get_all_ports("quantum-test-tenant", net_id)
-        self.assertTrue(len(ports) == 2)
-
-        # Make sure we only get the filtered ones back
-        ports = self.quantum.get_all_ports("quantum-test-tenant", net_id,
-                                           filter_opts={"attachment":
-                                                        "attachment2"})
-        self.assertTrue(len(ports) == 1)
-        self.assertTrue(ports[0]["port-id"] == port_id2)
-
-        # Make sure we don't get any back with an invalid filter
-        ports = self.quantum.get_all_ports(
-            "quantum-test-tenant", net_id,
-            filter_opts={"attachment": "invalidattachment"})
-        self.assertTrue(len(ports) == 0)