review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
Simplify NVP plugin configuration
author    armando-migliaccio <amigliaccio@nicira.com>
          Tue, 9 Apr 2013 00:40:21 +0000 (17:40 -0700)
committer armando-migliaccio <amigliaccio@nicira.com>
          Thu, 25 Apr 2013 20:56:32 +0000 (13:56 -0700)
Fixes bug #1121605

This patch replaces the nvp_controller_connection configuration option
with a set of options that are more intuitive to users and avoid
repetition in ini files.

In order to simplify the configuration of the plugin, this patch also
removes named clusters. As the plugin supports a single cluster at
the moment, its settings can simply be specified in the [DEFAULT]
configuration section.

Also, this patch restructures nvp_cluster.NVPCluster so that per-cluster
configuration options are no longer stored multiple times.

Change-Id: Id5f84220122d7c5f3239e3333cb772247d1ed05e
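
For illustration, a hedged before/after sketch of the change described above
(controller addresses, credentials, and UUIDs are taken from examples
elsewhere in this patch):

    # Before: per-cluster section, per-controller packed connection strings
    [CLUSTER:example]
    default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
    nvp_controller_connection = 10.0.1.2:443:admin:admin:30:10:2:2
    nvp_controller_connection = 10.0.1.3:443:admin:admin:30:10:2:2

    # After: flat options in [DEFAULT]; credentials, timeouts and retry
    # counts are stated once instead of being repeated per controller
    [DEFAULT]
    nvp_user = admin
    nvp_password = admin
    req_timeout = 30
    http_timeout = 10
    retries = 2
    redirects = 2
    nvp_controllers = 10.0.1.2:443, 10.0.1.3
    default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53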

15 files changed:
etc/quantum/plugins/nicira/nvp.ini
quantum/plugins/nicira/QuantumPlugin.py
quantum/plugins/nicira/README
quantum/plugins/nicira/check_nvp_config.py
quantum/plugins/nicira/common/config.py
quantum/plugins/nicira/common/exceptions.py
quantum/plugins/nicira/nvp_cluster.py
quantum/tests/unit/nicira/etc/nvp.ini.basic.test [new file with mode: 0644]
quantum/tests/unit/nicira/etc/nvp.ini.full.test [new file with mode: 0644]
quantum/tests/unit/nicira/etc/nvp.ini.grizzly.test [new file with mode: 0644]
quantum/tests/unit/nicira/etc/nvp.ini.test
quantum/tests/unit/nicira/test_defaults.py [deleted file]
quantum/tests/unit/nicira/test_nicira_plugin.py
quantum/tests/unit/nicira/test_nvplib.py
quantum/tests/unit/nicira/test_nvpopts.py [new file with mode: 0644]

index 6a5f542a2fffe3a2ca57e1bdfba8c294168d3577..87760e4a46fa86779df94cb3206bf408e034a036 100644 (file)
@@ -1,3 +1,67 @@
+# #############################################################
+# WARNINGS: The following deprecations have been made in the
+# Havana release. Support for the options below will be removed
+# in Ixxx.
+#
+# Section: [DEFAULT], Option: 'metadata_dhcp_host_route'
+# Remarks: Use 'enable_isolated_metadata' in dhcp_agent.ini.
+#
+#
+# Section: [CLUSTER:name], Option: 'nvp_controller_connection'
+# Remarks: The configuration now allows only a single cluster to
+#          be specified, therefore [CLUSTER:name] is no longer
+#          used.  Use the 'nvp_*' options ('req_timeout',
+#          'retries', etc.) as indicated in the DEFAULT section.
+#          Support for multiple clusters will be added through
+#          an API extension.
+# ##############################################################
+
+[DEFAULT]
+# User name for NVP controller
+# nvp_user = admin
+
+# Password for NVP controller
+# nvp_password = admin
+
+# Total time limit for a cluster request
+# (including retries across different controllers)
+# req_timeout = 30
+
+# Time before aborting a request on an unresponsive controller
+# http_timeout = 10
+
+# Maximum number of times a particular request should be retried
+# retries = 2
+
+# Maximum number of times a redirect response should be followed
+# redirects = 2
+
+# Comma-separated list of NVP controller endpoints (<ip>:<port>). When port
+# is omitted, 443 is assumed. This option MUST be specified, e.g.:
+# nvp_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh:80
+
+# UUID of the pre-existing default NVP Transport zone to be used for creating
+# tunneled isolated "Quantum" networks. This option MUST be specified, e.g.:
+# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
+
+# (Optional) UUID of the cluster in NVP.  It can be retrieved from the
+# "admin" section of the NVP management console.
+# nvp_cluster_uuid = 615be8e4-82e9-4fd2-b4b3-fd141e51a5a7
+
+# (Optional) UUID for the default l3 gateway service to use with this cluster.
+# To be specified if planning to use logical routers with external gateways.
+# default_l3_gw_service_uuid =
+
+# (Optional) UUID for the default l2 gateway service to use with this cluster.
+# To be specified if tenants are to be offered a predefined gateway for
+# connecting their networks.
+# default_l2_gw_service_uuid =
+
+# Default interface name to be used on the network gateway.  This value
+# will be used for any device associated with a network gateway for which an
+# interface name was not specified
+# default_iface_name = breth0
+
+
 [DATABASE]
 # This line MUST be changed to actually run the plugin.
 # Example:
 # Replace 127.0.0.1 above with the IP address of the database used by the
 # main quantum server. (Leave it as is if the database runs on this host.)
 sql_connection = sqlite://
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
+
+# Number of reconnection attempts to the DB; set to -1 to retry indefinitely
 # sql_max_retries = 10
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-reconnect_interval = 2
+
+# Period between reconnection attempts to the DB
+# reconnect_interval = 2
+
 # Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
 # sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
+
 # sql_dbpool_enable = False
+
 # Minimum number of SQL connections to keep open in a pool
 # sql_min_pool_size = 1
+
 # Maximum number of SQL connections to keep open in a pool
 # sql_max_pool_size = 5
+
 # Timeout in seconds before idle sql connections are reaped
 # sql_idle_timeout = 3600
 
+
 [QUOTAS]
 # number of network gateways allowed per tenant, -1 means unlimited
 # quota_network_gateway = 5
 
+
 [NVP]
 # Maximum number of ports for each bridged logical switch
 # max_lp_per_bridged_ls = 64
+
 # Maximum number of ports for each overlay (stt, gre) logical switch
 # max_lp_per_overlay_ls = 256
+
 # Number of connects to each controller node.
 # concurrent_connections = 3
-# Name of the default cluster where requests should be sent if a nova zone id
-# is not specified. If it is empty or reference a non-existent cluster
-# the first cluster specified in this configuration file will be used
-# default_cluster_name =
-# If set to access_network this enables a dedicated connection to the
-# metadata proxy for metadata server access via Quantum router. If set to
-# dhcp_host_route this enables host route injection via the dhcp agent.
+
+# Acceptable values for 'metadata_mode' are:
+#   - 'access_network': this enables a dedicated connection to the metadata
+#     proxy for metadata server access via Quantum router.
+#   - 'dhcp_host_route': this enables host route injection via the dhcp agent.
 # This option is only useful if running on a host that does not support
 # namespaces otherwise access_network should be used.
 # metadata_mode = access_network
-
-#[CLUSTER:example]
-# This is uuid of the default NVP Transport zone that will be used for
-# creating tunneled isolated "Quantum" networks.  It needs to be created in
-# NVP before starting Quantum with the nvp plugin.
-# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
-
-# Nova "zone" that maps to this NVP cluster.  This should map to the
-# node_availability_zone in your nova.conf for each nova cluster.  Each nova
-# cluster should have a unique node_availability_zone set.
-# nova_zone_id = zone1 # (Optional)
-
-# UUID of the cluster in NVP.  This can be retrieved from NVP management
-# console "admin" section.
-# nvp_cluster_uuid = 615be8e4-82e9-4fd2-b4b3-fd141e51a5a7 # (Optional)
-
-# UUID of the default layer 3 gateway service to use for this cluster
-# This is optional, but should be filled if planning to use logical routers
-# with external gateways
-# default_l3_gw_service_uuid =
-
-# UUID of the default layer 2 gateway service to use for this cluster
-# This is optional. It should be filled for providing a predefined gateway
-# tenant case use for connecting their networks.
-# default_l2_gw_service_uuid =
-
-# Name of the default interface name to be used on network-gateway.
-# This value will be used for any device associated with a network
-# gateway for which an interface name was not specified
-# default_iface_name = breth0
-
-# This parameter describes a connection to a single NVP controller. Format:
-# <ip>:<port>:<user>:<pw>:<req_timeout>:<http_timeout>:<retries>:<redirects>
-# <ip> is the ip address of the controller
-# <port> is the port of the controller (default NVP port is 443)
-# <user> is the user name for this controller
-# <pw> is the user password.
-# <req_timeout>: The total time limit on all operations for a controller
-#   request (including retries, redirects from unresponsive controllers).
-#   Default is 30.
-# <http_timeout>: How long to wait before aborting an unresponsive controller
-#   (and allow for retries to another controller in the cluster).
-#   Default is 10.
-# <retries>: the maximum number of times to retry a particular request
-#   Default is 2.
-# <redirects>: the maximum number of times to follow a redirect response from a server.
-#   Default is 2.
-# There must be at least one nvp_controller_connection per system or per cluster.
-# nvp_controller_connection=10.0.1.2:443:admin:admin:30:10:2:2
-# nvp_controller_connection=10.0.1.3:443:admin:admin:30:10:2:2
-# nvp_controller_connection=10.0.1.4:443:admin:admin:30:10:2:2
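
These flat options map directly onto oslo.config definitions. A rough sketch
of how they could be registered (the real definitions live in
quantum/plugins/nicira/common/config.py, which this commitdiff does not show;
names and defaults below are simply lifted from the ini comments above):

    from oslo.config import cfg

    # Sketch only: mirrors the commented-out defaults in nvp.ini.
    connection_opts = [
        cfg.StrOpt('nvp_user', default='admin',
                   help="User name for NVP controllers"),
        cfg.StrOpt('nvp_password', default='admin', secret=True,
                   help="Password for NVP controllers"),
        cfg.IntOpt('req_timeout', default=30,
                   help="Total time limit for a cluster request"),
        cfg.IntOpt('http_timeout', default=10,
                   help="Time before aborting an unresponsive request"),
        cfg.IntOpt('retries', default=2,
                   help="Maximum number of times a request is retried"),
        cfg.IntOpt('redirects', default=2,
                   help="Maximum number of redirects to follow"),
        cfg.ListOpt('nvp_controllers',
                    help="Comma-separated list of NVP controller endpoints"),
    ]
    cfg.CONF.register_opts(connection_opts)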
index 09aae0f38e05ad31c80e94031f613037a61f19fb..984cbec999d9eec3a19f821786e66a667b123249 100644 (file)
@@ -51,7 +51,7 @@ from quantum.extensions import providernet as pnet
 from quantum.extensions import securitygroup as ext_sg
 from quantum.openstack.common import importutils
 from quantum.openstack.common import rpc
-from quantum.plugins.nicira.common import config
+from quantum.plugins.nicira.common import config  # noqa
 from quantum.plugins.nicira.common import exceptions as nvp_exc
 from quantum.plugins.nicira.common import metadata_access as nvp_meta
 from quantum.plugins.nicira.common import securitygroups as nvp_sec
@@ -82,108 +82,25 @@ class NetworkTypes:
     VLAN = 'vlan'
 
 
-def parse_config():
-    """Parse the supplied plugin configuration.
-
-    :param config: a ConfigParser() object encapsulating nvp.ini.
-    :returns: A tuple: (clusters, plugin_config). 'clusters' is a list of
-        NVPCluster objects, 'plugin_config' is a dictionary with plugin
-        parameters (currently only 'max_lp_per_bridged_ls').
-    """
-    nvp_conf = config.ClusterConfigOptions(cfg.CONF)
-    cluster_names = config.register_cluster_groups(nvp_conf)
-    nvp_conf.log_opt_values(LOG, logging.DEBUG)
-
-    clusters_options = []
-    for cluster_name in cluster_names:
-        clusters_options.append(
-            {'name': cluster_name,
-             'default_tz_uuid':
-             nvp_conf[cluster_name].default_tz_uuid,
-             'nvp_cluster_uuid':
-             nvp_conf[cluster_name].nvp_cluster_uuid,
-             'nova_zone_id':
-             nvp_conf[cluster_name].nova_zone_id,
-             'nvp_controller_connection':
-             nvp_conf[cluster_name].nvp_controller_connection,
-             'default_l3_gw_service_uuid':
-             nvp_conf[cluster_name].default_l3_gw_service_uuid,
-             'default_l2_gw_service_uuid':
-             nvp_conf[cluster_name].default_l2_gw_service_uuid,
-             'default_interface_name':
-             nvp_conf[cluster_name].default_interface_name})
-    LOG.debug(_("Cluster options:%s"), clusters_options)
-
-    # If no api_extensions_path is provided set the following
-    if not cfg.CONF.api_extensions_path:
-        cfg.CONF.set_override(
-            'api_extensions_path',
-            'quantum/plugins/nicira/extensions')
-    if (cfg.CONF.NVP.metadata_mode == "access_network" and
-        not cfg.CONF.allow_overlapping_ips):
-        LOG.warn(_("Overlapping IPs must be enabled in order to setup "
-                   "the metadata access network. Metadata access in "
-                   "routed mode will not work with this configuration"))
-    return cfg.CONF.NVP, clusters_options
-
-
-def parse_clusters_opts(clusters_opts, concurrent_connections,
-                        nvp_gen_timeout, default_cluster_name):
-    # Will store the first cluster in case is needed for default
-    # cluster assignment
-    clusters = {}
-    first_cluster = None
-    for c_opts in clusters_opts:
-        # Password is guaranteed to be the same across all controllers
-        # in the same NVP cluster.
-        cluster = nvp_cluster.NVPCluster(c_opts['name'])
-        try:
-            for ctrl_conn in c_opts['nvp_controller_connection']:
-                args = ctrl_conn.split(':')
-                try:
-                    args.extend([c_opts['default_tz_uuid'],
-                                 c_opts['nvp_cluster_uuid'],
-                                 c_opts['nova_zone_id'],
-                                 c_opts['default_l3_gw_service_uuid'],
-                                 c_opts['default_l2_gw_service_uuid'],
-                                 c_opts['default_interface_name']])
-                    cluster.add_controller(*args)
-                except Exception:
-                    LOG.exception(_("Invalid connection parameters for "
-                                    "controller %(ctrl)s in "
-                                    "cluster %(cluster)s"),
-                                  {'ctrl': ctrl_conn,
-                                   'cluster': c_opts['name']})
-                    raise nvp_exc.NvpInvalidConnection(
-                        conn_params=ctrl_conn)
-        except TypeError:
-            msg = _("No controller connection specified in cluster "
-                    "configuration. Please ensure at least a value for "
-                    "'nvp_controller_connection' is specified in the "
-                    "[CLUSTER:%s] section") % c_opts['name']
-            LOG.exception(msg)
-            raise nvp_exc.NvpPluginException(err_msg=msg)
-
-        api_providers = [(x['ip'], x['port'], True)
-                         for x in cluster.controllers]
-        cluster.api_client = NvpApiClient.NVPApiHelper(
-            api_providers, cluster.user, cluster.password,
-            request_timeout=cluster.request_timeout,
-            http_timeout=cluster.http_timeout,
-            retries=cluster.retries,
-            redirects=cluster.redirects,
-            concurrent_connections=concurrent_connections,
-            nvp_gen_timeout=nvp_gen_timeout)
-
-        if not clusters:
-            first_cluster = cluster
-        clusters[c_opts['name']] = cluster
-
-    if default_cluster_name and default_cluster_name in clusters:
-        default_cluster = clusters[default_cluster_name]
-    else:
-        default_cluster = first_cluster
-    return (clusters, default_cluster)
+def create_nvp_cluster(cluster_opts, concurrent_connections,
+                       nvp_gen_timeout):
+    # NOTE(armando-migliaccio): remove this block once we no longer
+    # want to support deprecated options in the nvp config file
+    # ### BEGIN
+    config.register_deprecated(cfg.CONF)
+    # ### END
+    cluster = nvp_cluster.NVPCluster(**cluster_opts)
+    api_providers = [ctrl.split(':') + [True]
+                     for ctrl in cluster.nvp_controllers]
+    cluster.api_client = NvpApiClient.NVPApiHelper(
+        api_providers, cluster.nvp_user, cluster.nvp_password,
+        request_timeout=cluster.req_timeout,
+        http_timeout=cluster.http_timeout,
+        retries=cluster.retries,
+        redirects=cluster.redirects,
+        concurrent_connections=concurrent_connections,
+        nvp_gen_timeout=nvp_gen_timeout)
+    return cluster
 
 
 class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
@@ -223,8 +140,6 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
 
     # Map nova zones to cluster for easy retrieval
     novazone_cluster_map = {}
-    # Default controller cluster (to be used when nova zone id is unspecified)
-    default_cluster = None
 
     provider_network_view = "extension:provider_network:view"
     provider_network_set = "extension:provider_network:set"
@@ -263,32 +178,22 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                        'default': self._nvp_delete_port}
         }
 
-        self.nvp_opts, self.clusters_opts = parse_config()
-        if not self.clusters_opts:
-            msg = _("No cluster specified in NVP plugin configuration. "
-                    "Unable to start. Please ensure at least a "
-                    "[CLUSTER:<cluster_name>] section is specified in "
-                    "the NVP Plugin configuration file.")
-            LOG.error(msg)
-            raise nvp_exc.NvpPluginException(err_msg=msg)
-
-        self.clusters, self.default_cluster = parse_clusters_opts(
-            self.clusters_opts, self.nvp_opts.concurrent_connections,
-            self.nvp_opts.nvp_gen_timeout, self.nvp_opts.default_cluster_name)
+        self.nvp_opts = cfg.CONF.NVP
+        self.cluster = create_nvp_cluster(cfg.CONF,
+                                          self.nvp_opts.concurrent_connections,
+                                          self.nvp_opts.nvp_gen_timeout)
 
         db.configure_db()
-        # Extend the fault map
         self._extend_fault_map()
         # Set up RPC interface for DHCP agent
         self.setup_rpc()
         self.network_scheduler = importutils.import_object(
             cfg.CONF.network_scheduler_driver)
-        # TODO(salvatore-orlando): Handle default gateways in multiple clusters
         self._ensure_default_network_gateway()
 
     def _ensure_default_network_gateway(self):
         # Add the gw in the db as default, and unset any previous default
-        def_l2_gw_uuid = self.default_cluster.default_l2_gw_service_uuid
+        def_l2_gw_uuid = self.cluster.default_l2_gw_service_uuid
         try:
             ctx = q_context.get_admin_context()
             self._unset_default_network_gateways(ctx)
@@ -425,10 +330,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             max_ports = self.nvp_opts.max_lp_per_bridged_ls
             allow_extra_lswitches = True
         try:
-            cluster = self._find_target_cluster(port_data)
-            return self._handle_lswitch_selection(
-                cluster, network, network_binding, max_ports,
-                allow_extra_lswitches)
+            return self._handle_lswitch_selection(self.cluster, network,
+                                                  network_binding, max_ports,
+                                                  allow_extra_lswitches)
         except NvpApiClient.NvpApiException:
             err_desc = _("An exception occured while selecting logical "
                          "switch for the port")
@@ -461,10 +365,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             # No need to actually update the DB state - the default is down
             return port_data
         try:
-            cluster = self._find_target_cluster(port_data)
             selected_lswitch = self._nvp_find_lswitch_for_port(context,
                                                                port_data)
-            lport = self._nvp_create_port_helper(cluster,
+            lport = self._nvp_create_port_helper(self.cluster,
                                                  selected_lswitch['uuid'],
                                                  port_data,
                                                  True)
@@ -473,7 +376,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             if (not port_data['device_owner'] in
                 (l3_db.DEVICE_OWNER_ROUTER_GW,
                  l3_db.DEVICE_OWNER_ROUTER_INTF)):
-                nvplib.plug_interface(cluster, selected_lswitch['uuid'],
+                nvplib.plug_interface(self.cluster, selected_lswitch['uuid'],
                                       lport['uuid'], "VifAttachment",
                                       port_data['id'])
             LOG.debug(_("_nvp_create_port completed for port %(name)s "
@@ -495,7 +398,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                         "external networks. Port %s will be down."),
                       port_data['network_id'])
             return
-        nvp_port_id = self._nvp_get_port_id(context, self.default_cluster,
+        nvp_port_id = self._nvp_get_port_id(context, self.cluster,
                                             port_data)
         if not nvp_port_id:
             LOG.debug(_("Port '%s' was already deleted on NVP platform"), id)
@@ -504,7 +407,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         # back will have zero ports after the delete we should garbage collect
         # the lswitch.
         try:
-            nvplib.delete_port(self.default_cluster,
+            nvplib.delete_port(self.cluster,
                                port_data['network_id'],
                                nvp_port_id)
             LOG.debug(_("_nvp_delete_port completed for port %(port_id)s "
@@ -518,13 +421,13 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
     def _nvp_delete_router_port(self, context, port_data):
         # Delete logical router port
         lrouter_id = port_data['device_id']
-        nvp_port_id = self._nvp_get_port_id(context, self.default_cluster,
+        nvp_port_id = self._nvp_get_port_id(context, self.cluster,
                                             port_data)
         if not nvp_port_id:
             raise q_exc.PortNotFound(port_id=port_data['id'])
 
         try:
-            nvplib.delete_peer_router_lport(self.default_cluster,
+            nvplib.delete_peer_router_lport(self.cluster,
                                             lrouter_id,
                                             port_data['network_id'],
                                             nvp_port_id)
@@ -550,9 +453,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         try:
             selected_lswitch = self._nvp_find_lswitch_for_port(context,
                                                                port_data)
-            cluster = self._find_target_cluster(port_data)
             # Do not apply port security here!
-            lport = self._nvp_create_port_helper(cluster,
+            lport = self._nvp_create_port_helper(self.cluster,
                                                  selected_lswitch['uuid'],
                                                  port_data,
                                                  False)
@@ -570,14 +472,13 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
 
     def _find_router_gw_port(self, context, port_data):
         router_id = port_data['device_id']
-        cluster = self._find_target_cluster(port_data)
         if not router_id:
             raise q_exc.BadRequest(_("device_id field must be populated in "
                                    "order to create an external gateway "
                                    "port for network %s"),
                                    port_data['network_id'])
 
-        lr_port = nvplib.find_router_gw_port(context, cluster, router_id)
+        lr_port = nvplib.find_router_gw_port(context, self.cluster, router_id)
         if not lr_port:
             raise nvp_exc.NvpPluginException(
                 err_msg=(_("The gateway port for the router %s "
@@ -598,9 +499,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         # the fabric status of the NVP router will be down.
         # admin_status should always be up for the gateway port
         # regardless of what the user specifies in quantum
-        cluster = self._find_target_cluster(port_data)
         router_id = port_data['device_id']
-        nvplib.update_router_lport(cluster,
+        nvplib.update_router_lport(self.cluster,
                                    router_id,
                                    lr_port['uuid'],
                                    port_data['tenant_id'],
@@ -612,7 +512,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         if ext_network.get(pnet.NETWORK_TYPE) == NetworkTypes.L3_EXT:
             # Update attachment
             self._update_router_port_attachment(
-                cluster, context, router_id, port_data,
+                self.cluster, context, router_id, port_data,
                 "L3GatewayAttachment",
                 ext_network[pnet.PHYSICAL_NETWORK],
                 ext_network[pnet.SEGMENTATION_ID],
@@ -620,7 +520,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         # Set the SNAT rule for each subnet (only first IP)
         for cidr in self._find_router_subnets_cidrs(context, router_id):
             nvplib.create_lrouter_snat_rule(
-                cluster, router_id,
+                self.cluster, router_id,
                 ip_addresses[0].split('/')[0],
                 ip_addresses[0].split('/')[0],
                 order=NVP_EXTGW_NAT_RULES_ORDER,
@@ -640,9 +540,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         try:
             # Delete is actually never a real delete, otherwise the NVP
             # logical router will stop working
-            cluster = self._find_target_cluster(port_data)
             router_id = port_data['device_id']
-            nvplib.update_router_lport(cluster,
+            nvplib.update_router_lport(self.cluster,
                                        router_id,
                                        lr_port['uuid'],
                                        port_data['tenant_id'],
@@ -653,14 +552,14 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             # Delete the SNAT rule for each subnet
             for cidr in self._find_router_subnets_cidrs(context, router_id):
                 nvplib.delete_nat_rules_by_match(
-                    cluster, router_id, "SourceNatRule",
+                    self.cluster, router_id, "SourceNatRule",
                     max_num_expected=1, min_num_expected=1,
                     source_ip_addresses=cidr)
             # Reset attachment
             self._update_router_port_attachment(
-                cluster, context, router_id, port_data,
+                self.cluster, context, router_id, port_data,
                 "L3GatewayAttachment",
-                self.default_cluster.default_l3_gw_service_uuid,
+                self.cluster.default_l3_gw_service_uuid,
                 nvp_router_port_id=lr_port['uuid'])
 
         except NvpApiClient.ResourceNotFound:
@@ -690,17 +589,16 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             # No need to actually update the DB state - the default is down
             return port_data
         try:
-            cluster = self._find_target_cluster(port_data)
             selected_lswitch = self._nvp_find_lswitch_for_port(context,
                                                                port_data)
-            lport = self._nvp_create_port_helper(cluster,
+            lport = self._nvp_create_port_helper(self.cluster,
                                                  selected_lswitch['uuid'],
                                                  port_data,
                                                  True)
             nicira_db.add_quantum_nvp_port_mapping(
                 context.session, port_data['id'], lport['uuid'])
             nvplib.plug_l2_gw_service(
-                cluster,
+                self.cluster,
                 port_data['network_id'],
                 lport['uuid'],
                 port_data['device_id'],
@@ -763,34 +661,6 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                                nvp_exc.NvpNoMorePortsException:
                                webob.exc.HTTPBadRequest})
 
-    def _novazone_to_cluster(self, novazone_id):
-        if novazone_id in self.novazone_cluster_map:
-            return self.novazone_cluster_map[novazone_id]
-        LOG.debug(_("Looking for nova zone: %s"), novazone_id)
-        for x in self.clusters:
-            LOG.debug(_("Looking for nova zone %(novazone_id)s in "
-                        "cluster: %(x)s"),
-                      {'novazone_id': novazone_id, 'x': x})
-            if x.zone == str(novazone_id):
-                self.novazone_cluster_map[x.zone] = x
-                return x
-        LOG.error(_("Unable to find cluster config entry for nova zone: %s"),
-                  novazone_id)
-        raise nvp_exc.NvpInvalidNovaZone(nova_zone=novazone_id)
-
-    def _find_target_cluster(self, resource):
-        """Return cluster where configuration should be applied
-
-        If the resource being configured has a paremeter expressing
-        the zone id (nova_id), then select corresponding cluster,
-        otherwise return default cluster.
-
-        """
-        if 'nova_id' in resource:
-            return self._novazone_to_cluster(resource['nova_id'])
-        else:
-            return self.default_cluster
-
     def _check_view_auth(self, context, resource, action):
         return policy.check(context, action, resource)
 
@@ -865,7 +735,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                                   allow_extra_lswitches):
         lswitches = nvplib.get_lswitches(cluster, network.id)
         try:
-            # TODO(savatore-orlando) Find main_ls too!
+            # TODO(salvatore-orlando) find main_ls too!
             return [ls for ls in lswitches
                     if (ls['_relations']['LogicalSwitchStatus']
                         ['lport_count'] < max_ports)].pop(0)
@@ -908,15 +778,6 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         # Consume from all consumers in a thread
         self.conn.consume_in_thread()
 
-    def get_all_networks(self, tenant_id, **kwargs):
-        networks = []
-        for c in self.clusters:
-            networks.extend(nvplib.get_all_networks(c, tenant_id, networks))
-        LOG.debug(_("get_all_networks() completed for tenant "
-                    "%(tenant_id)s: %(networks)s"),
-                  {'tenant_id': tenant_id, 'networks': networks})
-        return networks
-
     def create_network(self, context, network):
         net_data = network['network']
         tenant_id = self._get_tenant_id_for_create(context, net_data)
@@ -932,7 +793,6 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             LOG.warning(_("Network with admin_state_up=False are not yet "
                           "supported by this plugin. Ignoring setting for "
                           "network %s"), net_data.get('name', '<unknown>'))
-        target_cluster = self._find_target_cluster(net_data)
         external = net_data.get(l3.EXTERNAL)
         if (not attr.is_attr_set(external) or
             attr.is_attr_set(external) and not external):
@@ -940,9 +800,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             if nvp_binding_type in ('flat', 'vlan'):
                 nvp_binding_type = 'bridge'
             lswitch = nvplib.create_lswitch(
-                target_cluster, tenant_id, net_data.get('name'),
-                nvp_binding_type,
-                net_data.get(pnet.PHYSICAL_NETWORK),
+                self.cluster, tenant_id, net_data.get('name'),
+                nvp_binding_type, net_data.get(pnet.PHYSICAL_NETWORK),
                 net_data.get(pnet.SEGMENTATION_ID),
                 shared=net_data.get(attr.SHARED))
             net_data['id'] = lswitch['uuid']
@@ -986,7 +845,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         router_iface_ports = self.get_ports(context, filters=port_filter)
         for port in router_iface_ports:
             nvp_port_id = self._nvp_get_port_id(
-                context, self.default_cluster, port)
+                context, self.cluster, port)
             if nvp_port_id:
                 port['nvp_port_id'] = nvp_port_id
             else:
@@ -998,7 +857,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         for port in router_iface_ports:
             try:
                 if 'nvp_port_id' in port:
-                    nvplib.delete_peer_router_lport(self.default_cluster,
+                    nvplib.delete_peer_router_lport(self.cluster,
                                                     port['device_id'],
                                                     port['network_id'],
                                                     port['nvp_port_id'])
@@ -1015,35 +874,14 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         # Do not go to NVP for external networks
         if not external:
             try:
-                # FIXME(salvatore-orlando): Failures here might lead NVP
-                # and quantum state to diverge
-                pairs = self._get_lswitch_cluster_pairs(id, context.tenant_id)
-                for (cluster, switches) in pairs:
-                    nvplib.delete_networks(cluster, id, switches)
-
+                lswitch_ids = [ls['uuid'] for ls in
+                               nvplib.get_lswitches(self.cluster, id)]
+                nvplib.delete_networks(self.cluster, id, lswitch_ids)
                 LOG.debug(_("delete_network completed for tenant: %s"),
                           context.tenant_id)
             except q_exc.NotFound:
                 LOG.warning(_("Did not found lswitch %s in NVP"), id)
 
-    def _get_lswitch_cluster_pairs(self, netw_id, tenant_id):
-        """Figure out the set of lswitches on each cluster that maps to this
-           network id
-        """
-        pairs = []
-        for c in self.clusters.itervalues():
-            lswitches = []
-            try:
-                results = nvplib.get_lswitches(c, netw_id)
-                lswitches.extend([ls['uuid'] for ls in results])
-            except q_exc.NetworkNotFound:
-                continue
-            pairs.append((c, lswitches))
-        if not pairs:
-            raise q_exc.NetworkNotFound(net_id=netw_id)
-        LOG.debug(_("Returning pairs for network: %s"), pairs)
-        return pairs
-
     def get_network(self, context, id, fields=None):
         with context.session.begin(subtransactions=True):
             # goto to the plugin DB and fetch the network
@@ -1053,11 +891,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                 # verify the fabric status of the corresponding
                 # logical switch(es) in nvp
                 try:
-                    # FIXME(salvatore-orlando): This is not going to work
-                    # unless we store the nova_id in the database once we'll
-                    # enable multiple clusters
-                    cluster = self._find_target_cluster(network)
-                    lswitches = nvplib.get_lswitches(cluster, id)
+                    lswitches = nvplib.get_lswitches(self.cluster, id)
                     nvp_net_status = constants.NET_STATUS_ACTIVE
                     quantum_status = network.status
                     for lswitch in lswitches:
@@ -1123,18 +957,14 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             relations='LogicalSwitchStatus',
             filters={'tag': 'true', 'tag_scope': 'shared'})
         try:
-            for c in self.clusters.itervalues():
-                res = nvplib.get_all_query_pages(
-                    lswitch_url_path_1, c)
-                nvp_lswitches.update(dict(
-                    (ls['uuid'], ls) for ls in res))
-                # Issue a second query for fetching shared networks.
-                # We cannot unfortunately use just a single query because tags
-                # cannot be or-ed
-                res_shared = nvplib.get_all_query_pages(
-                    lswitch_url_path_2, c)
-                nvp_lswitches.update(dict(
-                    (ls['uuid'], ls) for ls in res_shared))
+            res = nvplib.get_all_query_pages(lswitch_url_path_1, self.cluster)
+            nvp_lswitches.update(dict((ls['uuid'], ls) for ls in res))
+            # Issue a second query for fetching shared networks.
+            # We cannot unfortunately use just a single query because tags
+            # cannot be or-ed
+            res_shared = nvplib.get_all_query_pages(lswitch_url_path_2,
+                                                    self.cluster)
+            nvp_lswitches.update(dict((ls['uuid'], ls) for ls in res_shared))
         except Exception:
             err_msg = _("Unable to get logical switches")
             LOG.exception(err_msg)
@@ -1244,24 +1074,23 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         lport_fields_str = ("tags,admin_status_enabled,display_name,"
                             "fabric_status_up")
         try:
-            for c in self.clusters.itervalues():
-                lport_query_path = (
-                    "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
-                    "&relations=LogicalPortStatus" %
-                    (lswitch, lport_fields_str, vm_filter, tenant_filter))
-
-                try:
-                    ports = nvplib.get_all_query_pages(lport_query_path, c)
-                except q_exc.NotFound:
-                    LOG.warn(_("Lswitch %s not found in NVP"), lswitch)
-                    ports = None
-
-                if ports:
-                    for port in ports:
-                        for tag in port["tags"]:
-                            if tag["scope"] == "q_port_id":
-                                nvp_lports[tag["tag"]] = port
+            lport_query_path = (
+                "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
+                "&relations=LogicalPortStatus" %
+                (lswitch, lport_fields_str, vm_filter, tenant_filter))
 
+            try:
+                ports = nvplib.get_all_query_pages(lport_query_path,
+                                                   self.cluster)
+            except q_exc.NotFound:
+                LOG.warn(_("Lswitch %s not found in NVP"), lswitch)
+                ports = None
+
+            if ports:
+                for port in ports:
+                    for tag in port["tags"]:
+                        if tag["scope"] == "q_port_id":
+                            nvp_lports[tag["tag"]] = port
         except Exception:
             err_msg = _("Unable to get ports")
             LOG.exception(err_msg)
@@ -1438,8 +1267,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             self._extend_port_dict_security_group(context, ret_port)
             LOG.debug(_("Update port request: %s"), port)
             nvp_port_id = self._nvp_get_port_id(
-                context, self.default_cluster, ret_port)
-            nvplib.update_port(self.default_cluster,
+                context, self.cluster, ret_port)
+            nvplib.update_port(self.cluster,
                                ret_port['network_id'],
                                nvp_port_id, id, tenant_id,
                                ret_port['name'], ret_port['device_id'],
@@ -1458,7 +1287,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         # the status.
         try:
             ret_port['status'] = nvplib.get_port_status(
-                self.default_cluster, ret_port['network_id'], nvp_port_id)
+                self.cluster, ret_port['network_id'], nvp_port_id)
         except Exception:
             LOG.warn(_("Unable to retrieve port status for:%s."), nvp_port_id)
         return ret_port
@@ -1520,15 +1349,14 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             if self._network_is_external(context,
                                          quantum_db_port['network_id']):
                 return quantum_db_port
-            nvp_id = self._nvp_get_port_id(context, self.default_cluster,
+            nvp_id = self._nvp_get_port_id(context, self.cluster,
                                            quantum_db_port)
             # If there's no nvp IP do not bother going to NVP and put
             # the port in error state
             if nvp_id:
-                #TODO(salvatore-orlando): pass the appropriate cluster here
                 try:
                     port = nvplib.get_logical_port_status(
-                        self.default_cluster, quantum_db_port['network_id'],
+                        self.cluster, quantum_db_port['network_id'],
                         nvp_id)
                     quantum_db_port["admin_state_up"] = (
                         port["admin_status_enabled"])
@@ -1574,8 +1402,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                     if ext_net.subnets:
                         ext_subnet = ext_net.subnets[0]
                         nexthop = ext_subnet.gateway_ip
-            cluster = self._find_target_cluster(router)
-            lrouter = nvplib.create_lrouter(cluster, tenant_id,
+            lrouter = nvplib.create_lrouter(self.cluster, tenant_id,
                                             router['router']['name'],
                                             nexthop)
             # Use NVP identfier for Quantum resource
@@ -1584,12 +1411,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             raise nvp_exc.NvpPluginException(
                 err_msg=_("Unable to create logical router on NVP Platform"))
         # Create the port here - and update it later if we have gw_info
-        self._create_and_attach_router_port(cluster,
-                                            context,
-                                            lrouter['uuid'],
-                                            {'fake_ext_gw': True},
-                                            "L3GatewayAttachment",
-                                            cluster.default_l3_gw_service_uuid)
+        self._create_and_attach_router_port(
+            self.cluster, context, lrouter['uuid'], {'fake_ext_gw': True},
+            "L3GatewayAttachment", self.cluster.default_l3_gw_service_uuid)
 
         with context.session.begin(subtransactions=True):
             router_db = l3_db.Router(id=lrouter['uuid'],
@@ -1623,8 +1447,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                     if ext_net.subnets:
                         ext_subnet = ext_net.subnets[0]
                         nexthop = ext_subnet.gateway_ip
-            cluster = self._find_target_cluster(router)
-            nvplib.update_lrouter(cluster, id,
+            nvplib.update_lrouter(self.cluster, id,
                                   router['router'].get('name'), nexthop)
         except NvpApiClient.ResourceNotFound:
             raise nvp_exc.NvpPluginException(
@@ -1649,7 +1472,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             # allow an extra field for storing the cluster information
             # together with the resource
             try:
-                nvplib.delete_lrouter(self.default_cluster, id)
+                nvplib.delete_lrouter(self.cluster, id)
             except q_exc.NotFound:
                 LOG.warning(_("Logical router '%s' not found "
                               "on NVP Platform") % id)
@@ -1661,11 +1484,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
     def get_router(self, context, id, fields=None):
         router = self._get_router(context, id)
         try:
-            # FIXME(salvatore-orlando): We need to
-            # find the appropriate cluster!
-            cluster = self.default_cluster
             try:
-                lrouter = nvplib.get_lrouter(cluster, id)
+                lrouter = nvplib.get_lrouter(self.cluster, id)
             except q_exc.NotFound:
                 lrouter = {}
                 router_op_status = constants.NET_STATUS_ERROR
@@ -1706,7 +1526,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         else:
             tenant_id = context.tenant_id
         try:
-            nvp_lrouters = nvplib.get_lrouters(self.default_cluster,
+            nvp_lrouters = nvplib.get_lrouters(self.cluster,
                                                tenant_id,
                                                fields)
         except NvpApiClient.NvpApiException:
@@ -1747,16 +1567,13 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         port_id = router_iface_info['port_id']
         subnet_id = router_iface_info['subnet_id']
         # Add port to the logical router as well
-        # TODO(salvatore-orlando): Identify the appropriate cluster, instead
-        # of always defaulting to self.default_cluster
-        cluster = self.default_cluster
         # The owner of the router port is always the same as the owner of the
         # router. Use tenant_id from the port instead of fetching more records
         # from the Quantum database
         port = self._get_port(context, port_id)
         # Find the NVP port corresponding to quantum port_id
         results = nvplib.query_lswitch_lports(
-            cluster, '*',
+            self.cluster, '*',
             filters={'tag': port_id, 'tag_scope': 'q_port_id'})
         if results:
             ls_port = results[0]
@@ -1769,7 +1586,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
 
         # Create logical router port and patch attachment
         self._create_and_attach_router_port(
-            cluster, context, router_id, port,
+            self.cluster, context, router_id, port,
             "PatchAttachment", ls_port['uuid'],
             subnet_ids=[subnet_id])
         subnet = self._get_subnet(context, subnet_id)
@@ -1783,11 +1600,11 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             if gw_port.get('fixed_ips'):
                 snat_ip = gw_port['fixed_ips'][0]['ip_address']
                 nvplib.create_lrouter_snat_rule(
-                    cluster, router_id, snat_ip, snat_ip,
+                    self.cluster, router_id, snat_ip, snat_ip,
                     order=NVP_EXTGW_NAT_RULES_ORDER,
                     match_criteria={'source_ip_addresses': subnet['cidr']})
         nvplib.create_lrouter_nosnat_rule(
-            cluster, router_id,
+            self.cluster, router_id,
             order=NVP_NOSNAT_RULES_ORDER,
             match_criteria={'destination_ip_addresses': subnet['cidr']})
 
@@ -1801,8 +1618,6 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         return router_iface_info
 
     def remove_router_interface(self, context, router_id, interface_info):
-        # TODO(salvatore-orlando): Usual thing about cluster selection
-        cluster = self.default_cluster
         # The code below is duplicated from base class, but comes handy
         # as we need to retrieve the router port id before removing the port
         subnet = None
@@ -1833,7 +1648,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                 raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
                                                           subnet_id=subnet_id)
         results = nvplib.query_lswitch_lports(
-            cluster, '*', relations="LogicalPortAttachment",
+            self.cluster, '*', relations="LogicalPortAttachment",
             filters={'tag': port_id, 'tag_scope': 'q_port_id'})
         lrouter_port_id = None
         if results:
@@ -1871,16 +1686,17 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             # Remove SNAT rule if external gateway is configured
             if router.gw_port:
                 nvplib.delete_nat_rules_by_match(
-                    cluster, router_id, "SourceNatRule",
+                    self.cluster, router_id, "SourceNatRule",
                     max_num_expected=1, min_num_expected=1,
                     source_ip_addresses=subnet['cidr'])
             # Relax the minimum expected number as the nosnat rules
             # do not exist in 2.x deployments
             nvplib.delete_nat_rules_by_match(
-                cluster, router_id, "NoSourceNatRule",
+                self.cluster, router_id, "NoSourceNatRule",
                 max_num_expected=1, min_num_expected=0,
                 destination_ip_addresses=subnet['cidr'])
-            nvplib.delete_router_lport(cluster, router_id, lrouter_port_id)
+            nvplib.delete_router_lport(self.cluster,
+                                       router_id, lrouter_port_id)
         except NvpApiClient.ResourceNotFound:
             raise nvp_exc.NvpPluginException(
                 err_msg=(_("Logical router port resource %s not found "
@@ -1893,11 +1709,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
     def _retrieve_and_delete_nat_rules(self, floating_ip_address,
                                        internal_ip, router_id,
                                        min_num_rules_expected=0):
-        #TODO(salvatore-orlando): Multiple cluster support
-        cluster = self.default_cluster
         try:
             nvplib.delete_nat_rules_by_match(
-                cluster, router_id, "DestinationNatRule",
+                self.cluster, router_id, "DestinationNatRule",
                 max_num_expected=1,
                 min_num_expected=min_num_rules_expected,
                 destination_ip_addresses=floating_ip_address)
@@ -1905,7 +1719,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             # Remove SNAT rule associated with the single fixed_ip
             # to floating ip
             nvplib.delete_nat_rules_by_match(
-                cluster, router_id, "SourceNatRule",
+                self.cluster, router_id, "SourceNatRule",
                 max_num_expected=1,
                 min_num_expected=min_num_rules_expected,
                 source_ip_addresses=internal_ip)
@@ -1924,12 +1738,12 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         # Fetch logical port of router's external gateway
         router_id = fip_db.router_id
         nvp_gw_port_id = nvplib.find_router_gw_port(
-            context, self.default_cluster, router_id)['uuid']
+            context, self.cluster, router_id)['uuid']
         ext_quantum_port_db = self._get_port(context.elevated(),
                                              fip_db.floating_port_id)
         nvp_floating_ips = self._build_ip_address_list(
             context.elevated(), ext_quantum_port_db['fixed_ips'])
-        nvplib.update_lrouter_port_ips(self.default_cluster,
+        nvplib.update_lrouter_port_ips(self.cluster,
                                        router_id,
                                        nvp_gw_port_id,
                                        ips_to_add=[],
@@ -1963,7 +1777,6 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                 fip,
                 floatingip_db['floating_network_id'])
 
-        cluster = self._find_target_cluster(fip)
         floating_ip = floatingip_db['floating_ip_address']
         # Retrieve and delete existing NAT rules, if any
         if not router_id and floatingip_db.get('fixed_port_id'):
@@ -1980,7 +1793,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                                                 router_id)
             # Fetch logical port of router's external gateway
             nvp_gw_port_id = nvplib.find_router_gw_port(
-                context, self.default_cluster, router_id)['uuid']
+                context, self.cluster, router_id)['uuid']
             nvp_floating_ips = self._build_ip_address_list(
                 context.elevated(), external_port['fixed_ips'])
             LOG.debug(_("Address list for NVP logical router "
@@ -1990,18 +1803,18 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                 try:
                     # Create new NAT rules
                     nvplib.create_lrouter_dnat_rule(
-                        cluster, router_id, internal_ip,
+                        self.cluster, router_id, internal_ip,
                         order=NVP_FLOATINGIP_NAT_RULES_ORDER,
                         match_criteria={'destination_ip_addresses':
                                         floating_ip})
                     # setup snat rule such that src ip of a IP packet when
                     #  using floating is the floating ip itself.
                     nvplib.create_lrouter_snat_rule(
-                        cluster, router_id, floating_ip, floating_ip,
+                        self.cluster, router_id, floating_ip, floating_ip,
                         order=NVP_FLOATINGIP_NAT_RULES_ORDER,
                         match_criteria={'source_ip_addresses': internal_ip})
                     # Add Floating IP address to router_port
-                    nvplib.update_lrouter_port_ips(cluster,
+                    nvplib.update_lrouter_port_ips(self.cluster,
                                                    router_id,
                                                    nvp_gw_port_id,
                                                    ips_to_add=nvp_floating_ips,
@@ -2017,7 +1830,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             elif floatingip_db['fixed_port_id']:
                 # This is a disassociation.
                 # Remove floating IP address from logical router port
-                nvplib.update_lrouter_port_ips(cluster,
+                nvplib.update_lrouter_port_ips(self.cluster,
                                                router_id,
                                                nvp_gw_port_id,
                                                ips_to_add=[],
@@ -2062,16 +1875,14 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         # Need to re-do authZ checks here in order to avoid creation on NVP
         gw_data = network_gateway[networkgw.RESOURCE_NAME.replace('-', '_')]
         tenant_id = self._get_tenant_id_for_create(context, gw_data)
-        cluster = self._find_target_cluster(gw_data)
         devices = gw_data['devices']
         # Populate default physical network where not specified
         for device in devices:
             if not device.get('interface_name'):
-                device['interface_name'] = cluster.default_interface_name
+                device['interface_name'] = self.cluster.default_interface_name
         try:
-            nvp_res = nvplib.create_l2_gw_service(cluster, tenant_id,
-                                                  gw_data['name'],
-                                                  devices)
+            nvp_res = nvplib.create_l2_gw_service(self.cluster, tenant_id,
+                                                  gw_data['name'], devices)
             nvp_uuid = nvp_res.get('uuid')
         except Exception:
             raise nvp_exc.NvpPluginException(
@@ -2091,7 +1902,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         with context.session.begin(subtransactions=True):
             try:
                 super(NvpPluginV2, self).delete_network_gateway(context, id)
-                nvplib.delete_l2_gw_service(self.default_cluster, id)
+                nvplib.delete_l2_gw_service(self.cluster, id)
             except NvpApiClient.ResourceNotFound:
                 # Do not cause a 500 to be returned to the user if
                 # the corresponding NVP resource does not exist
@@ -2133,7 +1944,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         if not default_sg:
             self._ensure_default_security_group(context, tenant_id)
 
-        nvp_secgroup = nvplib.create_security_profile(self.default_cluster,
+        nvp_secgroup = nvplib.create_security_profile(self.cluster,
                                                       tenant_id, s)
         security_group['security_group']['id'] = nvp_secgroup['uuid']
         return super(NvpPluginV2, self).create_security_group(
@@ -2156,7 +1967,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             if super(NvpPluginV2, self)._get_port_security_group_bindings(
                 context, filters):
                 raise ext_sg.SecurityGroupInUse(id=security_group['id'])
-            nvplib.delete_security_profile(self.default_cluster,
+            nvplib.delete_security_profile(self.cluster,
                                            security_group['id'])
             return super(NvpPluginV2, self).delete_security_group(
                 context, security_group_id)
@@ -2192,7 +2003,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             # of them to PUT to NVP.
             combined_rules = self._merge_security_group_rules_with_current(
                 context, s, security_group['id'])
-            nvplib.update_security_group_rules(self.default_cluster,
+            nvplib.update_security_group_rules(self.cluster,
                                                security_group['id'],
                                                combined_rules)
             return super(
@@ -2218,7 +2029,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
             self._remove_security_group_with_id_and_id_field(
                 current_rules, sgrid)
             nvplib.update_security_group_rules(
-                self.default_cluster, sgid, current_rules)
+                self.cluster, sgid, current_rules)
             return super(NvpPluginV2, self).delete_security_group_rule(context,
                                                                        sgrid)
 
@@ -2227,7 +2038,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
         if check_policy:
             self._enforce_set_auth(context, q, ext_qos.qos_queue_create)
         self._validate_qos_queue(context, q)
-        q['id'] = nvplib.create_lqueue(self.default_cluster,
+        q['id'] = nvplib.create_lqueue(self.cluster,
                                        self._nvp_lqueue(q))
         return super(NvpPluginV2, self).create_qos_queue(context, qos_queue)
 
@@ -2239,7 +2050,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
                 raise ext_qos.QueueInUseByPort()
             else:
                 return
-        nvplib.delete_lqueue(self.default_cluster, id)
+        nvplib.delete_lqueue(self.cluster, id)
         return super(NvpPluginV2, self).delete_qos_queue(context, id)
 
     def get_qos_queue(self, context, id, fields=None):
index 01908735a88d236dacdb3ec09c2a332a8a583238..10c7fe1a28f2016477a968a1014a3868510550c1 100644 (file)
@@ -24,20 +24,17 @@ NVP Plugin configuration
     - nvp_gen_timeout: Number of seconds a generation id should be valid for
     (default -1 meaning do not time out)
     3) NVP cluster
-    The Quantum NVP plugin allow for configuring multiple clusters.
-    Each cluster configuration section must be declared in the following way
-    in the configuration file: [CLUSTER:cluster_name].
-    The following parameters can be configured for each cluster:
+    The Quantum NVP plugin can talk to multiple controllers within a
+    single cluster. Support for multiple clusters will be added in a
+    future release (Havana or later).
+    The following parameters can be configured:
     - default_tz_uuid: This is uuid of the default NVP Transport zone that
-    will be used for creating tunneled isolated "Quantum" networks. It
-    needs to be created in NVP before starting Quantum with the nvp plugin.
-        - nova_zone_id: Optional parameter identifying the Nova "zone" that maps
-        to this NVP cluster.
-        - nvp_cluster_uuid: Optional paramter identifying the UUID of the cluster
-        in NVP.  This can be retrieved from NVP management console "admin" section.
-        - nvp_controller_connetion: describes a connection to a single NVP
-        controller. A different connection for each controller in the cluster can
-        be specified; there must be at least one connection per cluster.
+      will be used for creating tunneled isolated "Quantum" networks. It
+      needs to be created in NVP before starting Quantum with the nvp plugin.
+    - nvp_cluster_uuid: Optional parameter identifying the UUID of the cluster
+      in NVP.  This can be retrieved from NVP management console "admin" section.
+    - nvp_controllers: comma-separated list of NVP controller endpoints
+    More details can be found in etc/quantum/plugins/nicira/nvp.ini
 
 Quantum Configuration
 
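For a quick sanity check of the new flat layout, here is a minimal
sketch (not part of this patch; the config-file path is illustrative)
that registers the options named above with oslo.config and reads them
back the same way the plugin does:

    from oslo.config import cfg

    opts = [
        cfg.ListOpt('nvp_controllers'),
        cfg.StrOpt('default_tz_uuid'),
        cfg.StrOpt('nvp_user', default='admin'),
        cfg.StrOpt('nvp_password', default='admin', secret=True),
    ]
    conf = cfg.ConfigOpts()
    conf.register_opts(opts)
    # point this at a real nvp.ini
    conf(['--config-file', '/etc/quantum/plugins/nicira/nvp.ini'])
    print conf.nvp_controllers, conf.default_tz_uuid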
index a4a8a907af752b103f74958fe10d3240e344d368..1d1c6e28e076ecae8b97ce72c7396cabc26a384e 100644 (file)
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright 2013 Nicira, Inc.
@@ -24,50 +23,24 @@ import sys
 from oslo.config import cfg
 
 from quantum.common import config
-from quantum.plugins.nicira import NvpApiClient
+from quantum.plugins.nicira.common import config as nvp_cfg  # noqa
 from quantum.plugins.nicira import nvplib
 from quantum.plugins.nicira import QuantumPlugin
 
 config.setup_logging(cfg.CONF)
 
 
-def help():
-    print "Usage ./check_nvp_config path/to/nvp.ini"
-    exit(1)
-
-
-def display_controller_info(controller):
-    print "\tCan login: %s" % controller.get('can_login')
-    print "\tuser: %s" % controller.get('user')
-    print "\tpassword: %s" % controller.get('password')
-    print "\tip: %s" % controller.get('ip')
-    print "\tport: %s" % controller.get('port')
-    print "\trequested_timeout: %s" % controller.get('requested_timeout')
-    print "\tretires: %s" % controller.get('retries')
-    print "\tredirects: %s" % controller.get('redirects')
-    print "\thttp_timeout: %s" % controller.get('http_timeout')
-
-
-def test_controller(cluster, controller):
-    api_providers = [(controller.get('ip'), controller.get('port'), True)]
-    api_client = NvpApiClient.NVPApiHelper(
-        api_providers, cluster.user, cluster.password,
-        controller.get('requested_timeout'),
-        controller.get('http_timeout'),
-        controller.get('retries'),
-        controller.get('redirects'))
-
-    controller['can_login'] = (api_client.login() and True or False)
+def help(name):
+    print "Usage: %s path/to/nvp.ini" % name
+    sys.exit(1)
 
 
 def get_gateway_services(cluster):
     ret_gw_services = {"L2GatewayServiceConfig": [],
                        "L3GatewayServiceConfig": []}
-    gw_services = nvplib.get_gateway_services(cluster).get('results')
-    if gw_services:
-        for gw_service in gw_services:
-            ret_gw_services[gw_service['type']].append(gw_service['uuid'])
-
+    gw_services = nvplib.get_gateway_services(cluster).get('results', [])
+    for gw_service in gw_services:
+        ret_gw_services[gw_service['type']].append(gw_service['uuid'])
     return ret_gw_services
 
 
@@ -77,77 +50,46 @@ def get_transport_zones(cluster):
 
 
 def main(argv):
-    if len(sys.argv) != 2:
-        help()
+    if len(argv) != 2:
+        help(argv[0])
     args = ['--config-file']
-    args.append(sys.argv[1])
+    args.append(argv[1])
     config.parse(args)
-    errors = False
-    nvp_opts, clusters_opts = QuantumPlugin.parse_config()
-    print "-----------Database Options--------------------"
-    print "sql_connection: %s" % cfg.CONF.DATABASE.sql_connection
-    print "reconnect_interval: %d" % cfg.CONF.DATABASE.reconnect_interval
-    print "sql_max_retries: %d" % cfg.CONF.DATABASE.sql_max_retries
-    print "-----------NVP Options--------------------"
-    print ("Number of concurrents allow to each controller %d" %
-           nvp_opts.concurrent_connections)
-    print "NVP Generation Timeout %d" % nvp_opts.nvp_gen_timeout
-    print "NVP Default Cluster Name %s" % nvp_opts.default_cluster_name
-
-    print "-----------Cluster Options--------------------"
-    if not clusters_opts:
-        print "No NVP Clusters detected in nvp.ini!"
-        exit(1)
-    clusters, default_cluster = QuantumPlugin.parse_clusters_opts(
-        clusters_opts, nvp_opts.concurrent_connections,
-        nvp_opts.nvp_gen_timeout, nvp_opts.default_cluster_name)
-    for cluster in clusters.itervalues():
-        num_controllers = cluster.get_num_controllers()
-        print "\n%d controllers found in cluster [CLUSTER:%s]" % (
-            num_controllers, cluster.name)
-        if num_controllers == 0:
-            print ("Cluster %s has no nvp_controller_connection defined!" %
-                   cluster.name)
-            exit(1)
-
-        for i in range(0, num_controllers):
-            controller = cluster.get_controller(i)
-            if i == 0:
-                gateway_services = get_gateway_services(cluster)
-                transport_zones = get_transport_zones(cluster)
-                controller.update(nvplib.check_cluster_connectivity(cluster))
-                default_tz_zone = controller.get('default_tz_uuid')
-                print ("\n\tdefault_tz_uuid: %s" % default_tz_zone)
-                if not default_tz_zone:
-                    print "\t* ERROR: No default trasport zone specified!"
-                    errors = True
-                elif default_tz_zone not in transport_zones:
-                    print ("\t* ERROR: did not find default transport %s zone "
-                           "on NVP!" % default_tz_zone)
-                    errors = True
-                print ("\tapi_redirect_interval: %s" %
-                       controller.get('api_redirect_interval'))
-                print "\tcluster uuid: %s" % controller.get('uuid')
-                print "\tapi_mode: %s" % controller.get('api_mode')
-                l2_gateway = controller.get('default_l2_gw_service_uuid')
-                print ("\tdefault_l2_gw_service_uuid: %s" % l2_gateway)
-                if (l2_gateway and l2_gateway not in
-                        gateway_services['L2GatewayServiceConfig']):
-                    print ("\t* ERROR: Did not find L2 gateway service uuid %s"
-                           " in NVP!" % l2_gateway)
-                    errors = True
-                l3_gateway = controller.get('default_l3_gw_service_uuid')
-                print ("\tdefault_l3_gw_service_uuid: %s" % l3_gateway)
-                if (l3_gateway and l3_gateway not in
-                        gateway_services['L3GatewayServiceConfig']):
-                    print ("\t* ERROR did not find L3 gateway service uuid %s"
-                           " in NVP!" % l3_gateway)
-                    errors = True
-            print ("\n-----controller %d------\n" % (i + 1))
-            test_controller(cluster, controller)
-            display_controller_info(controller)
-        print "\n"
-    if errors:
-        print ("**There were configuration errors found "
-               "please review output carefully!!**")
-        print "\n"
+    print "------------------------ Database Options ------------------------"
+    print "\tsql_connection: %s" % cfg.CONF.DATABASE.sql_connection
+    print "\treconnect_interval: %d" % cfg.CONF.DATABASE.reconnect_interval
+    print "\tsql_max_retries: %d" % cfg.CONF.DATABASE.sql_max_retries
+    print "------------------------    NVP Options   ------------------------"
+    print "\tNVP Generation Timeout %d" % cfg.CONF.NVP.nvp_gen_timeout
+    print ("\tNumber of concurrent connections to each controller %d" %
+           cfg.CONF.NVP.concurrent_connections)
+    print "\tmax_lp_per_bridged_ls: %s" % cfg.CONF.NVP.max_lp_per_bridged_ls
+    print "\tmax_lp_per_overlay_ls: %s" % cfg.CONF.NVP.max_lp_per_overlay_ls
+    print ("\tenable_metadata_access_network: %s" %
+           cfg.CONF.NVP.enable_metadata_access_network)
+    print "------------------------  Cluster Options ------------------------"
+    print "\trequested_timeout: %s" % cfg.CONF.req_timeout
+    print "\tretries: %s" % cfg.CONF.retries
+    print "\tredirects: %s" % cfg.CONF.redirects
+    print "\thttp_timeout: %s" % cfg.CONF.http_timeout
+    cluster = QuantumPlugin.create_nvp_cluster(
+        cfg.CONF,
+        cfg.CONF.NVP.concurrent_connections,
+        cfg.CONF.NVP.nvp_gen_timeout)
+    num_controllers = len(cluster.nvp_controllers)
+    print "Number of controllers found: %s" % num_controllers
+    if num_controllers == 0:
+        print "You must specify at least one controller!"
+        sys.exit(1)
+
+    for controller in cluster.nvp_controllers:
+        print "\tController endpoint: %s" % controller
+        nvplib.check_cluster_connectivity(cluster)
+        gateway_services = get_gateway_services(cluster)
+        for svc_type in ["L2GatewayServiceConfig",
+                         "L3GatewayServiceConfig"]:
+            for uuid in gateway_services[svc_type]:
+                print "\t\tGateway(%s) uuid: %s" % (svc_type, uuid)
+        transport_zones = get_transport_zones(cluster)
+        print "\tTransport zones: %s" % transport_zones
+    print "Done."
index c2960292573a170028489f6cec54df7a0dbb1f1c..cc42f49a878e2ef7de8ac9fe7c4f143fca0af450 100644 (file)
@@ -18,10 +18,6 @@ from oslo.config import cfg
 
 from quantum import scheduler
 
-core_opts = [
-    cfg.BoolOpt('metadata_dhcp_host_route', default=False),
-]
-
 nvp_opts = [
     cfg.IntOpt('max_lp_per_bridged_ls', default=64,
                help=_("Maximum number of ports of a logical switch on a "
@@ -34,8 +30,6 @@ nvp_opts = [
     cfg.IntOpt('nvp_gen_timeout', default=-1,
                help=_("Number of seconds a generation id should be valid for "
                       "(default -1 meaning do not time out)")),
-    cfg.StrOpt('default_cluster_name',
-               help=_("Default cluster name")),
     cfg.StrOpt('metadata_mode', default='access_network',
                help=_("If set to access_network this enables a dedicated "
                       "connection to the metadata proxy for metadata server "
@@ -44,6 +38,33 @@ nvp_opts = [
                       "This option is only useful if running on a host that "
                       "does not support namespaces otherwise access_network "
                       "should be used.")),
+    cfg.BoolOpt('enable_metadata_access_network', default=True,
+                help=_("Enables dedicated connection to the metadata proxy "
+                       "for metadata server access via Quantum router")),
+]
+
+connection_opts = [
+    cfg.StrOpt('nvp_user',
+               default='admin',
+               help=_('User name for NVP controllers in this cluster')),
+    cfg.StrOpt('nvp_password',
+               default='admin',
+               secret=True,
+               help=_('Password for NVP controllers in this cluster')),
+    cfg.IntOpt('req_timeout',
+               default=30,
+               help=_('Total time limit for a cluster request')),
+    cfg.IntOpt('http_timeout',
+               default=10,
+               help=_('Time before aborting a request')),
+    cfg.IntOpt('retries',
+               default=2,
+               help=_('Number of times a request should be retried')),
+    cfg.IntOpt('redirects',
+               default=2,
+               help=_('Number of times a redirect should be followed')),
+    cfg.ListOpt('nvp_controllers',
+                help=_("Lists the NVP controllers in this cluster")),
 ]
 
 cluster_opts = [
@@ -56,15 +77,6 @@ cluster_opts = [
                help=_("Optional paramter identifying the UUID of the cluster "
                       "in NVP.  This can be retrieved from NVP management "
                       "console \"admin\" section.")),
-    cfg.StrOpt('nova_zone_id',
-               help=_("Optional parameter identifying the Nova \"zone\" that "
-                      "maps to this NVP cluster.")),
-    cfg.MultiStrOpt('nvp_controller_connection',
-                    help=_("Describes a connection to a single NVP "
-                           "controller. A different connection for each "
-                           "controller in the cluster can be specified; "
-                           "there must be at least one connection per "
-                           "cluster.")),
     cfg.StrOpt('default_l3_gw_service_uuid',
                help=_("Unique identifier of the NVP L3 Gateway service "
                       "which will be used for implementing routers and "
@@ -79,100 +91,32 @@ cluster_opts = [
 ]
 
 # Register the configuration options
-cfg.CONF.register_opts(core_opts)
+cfg.CONF.register_opts(connection_opts)
+cfg.CONF.register_opts(cluster_opts)
 cfg.CONF.register_opts(nvp_opts, "NVP")
 cfg.CONF.register_opts(scheduler.AGENTS_SCHEDULER_OPTS)
-
-
-class ClusterConfigOptions(cfg.ConfigOpts):
-
-    def __init__(self, config_options):
-        super(ClusterConfigOptions, self).__init__()
-        self._group_mappings = {}
-        self._config_opts = config_options._config_opts
-        self._cparser = config_options._cparser
-        self._oparser = config_options._oparser
-        self.register_cli_opts(self._config_opts)
-
-    def _do_get(self, name, group=None):
-        """Look up an option value.
-
-        :param name: the opt name (or 'dest', more precisely)
-        :param group: an OptGroup
-        :returns: the option value, or a GroupAttr object
-        :raises: NoSuchOptError, NoSuchGroupError, ConfigFileValueError,
-                 TemplateSubstitutionError
-        """
-        if group is None and name in self._groups:
-            return self.GroupAttr(self, self._get_group(name))
-
-        info = self._get_opt_info(name, group)
-        opt = info['opt']
-
-        if 'override' in info:
-            return info['override']
-
-        values = []
-        if self._cparser is not None:
-            section = group.name if group is not None else 'DEFAULT'
-            # Check if the name of the group maps to something else in
-            # the conf file.Otherwise leave the section name unchanged
-
-            section = self._group_mappings.get(section, section)
-            try:
-                value = opt._get_from_config_parser(self._cparser, section)
-            except KeyError:
-                pass
-            except ValueError as ve:
-                raise cfg.ConfigFileValueError(str(ve))
-            else:
-                if not opt.multi:
-                    # No need to continue since the last value wins
-                    return value[-1]
-                values.extend(value)
-
-        name = name if group is None else group.name + '_' + name
-        value = self._cli_values.get(name)
-        if value is not None:
-            if not opt.multi:
-                return value
-
-            return value + values
-
-        if values:
-            return values
-
-        if 'default' in info:
-            return info['default']
-
-        return opt.default
-
-    def register_opts(self, opts, group_internal_name=None, group=None):
-        """Register multiple option schemas at once."""
-        if group_internal_name:
-            self._group_mappings[group] = group_internal_name
-        for opt in opts:
-            self.register_opt(opt, group, clear_cache=False)
-
-
-def _retrieve_extra_groups(conf, key=None, delimiter=':'):
-    """retrieve configuration groups not listed above."""
-    results = []
-    for parsed_file in cfg.CONF._cparser.parsed:
-        for parsed_item in parsed_file.keys():
-            if parsed_item not in cfg.CONF:
-                items = key and parsed_item.split(delimiter)
-                if not key or key == items[0]:
-                    results.append(parsed_item)
-    return results
-
-
-def register_cluster_groups(conf):
-    """retrieve configuration groups for nvp clusters."""
-    cluster_names = []
-    cluster_tags = _retrieve_extra_groups(conf, "CLUSTER")
-    for tag in cluster_tags:
-        cluster_name = tag.split(':')[1]
-        conf.register_opts(cluster_opts, tag, cluster_name)
-        cluster_names.append(cluster_name)
-    return cluster_names
+# NOTE(armando-migliaccio): keep the following code until we support
+# NVP configuration files in older format (Grizzly or older).
+# ### BEGIN
+controller_depr = cfg.MultiStrOpt('nvp_controller_connection',
+                                  help=_("Describes a connection to a single "
+                                         "controller. A different connection "
+                                         "for each controller in the cluster "
+                                         "can be specified; there must be at "
+                                         "least one connection per cluster."))
+
+host_route_depr = cfg.BoolOpt('metadata_dhcp_host_route', default=None)
+
+
+def register_deprecated(conf):
+    conf.register_opts([host_route_depr])
+    multi_parser = cfg.MultiConfigParser()
+    read_ok = multi_parser.read(conf.config_file)
+    if len(read_ok) != len(conf.config_file):
+        raise cfg.Error("Some config files were not parsed properly")
+
+    for parsed_file in multi_parser.parsed:
+        for section in parsed_file.keys():
+            if section not in conf and section.startswith("CLUSTER:"):
+                conf.register_opts(cluster_opts + [controller_depr], section)
+# ### END
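To illustrate the backward-compatibility path, here is a minimal sketch
(assuming register_deprecated() is invoked at plugin start-up; the file
path is illustrative) of how an old-style [CLUSTER:name] section becomes
reachable again:

    from oslo.config import cfg

    from quantum.plugins.nicira.common import config as nvp_cfg

    # parse a Grizzly-era ini file containing a [CLUSTER:fake] section
    cfg.CONF(['--config-file', '/etc/quantum/plugins/nicira/nvp.ini'])
    nvp_cfg.register_deprecated(cfg.CONF)
    # every [CLUSTER:name] section found in the parsed files now exposes
    # cluster_opts plus the deprecated nvp_controller_connection, e.g.
    # cfg.CONF['CLUSTER:fake'].nvp_controller_connection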
index db9f9c3d39ab750501601edec953914c9a6b2525..9d62aa336fd068438c8a7b2b24ee344d155fc0c1 100644 (file)
@@ -28,6 +28,12 @@ class NvpInvalidConnection(NvpPluginException):
     message = _("Invalid NVP connection parameters: %(conn_params)s")
 
 
+class NvpInvalidClusterConfiguration(NvpPluginException):
+    message = _("Invalid cluster values: %(invalid_attrs)s. Please ensure "
+                "that these values are specified in the [DEFAULT] "
+                "section of the nvp plugin ini file.")
+
+
 class NvpInvalidNovaZone(NvpPluginException):
     message = _("Unable to find cluster config entry "
                 "for nova zone: %(nova_zone)s")
index d5586ae379f6c7eef6913aaefa2390228ee28766..9fadbe709c9e394273f06c474961f531dc02fc1b 100644 (file)
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 #
 
-import re
+from oslo.config import cfg
 
-from quantum.api.v2 import attributes
 from quantum.openstack.common import log as logging
+from quantum.plugins.nicira.common import exceptions
 
 LOG = logging.getLogger(__name__)
+DEFAULT_PORT = 443
+# Raise if one of these attributes is not specified
+REQUIRED_ATTRIBUTES = ['default_tz_uuid', 'nvp_user',
+                       'nvp_password', 'nvp_controllers']
+# Emit an INFO log if one of these attributes is not specified
+IMPORTANT_ATTRIBUTES = ['default_l3_gw_service_uuid']
+# Deprecated attributes
+DEPRECATED_ATTRIBUTES = ['metadata_dhcp_host_route',
+                         'nvp_controller_connection']
 
 
 class NVPCluster(object):
-    """Encapsulates controller connection and api_client for a cluster.
+    """Encapsulates controller connections and the Api client for a
+    NVP cluster. Accessed within the NvpPluginV2 class.
 
-    Accessed within the NvpPluginV2 class.
-
-    Each element in the self.controllers list is a dictionary that
-    contains the following keys:
-        ip, port, user, password, default_tz_uuid, uuid, zone
-
-    There may be some redundancy here, but that has been done to provide
-    future flexibility.
+    Controller-specific parameters, such as timeouts, are stored in the
+    elements of the controllers attribute, which are dicts.
     """
 
-    def __init__(self, name):
-        self._name = name
-        self.controllers = []
+    def __init__(self, **kwargs):
+        self._required_attributes = REQUIRED_ATTRIBUTES[:]
+        self._important_attributes = IMPORTANT_ATTRIBUTES[:]
+        self._deprecated_attributes = {}
+        self._sanity_check(kwargs)
+
+        for opt, val in self._deprecated_attributes.iteritems():
+            LOG.deprecated(_("Attribute '%s' has been deprecated or moved "
+                             "to a new section. See new configuration file "
+                             "for details."), opt)
+            depr_func = getattr(self, '_process_%s' % opt, None)
+            if depr_func:
+                depr_func(val)
+
+        # If everything went according to plan, these two lists should be empty
+        if self._required_attributes:
+            raise exceptions.NvpInvalidClusterConfiguration(
+                invalid_attrs=self._required_attributes)
+        if self._important_attributes:
+            LOG.info(_("The following cluster attributes were "
+                       "not specified: %s'"), self._important_attributes)
+        # The API client will be explicitly created by users of this class
         self.api_client = None
 
-    def __repr__(self):
-        ss = ['{ "NVPCluster": [']
-        ss.append('{ "name" : "%s" }' % self.name)
-        ss.append(',')
-        for c in self.controllers:
-            ss.append(str(c))
-            ss.append(',')
-        ss.append('] }')
-        return ''.join(ss)
-
-    def add_controller(self, ip, port, user, password, request_timeout,
-                       http_timeout, retries, redirects, default_tz_uuid,
-                       uuid=None, zone=None, default_l3_gw_service_uuid=None,
-                       default_l2_gw_service_uuid=None,
-                       default_interface_name=None):
-        """Add a new set of controller parameters.
-
-        :param ip: IP address of controller.
-        :param port: port controller is listening on.
-        :param user: user name.
-        :param password: user password.
-        :param request_timeout: timeout for an entire API request.
-        :param http_timeout: timeout for a connect to a controller.
-        :param retries: maximum number of request retries.
-        :param redirects: maximum number of server redirect responses to
-            follow.
-        :param default_tz_uuid: default transport zone uuid.
-        :param uuid: UUID of this cluster (used in MDI configs).
-        :param zone: Zone of this cluster (used in MDI configs).
-        :param default_l3_gw_service_uuid: Default l3 gateway service
-        :param default_l2_gw_service_uuid: Default l2 gateway service
-        :param default_interface_name: Default interface name for l2 gateways
-        """
-
-        keys = ['ip', 'user', 'password', 'default_tz_uuid',
-                'default_l3_gw_service_uuid', 'default_l2_gw_service_uuid',
-                'default_interface_name', 'uuid', 'zone']
-        controller_dict = dict([(k, locals()[k]) for k in keys])
-        default_tz_uuid = controller_dict.get('default_tz_uuid')
-        if not re.match(attributes.UUID_PATTERN, default_tz_uuid):
-            LOG.warning(_("default_tz_uuid:%(default_tz_uuid)s is not a "
-                          "valid UUID in the cluster %(cluster_name)s. "
-                          "Network operations might not work "
-                          "properly in this cluster"),
-                        {'default_tz_uuid': default_tz_uuid,
-                         'cluster_name': self.name})
-        # default_l3_gw_service_uuid is an optional parameter
-        # validate only if specified
-        l3_gw_service_uuid = controller_dict.get('default_l3_gw_service_uuid')
-        if (l3_gw_service_uuid and
-            not re.match(attributes.UUID_PATTERN, l3_gw_service_uuid)):
-            LOG.warning(_("default_l3_gw_service_uuid:%(l3_gw_service_uuid)s "
-                          "is not a valid UUID in the cluster "
-                          "%(cluster_name)s. Logical router operations "
-                          "might not work properly in this cluster"),
-                        {'l3_gw_service_uuid': l3_gw_service_uuid,
-                         'cluster_name': self.name})
-        # default_l2_gw_node_uuid is an optional parameter
-        # validate only if specified
-        l2_gw_service_uuid = controller_dict.get('default_l2_gw_node_uuid')
-        if l2_gw_service_uuid and not re.match(attributes.UUID_PATTERN,
-                                               l2_gw_service_uuid):
-            LOG.warning(_("default_l2_gw_node_uuid:%(l2_gw_service_uuid)s "
-                          "is not a valid UUID in the cluster "
-                          "%(cluster_name)s."),
-                        {'l2_gw_service_uuid': l2_gw_service_uuid,
-                         'cluster_name': self.name})
-
-        int_keys = [
-            'port', 'request_timeout', 'http_timeout', 'retries', 'redirects']
-        for k in int_keys:
-            controller_dict[k] = int(locals()[k])
-
-        self.controllers.append(controller_dict)
-
-    def get_controller(self, idx):
-        return self.controllers[idx]
-
-    def get_num_controllers(self):
-        return len(self.controllers)
-
-    @property
-    def name(self):
-        return self._name
-
-    @name.setter
-    def name(self, val=None):
-        self._name = val
-
-    @property
-    def host(self):
-        return self.controllers[0]['ip']
-
-    @property
-    def port(self):
-        return self.controllers[0]['port']
-
-    @property
-    def user(self):
-        return self.controllers[0]['user']
-
-    @property
-    def password(self):
-        return self.controllers[0]['password']
-
-    @property
-    def request_timeout(self):
-        return self.controllers[0]['request_timeout']
-
-    @property
-    def http_timeout(self):
-        return self.controllers[0]['http_timeout']
-
-    @property
-    def retries(self):
-        return self.controllers[0]['retries']
-
-    @property
-    def redirects(self):
-        return self.controllers[0]['redirects']
-
-    @property
-    def default_tz_uuid(self):
-        return self.controllers[0]['default_tz_uuid']
-
-    @property
-    def default_l3_gw_service_uuid(self):
-        return self.controllers[0]['default_l3_gw_service_uuid']
-
-    @property
-    def default_l2_gw_service_uuid(self):
-        return self.controllers[0]['default_l2_gw_service_uuid']
-
-    @property
-    def default_interface_name(self):
-        return self.controllers[0]['default_interface_name']
-
-    @property
-    def zone(self):
-        return self.controllers[0]['zone']
-
-    @property
-    def uuid(self):
-        return self.controllers[0]['uuid']
+    def _sanity_check(self, options):
+        # Iterating this way ensures the conf parameters also
+        # define the structure of this class
+        for arg in cfg.CONF:
+            if arg not in DEPRECATED_ATTRIBUTES:
+                setattr(self, arg, options.get(arg, cfg.CONF.get(arg)))
+                self._process_attribute(arg)
+            elif options.get(arg) is not None:
+                # Process deprecated attributes only if specified
+                self._deprecated_attributes[arg] = options.get(arg)
+            if arg.startswith("CLUSTER:"):
+                cluster_section = cfg.CONF.get(arg)
+                for option in cluster_section:
+                    v = cluster_section.get(option)
+                    if option not in DEPRECATED_ATTRIBUTES:
+                        # option may be in dict, but with None value
+                        setattr(self, option, options.get(option) or v)
+                        self._process_attribute(option)
+                    else:
+                        self._deprecated_attributes[option] = v
+
+    def _process_attribute(self, attribute):
+        # Process the attribute only if it's not empty!
+        if getattr(self, attribute, None):
+            if attribute in self._required_attributes:
+                self._required_attributes.remove(attribute)
+            if attribute in self._important_attributes:
+                self._important_attributes.remove(attribute)
+            handler_func = getattr(self, '_process_%s' % attribute, None)
+            if handler_func:
+                handler_func()
+        else:
+            LOG.info(_("Attribute:%s is empty or null"), attribute)
+
+    def _process_nvp_controllers(self):
+        # If this raises, something is not right, so let it bubble up
+        # TODO(salvatore-orlando): Also validate attribute here
+        for i, ctrl in enumerate(self.nvp_controllers or []):
+            if len(ctrl.split(':')) == 1:
+                self.nvp_controllers[i] = '%s:%s' % (ctrl, DEFAULT_PORT)
+
+    def _process_nvp_controller_connection(self, connections):
+
+        def parse_conn_str(ip, port, user, password, req_timeout,
+                           http_timeout, retries, redirects):
+            # TODO(salvatore-orlando): Set the attributes only
+            # if correspondent non-deprecated options have been
+            # explicitly specified in the ini file
+            # TODO(salvatore-orlando): Validate data to avoid ugly ValueError
+            self.nvp_user = user
+            self._process_attribute('nvp_user')
+            self.nvp_password = password
+            self._process_attribute('nvp_password')
+            self.req_timeout = int(req_timeout)
+            self._process_attribute('req_timeout')
+            self.http_timeout = int(http_timeout)
+            self._process_attribute('http_timeout')
+            self.retries = int(retries)
+            self._process_attribute('retries')
+            self.redirects = int(redirects)
+            self._process_attribute('redirects')
+            try:
+                nvp_controllers = getattr(self, 'nvp_controllers')
+                nvp_controllers.append('%s:%s' % (ip, port))
+            except AttributeError:
+                self.nvp_controllers = ['%s:%s' % (ip, port)]
+                self._process_attribute('nvp_controllers')
+        for conn in connections:
+            parse_conn_str(*conn.split(':'))
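As a worked illustration of the deprecated format handled above (all
values fabricated), each nvp_controller_connection entry is a
colon-separated string whose fields follow the parse_conn_str()
signature:

    # ip:port:user:password:req_timeout:http_timeout:retries:redirects
    conn = '192.168.0.10:443:admin:secret:30:10:2:2'
    (ip, port, user, password,
     req_timeout, http_timeout, retries, redirects) = conn.split(':')
    # _process_nvp_controller_connection() would then set
    # nvp_user='admin', req_timeout=30, and so on, and append
    # '192.168.0.10:443' to nvp_controllers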
diff --git a/quantum/tests/unit/nicira/etc/nvp.ini.basic.test b/quantum/tests/unit/nicira/etc/nvp.ini.basic.test
new file mode 100644 (file)
index 0000000..b9daa50
--- /dev/null
@@ -0,0 +1,5 @@
+[DEFAULT]
+default_tz_uuid = fake_tz_uuid
+nvp_controllers=fake_1,fake_2
+nvp_user=foo
+nvp_password=bar
diff --git a/quantum/tests/unit/nicira/etc/nvp.ini.full.test b/quantum/tests/unit/nicira/etc/nvp.ini.full.test
new file mode 100644 (file)
index 0000000..fdcdcb8
--- /dev/null
@@ -0,0 +1,14 @@
+[DEFAULT]
+default_tz_uuid = fake_tz_uuid
+nova_zone_id = whatever
+nvp_cluster_uuid = fake_cluster_uuid
+nvp_controllers = fake_1, fake_2
+nvp_user = foo
+nvp_password = bar
+default_l3_gw_service_uuid = whatever
+default_l2_gw_service_uuid = whatever
+default_interface_name = whatever
+req_timeout = 14
+http_timeout = 13
+redirects = 12
+retries = 11
diff --git a/quantum/tests/unit/nicira/etc/nvp.ini.grizzly.test b/quantum/tests/unit/nicira/etc/nvp.ini.grizzly.test
new file mode 100644 (file)
index 0000000..c853d8c
--- /dev/null
@@ -0,0 +1,11 @@
+[DEFAULT]
+metadata_dhcp_host_route = False
+
+[CLUSTER:fake]
+default_tz_uuid = fake_tz_uuid
+nova_zone_id = whatever
+nvp_cluster_uuid = fake_cluster_uuid
+nvp_controller_connection=fake_1:443:foo:bar:4:3:2:1
+nvp_controller_connection=fake_2:443:foo:bar:4:3:2:1
+default_l3_gw_service_uuid = whatever
+default_l2_gw_service_uuid = whatever
index d3b832309a2ee1daf5d53389bcf5bf65927e69f8..f59b71f45f1cfba99222dbdee14bffc7c373ca84 100644 (file)
@@ -1,9 +1,7 @@
 [DEFAULT]
-
-[CLUSTER:fake]
 default_tz_uuid = fake_tz_uuid
-nova_zone_id = whatever
-nvp_cluster_uuid = fake_cluster_uuid
-nvp_controller_connection=fake:443:admin:admin:30:10:2:2
+nvp_controllers=fake_1, fake_2
+nvp_user=foo
+nvp_password=bar
 default_l3_gw_service_uuid = whatever
 default_l2_gw_service_uuid = whatever
diff --git a/quantum/tests/unit/nicira/test_defaults.py b/quantum/tests/unit/nicira/test_defaults.py
deleted file mode 100644 (file)
index 7cabf86..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2013 Nicira Networks, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from oslo.config import cfg
-
-from quantum.plugins.nicira.common import config  # noqa
-from quantum.tests import base
-
-
-class ConfigurationTest(base.BaseTestCase):
-
-    def test_defaults(self):
-        self.assertEqual(64, cfg.CONF.NVP.max_lp_per_bridged_ls)
-        self.assertEqual(256, cfg.CONF.NVP.max_lp_per_overlay_ls)
-        self.assertEqual(5, cfg.CONF.NVP.concurrent_connections)
index f7667974b481b34b3e97e398ac57e026eec6c8b7..506b1c1497ae24938e78bf319f2033b7800531a0 100644 (file)
@@ -127,7 +127,7 @@ class TestNiciraPortsV2(test_plugin.TestPortsV2, NiciraPluginV2TestCase):
                 with self.port(subnet=sub):
                     with self.port(subnet=sub):
                         plugin = manager.QuantumManager.get_plugin()
-                        ls = nvplib.get_lswitches(plugin.default_cluster,
+                        ls = nvplib.get_lswitches(plugin.cluster,
                                                   net['network']['id'])
                         self.assertEqual(len(ls), 2)
 
index 0895585f98f56c0db3277558314578eaa2ef8b58..8628aab350d69847cd3f48f0725f58b700b45d3b 100644 (file)
@@ -46,13 +46,13 @@ class NvplibTestCase(base.BaseTestCase):
             return self.fc.fake_request(*args, **kwargs)
 
         instance.return_value.request.side_effect = _fake_request
-        self.fake_cluster = nvp_cluster.NVPCluster('fake-cluster')
-        self.fake_cluster.add_controller('1.1.1.1', '999', 'foo', 'bar',
-                                         9, 9, 9, 9, _uuid())
+        self.fake_cluster = nvp_cluster.NVPCluster(
+            name='fake-cluster', nvp_controllers=['1.1.1.1:999'],
+            default_tz_uuid=_uuid(), nvp_user='foo', nvp_password='bar')
         self.fake_cluster.api_client = NvpApiClient.NVPApiHelper(
             ('1.1.1.1', '999', True),
-            self.fake_cluster.user, self.fake_cluster.password,
-            self.fake_cluster.request_timeout, self.fake_cluster.http_timeout,
+            self.fake_cluster.nvp_user, self.fake_cluster.nvp_password,
+            self.fake_cluster.req_timeout, self.fake_cluster.http_timeout,
             self.fake_cluster.retries, self.fake_cluster.redirects)
 
         super(NvplibTestCase, self).setUp()
diff --git a/quantum/tests/unit/nicira/test_nvpopts.py b/quantum/tests/unit/nicira/test_nvpopts.py
new file mode 100644 (file)
index 0000000..2f6fc68
--- /dev/null
@@ -0,0 +1,164 @@
+# Copyright 2013 Nicira Networks, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+import fixtures
+import os
+import testtools
+
+from oslo.config import cfg
+
+from quantum.common import config as q_config
+from quantum.manager import QuantumManager
+from quantum.openstack.common import uuidutils
+from quantum.plugins.nicira.common import config  # noqa
+from quantum.plugins.nicira.common import exceptions
+from quantum.plugins.nicira import nvp_cluster
+
+BASE_CONF_PATH = os.path.join(os.path.dirname(__file__),
+                              '../../etc/quantum.conf.test')
+NVP_INI_PATH = os.path.join(os.path.dirname(__file__),
+                            'etc/nvp.ini.basic.test')
+NVP_INI_FULL_PATH = os.path.join(os.path.dirname(__file__),
+                                 'etc/nvp.ini.full.test')
+NVP_INI_DEPR_PATH = os.path.join(os.path.dirname(__file__),
+                                 'etc/nvp.ini.grizzly.test')
+NVP_PLUGIN_PATH = ('quantum.plugins.nicira.nicira_nvp_plugin.'
+                   'QuantumPlugin.NvpPluginV2')
+
+
+class NVPClusterTest(testtools.TestCase):
+
+    cluster_opts = {'default_tz_uuid': uuidutils.generate_uuid(),
+                    'default_l2_gw_service_uuid': uuidutils.generate_uuid(),
+                    'default_l3_gw_service_uuid': uuidutils.generate_uuid(),
+                    'nvp_cluster_uuid': uuidutils.generate_uuid(),
+                    'nvp_user': 'foo',
+                    'nvp_password': 'bar',
+                    'req_timeout': 45,
+                    'http_timeout': 25,
+                    'retries': 7,
+                    'redirects': 23,
+                    'default_interface_name': 'baz',
+                    'nvp_controllers': ['1.1.1.1:443']}
+
+    def setUp(self):
+        super(NVPClusterTest, self).setUp()
+        self.addCleanup(cfg.CONF.reset)
+
+    def test_create_cluster(self):
+        cluster = nvp_cluster.NVPCluster(**self.cluster_opts)
+        for (k, v) in self.cluster_opts.iteritems():
+            self.assertEqual(v, getattr(cluster, k))
+
+    def test_create_cluster_default_port(self):
+        opts = self.cluster_opts.copy()
+        opts['nvp_controllers'] = ['1.1.1.1']
+        cluster = nvp_cluster.NVPCluster(**opts)
+        for (k, v) in self.cluster_opts.iteritems():
+            self.assertEqual(v, getattr(cluster, k))
+
+    def test_create_cluster_missing_required_attribute_raises(self):
+        opts = self.cluster_opts.copy()
+        opts.pop('default_tz_uuid')
+        self.assertRaises(exceptions.NvpInvalidClusterConfiguration,
+                          nvp_cluster.NVPCluster, **opts)
+
+
+class ConfigurationTest(testtools.TestCase):
+
+    def setUp(self):
+        super(ConfigurationTest, self).setUp()
+        self.addCleanup(cfg.CONF.reset)
+        self.useFixture(fixtures.MonkeyPatch(
+                        'quantum.manager.QuantumManager._instance',
+                        None))
+
+    def _assert_required_options(self, cluster):
+        self.assertEqual(cluster.nvp_controllers, ['fake_1:443', 'fake_2:443'])
+        self.assertEqual(cluster.default_tz_uuid, 'fake_tz_uuid')
+        self.assertEqual(cluster.nvp_user, 'foo')
+        self.assertEqual(cluster.nvp_password, 'bar')
+
+    def _assert_extra_options(self, cluster):
+        self.assertEqual(14, cluster.req_timeout)
+        self.assertEqual(13, cluster.http_timeout)
+        self.assertEqual(12, cluster.redirects)
+        self.assertEqual(11, cluster.retries)
+        self.assertEqual('whatever', cluster.default_l2_gw_service_uuid)
+        self.assertEqual('whatever', cluster.default_l3_gw_service_uuid)
+        self.assertEqual('whatever', cluster.default_interface_name)
+
+    def test_load_plugin_with_full_options(self):
+        q_config.parse(['--config-file', BASE_CONF_PATH,
+                        '--config-file', NVP_INI_FULL_PATH])
+        cfg.CONF.set_override('core_plugin', NVP_PLUGIN_PATH)
+        plugin = QuantumManager().get_plugin()
+        cluster = plugin.cluster
+        self._assert_required_options(cluster)
+        self._assert_extra_options(cluster)
+
+    def test_load_plugin_with_required_options_only(self):
+        q_config.parse(['--config-file', BASE_CONF_PATH,
+                        '--config-file', NVP_INI_PATH])
+        cfg.CONF.set_override('core_plugin', NVP_PLUGIN_PATH)
+        plugin = QuantumManager().get_plugin()
+        self._assert_required_options(plugin.cluster)
+
+    def test_defaults(self):
+        self.assertEqual(64, cfg.CONF.NVP.max_lp_per_bridged_ls)
+        self.assertEqual(256, cfg.CONF.NVP.max_lp_per_overlay_ls)
+        self.assertEqual(5, cfg.CONF.NVP.concurrent_connections)
+
+        self.assertIsNone(cfg.CONF.default_tz_uuid)
+        self.assertIsNone(cfg.CONF.nvp_cluster_uuid)
+        self.assertEqual('admin', cfg.CONF.nvp_user)
+        self.assertEqual('admin', cfg.CONF.nvp_password)
+        self.assertEqual(30, cfg.CONF.req_timeout)
+        self.assertEqual(10, cfg.CONF.http_timeout)
+        self.assertEqual(2, cfg.CONF.retries)
+        self.assertEqual(2, cfg.CONF.redirects)
+        self.assertIsNone(cfg.CONF.nvp_controllers)
+        self.assertIsNone(cfg.CONF.default_l3_gw_service_uuid)
+        self.assertIsNone(cfg.CONF.default_l2_gw_service_uuid)
+        self.assertEqual('breth0', cfg.CONF.default_interface_name)
+
+
+class OldConfigurationTest(testtools.TestCase):
+
+    def setUp(self):
+        super(OldConfigurationTest, self).setUp()
+        self.addCleanup(cfg.CONF.reset)
+        self.useFixture(fixtures.MonkeyPatch(
+                        'quantum.manager.QuantumManager._instance',
+                        None))
+
+    def _assert_required_options(self, cluster):
+        self.assertEqual(cluster.nvp_controllers, ['fake_1:443', 'fake_2:443'])
+        self.assertEqual(cluster.default_tz_uuid, 'fake_tz_uuid')
+        self.assertEqual(cluster.nvp_user, 'foo')
+        self.assertEqual(cluster.nvp_password, 'bar')
+
+    def test_load_plugin_with_deprecated_options(self):
+        q_config.parse(['--config-file', BASE_CONF_PATH,
+                        '--config-file', NVP_INI_DEPR_PATH])
+        cfg.CONF.set_override('core_plugin', NVP_PLUGIN_PATH)
+        plugin = QuantumManager().get_plugin()
+        cluster = plugin.cluster
+        self._assert_required_options(cluster)
+        # Verify nvp_controller_connection has been fully parsed
+        self.assertEqual(4, cluster.req_timeout)
+        self.assertEqual(3, cluster.http_timeout)
+        self.assertEqual(2, cluster.retries)
+        self.assertEqual(1, cluster.redirects)
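For completeness, a minimal sketch (values fabricated) of building the
new-style cluster directly, mirroring NVPClusterTest above; the config
module must be imported first so that the flat options are registered:

    from quantum.openstack.common import uuidutils
    from quantum.plugins.nicira.common import config  # noqa
    from quantum.plugins.nicira import nvp_cluster

    cluster = nvp_cluster.NVPCluster(
        default_tz_uuid=uuidutils.generate_uuid(),
        nvp_user='foo', nvp_password='bar',
        nvp_controllers=['10.0.0.1'])
    # a bare endpoint gets the default port (443) appended
    assert cluster.nvp_controllers == ['10.0.0.1:443']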