Add multiple provider network extension
author    Aaron Rosen <arosen@nicira.com>
Wed, 19 Jun 2013 23:09:05 +0000 (16:09 -0700)
committer Aaron Rosen <arosen@nicira.com>
Tue, 20 Aug 2013 17:30:08 +0000 (10:30 -0700)
This commit adds the ability to associate multiple provider
network segments with a single network.

Implements blueprint map-networks-to-multiple-provider-networks

Change-Id: I3c70fb2426899f728a401566debab7f66e7246bc
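
For illustration (not part of the commit message), a network create
request exercising the new 'segments' attribute could look like the
sketch below; it mirrors the data used in the new unit tests, and the
tenant id and physical network name are placeholders:

    from neutron.extensions import multiprovidernet as mpnet
    from neutron.extensions import providernet as pnet

    # Sketch only: body for a network create request binding the network
    # to two provider segments via the new 'segments' attribute.
    multi_provider_net = {
        'network': {
            'name': 'net1',
            mpnet.SEGMENTS: [
                {pnet.NETWORK_TYPE: 'vlan',
                 pnet.PHYSICAL_NETWORK: 'physnet1',
                 pnet.SEGMENTATION_ID: 1},
                {pnet.NETWORK_TYPE: 'stt',
                 pnet.PHYSICAL_NETWORK: 'physnet1'}],
            'tenant_id': 'tenant_one'}}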

etc/policy.json
neutron/db/migration/alembic_migrations/versions/3c6e57a23db4_add_multiprovider.py [new file with mode: 0644]
neutron/extensions/multiprovidernet.py [new file with mode: 0644]
neutron/plugins/nicira/NeutronPlugin.py
neutron/plugins/nicira/dbexts/nicira_db.py
neutron/plugins/nicira/dbexts/nicira_models.py
neutron/plugins/nicira/nvplib.py
neutron/tests/unit/nicira/test_nicira_plugin.py
neutron/tests/unit/nicira/test_nvplib.py

diff --git a/etc/policy.json b/etc/policy.json
index 403cd0201a59e8e939f89510cb99a23d6004b755..6310e2b136ffbf2d3741e4d5bf2f75c733a0227c 100644 (file)
     "create_network": "",
     "get_network": "rule:admin_or_owner or rule:shared or rule:external",
     "get_network:router:external": "rule:regular_user",
+    "get_network:segments": "rule:admin_only",
     "get_network:provider:network_type": "rule:admin_only",
     "get_network:provider:physical_network": "rule:admin_only",
     "get_network:provider:segmentation_id": "rule:admin_only",
     "get_network:queue_id": "rule:admin_only",
     "create_network:shared": "rule:admin_only",
     "create_network:router:external": "rule:admin_only",
+    "create_network:segments": "rule:admin_only",
     "create_network:provider:network_type": "rule:admin_only",
     "create_network:provider:physical_network": "rule:admin_only",
     "create_network:provider:segmentation_id": "rule:admin_only",
     "update_network": "rule:admin_or_owner",
+    "update_network:segments": "rule:admin_only",
     "update_network:provider:network_type": "rule:admin_only",
     "update_network:provider:physical_network": "rule:admin_only",
     "update_network:provider:segmentation_id": "rule:admin_only",
diff --git a/neutron/db/migration/alembic_migrations/versions/3c6e57a23db4_add_multiprovider.py b/neutron/db/migration/alembic_migrations/versions/3c6e57a23db4_add_multiprovider.py
new file mode 100644 (file)
index 0000000..72e7200
--- /dev/null
@@ -0,0 +1,102 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+"""add multiprovider
+
+Revision ID: 3c6e57a23db4
+Revises: 86cf4d88bd3
+Create Date: 2013-07-10 12:43:35.769283
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '3c6e57a23db4'
+down_revision = '86cf4d88bd3'
+
+# Change to ['*'] if this migration applies to all plugins
+
+migration_for_plugins = [
+    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2'
+]
+
+from alembic import op
+import sqlalchemy as sa
+
+from neutron.db import migration
+
+
+def upgrade(active_plugins=None, options=None):
+    if not migration.should_run(active_plugins, migration_for_plugins):
+        return
+
+    op.create_table(
+        'nvp_multi_provider_networks',
+        sa.Column('network_id', sa.String(length=36), nullable=False),
+        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
+                                ondelete='CASCADE'),
+        sa.PrimaryKeyConstraint('network_id'),
+        mysql_engine='InnoDB'
+    )
+    op.create_table('rename_nvp_network_bindings',
+                    sa.Column('network_id', sa.String(length=36),
+                              primary_key=True),
+                    sa.Column('binding_type',
+                              sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
+                                      name=(
+                                      'nvp_network_bindings_binding_type')),
+                              nullable=False, primary_key=True),
+                    sa.Column('phy_uuid', sa.String(36), primary_key=True,
+                              nullable=True),
+                    sa.Column('vlan_id', sa.Integer, primary_key=True,
+                              nullable=True, autoincrement=False))
+    # copy data from nvp_network_bindings into rename_nvp_network_bindings
+    op.execute("INSERT INTO rename_nvp_network_bindings SELECT network_id, "
+               "binding_type, phy_uuid, vlan_id from nvp_network_bindings")
+
+    op.drop_table('nvp_network_bindings')
+    op.rename_table('rename_nvp_network_bindings', 'nvp_network_bindings')
+
+
+def downgrade(active_plugins=None, options=None):
+    if not migration.should_run(active_plugins, migration_for_plugins):
+        return
+
+    # Delete the multi_provider_network entries from nvp_network_bindings
+    op.execute("DELETE from nvp_network_bindings WHERE network_id IN "
+               "(SELECT network_id from nvp_multi_provider_networks)")
+
+    # re-create the table with the previous schema
+    op.create_table('rename_nvp_network_bindings',
+                    sa.Column('network_id', sa.String(length=36),
+                              primary_key=True),
+                    sa.Column('binding_type',
+                              sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
+                                      name=(
+                                      'nvp_network_bindings_binding_type')),
+                              nullable=False),
+                    sa.Column('phy_uuid', sa.String(36),
+                              nullable=True),
+                    sa.Column('vlan_id', sa.Integer,
+                              nullable=True, autoincrement=False))
+
+    # copy data from nvp_network_bindings into rename_nvp_network_bindings
+    op.execute("INSERT INTO rename_nvp_network_bindings SELECT network_id, "
+               "binding_type, phy_uuid, vlan_id from nvp_network_bindings")
+
+    op.drop_table('nvp_network_bindings')
+    op.rename_table('rename_nvp_network_bindings', 'nvp_network_bindings')
+    op.drop_table('nvp_multi_provider_networks')
diff --git a/neutron/extensions/multiprovidernet.py b/neutron/extensions/multiprovidernet.py
new file mode 100644 (file)
index 0000000..3ed3f69
--- /dev/null
@@ -0,0 +1,116 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 OpenStack Foundation.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import webob.exc
+
+from neutron.api import extensions
+from neutron.api.v2 import attributes as attr
+from neutron.common import exceptions as qexception
+from neutron.extensions import providernet as pnet
+
+SEGMENTS = 'segments'
+
+
+class SegmentsSetInConjunctionWithProviders(qexception.InvalidInput):
+    message = _("Segments and provider values cannot both be set.")
+
+
+class SegmentsContainDuplicateEntry(qexception.InvalidInput):
+    message = _("Duplicate segment entry in request.")
+
+
+def _convert_and_validate_segments(segments, valid_values=None):
+    unique = set()
+    for segment in segments:
+        unique.add(tuple(segment.iteritems()))
+        network_type = segment.get(pnet.NETWORK_TYPE,
+                                   attr.ATTR_NOT_SPECIFIED)
+        segment[pnet.NETWORK_TYPE] = network_type
+        physical_network = segment.get(pnet.PHYSICAL_NETWORK,
+                                       attr.ATTR_NOT_SPECIFIED)
+        segment[pnet.PHYSICAL_NETWORK] = physical_network
+        segmentation_id = segment.get(pnet.SEGMENTATION_ID)
+        if segmentation_id:
+            segment[pnet.SEGMENTATION_ID] = attr.convert_to_int(
+                segmentation_id)
+        else:
+            segment[pnet.SEGMENTATION_ID] = attr.ATTR_NOT_SPECIFIED
+        if len(segment.keys()) != 3:
+            msg = (_("Unrecognized attribute(s) '%s'") %
+                   ', '.join(set(segment.keys()) -
+                             set([pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
+                                  pnet.SEGMENTATION_ID])))
+            raise webob.exc.HTTPBadRequest(msg)
+    if len(unique) != len(segments):
+        raise SegmentsContainDuplicateEntry()
+
+
+attr.validators['type:convert_segments'] = (
+    _convert_and_validate_segments)
+
+
+EXTENDED_ATTRIBUTES_2_0 = {
+    'networks': {
+        SEGMENTS: {'allow_post': True, 'allow_put': True,
+                   'validate': {'type:convert_segments': None},
+                   'convert_list_to': attr.convert_kvp_list_to_dict,
+                   'default': attr.ATTR_NOT_SPECIFIED,
+                   'enforce_policy': True,
+                   'is_visible': True},
+    }
+}
+
+
+class Multiprovidernet(extensions.ExtensionDescriptor):
+    """Extension class supporting multiple provider networks.
+
+    This class is used by neutron's extension framework to make
+    metadata about the multiple provider network extension available to
+    clients. No new resources are defined by this extension. Instead,
+    the existing network resource's request and response messages are
+    extended with attributes in the provider namespace.
+
+    With admin rights, network dictionaries returned will also include
+    provider attributes.
+    """
+
+    @classmethod
+    def get_name(cls):
+        return "Multi Provider Network"
+
+    @classmethod
+    def get_alias(cls):
+        return "multi-provider"
+
+    @classmethod
+    def get_description(cls):
+        return ("Expose mapping of virtual networks to multiple physical "
+                "networks")
+
+    @classmethod
+    def get_namespace(cls):
+        return "http://docs.openstack.org/ext/multi-provider/api/v1.0"
+
+    @classmethod
+    def get_updated(cls):
+        return "2013-06-27T10:00:00-00:00"
+
+    def get_extended_resources(self, version):
+        if version == "2.0":
+            return EXTENDED_ATTRIBUTES_2_0
+        else:
+            return {}
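
As an informal illustration of the validator above (not part of the
diff), a segment that omits the physical network and passes the
segmentation id as a string is normalized roughly as follows:

    from neutron.api.v2 import attributes as attr
    from neutron.extensions import multiprovidernet as mpnet
    from neutron.extensions import providernet as pnet

    # Sketch only: _convert_and_validate_segments fills unset keys with
    # ATTR_NOT_SPECIFIED, converts the segmentation id to an int, and
    # rejects unknown keys or duplicate segments.
    segments = [{pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: '1'}]
    mpnet._convert_and_validate_segments(segments)
    assert segments[0] == {pnet.NETWORK_TYPE: 'vlan',
                           pnet.PHYSICAL_NETWORK: attr.ATTR_NOT_SPECIFIED,
                           pnet.SEGMENTATION_ID: 1}
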
diff --git a/neutron/plugins/nicira/NeutronPlugin.py b/neutron/plugins/nicira/NeutronPlugin.py
index 8e9afae00cd63a3a7354e51f0897e3ee5567afd7..a08ccbcb307c6f06ab713ae7cf1f0fdb733479c2 100644 (file)
@@ -52,6 +52,7 @@ from neutron.db import quota_db  # noqa
 from neutron.db import securitygroups_db
 from neutron.extensions import extraroute
 from neutron.extensions import l3
+from neutron.extensions import multiprovidernet as mpnet
 from neutron.extensions import portbindings as pbin
 from neutron.extensions import portsecurity as psec
 from neutron.extensions import providernet as pnet
@@ -91,6 +92,7 @@ class NetworkTypes:
     GRE = 'gre'
     FLAT = 'flat'
     VLAN = 'vlan'
+    BRIDGE = 'bridge'
 
 
 def create_nvp_cluster(cluster_opts, concurrent_connections,
@@ -153,6 +155,7 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                                    "ext-gw-mode",
                                    "extraroute",
                                    "mac-learning",
+                                   "multi-provider",
                                    "network-gateway",
                                    "nvp-qos",
                                    "port-security",
@@ -401,18 +404,19 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
 
     def _nvp_find_lswitch_for_port(self, context, port_data):
         network = self._get_network(context, port_data['network_id'])
-        network_binding = nicira_db.get_network_binding(
+        network_bindings = nicira_db.get_network_bindings(
             context.session, port_data['network_id'])
         max_ports = self.nvp_opts.max_lp_per_overlay_ls
         allow_extra_lswitches = False
-        if (network_binding and
-            network_binding.binding_type in (NetworkTypes.FLAT,
-                                             NetworkTypes.VLAN)):
-            max_ports = self.nvp_opts.max_lp_per_bridged_ls
-            allow_extra_lswitches = True
+        for network_binding in network_bindings:
+            if network_binding.binding_type in (NetworkTypes.FLAT,
+                                                NetworkTypes.VLAN):
+                max_ports = self.nvp_opts.max_lp_per_bridged_ls
+                allow_extra_lswitches = True
+                break
         try:
             return self._handle_lswitch_selection(self.cluster, network,
-                                                  network_binding, max_ports,
+                                                  network_bindings, max_ports,
                                                   allow_extra_lswitches)
         except NvpApiClient.NvpApiException:
             err_desc = _("An exception occured while selecting logical "
@@ -761,76 +765,89 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                                nvp_exc.MaintenanceInProgress:
                                webob.exc.HTTPServiceUnavailable})
 
-    def _handle_provider_create(self, context, attrs):
-        # NOTE(salvatore-orlando): This method has been borrowed from
-        # the OpenvSwtich plugin, altough changed to match NVP specifics.
-        network_type = attrs.get(pnet.NETWORK_TYPE)
-        physical_network = attrs.get(pnet.PHYSICAL_NETWORK)
-        segmentation_id = attrs.get(pnet.SEGMENTATION_ID)
-        network_type_set = attr.is_attr_set(network_type)
-        physical_network_set = attr.is_attr_set(physical_network)
-        segmentation_id_set = attr.is_attr_set(segmentation_id)
-        if not (network_type_set or physical_network_set or
-                segmentation_id_set):
+    def _validate_provider_create(self, context, network):
+        if not attr.is_attr_set(network.get(mpnet.SEGMENTS)):
             return
 
-        err_msg = None
-        if not network_type_set:
-            err_msg = _("%s required") % pnet.NETWORK_TYPE
-        elif network_type in (NetworkTypes.GRE, NetworkTypes.STT,
-                              NetworkTypes.FLAT):
-            if segmentation_id_set:
-                err_msg = _("Segmentation ID cannot be specified with "
-                            "flat network type")
-        elif network_type == NetworkTypes.VLAN:
-            if not segmentation_id_set:
-                err_msg = _("Segmentation ID must be specified with "
-                            "vlan network type")
-            elif (segmentation_id_set and
-                  not utils.is_valid_vlan_tag(segmentation_id)):
-                err_msg = (_("%(segmentation_id)s out of range "
-                             "(%(min_id)s through %(max_id)s)") %
-                           {'segmentation_id': segmentation_id,
-                            'min_id': constants.MIN_VLAN_TAG,
-                            'max_id': constants.MAX_VLAN_TAG})
+        for segment in network[mpnet.SEGMENTS]:
+            network_type = segment.get(pnet.NETWORK_TYPE)
+            physical_network = segment.get(pnet.PHYSICAL_NETWORK)
+            segmentation_id = segment.get(pnet.SEGMENTATION_ID)
+            network_type_set = attr.is_attr_set(network_type)
+            segmentation_id_set = attr.is_attr_set(segmentation_id)
+
+            err_msg = None
+            if not network_type_set:
+                err_msg = _("%s required") % pnet.NETWORK_TYPE
+            elif network_type in (NetworkTypes.GRE, NetworkTypes.STT,
+                                  NetworkTypes.FLAT):
+                if segmentation_id_set:
+                    err_msg = _("Segmentation ID cannot be specified with "
+                                "flat network type")
+            elif network_type == NetworkTypes.VLAN:
+                if not segmentation_id_set:
+                    err_msg = _("Segmentation ID must be specified with "
+                                "vlan network type")
+                elif (segmentation_id_set and
+                      not utils.is_valid_vlan_tag(segmentation_id)):
+                    err_msg = (_("%(segmentation_id)s out of range "
+                                 "(%(min_id)s through %(max_id)s)") %
+                               {'segmentation_id': segmentation_id,
+                                'min_id': constants.MIN_VLAN_TAG,
+                                'max_id': constants.MAX_VLAN_TAG})
+                else:
+                    # Verify segment is not already allocated
+                    bindings = nicira_db.get_network_bindings_by_vlanid(
+                        context.session, segmentation_id)
+                    if bindings:
+                        raise q_exc.VlanIdInUse(
+                            vlan_id=segmentation_id,
+                            physical_network=physical_network)
+            elif network_type == NetworkTypes.L3_EXT:
+                if (segmentation_id_set and
+                    not utils.is_valid_vlan_tag(segmentation_id)):
+                    err_msg = (_("%(segmentation_id)s out of range "
+                                 "(%(min_id)s through %(max_id)s)") %
+                               {'segmentation_id': segmentation_id,
+                                'min_id': constants.MIN_VLAN_TAG,
+                                'max_id': constants.MAX_VLAN_TAG})
             else:
-                # Verify segment is not already allocated
-                binding = nicira_db.get_network_binding_by_vlanid(
-                    context.session, segmentation_id)
-                if binding:
-                    raise q_exc.VlanIdInUse(vlan_id=segmentation_id,
-                                            physical_network=physical_network)
-        elif network_type == NetworkTypes.L3_EXT:
-            if (segmentation_id_set and
-                not utils.is_valid_vlan_tag(segmentation_id)):
-                err_msg = (_("%(segmentation_id)s out of range "
-                             "(%(min_id)s through %(max_id)s)") %
-                           {'segmentation_id': segmentation_id,
-                            'min_id': constants.MIN_VLAN_TAG,
-                            'max_id': constants.MAX_VLAN_TAG})
-        else:
-            err_msg = _("%(net_type_param)s %(net_type_value)s not "
-                        "supported") % {'net_type_param': pnet.NETWORK_TYPE,
-                                        'net_type_value': network_type}
-        if err_msg:
-            raise q_exc.InvalidInput(error_message=err_msg)
-        # TODO(salvatore-orlando): Validate tranport zone uuid
-        # which should be specified in physical_network
-
-    def _extend_network_dict_provider(self, context, network, binding=None):
-        if not binding:
-            binding = nicira_db.get_network_binding(context.session,
-                                                    network['id'])
+                err_msg = (_("%(net_type_param)s %(net_type_value)s not "
+                             "supported") %
+                           {'net_type_param': pnet.NETWORK_TYPE,
+                            'net_type_value': network_type})
+            if err_msg:
+                raise q_exc.InvalidInput(error_message=err_msg)
+            # TODO(salvatore-orlando): Validate transport zone uuid
+            # which should be specified in physical_network
+
+    def _extend_network_dict_provider(self, context, network,
+                                      multiprovider=None, bindings=None):
+        if not bindings:
+            bindings = nicira_db.get_network_bindings(context.session,
+                                                      network['id'])
+        if not multiprovider:
+            multiprovider = nicira_db.is_multiprovider_network(context.session,
+                                                               network['id'])
         # With NVP plugin 'normal' overlay networks will have no binding
         # TODO(salvatore-orlando) make sure users can specify a distinct
         # phy_uuid as 'provider network' for STT net type
-        if binding:
-            network[pnet.NETWORK_TYPE] = binding.binding_type
-            network[pnet.PHYSICAL_NETWORK] = binding.phy_uuid
-            network[pnet.SEGMENTATION_ID] = binding.vlan_id
+        if bindings:
+            if not multiprovider:
+                # network came in through provider networks api
+                network[pnet.NETWORK_TYPE] = bindings[0].binding_type
+                network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid
+                network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id
+            else:
+                # network came in through the multiprovider networks api
+                network[mpnet.SEGMENTS] = [
+                    {pnet.NETWORK_TYPE: binding.binding_type,
+                     pnet.PHYSICAL_NETWORK: binding.phy_uuid,
+                     pnet.SEGMENTATION_ID: binding.vlan_id}
+                    for binding in bindings]
 
     def _handle_lswitch_selection(self, cluster, network,
-                                  network_binding, max_ports,
+                                  network_bindings, max_ports,
                                   allow_extra_lswitches):
         lswitches = nvplib.get_lswitches(cluster, network.id)
         try:
@@ -853,12 +870,12 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                                       main_ls[0]['display_name'],
                                       network['tenant_id'],
                                       tags=tags)
+            transport_zone_config = self._convert_to_nvp_transport_zones(
+                cluster, bindings=network_bindings)
             selected_lswitch = nvplib.create_lswitch(
                 cluster, network.tenant_id,
                 "%s-ext-%s" % (network.name, len(lswitches)),
-                network_binding.binding_type,
-                network_binding.phy_uuid,
-                network_binding.vlan_id,
+                transport_zone_config,
                 network.id)
             return selected_lswitch
         else:
@@ -878,12 +895,86 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         # Consume from all consumers in a thread
         self.conn.consume_in_thread()
 
+    def _convert_to_nvp_transport_zones(self, cluster, network=None,
+                                        bindings=None):
+        nvp_transport_zones_config = []
+
+        # Convert fields from provider request to nvp format
+        if (network and not attr.is_attr_set(
+            network.get(mpnet.SEGMENTS))):
+            return [{"zone_uuid": cluster.default_tz_uuid,
+                     "transport_type": cfg.CONF.NVP.default_transport_type}]
+
+        # Convert fields from db to nvp format
+        if bindings:
+            for binding in bindings:
+                transport_entry = {}
+                if binding.binding_type in [NetworkTypes.FLAT,
+                                            NetworkTypes.VLAN]:
+                    transport_entry['transport_type'] = NetworkTypes.BRIDGE
+                    transport_entry['binding_config'] = {}
+                    vlan_id = binding.vlan_id
+                    if vlan_id:
+                        transport_entry['binding_config'] = (
+                            {'vlan_translation': [{'transport': vlan_id}]})
+                else:
+                    transport_entry['transport_type'] = binding.binding_type
+                transport_entry['zone_uuid'] = binding.phy_uuid
+                nvp_transport_zones_config.append(transport_entry)
+            return nvp_transport_zones_config
+
+        for transport_zone in network.get(mpnet.SEGMENTS):
+            for value in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
+                          pnet.SEGMENTATION_ID]:
+                if transport_zone.get(value) == attr.ATTR_NOT_SPECIFIED:
+                    transport_zone[value] = None
+
+            transport_entry = {}
+            transport_type = transport_zone.get(pnet.NETWORK_TYPE)
+            if transport_type in [NetworkTypes.FLAT, NetworkTypes.VLAN]:
+                transport_entry['transport_type'] = NetworkTypes.BRIDGE
+                transport_entry['binding_config'] = {}
+                vlan_id = transport_zone.get(pnet.SEGMENTATION_ID)
+                if vlan_id:
+                    transport_entry['binding_config'] = (
+                        {'vlan_translation': [{'transport': vlan_id}]})
+            else:
+                transport_entry['transport_type'] = transport_type
+            transport_entry['zone_uuid'] = (
+                transport_zone[pnet.PHYSICAL_NETWORK] or
+                cluster.default_tz_uuid)
+            nvp_transport_zones_config.append(transport_entry)
+        return nvp_transport_zones_config
+
+    def _convert_to_transport_zones_dict(self, network):
+        """Converts the provider request body to multiprovider.
+        Returns: True if request is multiprovider False if provider
+        and None if neither.
+        """
+        if any(attr.is_attr_set(network.get(f))
+               for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
+                         pnet.SEGMENTATION_ID)):
+            if attr.is_attr_set(network.get(mpnet.SEGMENTS)):
+                raise mpnet.SegmentsSetInConjunctionWithProviders()
+            # convert to transport zone list
+            network[mpnet.SEGMENTS] = [
+                {pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE],
+                 pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK],
+                 pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}]
+            del network[pnet.NETWORK_TYPE]
+            del network[pnet.PHYSICAL_NETWORK]
+            del network[pnet.SEGMENTATION_ID]
+            return False
+        if attr.is_attr_set(network.get(mpnet.SEGMENTS)):
+            return True
+
     def create_network(self, context, network):
         net_data = network['network']
         tenant_id = self._get_tenant_id_for_create(context, net_data)
         self._ensure_default_security_group(context, tenant_id)
         # Process the provider network extension
-        self._handle_provider_create(context, net_data)
+        provider_type = self._convert_to_transport_zones_dict(net_data)
+        self._validate_provider_create(context, net_data)
         # Replace ATTR_NOT_SPECIFIED with None before sending to NVP
         for key, value in network['network'].iteritems():
             if value is attr.ATTR_NOT_SPECIFIED:
@@ -893,16 +984,14 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
             LOG.warning(_("Network with admin_state_up=False are not yet "
                           "supported by this plugin. Ignoring setting for "
                           "network %s"), net_data.get('name', '<unknown>'))
+        transport_zone_config = self._convert_to_nvp_transport_zones(
+            self.cluster, net_data)
         external = net_data.get(l3.EXTERNAL)
         if (not attr.is_attr_set(external) or
             attr.is_attr_set(external) and not external):
-            nvp_binding_type = net_data.get(pnet.NETWORK_TYPE)
-            if nvp_binding_type in ('flat', 'vlan'):
-                nvp_binding_type = 'bridge'
             lswitch = nvplib.create_lswitch(
                 self.cluster, tenant_id, net_data.get('name'),
-                nvp_binding_type, net_data.get(pnet.PHYSICAL_NETWORK),
-                net_data.get(pnet.SEGMENTATION_ID),
+                transport_zone_config,
                 shared=net_data.get(attr.SHARED))
             net_data['id'] = lswitch['uuid']
 
@@ -924,14 +1013,21 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                 self._process_network_queue_mapping(context, new_net)
                 self._extend_network_qos_queue(context, new_net)
 
-            if net_data.get(pnet.NETWORK_TYPE):
-                net_binding = nicira_db.add_network_binding(
-                    context.session, new_net['id'],
-                    net_data.get(pnet.NETWORK_TYPE),
-                    net_data.get(pnet.PHYSICAL_NETWORK),
-                    net_data.get(pnet.SEGMENTATION_ID, 0))
+            if (net_data.get(mpnet.SEGMENTS) and
+                isinstance(provider_type, bool)):
+                net_bindings = []
+                for tz in net_data[mpnet.SEGMENTS]:
+                    net_bindings.append(nicira_db.add_network_binding(
+                        context.session, new_net['id'],
+                        tz.get(pnet.NETWORK_TYPE),
+                        tz.get(pnet.PHYSICAL_NETWORK),
+                        tz.get(pnet.SEGMENTATION_ID, 0)))
+                if provider_type:
+                    nicira_db.set_multiprovider_network(context.session,
+                                                        new_net['id'])
                 self._extend_network_dict_provider(context, new_net,
-                                                   net_binding)
+                                                   provider_type,
+                                                   net_bindings)
         self.schedule_network(context, new_net)
         return new_net
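
For reference (not part of the diff), the provider-to-multiprovider
conversion performed by _convert_to_transport_zones_dict can be
pictured as follows; 'plugin' stands for an NvpPluginV2 instance and
the field values are placeholders:

    from neutron.extensions import multiprovidernet as mpnet
    from neutron.extensions import providernet as pnet

    # Sketch only: a provider-style body is folded into a one-element
    # 'segments' list and the method returns False (provider request);
    # a body already carrying 'segments' returns True (multiprovider);
    # a body with neither returns None.
    net_data = {'name': 'net1',
                pnet.NETWORK_TYPE: 'vlan',
                pnet.PHYSICAL_NETWORK: 'physnet1',
                pnet.SEGMENTATION_ID: 1}
    # after plugin._convert_to_transport_zones_dict(net_data):
    #   net_data[mpnet.SEGMENTS] == [{pnet.NETWORK_TYPE: 'vlan',
    #                                 pnet.PHYSICAL_NETWORK: 'physnet1',
    #                                 pnet.SEGMENTATION_ID: 1}]
    #   and the individual pnet.* keys have been removed from net_data.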
 
diff --git a/neutron/plugins/nicira/dbexts/nicira_db.py b/neutron/plugins/nicira/dbexts/nicira_db.py
index 68f165c41fe0835fd376853626e67d29258d6638..ab4bf2e55c771b7dc5b83aa9b6b9212dfd60be11 100644 (file)
@@ -25,26 +25,18 @@ from neutron.plugins.nicira.dbexts import nicira_networkgw_db
 LOG = logging.getLogger(__name__)
 
 
-def get_network_binding(session, network_id):
+def get_network_bindings(session, network_id):
     session = session or db.get_session()
-    try:
-        binding = (session.query(nicira_models.NvpNetworkBinding).
-                   filter_by(network_id=network_id).
-                   one())
-        return binding
-    except exc.NoResultFound:
-        return
+    return (session.query(nicira_models.NvpNetworkBinding).
+            filter_by(network_id=network_id).
+            all())
 
 
-def get_network_binding_by_vlanid(session, vlan_id):
+def get_network_bindings_by_vlanid(session, vlan_id):
     session = session or db.get_session()
-    try:
-        binding = (session.query(nicira_models.NvpNetworkBinding).
-                   filter_by(vlan_id=vlan_id).
-                   one())
-        return binding
-    except exc.NoResultFound:
-        return
+    return (session.query(nicira_models.NvpNetworkBinding).
+            filter_by(vlan_id=vlan_id).
+            all())
 
 
 def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id):
@@ -88,3 +80,18 @@ def set_default_network_gateway(session, gw_id):
         gw = (session.query(nicira_networkgw_db.NetworkGateway).
               filter_by(id=gw_id).one())
         gw['default'] = True
+
+
+def set_multiprovider_network(session, network_id):
+    with session.begin(subtransactions=True):
+        multiprovider_network = nicira_models.MultiProviderNetworks(
+            network_id)
+        session.add(multiprovider_network)
+        return multiprovider_network
+
+
+def is_multiprovider_network(session, network_id):
+    with session.begin(subtransactions=True):
+        return bool(
+            session.query(nicira_models.MultiProviderNetworks).filter_by(
+                network_id=network_id).first())
diff --git a/neutron/plugins/nicira/dbexts/nicira_models.py b/neutron/plugins/nicira/dbexts/nicira_models.py
index 86bf5213b988a3dde57dac6928f2d57732969573..976f673a872487a898e6cd06d4fa055adfde6cc5 100644 (file)
@@ -29,15 +29,18 @@ class NvpNetworkBinding(model_base.BASEV2):
     """
     __tablename__ = 'nvp_network_bindings'
 
+    # TODO(arosen) - it might be worthwhile refactoring how this data is
+    # stored later so that every column does not need to be a primary key.
     network_id = Column(String(36),
                         ForeignKey('networks.id', ondelete="CASCADE"),
                         primary_key=True)
     # 'flat', 'vlan', stt' or 'gre'
     binding_type = Column(Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
                                name='nvp_network_bindings_binding_type'),
-                          nullable=False)
-    phy_uuid = Column(String(36))
-    vlan_id = Column(Integer)
+                          nullable=False, primary_key=True)
+    phy_uuid = Column(String(36), primary_key=True, nullable=True)
+    vlan_id = Column(Integer, primary_key=True, nullable=True,
+                     autoincrement=False)
 
     def __init__(self, network_id, binding_type, phy_uuid, vlan_id):
         self.network_id = network_id
@@ -64,3 +67,15 @@ class NeutronNvpPortMapping(model_base.BASEV2):
     def __init__(self, quantum_id, nvp_id):
         self.quantum_id = quantum_id
         self.nvp_id = nvp_id
+
+
+class MultiProviderNetworks(model_base.BASEV2):
+    """Networks that were provision through multiprovider extension."""
+
+    __tablename__ = 'nvp_multi_provider_networks'
+    network_id = Column(String(36),
+                        ForeignKey('networks.id', ondelete="CASCADE"),
+                        primary_key=True)
+
+    def __init__(self, network_id):
+        self.network_id = network_id
diff --git a/neutron/plugins/nicira/nvplib.py b/neutron/plugins/nicira/nvplib.py
index 0fa1e2aedcab9b7f7bf057b4292189c469f8c950..6ad816b133c7f9c8d52deb616a66a7acd8e8a028 100644 (file)
@@ -24,8 +24,6 @@ import hashlib
 import inspect
 import json
 
-from oslo.config import cfg
-
 #FIXME(danwent): I'd like this file to get to the point where it has
 # no neutron-specific logic in it
 from neutron.common import constants
@@ -217,27 +215,14 @@ def get_lswitches(cluster, neutron_net_id):
 
 
 def create_lswitch(cluster, tenant_id, display_name,
-                   transport_type=None,
-                   transport_zone_uuid=None,
-                   vlan_id=None,
+                   transport_zones_config,
                    neutron_net_id=None,
                    shared=None,
                    **kwargs):
-    nvp_binding_type = transport_type
-    if transport_type in ('flat', 'vlan'):
-        nvp_binding_type = 'bridge'
-    transport_zone_config = (
-        {"zone_uuid": (transport_zone_uuid or
-                       cluster.default_tz_uuid),
-         "transport_type": (nvp_binding_type or
-                            cfg.CONF.NVP.default_transport_type)})
     lswitch_obj = {"display_name": _check_and_truncate_name(display_name),
-                   "transport_zones": [transport_zone_config],
+                   "transport_zones": transport_zones_config,
                    "tags": [{"tag": tenant_id, "scope": "os_tid"},
                             {"tag": NEUTRON_VERSION, "scope": "quantum"}]}
-    if nvp_binding_type == 'bridge' and vlan_id:
-        transport_zone_config["binding_config"] = {"vlan_translation":
-                                                   [{"transport": vlan_id}]}
     if neutron_net_id:
         lswitch_obj["tags"].append({"tag": neutron_net_id,
                                     "scope": "quantum_net_id"})
diff --git a/neutron/tests/unit/nicira/test_nicira_plugin.py b/neutron/tests/unit/nicira/test_nicira_plugin.py
index 3cb8bddc7d83b01297ae69b0d50edaec20305ede..7b0d3b8c65122cf1b163baa83c90d0ffc21517ea 100644 (file)
@@ -25,6 +25,7 @@ from neutron.common import exceptions as ntn_exc
 import neutron.common.test_lib as test_lib
 from neutron import context
 from neutron.extensions import l3
+from neutron.extensions import multiprovidernet as mpnet
 from neutron.extensions import portbindings
 from neutron.extensions import providernet as pnet
 from neutron.extensions import securitygroup as secgrp
@@ -1232,3 +1233,105 @@ class TestNiciraNetworkGateway(test_l2_gw.NetworkGatewayDbTestCase,
     def test_delete_network_gateway(self):
         # The default gateway must still be there
         self._test_delete_network_gateway(1)
+
+
+class TestNiciraMultiProviderNetworks(NiciraPluginV2TestCase):
+
+    def setUp(self, plugin=None):
+        cfg.CONF.set_override('api_extensions_path', NVPEXT_PATH)
+        super(TestNiciraMultiProviderNetworks, self).setUp()
+
+    def test_create_network_provider(self):
+        data = {'network': {'name': 'net1',
+                            pnet.NETWORK_TYPE: 'vlan',
+                            pnet.PHYSICAL_NETWORK: 'physnet1',
+                            pnet.SEGMENTATION_ID: 1,
+                            'tenant_id': 'tenant_one'}}
+        network_req = self.new_create_request('networks', data)
+        network = self.deserialize(self.fmt,
+                                   network_req.get_response(self.api))
+        self.assertEqual(network['network'][pnet.NETWORK_TYPE], 'vlan')
+        self.assertEqual(network['network'][pnet.PHYSICAL_NETWORK], 'physnet1')
+        self.assertEqual(network['network'][pnet.SEGMENTATION_ID], 1)
+        self.assertNotIn(mpnet.SEGMENTS, network['network'])
+
+    def test_create_network_single_multiple_provider(self):
+        data = {'network': {'name': 'net1',
+                            mpnet.SEGMENTS:
+                            [{pnet.NETWORK_TYPE: 'vlan',
+                              pnet.PHYSICAL_NETWORK: 'physnet1',
+                              pnet.SEGMENTATION_ID: 1}],
+                            'tenant_id': 'tenant_one'}}
+        net_req = self.new_create_request('networks', data)
+        network = self.deserialize(self.fmt, net_req.get_response(self.api))
+        for provider_field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
+                               pnet.SEGMENTATION_ID]:
+            self.assertTrue(provider_field not in network['network'])
+        tz = network['network'][mpnet.SEGMENTS][0]
+        self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan')
+        self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1')
+        self.assertEqual(tz[pnet.SEGMENTATION_ID], 1)
+
+        # Tests get_network()
+        net_req = self.new_show_request('networks', network['network']['id'])
+        network = self.deserialize(self.fmt, net_req.get_response(self.api))
+        tz = network['network'][mpnet.SEGMENTS][0]
+        self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan')
+        self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1')
+        self.assertEqual(tz[pnet.SEGMENTATION_ID], 1)
+
+    def test_create_network_multiprovider(self):
+        data = {'network': {'name': 'net1',
+                            mpnet.SEGMENTS:
+                            [{pnet.NETWORK_TYPE: 'vlan',
+                              pnet.PHYSICAL_NETWORK: 'physnet1',
+                              pnet.SEGMENTATION_ID: 1},
+                            {pnet.NETWORK_TYPE: 'stt',
+                             pnet.PHYSICAL_NETWORK: 'physnet1'}],
+                            'tenant_id': 'tenant_one'}}
+        network_req = self.new_create_request('networks', data)
+        network = self.deserialize(self.fmt,
+                                   network_req.get_response(self.api))
+        segments = network['network'][mpnet.SEGMENTS]
+        for expected, actual in zip(data['network'][mpnet.SEGMENTS],
+                                    segments):
+            for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
+                          pnet.SEGMENTATION_ID]:
+                self.assertEqual(expected.get(field), actual.get(field))
+
+        # Tests get_network()
+        net_req = self.new_show_request('networks', network['network']['id'])
+        network = self.deserialize(self.fmt, net_req.get_response(self.api))
+        segments = network['network'][mpnet.SEGMENTS]
+        for expected, actual in zip(data['network'][mpnet.SEGMENTS],
+                                    segments):
+            for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
+                          pnet.SEGMENTATION_ID]:
+                self.assertEqual(expected.get(field), actual.get(field))
+
+    def test_create_network_with_provider_and_multiprovider_fail(self):
+        data = {'network': {'name': 'net1',
+                            mpnet.SEGMENTS:
+                            [{pnet.NETWORK_TYPE: 'vlan',
+                              pnet.PHYSICAL_NETWORK: 'physnet1',
+                              pnet.SEGMENTATION_ID: 1}],
+                            pnet.NETWORK_TYPE: 'vlan',
+                            pnet.PHYSICAL_NETWORK: 'physnet1',
+                            pnet.SEGMENTATION_ID: 1,
+                            'tenant_id': 'tenant_one'}}
+
+        network_req = self.new_create_request('networks', data)
+        res = network_req.get_response(self.api)
+        self.assertEqual(res.status_int, 400)
+
+    def test_create_network_duplicate_segments(self):
+        data = {'network': {'name': 'net1',
+                            mpnet.SEGMENTS:
+                            [{pnet.NETWORK_TYPE: 'vlan',
+                              pnet.PHYSICAL_NETWORK: 'physnet1',
+                              pnet.SEGMENTATION_ID: 1},
+                            {pnet.NETWORK_TYPE: 'vlan',
+                             pnet.PHYSICAL_NETWORK: 'physnet1',
+                             pnet.SEGMENTATION_ID: 1}],
+                            'tenant_id': 'tenant_one'}}
+        network_req = self.new_create_request('networks', data)
+        res = network_req.get_response(self.api)
+        self.assertEqual(res.status_int, 400)
diff --git a/neutron/tests/unit/nicira/test_nvplib.py b/neutron/tests/unit/nicira/test_nvplib.py
index 02f4ea1fa2a10e094601b45a9dbc211cc8c95357..01383f6bb2396d002175821f053041291be4a3e7 100644 (file)
@@ -254,8 +254,10 @@ class TestNvplibL2Gateway(NvplibTestCase):
     def test_plug_l2_gw_port_attachment(self):
         tenant_id = 'pippo'
         node_uuid = _uuid()
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id,
-                                        'fake-switch')
+                                        'fake-switch', transport_zones_config)
         gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid']
         lport = nvplib.create_lport(self.fake_cluster,
                                     lswitch['uuid'],
@@ -283,9 +285,12 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
 
     def test_create_and_get_lswitches_single(self):
         tenant_id = 'pippo'
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
                                         tenant_id,
-                                        'fake-switch')
+                                        'fake-switch',
+                                        transport_zones_config)
         res_lswitch = nvplib.get_lswitches(self.fake_cluster,
                                            lswitch['uuid'])
         self.assertEqual(len(res_lswitch), 1)
@@ -294,9 +299,12 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
 
     def test_create_and_get_lswitches_single_name_exceeds_40_chars(self):
         tenant_id = 'pippo'
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
                                         tenant_id,
-                                        '*' * 50)
+                                        '*' * 50,
+                                        transport_zones_config)
         res_lswitch = nvplib.get_lswitches(self.fake_cluster,
                                            lswitch['uuid'])
         self.assertEqual(len(res_lswitch), 1)
@@ -305,12 +313,16 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
 
     def test_create_and_get_lswitches_multiple(self):
         tenant_id = 'pippo'
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         main_lswitch = nvplib.create_lswitch(
             self.fake_cluster, tenant_id, 'fake-switch',
+            transport_zones_config,
             tags=[{'scope': 'multi_lswitch', 'tag': 'True'}])
         # Create secondary lswitch
         nvplib.create_lswitch(
             self.fake_cluster, tenant_id, 'fake-switch-2',
+            transport_zones_config,
             neutron_net_id=main_lswitch['uuid'])
         res_lswitch = nvplib.get_lswitches(self.fake_cluster,
                                            main_lswitch['uuid'])
@@ -329,9 +341,12 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
     def test_update_lswitch(self):
         new_name = 'new-name'
         new_tags = [{'scope': 'new_tag', 'tag': 'xxx'}]
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
                                         'pippo',
-                                        'fake-switch')
+                                        'fake-switch',
+                                        transport_zones_config)
         nvplib.update_lswitch(self.fake_cluster, lswitch['uuid'],
                               new_name, tags=new_tags)
         res_lswitch = nvplib.get_lswitches(self.fake_cluster,
@@ -349,9 +364,12 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
                           'foo', 'bar')
 
     def test_delete_networks(self):
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
                                         'pippo',
-                                        'fake-switch')
+                                        'fake-switch',
+                                        transport_zones_config)
         nvplib.delete_networks(self.fake_cluster, lswitch['uuid'],
                                [lswitch['uuid']])
         self.assertRaises(exceptions.NotFound,
@@ -842,8 +860,11 @@ class TestNvplibLogicalRouters(NvplibTestCase):
 
     def test_plug_lrouter_port_patch_attachment(self):
         tenant_id = 'pippo'
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
-                                        tenant_id, 'fake-switch')
+                                        tenant_id, 'fake-switch',
+                                        transport_zones_config)
         lport = nvplib.create_lport(self.fake_cluster, lswitch['uuid'],
                                     tenant_id, 'xyz',
                                     'name', 'device_id', True)
@@ -1215,8 +1236,11 @@ class TestNvplibLogicalPorts(NvplibTestCase):
 
     def _create_switch_and_port(self, tenant_id='pippo',
                                 neutron_port_id='whatever'):
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
-                                        tenant_id, 'fake-switch')
+                                        tenant_id, 'fake-switch',
+                                        transport_zones_config)
         lport = nvplib.create_lport(self.fake_cluster, lswitch['uuid'],
                                     tenant_id, neutron_port_id,
                                     'name', 'device_id', True)
@@ -1252,8 +1276,10 @@ class TestNvplibLogicalPorts(NvplibTestCase):
     def test_get_port_by_tag_not_found_returns_None(self):
         tenant_id = 'pippo'
         neutron_port_id = 'whatever'
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id,
-                                        'fake-switch')
+                                        'fake-switch', transport_zones_config)
         lport = nvplib.get_port_by_neutron_tag(self.fake_cluster,
                                                lswitch['uuid'],
                                                neutron_port_id)