This patchset introduces a basic ML2 mechanism driver for Nuage.
In the Juno release, the mechanism driver will support basic
L2 functionality as a stepping stone toward enhancing
it in later releases.
Implements blueprint: ml2-mech-driver-nuage
Change-Id: Idae4f88f3d21526f377ec0f81377cb90b9fc14e4
--- /dev/null
+# Copyright 2014 Alcatel-Lucent USA Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc.
+
+
+import netaddr
+from oslo.config import cfg
+
+from neutron.common import constants as n_consts
+from neutron.extensions import portbindings
+from neutron.openstack.common import log
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import driver_api as api
+from neutron.plugins.nuage.common import config
+from neutron.plugins.nuage.common import constants as nuage_const
+from neutron.plugins.nuage import plugin
+
+LOG = log.getLogger(__name__)
+
+
+class NuageMechanismDriver(plugin.NuagePlugin,
+ api.MechanismDriver):
+
+ def initialize(self):
+ LOG.debug('Initializing driver')
+ config.nuage_register_cfg_opts()
+ self.nuageclient_init()
+ self.vif_type = portbindings.VIF_TYPE_OVS
+ self.vif_details = {portbindings.CAP_PORT_FILTER: False}
+ self.default_np_id = self.nuageclient.get_net_partition_id_by_name(
+ cfg.CONF.RESTPROXY.default_net_partition_name)
+ LOG.debug('Initializing complete')
+
+ def create_subnet_postcommit(self, context):
+ subnet = context.current
+ net = netaddr.IPNetwork(subnet['cidr'])
+ params = {
+ 'netpart_id': self.default_np_id,
+ 'tenant_id': subnet['tenant_id'],
+ 'net': net
+ }
+ self.nuageclient.create_subnet(subnet, params)
+
+ def delete_subnet_postcommit(self, context):
+ subnet = context.current
+ self.nuageclient.delete_subnet(subnet['id'])
+
+ def update_port_postcommit(self, context):
+ port = context.current
+ port_prefix = nuage_const.NOVA_PORT_OWNER_PREF
+ # Check two things prior to proceeding with
+ # talking to backend.
+ # 1) binding has happened successfully.
+ # 2) It's a VM port.
+ if ((not context.original_bound_segment and
+ context.bound_segment) and
+ port['device_owner'].startswith(port_prefix)):
+ np_name = cfg.CONF.RESTPROXY.default_net_partition_name
+ self._create_update_port(context._plugin_context,
+ port, np_name)
+
+ def delete_port_postcommit(self, context):
+ port = context.current
+ np_name = cfg.CONF.RESTPROXY.default_net_partition_name
+ self._delete_nuage_vport(context._plugin_context,
+ port, np_name)
+
+ def bind_port(self, context):
+ LOG.debug("Attempting to bind port %(port)s on "
+ "network %(network)s",
+ {'port': context.current['id'],
+ 'network': context.network.current['id']})
+ for segment in context.network.network_segments:
+ if self._check_segment(segment):
+ context.set_binding(segment[api.ID],
+ self.vif_type,
+ self.vif_details,
+ status=n_consts.PORT_STATUS_ACTIVE)
+ LOG.debug("Bound using segment: %s", segment)
+ return
+ else:
+ LOG.error(_("Refusing to bind port for segment ID %(id)s, "
+ "segment %(seg)s, phys net %(physnet)s, and "
+ "network type %(nettype)s"),
+ {'id': segment[api.ID],
+ 'seg': segment[api.SEGMENTATION_ID],
+ 'physnet': segment[api.PHYSICAL_NETWORK],
+ 'nettype': segment[api.NETWORK_TYPE]})
+
+ def _check_segment(self, segment):
+ """Verify a segment is valid for the Nuage MechanismDriver."""
+ network_type = segment[api.NETWORK_TYPE]
+ return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE,
+ constants.TYPE_VXLAN, constants.TYPE_VLAN]
found_resource = found_resource[0]
return found_resource
- def _create_update_port(self, context, port,
- netpart_id, parent_id):
+ def _create_update_port(self, context, port, np_name):
filters = {'device_id': [port['device_id']]}
ports = self.get_ports(context, filters)
- net_partition = nuagedb.get_net_partition_by_id(context.session,
- netpart_id)
params = {
'port_id': port['id'],
'id': port['device_id'],
'mac': port['mac_address'],
- 'parent_id': parent_id,
- 'net_partition': net_partition,
+ 'netpart_name': np_name,
'ip': port['fixed_ips'][0]['ip_address'],
'no_of_ports': len(ports),
'tenant': port['tenant_id'],
+ 'neutron_id': port['fixed_ips'][0]['subnet_id']
}
-
- nuage_vm = self.nuageclient.create_vms(params)
- if nuage_vm:
- if port['fixed_ips'][0]['ip_address'] != str(nuage_vm['ip']):
- self._update_port_ip(context, port, nuage_vm['ip'])
+ self.nuageclient.create_vms(params)
def _get_router_by_subnet(self, context, subnet_id):
filters = {
if port['device_owner'].startswith(port_prefix):
#This request is coming from nova
try:
+ net_partition = nuagedb.get_net_partition_by_id(
+ session,
+ subnet_mapping['net_partition_id'])
self._create_update_port(
context,
port,
- subnet_mapping['net_partition_id'],
- subnet_mapping['nuage_subnet_id'])
+ net_partition['name'])
except Exception:
with excutils.save_and_reraise_exception():
super(NuagePlugin, self).delete_port(
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if not nuage_port or not nuage_port.get('nuage_vport_id'):
+ net_partition = nuagedb.get_net_partition_by_id(
+ session, subnet_mapping['net_partition_id'])
self._create_update_port(context, port,
- subnet_mapping[
- 'net_partition_id'],
- subnet_mapping['nuage_subnet_id'])
+ net_partition['np_name'])
updated_port = self._make_port_dict(port)
sg_port = self._extend_port_dict_security_group(
updated_port,
)
return updated_port
- @lockutils.synchronized('delete-port', 'nuage-del', external=True)
- def delete_port(self, context, id, l3_port_check=True):
- if l3_port_check:
- self.prevent_l3_port_deletion(context, id)
- port = self._get_port(context, id)
+ def _delete_nuage_vport(self, context, port, np_name):
nuage_vif_id = None
params = {
- 'neutron_port_id': id,
+ 'neutron_port_id': port['id'],
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
+ if constants.NOVA_PORT_OWNER_PREF in port['device_owner']:
+ # This was a VM Port
+ if nuage_port:
+ nuage_vif_id = nuage_port['nuage_vif_id']
+ filters = {'device_id': [port['device_id']]}
+ ports = self.get_ports(context, filters)
+ params = {
+ 'no_of_ports': len(ports),
+ 'netpart_name': np_name,
+ 'tenant': port['tenant_id'],
+ 'mac': port['mac_address'],
+ 'nuage_vif_id': nuage_vif_id,
+ 'id': port['device_id']
+ }
+ self.nuageclient.delete_vms(params)
+
+ @lockutils.synchronized('delete-port', 'nuage-del', external=True)
+ def delete_port(self, context, id, l3_port_check=True):
+ if l3_port_check:
+ self.prevent_l3_port_deletion(context, id)
+ port = self._get_port(context, id)
# This is required for to pass ut test_floatingip_port_delete
self.disassociate_floatingips(context, id)
if not port['fixed_ips']:
netpart_id = subnet_mapping['net_partition_id']
net_partition = nuagedb.get_net_partition_by_id(context.session,
netpart_id)
-
- # Need to call this explicitly to delete vport
- if constants.NOVA_PORT_OWNER_PREF in port['device_owner']:
- if nuage_port:
- nuage_vif_id = nuage_port['nuage_vif_id']
- # This was a VM Port
- filters = {'device_id': [port['device_id']]}
- ports = self.get_ports(context, filters)
- params = {
- 'no_of_ports': len(ports),
- 'net_partition': net_partition,
- 'tenant': port['tenant_id'],
- 'mac': port['mac_address'],
- 'nuage_vif_id': nuage_vif_id,
- 'id': port['device_id']
- }
- self.nuageclient.delete_vms(params)
+ self._delete_nuage_vport(context, port, net_partition['name'])
super(NuagePlugin, self).delete_port(context, id)
def _check_view_auth(self, context, resource, action):
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session,
subn['id'])
if subnet_l2dom:
- nuage_subnet_id = subnet_l2dom['nuage_subnet_id']
- nuage_l2dom_tid = subnet_l2dom['nuage_l2dom_tmplt_id']
user_id = subnet_l2dom['nuage_user_id']
group_id = subnet_l2dom['nuage_group_id']
- self.nuageclient.delete_subnet(nuage_subnet_id,
- nuage_l2dom_tid)
+ self.nuageclient.delete_subnet(subn['id'])
nuagedb.delete_subnetl2dom_mapping(context.session,
subnet_l2dom)
if not self._check_router_subnet_for_tenant(
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session, id)
if subnet_l2dom:
try:
- self.nuageclient.delete_subnet(
- subnet_l2dom['nuage_subnet_id'],
- subnet_l2dom['nuage_l2dom_tmplt_id'])
+ self.nuageclient.delete_subnet(id)
except Exception:
msg = (_('Unable to complete operation on subnet %s.'
'One or more ports have an IP allocation '
'router': router_id})
raise n_exc.BadRequest(resource='subnet', msg=msg)
nuage_subnet_id = subnet_l2dom['nuage_subnet_id']
- nuage_l2dom_tmplt_id = subnet_l2dom['nuage_l2dom_tmplt_id']
if self.nuageclient.vms_on_l2domain(nuage_subnet_id):
super(NuagePlugin,
self).remove_router_interface(context,
msg = (_("Subnet %s has one or more active VMs "
"Router-IF add not permitted") % subnet_id)
raise n_exc.BadRequest(resource='subnet', msg=msg)
- self.nuageclient.delete_subnet(nuage_subnet_id,
- nuage_l2dom_tmplt_id)
+ self.nuageclient.delete_subnet(subnet_id)
net = netaddr.IPNetwork(subn['cidr'])
pnet_binding = nuagedb.get_network_binding(context.session,
subn['network_id'])
--- /dev/null
+# Copyright 2014 Alcatel-Lucent USA Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc.
+
+
+from neutron.plugins.ml2 import config as ml2_config
+from neutron.tests.unit.ml2 import test_ml2_plugin
+import neutron.tests.unit.nuage.test_nuage_plugin as tnp
+from neutron.tests.unit import test_db_plugin
+
+
+class TestNuageMechDriverBase(tnp.NuagePluginV2TestCase):
+ def setUp(self):
+ ml2_config.cfg.CONF.set_override('mechanism_drivers',
+ ['nuage'],
+ 'ml2')
+
+ super(TestNuageMechDriverBase,
+ self).setUp(plugin=test_ml2_plugin.PLUGIN_NAME)
+
+
+class TestNuageMechDriverNetworksV2(test_db_plugin.TestNetworksV2,
+ TestNuageMechDriverBase):
+ pass
+
+
+class TestNuageMechDriverSubnetsV2(test_db_plugin.TestSubnetsV2,
+ TestNuageMechDriverBase):
+ pass
+
+
+class TestNuageMechDriverPortsV2(test_db_plugin.TestPortsV2,
+ TestNuageMechDriverBase):
+
+ def setUp(self):
+ super(TestNuageMechDriverPortsV2, self).setUp()
+ self.port_create_status = 'DOWN'
def update_subnet(self, neutron_subnet, params):
pass
- def delete_subnet(self, id, template_id):
+ def delete_subnet(self, id):
pass
def create_router(self, neutron_router, router, params):
}
return fake_defnetpart_data
+ def get_net_partition_id_by_name(self, name):
+ return uuidutils.generate_uuid()
+
def delete_net_partition(self, id, l3dom_id=None, l2dom_id=None):
pass
etc/neutron/plugins/ml2/ml2_conf_ofa.ini
etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini
etc/neutron/plugins/ml2/ml2_conf_sriov.ini
+ etc/neutron/plugins/nuage/nuage_plugin.ini
etc/neutron/plugins/mlnx = etc/neutron/plugins/mlnx/mlnx_conf.ini
etc/neutron/plugins/nec = etc/neutron/plugins/nec/nec.ini
etc/neutron/plugins/nuage = etc/neutron/plugins/nuage/nuage_plugin.ini
brocade = neutron.plugins.ml2.drivers.brocade.mechanism_brocade:BrocadeMechanism
fslsdn = neutron.plugins.ml2.drivers.mechanism_fslsdn:FslsdnMechanismDriver
sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver:SriovNicSwitchMechanismDriver
+ nuage = neutron.plugins.ml2.drivers.mech_nuage.driver:NuageMechanismDriver
neutron.openstack.common.cache.backends =
memory = neutron.openstack.common.cache._backends.memory:MemoryBackend
# These are for backwards compat with Icehouse notification_driver configuration values