From c4bc49afeff0c472ae20df1d5c5e068f47ba8093 Mon Sep 17 00:00:00 2001
From: armando-migliaccio
Date: Fri, 11 Oct 2013 14:39:28 -0700
Subject: [PATCH] Add migration support from agent to NSX dhcp/metadata services

This is feature patch (3 of 3); it introduces support for transitioning
existing NSX-based deployments from the agent-based model of providing
dhcp and metadata proxy services to the new agentless mode. In
'combined' mode, existing networks will still be served by the existing
infrastructure, whereas new networks will be served by the new
infrastructure. Networks may be migrated to the new model using a new
CLI tool, 'neutron-nsx-manage'. Currently the tool provides two
admin-only commands:

neutron-nsx-manage net-report

This checks that the network can be migrated and returns the resources
currently in use.

And:

neutron-nsx-manage net-migrate

This moves the network over to the new model and deallocates the
resources held by the agent. Once a network has been migrated there is
no turning back.

Completes-blueprint nsx-integrated-services
Change-Id: I37c9aa0e76124e1023899106406de7be6714c24d
---
 etc/neutron/plugins/nicira/nvp.ini | 17 +-
 etc/neutron/plugins/vmware/nsx.ini | 17 +-
 etc/policy.json | 4 +-
 .../1421183d533f_nsx_dhcp_metadata.py | 72 ++
 neutron/plugins/nicira/NeutronPlugin.py | 4 +-
 neutron/plugins/nicira/common/config.py | 3 +-
 neutron/plugins/nicira/common/exceptions.py | 4 +
 neutron/plugins/nicira/dbexts/lsn_db.py | 130 ++++
 neutron/plugins/nicira/dhcp_meta/combined.py | 95 +++
 neutron/plugins/nicira/dhcp_meta/constants.py | 28 +
 .../plugins/nicira/dhcp_meta/lsnmanager.py | 449 +++++++++++++
 neutron/plugins/nicira/dhcp_meta/migration.py | 181 +++++
 neutron/plugins/nicira/dhcp_meta/nsx.py | 317 +++++++++
 neutron/plugins/nicira/dhcp_meta/nvp.py | 625 ------------------
 neutron/plugins/nicira/dhcpmeta_modes.py | 115 ++--
 neutron/plugins/nicira/extensions/lsn.py | 82 +++
 neutron/plugins/nicira/nsxlib/lsn.py | 13 +
 neutron/plugins/nicira/shell/__init__.py | 41 ++
 neutron/plugins/nicira/shell/commands.py | 66 ++
 neutron/tests/unit/nicira/test_dhcpmeta.py | 482 ++++++++++++--
 neutron/tests/unit/nicira/test_lsn_db.py | 103 +++
 neutron/tests/unit/nicira/test_lsn_lib.py | 26 +-
 setup.cfg | 1 +
 23 files changed, 2161 insertions(+), 714 deletions(-)
 create mode 100644 neutron/db/migration/alembic_migrations/versions/1421183d533f_nsx_dhcp_metadata.py
 create mode 100644 neutron/plugins/nicira/dbexts/lsn_db.py
 create mode 100644 neutron/plugins/nicira/dhcp_meta/combined.py
 create mode 100644 neutron/plugins/nicira/dhcp_meta/constants.py
 create mode 100644 neutron/plugins/nicira/dhcp_meta/lsnmanager.py
 create mode 100644 neutron/plugins/nicira/dhcp_meta/migration.py
 create mode 100644 neutron/plugins/nicira/dhcp_meta/nsx.py
 delete mode 100644 neutron/plugins/nicira/dhcp_meta/nvp.py
 create mode 100644 neutron/plugins/nicira/extensions/lsn.py
 create mode 100644 neutron/plugins/nicira/shell/__init__.py
 create mode 100644 neutron/plugins/nicira/shell/commands.py
 create mode 100644 neutron/tests/unit/nicira/test_lsn_db.py
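Reviewer note (illustrative only, not part of the applied diff): a minimal sketch of how the new admin-only commands are expected to drive the MigrationManager added in neutron/plugins/nicira/dhcp_meta/migration.py. The names plugin, notifier, context and network_id below are placeholders for objects that the real neutron-nsx-manage commands (see shell/commands.py in this patch, not shown here) obtain from the running Neutron configuration; they are not defined by this sketch.

# Illustrative sketch -- assumes an admin context, a loaded NSX plugin
# exposing 'lsn_manager', and a DHCP agent notifier for the network.
from neutron.plugins.nicira.dhcp_meta import migration


def migrate_network(plugin, notifier, context, network_id):
    mgr = migration.MigrationManager(plugin, plugin.lsn_manager, notifier)
    # 'net-report': validate migratability and list the resources in use
    subnet = mgr.validate(context, network_id)   # raises on conflicts
    print(mgr.report(context, network_id))       # agent-side resources
    # 'net-migrate': move the network over to the LSN-backed model
    mgr.migrate(context, network_id, subnet)
    if subnet:
        # after migration, report against the subnet to see LSN resources
        print(mgr.report(context, network_id, subnet['id']))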
diff --git a/etc/neutron/plugins/nicira/nvp.ini b/etc/neutron/plugins/nicira/nvp.ini
index f031d381b..b22448ec4 100644
--- a/etc/neutron/plugins/nicira/nvp.ini
+++ b/etc/neutron/plugins/nicira/nvp.ini
@@ -119,11 +119,18 @@
 # metadata proxy services to tenant instances. If 'agent' is chosen (default)
 # the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
 # provide such services. In this mode, the plugin supports API extensions 'agent'
-# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Havana),
+# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
 # the plugin will use NSX logical services for DHCP and metadata proxy. This
 # simplifies the deployment model for Neutron, in that the plugin no longer requires
 # the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
-# becomes ineffective. The mode 'agentless' is not supported for NSX 4.0 or below.
+# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 onwards.
+# Furthermore, a 'combined' mode is also provided and is used to support existing
+# deployments that want to adopt the agentless mode going forward. With this mode,
+# existing networks keep being served by the existing infrastructure (thus preserving
+# backward compatibility), whereas new networks will be served by the new infrastructure.
+# Migration tools are provided to 'move' one network from one model to another; with
+# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is
+# ignored, as new networks will no longer be scheduled to existing dhcp agents.
 # agent_mode = agent
 
 [nsx_sync]
@@ -165,6 +172,12 @@
 # a considerable impact on overall performance.
 # always_read_status = False
 
+[nsx_lsn]
+# Pull LSN information from NSX in case it is missing from the local
+# data store. This is useful to rebuild the local store in case of
+# server recovery
+# sync_on_missing_data = False
+
 [nsx_dhcp]
 # (Optional) Comma separated list of additional dns servers. Default is an empty list
 # extra_domain_name_servers =
diff --git a/etc/neutron/plugins/vmware/nsx.ini b/etc/neutron/plugins/vmware/nsx.ini
index f2b47a3ae..f30c501f9 100644
--- a/etc/neutron/plugins/vmware/nsx.ini
+++ b/etc/neutron/plugins/vmware/nsx.ini
@@ -114,11 +114,18 @@
 # metadata proxy services to tenant instances. If 'agent' is chosen (default)
 # the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
 # provide such services. In this mode, the plugin supports API extensions 'agent'
-# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Havana),
+# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
 # the plugin will use NSX logical services for DHCP and metadata proxy. This
 # simplifies the deployment model for Neutron, in that the plugin no longer requires
 # the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
-# becomes ineffective. The mode 'agentless' is not supported for NSX 4.0 or below.
+# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 onwards.
+# Furthermore, a 'combined' mode is also provided and is used to support existing
+# deployments that want to adopt the agentless mode going forward. With this mode,
+# existing networks keep being served by the existing infrastructure (thus preserving
+# backward compatibility), whereas new networks will be served by the new infrastructure.
+# Migration tools are provided to 'move' one network from one model to another; with
+# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is
+# ignored, as new networks will no longer be scheduled to existing dhcp agents.
 # agent_mode = agent
 
 [nsx_sync]
@@ -160,6 +167,12 @@
 # a considerable impact on overall performance.
# always_read_status = False +[nsx_lsn] +# Pull LSN information from NSX in case it is missing from the local +# data store. This is useful to rebuild the local store in case of +# server recovery +# sync_on_missing_data = False + [nsx_dhcp] # (Optional) Comma separated list of additional dns servers. Default is an empty list # extra_domain_name_servers = diff --git a/etc/policy.json b/etc/policy.json index cd65e6b96..a72d3a93d 100644 --- a/etc/policy.json +++ b/etc/policy.json @@ -131,5 +131,7 @@ "delete_metering_label_rule": "rule:admin_only", "get_metering_label_rule": "rule:admin_only", - "get_service_provider": "rule:regular_user" + "get_service_provider": "rule:regular_user", + "get_lsn": "rule:admin_only", + "create_lsn": "rule:admin_only" } diff --git a/neutron/db/migration/alembic_migrations/versions/1421183d533f_nsx_dhcp_metadata.py b/neutron/db/migration/alembic_migrations/versions/1421183d533f_nsx_dhcp_metadata.py new file mode 100644 index 000000000..2cfbf7505 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/1421183d533f_nsx_dhcp_metadata.py @@ -0,0 +1,72 @@ +# Copyright 2014 VMware, Inc. + +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""NSX DHCP/metadata support + +Revision ID: 1421183d533f +Revises: 8f682276ee4 +Create Date: 2013-10-11 14:33:37.303215 + +""" + +revision = '1421183d533f' +down_revision = '8f682276ee4' + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'lsn', + sa.Column('net_id', + sa.String(length=36), nullable=False), + sa.Column('lsn_id', + sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('lsn_id')) + + op.create_table( + 'lsn_port', + sa.Column('lsn_port_id', + sa.String(length=36), nullable=False), + sa.Column('lsn_id', + sa.String(length=36), nullable=False), + sa.Column('sub_id', + sa.String(length=36), nullable=False, unique=True), + sa.Column('mac_addr', + sa.String(length=32), nullable=False, unique=True), + sa.ForeignKeyConstraint(['lsn_id'], ['lsn.lsn_id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('lsn_port_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('lsn_port') + op.drop_table('lsn') diff --git a/neutron/plugins/nicira/NeutronPlugin.py b/neutron/plugins/nicira/NeutronPlugin.py index ded70319f..7e82ae475 100644 --- a/neutron/plugins/nicira/NeutronPlugin.py +++ b/neutron/plugins/nicira/NeutronPlugin.py @@ -120,10 +120,8 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin, functionality using NVP. 
""" - supported_extension_aliases = ["agent", - "allowed-address-pairs", + supported_extension_aliases = ["allowed-address-pairs", "binding", - "dhcp_agent_scheduler", "dist-router", "ext-gw-mode", "extraroute", diff --git a/neutron/plugins/nicira/common/config.py b/neutron/plugins/nicira/common/config.py index 41c3c0679..9f1b73534 100644 --- a/neutron/plugins/nicira/common/config.py +++ b/neutron/plugins/nicira/common/config.py @@ -19,9 +19,8 @@ from oslo.config import cfg class AgentModes: AGENT = 'agent' - # TODO(armando-migliaccio): support to be added, maybe we could add a - # mixed mode to support no-downtime migrations? AGENTLESS = 'agentless' + COMBINED = 'combined' class MetadataModes: diff --git a/neutron/plugins/nicira/common/exceptions.py b/neutron/plugins/nicira/common/exceptions.py index 14add6201..d4b67cc4f 100644 --- a/neutron/plugins/nicira/common/exceptions.py +++ b/neutron/plugins/nicira/common/exceptions.py @@ -105,5 +105,9 @@ class LsnPortNotFound(q_exc.NotFound): 'and %(entity)s %(entity_id)s')) +class LsnMigrationConflict(q_exc.Conflict): + message = _("Unable to migrate network '%(net_id)s' to LSN: %(reason)s") + + class LsnConfigurationConflict(NvpPluginException): message = _("Configuration conflict on Logical Service Node %(lsn_id)s") diff --git a/neutron/plugins/nicira/dbexts/lsn_db.py b/neutron/plugins/nicira/dbexts/lsn_db.py new file mode 100644 index 000000000..6cd6b7ca3 --- /dev/null +++ b/neutron/plugins/nicira/dbexts/lsn_db.py @@ -0,0 +1,130 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from sqlalchemy import Column +from sqlalchemy import ForeignKey +from sqlalchemy import orm +from sqlalchemy import String + +from neutron.db import models_v2 +from neutron.openstack.common.db import exception as d_exc +from neutron.openstack.common import log as logging +from neutron.plugins.nicira.common import exceptions as p_exc + + +LOG = logging.getLogger(__name__) + + +class LsnPort(models_v2.model_base.BASEV2): + + __tablename__ = 'lsn_port' + + lsn_port_id = Column(String(36), primary_key=True) + + lsn_id = Column(String(36), ForeignKey('lsn.lsn_id', ondelete="CASCADE")) + sub_id = Column(String(36), nullable=False, unique=True) + mac_addr = Column(String(32), nullable=False, unique=True) + + def __init__(self, lsn_port_id, subnet_id, mac_address, lsn_id): + self.lsn_port_id = lsn_port_id + self.lsn_id = lsn_id + self.sub_id = subnet_id + self.mac_addr = mac_address + + +class Lsn(models_v2.model_base.BASEV2): + __tablename__ = 'lsn' + + lsn_id = Column(String(36), primary_key=True) + net_id = Column(String(36), nullable=False) + + def __init__(self, net_id, lsn_id): + self.net_id = net_id + self.lsn_id = lsn_id + + +def lsn_add(context, network_id, lsn_id): + """Add Logical Service Node information to persistent datastore.""" + with context.session.begin(subtransactions=True): + lsn = Lsn(network_id, lsn_id) + context.session.add(lsn) + + +def lsn_remove(context, lsn_id): + """Remove Logical Service Node information from datastore given its id.""" + with context.session.begin(subtransactions=True): + context.session.query(Lsn).filter_by(lsn_id=lsn_id).delete() + + +def lsn_remove_for_network(context, network_id): + """Remove information about the Logical Service Node given its network.""" + with context.session.begin(subtransactions=True): + context.session.query(Lsn).filter_by(net_id=network_id).delete() + + +def lsn_get_for_network(context, network_id, raise_on_err=True): + """Retrieve LSN information given its network id.""" + query = context.session.query(Lsn) + try: + return query.filter_by(net_id=network_id).one() + except (orm.exc.NoResultFound, d_exc.DBError): + logger = raise_on_err and LOG.error or LOG.warn + logger(_('Unable to find Logical Service Node for ' + 'network %s'), network_id) + if raise_on_err: + raise p_exc.LsnNotFound(entity='network', + entity_id=network_id) + + +def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id): + """Add Logical Service Node Port information to persistent datastore.""" + with context.session.begin(subtransactions=True): + lsn_port = LsnPort(lsn_port_id, subnet_id, mac, lsn_id) + context.session.add(lsn_port) + + +def lsn_port_get_for_subnet(context, subnet_id, raise_on_err=True): + """Return Logical Service Node Port information given its subnet id.""" + with context.session.begin(subtransactions=True): + try: + return (context.session.query(LsnPort). + filter_by(sub_id=subnet_id).one()) + except (orm.exc.NoResultFound, d_exc.DBError): + if raise_on_err: + raise p_exc.LsnPortNotFound(lsn_id=None, + entity='subnet', + entity_id=subnet_id) + + +def lsn_port_get_for_mac(context, mac_address, raise_on_err=True): + """Return Logical Service Node Port information given its mac address.""" + with context.session.begin(subtransactions=True): + try: + return (context.session.query(LsnPort). 
+ filter_by(mac_addr=mac_address).one()) + except (orm.exc.NoResultFound, d_exc.DBError): + if raise_on_err: + raise p_exc.LsnPortNotFound(lsn_id=None, + entity='mac', + entity_id=mac_address) + + +def lsn_port_remove(context, lsn_port_id): + """Remove Logical Service Node port from the given Logical Service Node.""" + with context.session.begin(subtransactions=True): + (context.session.query(LsnPort). + filter_by(lsn_port_id=lsn_port_id).delete()) diff --git a/neutron/plugins/nicira/dhcp_meta/combined.py b/neutron/plugins/nicira/dhcp_meta/combined.py new file mode 100644 index 000000000..3e59a202b --- /dev/null +++ b/neutron/plugins/nicira/dhcp_meta/combined.py @@ -0,0 +1,95 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.common import constants as const +from neutron.common import topics +from neutron.plugins.nicira.dhcp_meta import nsx as nsx_svc +from neutron.plugins.nicira.dhcp_meta import rpc as nsx_rpc + + +class DhcpAgentNotifyAPI(dhcp_rpc_agent_api.DhcpAgentNotifyAPI): + + def __init__(self, plugin, manager): + super(DhcpAgentNotifyAPI, self).__init__(topic=topics.DHCP_AGENT) + self.agentless_notifier = nsx_svc.DhcpAgentNotifyAPI(plugin, manager) + + def notify(self, context, data, methodname): + [resource, action, _e] = methodname.split('.') + lsn_manager = self.agentless_notifier.plugin.lsn_manager + plugin = self.agentless_notifier.plugin + if resource == 'network': + net_id = data['network']['id'] + elif resource in ['port', 'subnet']: + net_id = data[resource]['network_id'] + else: + # no valid resource + return + lsn_exists = lsn_manager.lsn_exists(context, net_id) + treat_dhcp_owner_specially = False + if lsn_exists: + # if lsn exists, the network is one created with the new model + if (resource == 'subnet' and action == 'create' and + const.DEVICE_OWNER_DHCP not in plugin.port_special_owners): + # network/subnet provisioned in the new model have a plain + # nsx lswitch port, no vif attachment + plugin.port_special_owners.append(const.DEVICE_OWNER_DHCP) + treat_dhcp_owner_specially = True + if (resource == 'port' and action == 'update' or + resource == 'subnet'): + self.agentless_notifier.notify(context, data, methodname) + elif not lsn_exists and resource in ['port', 'subnet']: + # call notifier for the agent-based mode + super(DhcpAgentNotifyAPI, self).notify(context, data, methodname) + if treat_dhcp_owner_specially: + # if subnets belong to networks created with the old model + # dhcp port does not need to be special cased, so put things + # back, since they were modified + plugin.port_special_owners.remove(const.DEVICE_OWNER_DHCP) + + +def handle_network_dhcp_access(plugin, context, network, action): + nsx_svc.handle_network_dhcp_access(plugin, context, network, action) + + +def handle_port_dhcp_access(plugin, context, port, action): + if plugin.lsn_manager.lsn_exists(context, port['network_id']): + 
nsx_svc.handle_port_dhcp_access(plugin, context, port, action) + else: + nsx_rpc.handle_port_dhcp_access(plugin, context, port, action) + + +def handle_port_metadata_access(plugin, context, port, is_delete=False): + if plugin.lsn_manager.lsn_exists(context, port['network_id']): + nsx_svc.handle_port_metadata_access(plugin, context, port, is_delete) + else: + nsx_rpc.handle_port_metadata_access(plugin, context, port, is_delete) + + +def handle_router_metadata_access(plugin, context, router_id, interface=None): + if interface: + subnet = plugin.get_subnet(context, interface['subnet_id']) + network_id = subnet['network_id'] + if plugin.lsn_manager.lsn_exists(context, network_id): + nsx_svc.handle_router_metadata_access( + plugin, context, router_id, interface) + else: + nsx_rpc.handle_router_metadata_access( + plugin, context, router_id, interface) + else: + nsx_rpc.handle_router_metadata_access( + plugin, context, router_id, interface) diff --git a/neutron/plugins/nicira/dhcp_meta/constants.py b/neutron/plugins/nicira/dhcp_meta/constants.py new file mode 100644 index 000000000..1e9476a5b --- /dev/null +++ b/neutron/plugins/nicira/dhcp_meta/constants.py @@ -0,0 +1,28 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +from neutron.common import constants as const +from neutron.db import l3_db + +# A unique MAC to quickly identify the LSN port used for metadata services +# when dhcp on the subnet is off. Inspired by leet-speak for 'metadata'. +METADATA_MAC = "fa:15:73:74:d4:74" +METADATA_PORT_ID = 'metadata:id' +METADATA_PORT_NAME = 'metadata:name' +METADATA_DEVICE_ID = 'metadata:device' +SPECIAL_OWNERS = (const.DEVICE_OWNER_DHCP, + const.DEVICE_OWNER_ROUTER_GW, + l3_db.DEVICE_OWNER_ROUTER_INTF) diff --git a/neutron/plugins/nicira/dhcp_meta/lsnmanager.py b/neutron/plugins/nicira/dhcp_meta/lsnmanager.py new file mode 100644 index 000000000..4c2bd3480 --- /dev/null +++ b/neutron/plugins/nicira/dhcp_meta/lsnmanager.py @@ -0,0 +1,449 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from oslo.config import cfg + +from neutron.common import exceptions as n_exc +from neutron.openstack.common.db import exception as db_exc +from neutron.openstack.common import log as logging +from neutron.plugins.nicira.common import exceptions as p_exc +from neutron.plugins.nicira.dbexts import lsn_db +from neutron.plugins.nicira.dhcp_meta import constants as const +from neutron.plugins.nicira.nsxlib import lsn as lsn_api +from neutron.plugins.nicira import nvplib as nsxlib + +LOG = logging.getLogger(__name__) + +META_CONF = 'metadata-proxy' +DHCP_CONF = 'dhcp' + + +lsn_opts = [ + cfg.BoolOpt('sync_on_missing_data', default=False, + help=_('Pull LSN information from NSX in case it is missing ' + 'from the local data store. This is useful to rebuild ' + 'the local store in case of server recovery.')) +] + + +def register_lsn_opts(config): + config.CONF.register_opts(lsn_opts, "NSX_LSN") + + +class LsnManager(object): + """Manage LSN entities associated with networks.""" + + def __init__(self, plugin): + self.plugin = plugin + + @property + def cluster(self): + return self.plugin.cluster + + def lsn_exists(self, context, network_id): + """Return True if a Logical Service Node exists for the network.""" + return self.lsn_get( + context, network_id, raise_on_err=False) is not None + + def lsn_get(self, context, network_id, raise_on_err=True): + """Retrieve the LSN id associated to the network.""" + try: + return lsn_api.lsn_for_network_get(self.cluster, network_id) + except (n_exc.NotFound, nsxlib.NvpApiClient.NvpApiException): + logger = raise_on_err and LOG.error or LOG.warn + logger(_('Unable to find Logical Service Node for ' + 'network %s'), network_id) + if raise_on_err: + raise p_exc.LsnNotFound(entity='network', + entity_id=network_id) + + def lsn_create(self, context, network_id): + """Create a LSN associated to the network.""" + try: + return lsn_api.lsn_for_network_create(self.cluster, network_id) + except nsxlib.NvpApiClient.NvpApiException: + err_msg = _('Unable to create LSN for network %s') % network_id + raise p_exc.NvpPluginException(err_msg=err_msg) + + def lsn_delete(self, context, lsn_id): + """Delete a LSN given its id.""" + try: + lsn_api.lsn_delete(self.cluster, lsn_id) + except (n_exc.NotFound, nsxlib.NvpApiClient.NvpApiException): + LOG.warn(_('Unable to delete Logical Service Node %s'), lsn_id) + + def lsn_delete_by_network(self, context, network_id): + """Delete a LSN associated to the network.""" + lsn_id = self.lsn_get(context, network_id, raise_on_err=False) + if lsn_id: + self.lsn_delete(context, lsn_id) + + def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): + """Retrieve LSN and LSN port for the network and the subnet.""" + lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) + if lsn_id: + try: + lsn_port_id = lsn_api.lsn_port_by_subnet_get( + self.cluster, lsn_id, subnet_id) + except (n_exc.NotFound, nsxlib.NvpApiClient.NvpApiException): + logger = raise_on_err and LOG.error or LOG.warn + logger(_('Unable to find Logical Service Node Port for ' + 'LSN %(lsn_id)s and subnet %(subnet_id)s') + % {'lsn_id': lsn_id, 'subnet_id': subnet_id}) + if raise_on_err: + raise p_exc.LsnPortNotFound(lsn_id=lsn_id, + entity='subnet', + entity_id=subnet_id) + return (lsn_id, None) + else: + return (lsn_id, lsn_port_id) + else: + return (None, None) + + def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): + """Retrieve LSN and LSN port given network and mac address.""" + lsn_id = self.lsn_get(context, 
network_id, raise_on_err=raise_on_err) + if lsn_id: + try: + lsn_port_id = lsn_api.lsn_port_by_mac_get( + self.cluster, lsn_id, mac) + except (n_exc.NotFound, nsxlib.NvpApiClient.NvpApiException): + logger = raise_on_err and LOG.error or LOG.warn + logger(_('Unable to find Logical Service Node Port for ' + 'LSN %(lsn_id)s and mac address %(mac)s') + % {'lsn_id': lsn_id, 'mac': mac}) + if raise_on_err: + raise p_exc.LsnPortNotFound(lsn_id=lsn_id, + entity='MAC', + entity_id=mac) + return (lsn_id, None) + else: + return (lsn_id, lsn_port_id) + else: + return (None, None) + + def lsn_port_create(self, context, lsn_id, subnet_info): + """Create and return LSN port for associated subnet.""" + try: + return lsn_api.lsn_port_create(self.cluster, lsn_id, subnet_info) + except n_exc.NotFound: + raise p_exc.LsnNotFound(entity='', entity_id=lsn_id) + except nsxlib.NvpApiClient.NvpApiException: + err_msg = _('Unable to create port for LSN %s') % lsn_id + raise p_exc.NvpPluginException(err_msg=err_msg) + + def lsn_port_delete(self, context, lsn_id, lsn_port_id): + """Delete a LSN port from the Logical Service Node.""" + try: + lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) + except (n_exc.NotFound, nsxlib.NvpApiClient.NvpApiException): + LOG.warn(_('Unable to delete LSN Port %s'), lsn_port_id) + + def lsn_port_dispose(self, context, network_id, mac_address): + """Delete a LSN port given the network and the mac address.""" + lsn_id, lsn_port_id = self.lsn_port_get_by_mac( + context, network_id, mac_address, raise_on_err=False) + if lsn_port_id: + self.lsn_port_delete(context, lsn_id, lsn_port_id) + if mac_address == const.METADATA_MAC: + try: + lswitch_port_id = nsxlib.get_port_by_neutron_tag( + self.cluster, network_id, + const.METADATA_PORT_ID)['uuid'] + nsxlib.delete_port( + self.cluster, network_id, lswitch_port_id) + except (n_exc.PortNotFoundOnNetwork, + nsxlib.NvpApiClient.NvpApiException): + LOG.warn(_("Metadata port not found while attempting " + "to delete it from network %s"), network_id) + else: + LOG.warn(_("Unable to find Logical Services Node " + "Port with MAC %s"), mac_address) + + def lsn_port_dhcp_setup( + self, context, network_id, port_id, port_data, subnet_config=None): + """Connect network to LSN via specified port and port_data.""" + try: + lsn_id = None + lswitch_port_id = nsxlib.get_port_by_neutron_tag( + self.cluster, network_id, port_id)['uuid'] + lsn_id = self.lsn_get(context, network_id) + lsn_port_id = self.lsn_port_create(context, lsn_id, port_data) + except (n_exc.NotFound, p_exc.NvpPluginException): + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=port_id) + else: + try: + lsn_api.lsn_port_plug_network( + self.cluster, lsn_id, lsn_port_id, lswitch_port_id) + except p_exc.LsnConfigurationConflict: + self.lsn_port_delete(context, lsn_id, lsn_port_id) + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=port_id) + if subnet_config: + self.lsn_port_dhcp_configure( + context, lsn_id, lsn_port_id, subnet_config) + else: + return (lsn_id, lsn_port_id) + + def lsn_port_metadata_setup(self, context, lsn_id, subnet): + """Connect subnet to specified LSN.""" + data = { + "mac_address": const.METADATA_MAC, + "ip_address": subnet['cidr'], + "subnet_id": subnet['id'] + } + network_id = subnet['network_id'] + tenant_id = subnet['tenant_id'] + lswitch_port_id = None + try: + lswitch_port_id = nsxlib.create_lport( + self.cluster, network_id, tenant_id, + const.METADATA_PORT_ID, const.METADATA_PORT_NAME, + 
const.METADATA_DEVICE_ID, True)['uuid'] + lsn_port_id = self.lsn_port_create(self.cluster, lsn_id, data) + except (n_exc.NotFound, p_exc.NvpPluginException, + nsxlib.NvpApiClient.NvpApiException): + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=lswitch_port_id) + else: + try: + lsn_api.lsn_port_plug_network( + self.cluster, lsn_id, lsn_port_id, lswitch_port_id) + except p_exc.LsnConfigurationConflict: + self.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) + nsxlib.delete_port(self.cluster, network_id, lswitch_port_id) + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) + + def lsn_port_dhcp_configure(self, context, lsn_id, lsn_port_id, subnet): + """Enable/disable dhcp services with the given config options.""" + is_enabled = subnet["enable_dhcp"] + dhcp_options = { + "domain_name": cfg.CONF.NSX_DHCP.domain_name, + "default_lease_time": cfg.CONF.NSX_DHCP.default_lease_time, + } + dns_servers = cfg.CONF.NSX_DHCP.extra_domain_name_servers or [] + dns_servers.extend(subnet["dns_nameservers"]) + if subnet['gateway_ip']: + dhcp_options["routers"] = subnet["gateway_ip"] + if dns_servers: + dhcp_options["domain_name_servers"] = ",".join(dns_servers) + if subnet["host_routes"]: + dhcp_options["classless_static_routes"] = ( + ",".join(subnet["host_routes"]) + ) + try: + lsn_api.lsn_port_dhcp_configure( + self.cluster, lsn_id, lsn_port_id, is_enabled, dhcp_options) + except (n_exc.NotFound, nsxlib.NvpApiClient.NvpApiException): + err_msg = (_('Unable to configure dhcp for Logical Service ' + 'Node %(lsn_id)s and port %(lsn_port_id)s') + % {'lsn_id': lsn_id, 'lsn_port_id': lsn_port_id}) + LOG.error(err_msg) + raise p_exc.NvpPluginException(err_msg=err_msg) + + def lsn_metadata_configure(self, context, subnet_id, is_enabled): + """Configure metadata service for the specified subnet.""" + subnet = self.plugin.get_subnet(context, subnet_id) + network_id = subnet['network_id'] + meta_conf = cfg.CONF.NSX_METADATA + metadata_options = { + 'metadata_server_ip': meta_conf.metadata_server_address, + 'metadata_server_port': meta_conf.metadata_server_port, + 'metadata_proxy_shared_secret': meta_conf.metadata_shared_secret + } + try: + lsn_id = self.lsn_get(context, network_id) + lsn_api.lsn_metadata_configure( + self.cluster, lsn_id, is_enabled, metadata_options) + except (p_exc.LsnNotFound, nsxlib.NvpApiClient.NvpApiException): + err_msg = (_('Unable to configure metadata ' + 'for subnet %s') % subnet_id) + LOG.error(err_msg) + raise p_exc.NvpPluginException(err_msg=err_msg) + if is_enabled: + try: + # test that the lsn port exists + self.lsn_port_get(context, network_id, subnet_id) + except p_exc.LsnPortNotFound: + # this might happen if subnet had dhcp off when created + # so create one, and wire it + self.lsn_port_metadata_setup(context, lsn_id, subnet) + else: + self.lsn_port_dispose(context, network_id, const.METADATA_MAC) + + def _lsn_port_host_conf(self, context, network_id, subnet_id, data, hdlr): + lsn_id = None + lsn_port_id = None + try: + lsn_id, lsn_port_id = self.lsn_port_get( + context, network_id, subnet_id) + hdlr(self.cluster, lsn_id, lsn_port_id, data) + except (n_exc.NotFound, nsxlib.NvpApiClient.NvpApiException): + LOG.error(_('Error while configuring LSN ' + 'port %s'), lsn_port_id) + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) + + def lsn_port_dhcp_host_add(self, context, network_id, subnet_id, host): + """Add dhcp host entry to LSN port configuration.""" + 
self._lsn_port_host_conf(context, network_id, subnet_id, host, + lsn_api.lsn_port_dhcp_host_add) + + def lsn_port_dhcp_host_remove(self, context, network_id, subnet_id, host): + """Remove dhcp host entry from LSN port configuration.""" + self._lsn_port_host_conf(context, network_id, subnet_id, host, + lsn_api.lsn_port_dhcp_host_remove) + + def lsn_port_meta_host_add(self, context, network_id, subnet_id, host): + """Add dhcp host entry to LSN port configuration.""" + self._lsn_port_host_conf(context, network_id, subnet_id, host, + lsn_api.lsn_port_metadata_host_add) + + def lsn_port_meta_host_remove(self, context, network_id, subnet_id, host): + """Remove dhcp host entry from LSN port configuration.""" + self._lsn_port_host_conf(context, network_id, subnet_id, host, + lsn_api.lsn_port_metadata_host_remove) + + def lsn_port_update( + self, context, network_id, subnet_id, dhcp=None, meta=None): + """Update the specified configuration for the LSN port.""" + if not dhcp and not meta: + return + try: + lsn_id, lsn_port_id = self.lsn_port_get( + context, network_id, subnet_id, raise_on_err=False) + if dhcp and lsn_id and lsn_port_id: + lsn_api.lsn_port_host_entries_update( + self.cluster, lsn_id, lsn_port_id, DHCP_CONF, dhcp) + if meta and lsn_id and lsn_port_id: + lsn_api.lsn_port_host_entries_update( + self.cluster, lsn_id, lsn_port_id, META_CONF, meta) + except nsxlib.NvpApiClient.NvpApiException: + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) + + +class PersistentLsnManager(LsnManager): + """Add local persistent state to LSN Manager.""" + + def __init__(self, plugin): + super(PersistentLsnManager, self).__init__(plugin) + self.sync_on_missing = cfg.CONF.NSX_LSN.sync_on_missing_data + + def lsn_get(self, context, network_id, raise_on_err=True): + try: + obj = lsn_db.lsn_get_for_network( + context, network_id, raise_on_err=raise_on_err) + return obj.lsn_id if obj else None + except p_exc.LsnNotFound: + if self.sync_on_missing: + lsn_id = super(PersistentLsnManager, self).lsn_get( + context, network_id, raise_on_err=raise_on_err) + self.lsn_save(context, network_id, lsn_id) + return lsn_id + if raise_on_err: + raise + + def lsn_save(self, context, network_id, lsn_id): + """Save LSN-Network mapping to the DB.""" + try: + lsn_db.lsn_add(context, network_id, lsn_id) + except db_exc.DBError: + err_msg = _('Unable to save LSN for network %s') % network_id + LOG.exception(err_msg) + raise p_exc.NvpPluginException(err_msg=err_msg) + + def lsn_create(self, context, network_id): + lsn_id = super(PersistentLsnManager, + self).lsn_create(context, network_id) + try: + self.lsn_save(context, network_id, lsn_id) + except p_exc.NvpPluginException: + super(PersistentLsnManager, self).lsn_delete(context, lsn_id) + raise + return lsn_id + + def lsn_delete(self, context, lsn_id): + lsn_db.lsn_remove(context, lsn_id) + super(PersistentLsnManager, self).lsn_delete(context, lsn_id) + + def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): + try: + obj = lsn_db.lsn_port_get_for_subnet( + context, subnet_id, raise_on_err=raise_on_err) + return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None) + except p_exc.LsnPortNotFound: + if self.sync_on_missing: + lsn_id, lsn_port_id = ( + super(PersistentLsnManager, self).lsn_port_get( + context, network_id, subnet_id, + raise_on_err=raise_on_err)) + mac_addr = lsn_api.lsn_port_info_get( + self.cluster, lsn_id, lsn_port_id)['mac_address'] + self.lsn_port_save( + context, lsn_port_id, subnet_id, mac_addr, 
lsn_id) + return (lsn_id, lsn_port_id) + if raise_on_err: + raise + + def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): + try: + obj = lsn_db.lsn_port_get_for_mac( + context, mac, raise_on_err=raise_on_err) + return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None) + except p_exc.LsnPortNotFound: + if self.sync_on_missing: + lsn_id, lsn_port_id = ( + super(PersistentLsnManager, self).lsn_port_get_by_mac( + context, network_id, mac, + raise_on_err=raise_on_err)) + subnet_id = lsn_api.lsn_port_info_get( + self.cluster, lsn_id, lsn_port_id).get('subnet_id') + self.lsn_port_save( + context, lsn_port_id, subnet_id, mac, lsn_id) + return (lsn_id, lsn_port_id) + if raise_on_err: + raise + + def lsn_port_save(self, context, lsn_port_id, subnet_id, mac_addr, lsn_id): + """Save LSN Port information to the DB.""" + try: + lsn_db.lsn_port_add_for_lsn( + context, lsn_port_id, subnet_id, mac_addr, lsn_id) + except db_exc.DBError: + err_msg = _('Unable to save LSN port for subnet %s') % subnet_id + LOG.exception(err_msg) + raise p_exc.NvpPluginException(err_msg=err_msg) + + def lsn_port_create(self, context, lsn_id, subnet_info): + lsn_port_id = super(PersistentLsnManager, + self).lsn_port_create(context, lsn_id, subnet_info) + try: + self.lsn_port_save(context, lsn_port_id, subnet_info['subnet_id'], + subnet_info['mac_address'], lsn_id) + except p_exc.NvpPluginException: + super(PersistentLsnManager, self).lsn_port_delete( + context, lsn_id, lsn_port_id) + raise + return lsn_port_id + + def lsn_port_delete(self, context, lsn_id, lsn_port_id): + lsn_db.lsn_port_remove(context, lsn_port_id) + super(PersistentLsnManager, self).lsn_port_delete( + context, lsn_id, lsn_port_id) diff --git a/neutron/plugins/nicira/dhcp_meta/migration.py b/neutron/plugins/nicira/dhcp_meta/migration.py new file mode 100644 index 000000000..17f0b0990 --- /dev/null +++ b/neutron/plugins/nicira/dhcp_meta/migration.py @@ -0,0 +1,181 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from neutron.common import constants as const +from neutron.common import exceptions as n_exc +from neutron.extensions import external_net +from neutron.openstack.common import log as logging +from neutron.plugins.nicira.common import exceptions as p_exc +from neutron.plugins.nicira.dhcp_meta import nsx +from neutron.plugins.nicira.dhcp_meta import rpc + +LOG = logging.getLogger(__name__) + + +class DhcpMetadataBuilder(object): + + def __init__(self, plugin, agent_notifier): + self.plugin = plugin + self.notifier = agent_notifier + + def dhcp_agent_get_all(self, context, network_id): + """Return the agents managing the network.""" + return self.plugin.list_dhcp_agents_hosting_network( + context, network_id)['agents'] + + def dhcp_port_get_all(self, context, network_id): + """Return the dhcp ports allocated for the network.""" + filters = { + 'network_id': [network_id], + 'device_owner': [const.DEVICE_OWNER_DHCP] + } + return self.plugin.get_ports(context, filters=filters) + + def router_id_get(self, context, subnet=None): + """Return the router and interface used for the subnet.""" + if not subnet: + return + network_id = subnet['network_id'] + filters = { + 'network_id': [network_id], + 'device_owner': [const.DEVICE_OWNER_ROUTER_INTF] + } + ports = self.plugin.get_ports(context, filters=filters) + for port in ports: + if port['fixed_ips'][0]['subnet_id'] == subnet['id']: + return port['device_id'] + else: + raise n_exc.NotFound() + + def metadata_deallocate(self, context, router_id, subnet_id): + """Deallocate metadata services for the subnet.""" + interface = {'subnet_id': subnet_id} + self.plugin.remove_router_interface(context, router_id, interface) + + def metadata_allocate(self, context, router_id, subnet_id): + """Allocate metadata resources for the subnet via the router.""" + interface = {'subnet_id': subnet_id} + self.plugin.add_router_interface(context, router_id, interface) + + def dhcp_deallocate(self, context, network_id, agents, ports): + """Deallocate dhcp resources for the network.""" + for agent in agents: + self.plugin.remove_network_from_dhcp_agent( + context, agent['id'], network_id) + for port in ports: + try: + self.plugin.delete_port(context, port['id']) + except n_exc.PortNotFound: + LOG.error(_('Port %s is already gone'), port['id']) + + def dhcp_allocate(self, context, network_id, subnet): + """Allocate dhcp resources for the subnet.""" + # Create LSN resources + network_data = {'id': network_id} + nsx.handle_network_dhcp_access(self.plugin, context, + network_data, 'create_network') + if subnet: + subnet_data = {'subnet': subnet} + self.notifier.notify(context, subnet_data, 'subnet.create.end') + # Get DHCP host and metadata entries created for the LSN + port = { + 'network_id': network_id, + 'fixed_ips': [{'subnet_id': subnet['id']}] + } + self.notifier.notify(context, {'port': port}, 'port.update.end') + + +class MigrationManager(object): + + def __init__(self, plugin, lsn_manager, agent_notifier): + self.plugin = plugin + self.manager = lsn_manager + self.builder = DhcpMetadataBuilder(plugin, agent_notifier) + + def validate(self, context, network_id): + """Validate and return subnet's dhcp info for migration.""" + network = self.plugin.get_network(context, network_id) + + if self.manager.lsn_exists(context, network_id): + reason = _("LSN already exist") + raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason) + + if network[external_net.EXTERNAL]: + reason = _("Cannot migrate an external network") + raise n_exc.BadRequest(resource='network', 
msg=reason) + + filters = {'network_id': [network_id]} + subnets = self.plugin.get_subnets(context, filters=filters) + count = len(subnets) + if count == 0: + return None + elif count == 1 and subnets[0]['cidr'] == rpc.METADATA_SUBNET_CIDR: + reason = _("Cannot migrate a 'metadata' network") + raise n_exc.BadRequest(resource='network', msg=reason) + elif count > 1: + reason = _("Unable to support multiple subnets per network") + raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason) + else: + return subnets[0] + + def migrate(self, context, network_id, subnet=None): + """Migrate subnet resources to LSN.""" + router_id = self.builder.router_id_get(context, subnet) + if router_id and subnet: + # Deallocate resources taken for the router, if any + self.builder.metadata_deallocate(context, router_id, subnet['id']) + if subnet: + # Deallocate reources taken for the agent, if any + agents = self.builder.dhcp_agent_get_all(context, network_id) + ports = self.builder.dhcp_port_get_all(context, network_id) + self.builder.dhcp_deallocate(context, network_id, agents, ports) + # (re)create the configuration for LSN + self.builder.dhcp_allocate(context, network_id, subnet) + if router_id and subnet: + # Allocate resources taken for the router, if any + self.builder.metadata_allocate(context, router_id, subnet['id']) + + def report(self, context, network_id, subnet_id=None): + """Return a report of the dhcp and metadata resources in use.""" + if subnet_id: + lsn_id, lsn_port_id = self.manager.lsn_port_get( + context, network_id, subnet_id, raise_on_err=False) + else: + subnet = self.validate(context, network_id) + if subnet: + lsn_id, lsn_port_id = self.manager.lsn_port_get( + context, network_id, subnet['id'], raise_on_err=False) + else: + lsn_id = self.manager.lsn_get(context, network_id, + raise_on_err=False) + lsn_port_id = None + if lsn_id: + ports = [lsn_port_id] if lsn_port_id else [] + report = { + 'type': 'lsn', + 'services': [lsn_id], + 'ports': ports + } + else: + agents = self.builder.dhcp_agent_get_all(context, network_id) + ports = self.builder.dhcp_port_get_all(context, network_id) + report = { + 'type': 'agent', + 'services': [a['id'] for a in agents], + 'ports': [p['id'] for p in ports] + } + return report diff --git a/neutron/plugins/nicira/dhcp_meta/nsx.py b/neutron/plugins/nicira/dhcp_meta/nsx.py new file mode 100644 index 000000000..f12671493 --- /dev/null +++ b/neutron/plugins/nicira/dhcp_meta/nsx.py @@ -0,0 +1,317 @@ +# Copyright 2013 VMware, Inc. + +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from oslo.config import cfg + +from neutron.api.v2 import attributes as attr +from neutron.common import constants as const +from neutron.common import exceptions as n_exc +from neutron.db import db_base_plugin_v2 +from neutron.db import l3_db +from neutron.extensions import external_net +from neutron.openstack.common import log as logging +from neutron.plugins.nicira.common import exceptions as p_exc +from neutron.plugins.nicira.dhcp_meta import constants as d_const +from neutron.plugins.nicira.nsxlib import lsn as lsn_api + +LOG = logging.getLogger(__name__) + + +dhcp_opts = [ + cfg.ListOpt('extra_domain_name_servers', + deprecated_group='NVP_DHCP', + default=[], + help=_('Comma separated list of additional ' + 'domain name servers')), + cfg.StrOpt('domain_name', + deprecated_group='NVP_DHCP', + default='openstacklocal', + help=_('Domain to use for building the hostnames')), + cfg.IntOpt('default_lease_time', default=43200, + deprecated_group='NVP_DHCP', + help=_("Default DHCP lease time")), +] + + +metadata_opts = [ + cfg.StrOpt('metadata_server_address', + deprecated_group='NVP_METADATA', + default='127.0.0.1', + help=_("IP address used by Metadata server.")), + cfg.IntOpt('metadata_server_port', + deprecated_group='NVP_METADATA', + default=8775, + help=_("TCP Port used by Metadata server.")), + cfg.StrOpt('metadata_shared_secret', + deprecated_group='NVP_METADATA', + default='', + help=_('Shared secret to sign instance-id request'), + secret=True) +] + + +def register_dhcp_opts(config): + config.CONF.register_opts(dhcp_opts, group="NSX_DHCP") + + +def register_metadata_opts(config): + config.CONF.register_opts(metadata_opts, group="NSX_METADATA") + + +class DhcpAgentNotifyAPI(object): + + def __init__(self, plugin, lsn_manager): + self.plugin = plugin + self.lsn_manager = lsn_manager + self._handle_subnet_dhcp_access = {'create': self._subnet_create, + 'update': self._subnet_update, + 'delete': self._subnet_delete} + + def notify(self, context, data, methodname): + [resource, action, _e] = methodname.split('.') + if resource == 'subnet': + self._handle_subnet_dhcp_access[action](context, data['subnet']) + elif resource == 'port' and action == 'update': + self._port_update(context, data['port']) + + def _port_update(self, context, port): + # With no fixed IP's there's nothing that can be updated + if not port["fixed_ips"]: + return + network_id = port['network_id'] + subnet_id = port["fixed_ips"][0]['subnet_id'] + filters = {'network_id': [network_id]} + # Because NVP does not support updating a single host entry we + # got to build the whole list from scratch and update in bulk + ports = self.plugin.get_ports(context, filters) + if not ports: + return + dhcp_conf = [ + {'mac_address': p['mac_address'], + 'ip_address': p["fixed_ips"][0]['ip_address']} + for p in ports if is_user_port(p) + ] + meta_conf = [ + {'instance_id': p['device_id'], + 'ip_address': p["fixed_ips"][0]['ip_address']} + for p in ports if is_user_port(p, check_dev_id=True) + ] + self.lsn_manager.lsn_port_update( + context, network_id, subnet_id, dhcp=dhcp_conf, meta=meta_conf) + + def _subnet_create(self, context, subnet, clean_on_err=True): + if subnet['enable_dhcp']: + network_id = subnet['network_id'] + # Create port for DHCP service + dhcp_port = { + "name": "", + "admin_state_up": True, + "device_id": "", + "device_owner": const.DEVICE_OWNER_DHCP, + "network_id": network_id, + "tenant_id": subnet["tenant_id"], + "mac_address": attr.ATTR_NOT_SPECIFIED, + "fixed_ips": [{"subnet_id": subnet['id']}] + } + try: 
+ # This will end up calling handle_port_dhcp_access + # down below as well as handle_port_metadata_access + self.plugin.create_port(context, {'port': dhcp_port}) + except p_exc.PortConfigurationError as e: + err_msg = (_("Error while creating subnet %(cidr)s for " + "network %(network)s. Please, contact " + "administrator") % + {"cidr": subnet["cidr"], + "network": network_id}) + LOG.error(err_msg) + db_base_plugin_v2.NeutronDbPluginV2.delete_port( + self.plugin, context, e.port_id) + if clean_on_err: + self.plugin.delete_subnet(context, subnet['id']) + raise n_exc.Conflict() + + def _subnet_update(self, context, subnet): + network_id = subnet['network_id'] + try: + lsn_id, lsn_port_id = self.lsn_manager.lsn_port_get( + context, network_id, subnet['id']) + self.lsn_manager.lsn_port_dhcp_configure( + context, lsn_id, lsn_port_id, subnet) + except p_exc.LsnPortNotFound: + # It's possible that the subnet was created with dhcp off; + # check if the subnet was uplinked onto a router, and if so + # remove the patch attachment between the metadata port and + # the lsn port, in favor on the one we'll be creating during + # _subnet_create + self.lsn_manager.lsn_port_dispose( + context, network_id, d_const.METADATA_MAC) + # also, check that a dhcp port exists first and provision it + # accordingly + filters = dict(network_id=[network_id], + device_owner=[const.DEVICE_OWNER_DHCP]) + ports = self.plugin.get_ports(context, filters=filters) + if ports: + handle_port_dhcp_access( + self.plugin, context, ports[0], 'create_port') + else: + self._subnet_create(context, subnet, clean_on_err=False) + + def _subnet_delete(self, context, subnet): + # FIXME(armando-migliaccio): it looks like that a subnet filter + # is ineffective; so filter by network for now. + network_id = subnet['network_id'] + filters = dict(network_id=[network_id], + device_owner=[const.DEVICE_OWNER_DHCP]) + # FIXME(armando-migliaccio): this may be race-y + ports = self.plugin.get_ports(context, filters=filters) + if ports: + # This will end up calling handle_port_dhcp_access + # down below as well as handle_port_metadata_access + self.plugin.delete_port(context, ports[0]['id']) + + +def is_user_port(p, check_dev_id=False): + usable = p['fixed_ips'] and p['device_owner'] not in d_const.SPECIAL_OWNERS + return usable if not check_dev_id else usable and p['device_id'] + + +def check_services_requirements(cluster): + ver = cluster.api_client.get_nvp_version() + # It sounds like 4.1 is the first one where DHCP in NSX + # will have the experimental feature + if ver.major >= 4 and ver.minor >= 1: + cluster_id = cfg.CONF.default_service_cluster_uuid + if not lsn_api.service_cluster_exists(cluster, cluster_id): + raise p_exc.ServiceClusterUnavailable(cluster_id=cluster_id) + else: + raise p_exc.NvpInvalidVersion(version=ver) + + +def handle_network_dhcp_access(plugin, context, network, action): + LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s") + % {"action": action, "resource": network}) + if action == 'create_network': + network_id = network['id'] + plugin.lsn_manager.lsn_create(context, network_id) + elif action == 'delete_network': + # NOTE(armando-migliaccio): on delete_network, network + # is just the network id + network_id = network + plugin.lsn_manager.lsn_delete_by_network(context, network_id) + LOG.info(_("Logical Services Node for network " + "%s configured successfully"), network_id) + + +def handle_port_dhcp_access(plugin, context, port, action): + LOG.info(_("Performing DHCP %(action)s for resource: 
%(resource)s") + % {"action": action, "resource": port}) + if port["device_owner"] == const.DEVICE_OWNER_DHCP: + network_id = port["network_id"] + if action == "create_port": + # at this point the port must have a subnet and a fixed ip + subnet_id = port["fixed_ips"][0]['subnet_id'] + subnet = plugin.get_subnet(context, subnet_id) + subnet_data = { + "mac_address": port["mac_address"], + "ip_address": subnet['cidr'], + "subnet_id": subnet['id'] + } + try: + plugin.lsn_manager.lsn_port_dhcp_setup( + context, network_id, port['id'], subnet_data, subnet) + except p_exc.PortConfigurationError: + err_msg = (_("Error while configuring DHCP for " + "port %s"), port['id']) + LOG.error(err_msg) + raise n_exc.NeutronException() + elif action == "delete_port": + plugin.lsn_manager.lsn_port_dispose(context, network_id, + port['mac_address']) + elif port["device_owner"] != const.DEVICE_OWNER_DHCP: + if port.get("fixed_ips"): + # do something only if there are IP's and dhcp is enabled + subnet_id = port["fixed_ips"][0]['subnet_id'] + if not plugin.get_subnet(context, subnet_id)['enable_dhcp']: + LOG.info(_("DHCP is disabled for subnet %s: nothing " + "to do"), subnet_id) + return + host_data = { + "mac_address": port["mac_address"], + "ip_address": port["fixed_ips"][0]['ip_address'] + } + network_id = port["network_id"] + if action == "create_port": + handler = plugin.lsn_manager.lsn_port_dhcp_host_add + elif action == "delete_port": + handler = plugin.lsn_manager.lsn_port_dhcp_host_remove + try: + handler(context, network_id, subnet_id, host_data) + except p_exc.PortConfigurationError: + if action == 'create_port': + db_base_plugin_v2.NeutronDbPluginV2.delete_port( + plugin, context, port['id']) + raise + LOG.info(_("DHCP for port %s configured successfully"), port['id']) + + +def handle_port_metadata_access(plugin, context, port, is_delete=False): + if is_user_port(port, check_dev_id=True): + network_id = port["network_id"] + network = plugin.get_network(context, network_id) + if network[external_net.EXTERNAL]: + LOG.info(_("Network %s is external: nothing to do"), network_id) + return + subnet_id = port["fixed_ips"][0]['subnet_id'] + host_data = { + "instance_id": port["device_id"], + "tenant_id": port["tenant_id"], + "ip_address": port["fixed_ips"][0]['ip_address'] + } + LOG.info(_("Configuring metadata entry for port %s"), port) + if not is_delete: + handler = plugin.lsn_manager.lsn_port_meta_host_add + else: + handler = plugin.lsn_manager.lsn_port_meta_host_remove + try: + handler(context, network_id, subnet_id, host_data) + except p_exc.PortConfigurationError: + if not is_delete: + db_base_plugin_v2.NeutronDbPluginV2.delete_port( + plugin, context, port['id']) + raise + LOG.info(_("Metadata for port %s configured successfully"), port['id']) + + +def handle_router_metadata_access(plugin, context, router_id, interface=None): + LOG.info(_("Handle metadata access via router: %(r)s and " + "interface %(i)s") % {'r': router_id, 'i': interface}) + if interface: + try: + plugin.get_port(context, interface['port_id']) + is_enabled = True + except n_exc.NotFound: + is_enabled = False + subnet_id = interface['subnet_id'] + try: + plugin.lsn_manager.lsn_metadata_configure( + context, subnet_id, is_enabled) + except p_exc.NvpPluginException: + if is_enabled: + l3_db.L3_NAT_db_mixin.remove_router_interface( + plugin, context, router_id, interface) + raise + LOG.info(_("Metadata for router %s handled successfully"), router_id) diff --git a/neutron/plugins/nicira/dhcp_meta/nvp.py 
b/neutron/plugins/nicira/dhcp_meta/nvp.py deleted file mode 100644 index 574a65218..000000000 --- a/neutron/plugins/nicira/dhcp_meta/nvp.py +++ /dev/null @@ -1,625 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo.config import cfg - -from neutron.api.v2 import attributes as attr -from neutron.common import constants as const -from neutron.common import exceptions as n_exc -from neutron.db import db_base_plugin_v2 -from neutron.db import l3_db -from neutron.extensions import external_net -from neutron.openstack.common import log as logging -from neutron.plugins.nicira.common import exceptions as p_exc -from neutron.plugins.nicira.nsxlib import lsn as lsn_api -from neutron.plugins.nicira import nvplib - - -LOG = logging.getLogger(__name__) -# A unique MAC to quickly identify the LSN port used for metadata services -# when dhcp on the subnet is off. Inspired by leet-speak for 'metadata'. -METADATA_MAC = "fa:15:73:74:d4:74" -METADATA_PORT_ID = 'metadata:id' -METADATA_PORT_NAME = 'metadata:name' -METADATA_DEVICE_ID = 'metadata:device' -META_CONF = 'metadata-proxy' -DHCP_CONF = 'dhcp' -SPECIAL_OWNERS = (const.DEVICE_OWNER_DHCP, - const.DEVICE_OWNER_ROUTER_GW, - l3_db.DEVICE_OWNER_ROUTER_INTF) - -dhcp_opts = [ - cfg.ListOpt('extra_domain_name_servers', - deprecated_group='NVP_DHCP', - default=[], - help=_('Comma separated list of additional ' - 'domain name servers')), - cfg.StrOpt('domain_name', - deprecated_group='NVP_DHCP', - default='openstacklocal', - help=_('Domain to use for building the hostnames')), - cfg.IntOpt('default_lease_time', default=43200, - deprecated_group='NVP_DHCP', - help=_("Default DHCP lease time")), -] - - -metadata_opts = [ - cfg.StrOpt('metadata_server_address', - deprecated_group='NVP_METADATA', - default='127.0.0.1', - help=_("IP address used by Metadata server.")), - cfg.IntOpt('metadata_server_port', - deprecated_group='NVP_METADATA', - default=8775, - help=_("TCP Port used by Metadata server.")), - cfg.StrOpt('metadata_shared_secret', - deprecated_group='NVP_METADATA', - default='', - help=_('Shared secret to sign instance-id request'), - secret=True) -] - - -def register_dhcp_opts(config): - config.CONF.register_opts(dhcp_opts, group="NSX_DHCP") - - -def register_metadata_opts(config): - config.CONF.register_opts(metadata_opts, group="NSX_METADATA") - - -class LsnManager(object): - """Manage LSN entities associated with networks.""" - - def __init__(self, plugin): - self.plugin = plugin - - @property - def cluster(self): - return self.plugin.cluster - - def lsn_get(self, context, network_id, raise_on_err=True): - """Retrieve the LSN id associated to the network.""" - try: - return lsn_api.lsn_for_network_get(self.cluster, network_id) - except (n_exc.NotFound, nvplib.NvpApiClient.NvpApiException): - logger = raise_on_err and LOG.error or LOG.warn - logger(_('Unable to find Logical Service Node for ' - 'network %s'), network_id) - if raise_on_err: 
- raise p_exc.LsnNotFound(entity='network', - entity_id=network_id) - - def lsn_create(self, context, network_id): - """Create a LSN associated to the network.""" - try: - return lsn_api.lsn_for_network_create(self.cluster, network_id) - except nvplib.NvpApiClient.NvpApiException: - err_msg = _('Unable to create LSN for network %s') % network_id - raise p_exc.NvpPluginException(err_msg=err_msg) - - def lsn_delete(self, context, lsn_id): - """Delete a LSN given its id.""" - try: - lsn_api.lsn_delete(self.cluster, lsn_id) - except (n_exc.NotFound, nvplib.NvpApiClient.NvpApiException): - LOG.warn(_('Unable to delete Logical Service Node %s'), lsn_id) - - def lsn_delete_by_network(self, context, network_id): - """Delete a LSN associated to the network.""" - lsn_id = self.lsn_get(context, network_id, raise_on_err=False) - if lsn_id: - self.lsn_delete(context, lsn_id) - - def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): - """Retrieve LSN and LSN port for the network and the subnet.""" - lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) - if lsn_id: - try: - lsn_port_id = lsn_api.lsn_port_by_subnet_get( - self.cluster, lsn_id, subnet_id) - except (n_exc.NotFound, nvplib.NvpApiClient.NvpApiException): - logger = raise_on_err and LOG.error or LOG.warn - logger(_('Unable to find Logical Service Node Port for ' - 'LSN %(lsn_id)s and subnet %(subnet_id)s') - % {'lsn_id': lsn_id, 'subnet_id': subnet_id}) - if raise_on_err: - raise p_exc.LsnPortNotFound(lsn_id=lsn_id, - entity='subnet', - entity_id=subnet_id) - return (lsn_id, None) - else: - return (lsn_id, lsn_port_id) - else: - return (None, None) - - def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): - """Retrieve LSN and LSN port given network and mac address.""" - lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) - if lsn_id: - try: - lsn_port_id = lsn_api.lsn_port_by_mac_get( - self.cluster, lsn_id, mac) - except (n_exc.NotFound, nvplib.NvpApiClient.NvpApiException): - logger = raise_on_err and LOG.error or LOG.warn - logger(_('Unable to find Logical Service Node Port for ' - 'LSN %(lsn_id)s and mac address %(mac)s') - % {'lsn_id': lsn_id, 'mac': mac}) - if raise_on_err: - raise p_exc.LsnPortNotFound(lsn_id=lsn_id, - entity='MAC', - entity_id=mac) - return (lsn_id, None) - else: - return (lsn_id, lsn_port_id) - else: - return (None, None) - - def lsn_port_create(self, context, lsn_id, subnet_info): - """Create and return LSN port for associated subnet.""" - try: - return lsn_api.lsn_port_create(self.cluster, lsn_id, subnet_info) - except n_exc.NotFound: - raise p_exc.LsnNotFound(entity='', entity_id=lsn_id) - except nvplib.NvpApiClient.NvpApiException: - err_msg = _('Unable to create port for LSN %s') % lsn_id - raise p_exc.NvpPluginException(err_msg=err_msg) - - def lsn_port_delete(self, context, lsn_id, lsn_port_id): - """Delete a LSN port from the Logical Service Node.""" - try: - lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) - except (n_exc.NotFound, nvplib.NvpApiClient.NvpApiException): - LOG.warn(_('Unable to delete LSN Port %s'), lsn_port_id) - - def lsn_port_dispose(self, context, network_id, mac_address): - """Delete a LSN port given the network and the mac address.""" - # NOTE(armando-migliaccio): dispose and delete are functionally - # equivalent, but they use different paraments to identify LSN - # and LSN port resources. 
- lsn_id, lsn_port_id = self.lsn_port_get_by_mac( - context, network_id, mac_address, raise_on_err=False) - if lsn_port_id: - self.lsn_port_delete(context, lsn_id, lsn_port_id) - if mac_address == METADATA_MAC: - try: - lswitch_port = nvplib.get_port_by_neutron_tag( - self.cluster, network_id, METADATA_PORT_ID) - if lswitch_port: - lswitch_port_id = lswitch_port['uuid'] - nvplib.delete_port( - self.cluster, network_id, lswitch_port_id) - else: - LOG.warn(_("Metadata port not found while attempting " - "to delete it from network %s"), network_id) - except (n_exc.PortNotFoundOnNetwork, - nvplib.NvpApiClient.NvpApiException): - LOG.warn(_("Metadata port not found while attempting " - "to delete it from network %s"), network_id) - else: - LOG.warn(_("Unable to find Logical Services Node " - "Port with MAC %s"), mac_address) - - def lsn_port_dhcp_setup( - self, context, network_id, port_id, port_data, subnet_config=None): - """Connect network to LSN via specified port and port_data.""" - try: - lsn_id = None - lswitch_port_id = nvplib.get_port_by_neutron_tag( - self.cluster, network_id, port_id)['uuid'] - lsn_id = self.lsn_get(context, network_id) - lsn_port_id = self.lsn_port_create(context, lsn_id, port_data) - except (n_exc.NotFound, p_exc.NvpPluginException): - raise p_exc.PortConfigurationError( - net_id=network_id, lsn_id=lsn_id, port_id=port_id) - try: - lsn_api.lsn_port_plug_network( - self.cluster, lsn_id, lsn_port_id, lswitch_port_id) - except p_exc.LsnConfigurationConflict: - self.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) - raise p_exc.PortConfigurationError( - net_id=network_id, lsn_id=lsn_id, port_id=port_id) - if subnet_config: - self.lsn_port_dhcp_configure( - context, lsn_id, lsn_port_id, subnet_config) - else: - return (lsn_id, lsn_port_id) - - def lsn_port_metadata_setup(self, context, lsn_id, subnet): - """Connect subnet to specified LSN.""" - data = { - "mac_address": METADATA_MAC, - "ip_address": subnet['cidr'], - "subnet_id": subnet['id'] - } - network_id = subnet['network_id'] - tenant_id = subnet['tenant_id'] - lswitch_port_id = None - try: - lswitch_port_id = nvplib.create_lport( - self.cluster, network_id, tenant_id, - METADATA_PORT_ID, METADATA_PORT_NAME, - METADATA_DEVICE_ID, True)['uuid'] - lsn_port_id = self.lsn_port_create(self.cluster, lsn_id, data) - except (n_exc.NotFound, p_exc.NvpPluginException, - nvplib.NvpApiClient.NvpApiException): - raise p_exc.PortConfigurationError( - net_id=network_id, lsn_id=lsn_id, port_id=lswitch_port_id) - else: - try: - lsn_api.lsn_port_plug_network( - self.cluster, lsn_id, lsn_port_id, lswitch_port_id) - except p_exc.LsnConfigurationConflict: - self.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) - nvplib.delete_port(self.cluster, network_id, lswitch_port_id) - raise p_exc.PortConfigurationError( - net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) - - def lsn_port_dhcp_configure(self, context, lsn_id, lsn_port_id, subnet): - """Enable/disable dhcp services with the given config options.""" - is_enabled = subnet["enable_dhcp"] - dhcp_options = { - "domain_name": cfg.CONF.NSX_DHCP.domain_name, - "default_lease_time": cfg.CONF.NSX_DHCP.default_lease_time, - } - dns_servers = cfg.CONF.NSX_DHCP.extra_domain_name_servers - dns_servers.extend(subnet["dns_nameservers"]) - if subnet['gateway_ip']: - dhcp_options["routers"] = subnet["gateway_ip"] - if dns_servers: - dhcp_options["domain_name_servers"] = ",".join(dns_servers) - if subnet["host_routes"]: - dhcp_options["classless_static_routes"] = ( - 
",".join(subnet["host_routes"]) - ) - try: - lsn_api.lsn_port_dhcp_configure( - self.cluster, lsn_id, lsn_port_id, is_enabled, dhcp_options) - except (n_exc.NotFound, nvplib.NvpApiClient.NvpApiException): - err_msg = (_('Unable to configure dhcp for Logical Service ' - 'Node %(lsn_id)s and port %(lsn_port_id)s') - % {'lsn_id': lsn_id, 'lsn_port_id': lsn_port_id}) - LOG.error(err_msg) - raise p_exc.NvpPluginException(err_msg=err_msg) - - def lsn_metadata_configure(self, context, subnet_id, is_enabled): - """Configure metadata service for the specified subnet.""" - subnet = self.plugin.get_subnet(context, subnet_id) - network_id = subnet['network_id'] - meta_conf = cfg.CONF.NSX_METADATA - metadata_options = { - 'metadata_server_ip': meta_conf.metadata_server_address, - 'metadata_server_port': meta_conf.metadata_server_port, - 'metadata_proxy_shared_secret': meta_conf.metadata_shared_secret - } - try: - lsn_id = self.lsn_get(context, network_id) - lsn_api.lsn_metadata_configure( - self.cluster, lsn_id, is_enabled, metadata_options) - except (p_exc.LsnNotFound, nvplib.NvpApiClient.NvpApiException): - err_msg = (_('Unable to configure metadata access ' - 'for subnet %s') % subnet_id) - LOG.error(err_msg) - raise p_exc.NvpPluginException(err_msg=err_msg) - if is_enabled: - try: - # test that the lsn port exists - self.lsn_port_get(context, network_id, subnet_id) - except p_exc.LsnPortNotFound: - # this might happen if subnet had dhcp off when created - # so create one, and wire it - self.lsn_port_metadata_setup(context, lsn_id, subnet) - else: - self.lsn_port_dispose(context, network_id, METADATA_MAC) - - def _lsn_port_host_conf(self, context, network_id, subnet_id, data, hdlr): - lsn_id = None - lsn_port_id = None - try: - lsn_id, lsn_port_id = self.lsn_port_get( - context, network_id, subnet_id) - hdlr(self.cluster, lsn_id, lsn_port_id, data) - except (n_exc.NotFound, nvplib.NvpApiClient.NvpApiException): - LOG.error(_('Error while configuring LSN ' - 'port %s'), lsn_port_id) - raise p_exc.PortConfigurationError( - net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) - - def lsn_port_dhcp_host_add(self, context, network_id, subnet_id, host): - """Add dhcp host entry to LSN port configuration.""" - self._lsn_port_host_conf(context, network_id, subnet_id, host, - lsn_api.lsn_port_dhcp_host_add) - - def lsn_port_dhcp_host_remove(self, context, network_id, subnet_id, host): - """Remove dhcp host entry from LSN port configuration.""" - self._lsn_port_host_conf(context, network_id, subnet_id, host, - lsn_api.lsn_port_dhcp_host_remove) - - def lsn_port_meta_host_add(self, context, network_id, subnet_id, host): - """Add metadata host entry to LSN port configuration.""" - self._lsn_port_host_conf(context, network_id, subnet_id, host, - lsn_api.lsn_port_metadata_host_add) - - def lsn_port_meta_host_remove(self, context, network_id, subnet_id, host): - """Remove meta host entry from LSN port configuration.""" - self._lsn_port_host_conf(context, network_id, subnet_id, host, - lsn_api.lsn_port_metadata_host_remove) - - def lsn_port_update( - self, context, network_id, subnet_id, dhcp=None, meta=None): - """Update the specified configuration for the LSN port.""" - if not dhcp and not meta: - return - try: - lsn_id, lsn_port_id = self.lsn_port_get( - context, network_id, subnet_id, raise_on_err=False) - if dhcp and lsn_id and lsn_port_id: - lsn_api.lsn_port_host_entries_update( - self.cluster, lsn_id, lsn_port_id, DHCP_CONF, dhcp) - if meta and lsn_id and lsn_port_id: - 
lsn_api.lsn_port_host_entries_update( - self.cluster, lsn_id, lsn_port_id, META_CONF, meta) - except nvplib.NvpApiClient.NvpApiException: - raise p_exc.PortConfigurationError( - net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) - - -class DhcpAgentNotifyAPI(object): - - def __init__(self, plugin, lsn_manager): - self.plugin = plugin - self.lsn_manager = lsn_manager - self._handle_subnet_dhcp_access = {'create': self._subnet_create, - 'update': self._subnet_update, - 'delete': self._subnet_delete} - - def notify(self, context, data, methodname): - [resource, action, _e] = methodname.split('.') - if resource == 'subnet': - self._handle_subnet_dhcp_access[action](context, data['subnet']) - elif resource == 'port' and action == 'update': - self._port_update(context, data['port']) - - def _port_update(self, context, port): - # With no fixed IP's there's nothing that can be updated - if not port["fixed_ips"]: - return - network_id = port['network_id'] - subnet_id = port["fixed_ips"][0]['subnet_id'] - filters = {'network_id': [network_id]} - # Because NVP does not support updating a single host entry we - # got to build the whole list from scratch and update in bulk - ports = self.plugin.get_ports(context, filters) - if not ports: - return - dhcp_conf = [ - {'mac_address': p['mac_address'], - 'ip_address': p["fixed_ips"][0]['ip_address']} - for p in ports if is_user_port(p) - ] - meta_conf = [ - {'instance_id': p['device_id'], - 'ip_address': p["fixed_ips"][0]['ip_address']} - for p in ports if is_user_port(p, check_dev_id=True) - ] - self.lsn_manager.lsn_port_update( - context, network_id, subnet_id, dhcp=dhcp_conf, meta=meta_conf) - - def _subnet_create(self, context, subnet, clean_on_err=True): - if subnet['enable_dhcp']: - network_id = subnet['network_id'] - # Create port for DHCP service - dhcp_port = { - "name": "", - "admin_state_up": True, - "device_id": "", - "device_owner": const.DEVICE_OWNER_DHCP, - "network_id": network_id, - "tenant_id": subnet["tenant_id"], - "mac_address": attr.ATTR_NOT_SPECIFIED, - "fixed_ips": [{"subnet_id": subnet['id']}] - } - try: - # This will end up calling handle_port_dhcp_access - # down below as well as handle_port_metadata_access - self.plugin.create_port(context, {'port': dhcp_port}) - except p_exc.PortConfigurationError as e: - err_msg = (_("Error while creating subnet %(cidr)s for " - "network %(network)s. 
Please, contact " - "administrator") % - {"cidr": subnet["cidr"], - "network": network_id}) - LOG.error(err_msg) - db_base_plugin_v2.NeutronDbPluginV2.delete_port( - self.plugin, context, e.port_id) - if clean_on_err: - self.plugin.delete_subnet(context, subnet['id']) - raise n_exc.Conflict() - - def _subnet_update(self, context, subnet): - network_id = subnet['network_id'] - try: - lsn_id, lsn_port_id = self.lsn_manager.lsn_port_get( - context, network_id, subnet['id']) - self.lsn_manager.lsn_port_dhcp_configure( - context, lsn_id, lsn_port_id, subnet) - except p_exc.LsnPortNotFound: - # It's possible that the subnet was created with dhcp off; - # check if the subnet was uplinked onto a router, and if so - # remove the patch attachment between the metadata port and - # the lsn port, in favor on the one we'll be creating during - # _subnet_create - self.lsn_manager.lsn_port_dispose( - context, network_id, METADATA_MAC) - # also, check that a dhcp port exists first and provision it - # accordingly - filters = dict(network_id=[network_id], - device_owner=[const.DEVICE_OWNER_DHCP]) - ports = self.plugin.get_ports(context, filters=filters) - if ports: - handle_port_dhcp_access( - self.plugin, context, ports[0], 'create_port') - else: - self._subnet_create(context, subnet, clean_on_err=False) - - def _subnet_delete(self, context, subnet): - # FIXME(armando-migliaccio): it looks like that a subnet filter - # is ineffective; so filter by network for now. - network_id = subnet['network_id'] - filters = dict(network_id=[network_id], - device_owner=[const.DEVICE_OWNER_DHCP]) - # FIXME(armando-migliaccio): this may be race-y - ports = self.plugin.get_ports(context, filters=filters) - if ports: - # This will end up calling handle_port_dhcp_access - # down below as well as handle_port_metadata_access - self.plugin.delete_port(context, ports[0]['id']) - - -def is_user_port(p, check_dev_id=False): - usable = p['fixed_ips'] and p['device_owner'] not in SPECIAL_OWNERS - return usable if not check_dev_id else usable and p['device_id'] - - -def check_services_requirements(cluster): - ver = cluster.api_client.get_nvp_version() - # It sounds like 4.1 is the first one where DHCP in NSX - # will have the experimental feature - if ver.major >= 4 and ver.minor >= 1: - cluster_id = cfg.CONF.default_service_cluster_uuid - if not lsn_api.service_cluster_exists(cluster, cluster_id): - raise p_exc.ServiceClusterUnavailable(cluster_id=cluster_id) - else: - raise p_exc.NvpInvalidVersion(version=ver) - - -def handle_network_dhcp_access(plugin, context, network, action): - LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s") - % {"action": action, "resource": network}) - if action == 'create_network': - network_id = network['id'] - plugin.lsn_manager.lsn_create(context, network_id) - elif action == 'delete_network': - # NOTE(armando-migliaccio): on delete_network, network - # is just the network id - network_id = network - plugin.lsn_manager.lsn_delete_by_network(context, network_id) - LOG.info(_("Logical Services Node for network " - "%s configured successfully"), network_id) - - -def handle_port_dhcp_access(plugin, context, port, action): - LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s") - % {"action": action, "resource": port}) - if port["device_owner"] == const.DEVICE_OWNER_DHCP: - network_id = port["network_id"] - if action == "create_port": - # at this point the port must have a subnet and a fixed ip - subnet_id = port["fixed_ips"][0]['subnet_id'] - subnet = 
plugin.get_subnet(context, subnet_id) - subnet_data = { - "mac_address": port["mac_address"], - "ip_address": subnet['cidr'], - "subnet_id": subnet['id'] - } - try: - plugin.lsn_manager.lsn_port_dhcp_setup( - context, network_id, port['id'], subnet_data, subnet) - except p_exc.PortConfigurationError: - err_msg = (_("Error while configuring DHCP for " - "port %s"), port['id']) - LOG.error(err_msg) - raise n_exc.NeutronException() - elif action == "delete_port": - plugin.lsn_manager.lsn_port_dispose(context, network_id, - port['mac_address']) - elif port["device_owner"] != const.DEVICE_OWNER_DHCP: - if port.get("fixed_ips"): - # do something only if there are IP's and dhcp is enabled - subnet_id = port["fixed_ips"][0]['subnet_id'] - if not plugin.get_subnet(context, subnet_id)['enable_dhcp']: - LOG.info(_("DHCP is disabled for subnet %s: nothing " - "to do"), subnet_id) - return - host_data = { - "mac_address": port["mac_address"], - "ip_address": port["fixed_ips"][0]['ip_address'] - } - network_id = port["network_id"] - if action == "create_port": - handler = plugin.lsn_manager.lsn_port_dhcp_host_add - elif action == "delete_port": - handler = plugin.lsn_manager.lsn_port_dhcp_host_remove - try: - handler(context, network_id, subnet_id, host_data) - except p_exc.PortConfigurationError: - if action == 'create_port': - db_base_plugin_v2.NeutronDbPluginV2.delete_port( - plugin, context, port['id']) - raise - LOG.info(_("DHCP for port %s configured successfully"), port['id']) - - -def handle_port_metadata_access(plugin, context, port, is_delete=False): - if is_user_port(port, check_dev_id=True): - network_id = port["network_id"] - network = plugin.get_network(context, network_id) - if network[external_net.EXTERNAL]: - LOG.info(_("Network %s is external: nothing to do"), network_id) - return - subnet_id = port["fixed_ips"][0]['subnet_id'] - host_data = { - "instance_id": port["device_id"], - "tenant_id": port["tenant_id"], - "ip_address": port["fixed_ips"][0]['ip_address'] - } - LOG.info(_("Configuring metadata entry for port %s"), port) - if not is_delete: - handler = plugin.lsn_manager.lsn_port_meta_host_add - else: - handler = plugin.lsn_manager.lsn_port_meta_host_remove - try: - handler(context, network_id, subnet_id, host_data) - except p_exc.PortConfigurationError: - if not is_delete: - db_base_plugin_v2.NeutronDbPluginV2.delete_port( - plugin, context, port['id']) - raise - LOG.info(_("Metadata for port %s configured successfully"), port['id']) - - -def handle_router_metadata_access(plugin, context, router_id, interface=None): - LOG.info(_("Handle metadata access via router: %(r)s and " - "interface %(i)s") % {'r': router_id, 'i': interface}) - if interface: - try: - plugin.get_port(context, interface['port_id']) - is_enabled = True - except n_exc.NotFound: - is_enabled = False - subnet_id = interface['subnet_id'] - try: - plugin.lsn_manager.lsn_metadata_configure( - context, subnet_id, is_enabled) - except p_exc.NvpPluginException: - if is_enabled: - l3_db.L3_NAT_db_mixin.remove_router_interface( - plugin, context, router_id, interface) - raise - LOG.info(_("Metadata for router %s handled successfully"), router_id) diff --git a/neutron/plugins/nicira/dhcpmeta_modes.py b/neutron/plugins/nicira/dhcpmeta_modes.py index 030eeb2ef..4e63abf26 100644 --- a/neutron/plugins/nicira/dhcpmeta_modes.py +++ b/neutron/plugins/nicira/dhcpmeta_modes.py @@ -1,6 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2013 VMware, Inc. 
+# # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -25,9 +24,13 @@ from neutron.openstack.common import importutils from neutron.openstack.common import log as logging from neutron.openstack.common import rpc from neutron.plugins.nicira.common import config -from neutron.plugins.nicira.common import exceptions as nvp_exc -from neutron.plugins.nicira.dhcp_meta import nvp as nvp_svc -from neutron.plugins.nicira.dhcp_meta import rpc as nvp_rpc +from neutron.plugins.nicira.common import exceptions as nsx_exc +from neutron.plugins.nicira.dhcp_meta import combined +from neutron.plugins.nicira.dhcp_meta import lsnmanager +from neutron.plugins.nicira.dhcp_meta import migration +from neutron.plugins.nicira.dhcp_meta import nsx as nsx_svc +from neutron.plugins.nicira.dhcp_meta import rpc as nsx_rpc +from neutron.plugins.nicira.extensions import lsn LOG = logging.getLogger(__name__) @@ -36,12 +39,21 @@ class DhcpMetadataAccess(object): def setup_dhcpmeta_access(self): """Initialize support for DHCP and Metadata services.""" + self._init_extensions() if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENT: self._setup_rpc_dhcp_metadata() - mod = nvp_rpc + mod = nsx_rpc elif cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS: - self._setup_nvp_dhcp_metadata() - mod = nvp_svc + self._setup_nsx_dhcp_metadata() + mod = nsx_svc + elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED: + notifier = self._setup_nsx_dhcp_metadata() + self._setup_rpc_dhcp_metadata(notifier=notifier) + mod = combined + else: + error = _("Invalid agent_mode: %s") % cfg.CONF.NSX.agent_mode + LOG.error(error) + raise nsx_exc.NvpPluginException(err_msg=error) self.handle_network_dhcp_access_delegate = ( mod.handle_network_dhcp_access ) @@ -55,49 +67,78 @@ class DhcpMetadataAccess(object): mod.handle_router_metadata_access ) - def _setup_rpc_dhcp_metadata(self): + def _setup_rpc_dhcp_metadata(self, notifier=None): self.topic = topics.PLUGIN self.conn = rpc.create_connection(new=True) - self.dispatcher = nvp_rpc.NVPRpcCallbacks().create_rpc_dispatcher() - self.conn.create_consumer(self.topic, self.dispatcher, - fanout=False) + self.dispatcher = nsx_rpc.NVPRpcCallbacks().create_rpc_dispatcher() + self.conn.create_consumer(self.topic, self.dispatcher, fanout=False) self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( - dhcp_rpc_agent_api.DhcpAgentNotifyAPI()) + notifier or dhcp_rpc_agent_api.DhcpAgentNotifyAPI()) self.conn.consume_in_thread() self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver ) + self.supported_extension_aliases.extend( + ['agent', 'dhcp_agent_scheduler']) - def _setup_nvp_dhcp_metadata(self): - # In agentless mode the following extensions, and related - # operations, are not supported; so do not publish them - if "agent" in self.supported_extension_aliases: - self.supported_extension_aliases.remove("agent") - if "dhcp_agent_scheduler" in self.supported_extension_aliases: - self.supported_extension_aliases.remove( - "dhcp_agent_scheduler") - nvp_svc.register_dhcp_opts(cfg) - nvp_svc.register_metadata_opts(cfg) - self.lsn_manager = nvp_svc.LsnManager(self) - self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( - nvp_svc.DhcpAgentNotifyAPI(self, self.lsn_manager)) - # In agentless mode, ports whose owner is DHCP need to - # be special cased; so add it to the list of special - # owners list - if const.DEVICE_OWNER_DHCP not in self.port_special_owners: - self.port_special_owners.append(const.DEVICE_OWNER_DHCP) + def 
_setup_nsx_dhcp_metadata(self): + self._check_services_requirements() + nsx_svc.register_dhcp_opts(cfg) + nsx_svc.register_metadata_opts(cfg) + lsnmanager.register_lsn_opts(cfg) + lsn_manager = lsnmanager.PersistentLsnManager(self) + self.lsn_manager = lsn_manager + if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS: + notifier = nsx_svc.DhcpAgentNotifyAPI(self, lsn_manager) + self.agent_notifiers[const.AGENT_TYPE_DHCP] = notifier + # In agentless mode, ports whose owner is DHCP need to + # be special cased; so add it to the list of special + # owners list + if const.DEVICE_OWNER_DHCP not in self.port_special_owners: + self.port_special_owners.append(const.DEVICE_OWNER_DHCP) + elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED: + # This becomes ineffective, as all new networks creations + # are handled by Logical Services Nodes in NSX + cfg.CONF.set_override('network_auto_schedule', False) + LOG.warn(_('network_auto_schedule has been disabled')) + notifier = combined.DhcpAgentNotifyAPI(self, lsn_manager) + self.supported_extension_aliases.append(lsn.EXT_ALIAS) + # Add the capability to migrate dhcp and metadata services over + self.migration_manager = ( + migration.MigrationManager(self, lsn_manager, notifier)) + return notifier + + def _init_extensions(self): + extensions = (lsn.EXT_ALIAS, 'agent', 'dhcp_agent_scheduler') + for ext in extensions: + if ext in self.supported_extension_aliases: + self.supported_extension_aliases.remove(ext) + + def _check_services_requirements(self): try: error = None - nvp_svc.check_services_requirements(self.cluster) - except nvp_exc.NvpInvalidVersion: + nsx_svc.check_services_requirements(self.cluster) + except nsx_exc.NvpInvalidVersion: error = _("Unable to run Neutron with config option '%s', as NSX " - "does not support it") % config.AgentModes.AGENTLESS - except nvp_exc.ServiceClusterUnavailable: + "does not support it") % cfg.CONF.NSX.agent_mode + except nsx_exc.ServiceClusterUnavailable: error = _("Unmet dependency for config option " - "'%s'") % config.AgentModes.AGENTLESS + "'%s'") % cfg.CONF.NSX.agent_mode if error: LOG.exception(error) - raise nvp_exc.NvpPluginException(err_msg=error) + raise nsx_exc.NvpPluginException(err_msg=error) + + def get_lsn(self, context, network_id, fields=None): + report = self.migration_manager.report(context, network_id) + return {'network': network_id, 'report': report} + + def create_lsn(self, context, lsn): + network_id = lsn['lsn']['network'] + subnet = self.migration_manager.validate(context, network_id) + subnet_id = None if not subnet else subnet['id'] + self.migration_manager.migrate(context, network_id, subnet) + r = self.migration_manager.report(context, network_id, subnet_id) + return {'network': network_id, 'report': r} def handle_network_dhcp_access(self, context, network, action): self.handle_network_dhcp_access_delegate(self, context, diff --git a/neutron/plugins/nicira/extensions/lsn.py b/neutron/plugins/nicira/extensions/lsn.py new file mode 100644 index 000000000..4a7d3ca3d --- /dev/null +++ b/neutron/plugins/nicira/extensions/lsn.py @@ -0,0 +1,82 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from neutron.api import extensions +from neutron.api.v2 import base +from neutron import manager + + +EXT_ALIAS = 'lsn' +COLLECTION_NAME = "%ss" % EXT_ALIAS + +RESOURCE_ATTRIBUTE_MAP = { + COLLECTION_NAME: { + 'network': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'is_visible': True}, + 'report': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'validate': {'type:string': None}, 'is_visible': True}, + }, +} + + +class Lsn(object): + """Enable LSN configuration for Neutron NSX networks.""" + + @classmethod + def get_name(cls): + return "Logical Service Node configuration" + + @classmethod + def get_alias(cls): + return EXT_ALIAS + + @classmethod + def get_description(cls): + return "Enables configuration of NSX Logical Services Node." + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/%s/api/v2.0" % EXT_ALIAS + + @classmethod + def get_updated(cls): + return "2013-10-05T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + exts = [] + plugin = manager.NeutronManager.get_plugin() + resource_name = EXT_ALIAS + collection_name = resource_name.replace('_', '-') + "s" + params = RESOURCE_ATTRIBUTE_MAP.get(COLLECTION_NAME, dict()) + controller = base.create_resource(collection_name, + resource_name, + plugin, params, allow_bulk=False) + ex = extensions.ResourceExtension(collection_name, controller) + exts.append(ex) + return exts + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} diff --git a/neutron/plugins/nicira/nsxlib/lsn.py b/neutron/plugins/nicira/nsxlib/lsn.py index 38966b441..ed291e5dc 100644 --- a/neutron/plugins/nicira/nsxlib/lsn.py +++ b/neutron/plugins/nicira/nsxlib/lsn.py @@ -141,6 +141,19 @@ def lsn_port_by_subnet_get(cluster, lsn_id, subnet_id): return _lsn_port_get(cluster, lsn_id, filters) +def lsn_port_info_get(cluster, lsn_id, lsn_port_id): + result = do_request(HTTP_GET, + _build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id), + cluster=cluster) + for tag in result['tags']: + if tag['scope'] == 'n_subnet_id': + result['subnet_id'] = tag['tag'] + break + return result + + def lsn_port_plug_network(cluster, lsn_id, lsn_port_id, lswitch_port_id): patch_obj = { "type": "PatchAttachment", diff --git a/neutron/plugins/nicira/shell/__init__.py b/neutron/plugins/nicira/shell/__init__.py new file mode 100644 index 000000000..be3c07985 --- /dev/null +++ b/neutron/plugins/nicira/shell/__init__.py @@ -0,0 +1,41 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from neutron.plugins.nicira.shell import commands as cmd +from neutronclient import shell + + +class NsxManage(shell.NeutronShell): + + def __init__(self, api_version): + super(NsxManage, self).__init__(api_version) + self.command_manager.add_command('net-migrate', cmd.NetworkMigrate) + self.command_manager.add_command('net-report', cmd.NetworkReport) + + def build_option_parser(self, description, version): + parser = super(NsxManage, self).build_option_parser( + description, version) + return parser + + def initialize_app(self, argv): + super(NsxManage, self).initialize_app(argv) + self.client = self.client_manager.neutron + + +def main(): + return NsxManage(shell.NEUTRON_API_VERSION).run(sys.argv[1:]) diff --git a/neutron/plugins/nicira/shell/commands.py b/neutron/plugins/nicira/shell/commands.py new file mode 100644 index 000000000..b49fe5092 --- /dev/null +++ b/neutron/plugins/nicira/shell/commands.py @@ -0,0 +1,66 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from neutronclient.neutron.v2_0 import find_resourceid_by_name_or_id +from neutronclient.neutron.v2_0 import NeutronCommand + +LSN_PATH = '/lsns' + + +def print_report(write_func, report): + write_func(_("\nService type = %s\n") % report['report']['type']) + services = ','.join(report['report']['services']) + ports = ','.join(report['report']['ports']) + write_func(_("Service uuids = %s\n") % services) + write_func(_("Port uuids = %s\n\n") % ports) + + +class NetworkReport(NeutronCommand): + """Retrieve network migration report.""" + + def get_parser(self, prog_name): + parser = super(NetworkReport, self).get_parser(prog_name) + parser.add_argument('network', metavar='network', + help=_('ID or name of network to run report on')) + return parser + + def run(self, parsed_args): + net = parsed_args.network + net_id = find_resourceid_by_name_or_id(self.app.client, 'network', net) + res = self.app.client.get("%s/%s" % (LSN_PATH, net_id)) + if res: + self.app.stdout.write(_('Migration report is:\n')) + print_report(self.app.stdout.write, res['lsn']) + + +class NetworkMigrate(NeutronCommand): + """Perform network migration.""" + + def get_parser(self, prog_name): + parser = super(NetworkMigrate, self).get_parser(prog_name) + parser.add_argument('network', metavar='network', + help=_('ID or name of network to migrate')) + return parser + + def run(self, parsed_args): + net = parsed_args.network + net_id = find_resourceid_by_name_or_id(self.app.client, 'network', net) + body = {'network': net_id} + res = self.app.client.post(LSN_PATH, body={'lsn': body}) + if res: + self.app.stdout.write(_('Migration has been successful:\n')) + print_report(self.app.stdout.write, res['lsn']) diff --git a/neutron/tests/unit/nicira/test_dhcpmeta.py b/neutron/tests/unit/nicira/test_dhcpmeta.py index 5c9cd9407..0be43af5a 100644 --- a/neutron/tests/unit/nicira/test_dhcpmeta.py +++ b/neutron/tests/unit/nicira/test_dhcpmeta.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2013 VMware, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,12 +18,239 @@ import mock from oslo.config import cfg from neutron.common import exceptions as n_exc +from neutron import context +from neutron.db import api as db from neutron.plugins.nicira.common import exceptions as p_exc -from neutron.plugins.nicira.dhcp_meta import nvp +from neutron.plugins.nicira.dbexts import lsn_db +from neutron.plugins.nicira.dhcp_meta import constants +from neutron.plugins.nicira.dhcp_meta import lsnmanager as lsn_man +from neutron.plugins.nicira.dhcp_meta import migration as mig_man +from neutron.plugins.nicira.dhcp_meta import nsx +from neutron.plugins.nicira.dhcp_meta import rpc from neutron.plugins.nicira.NvpApiClient import NvpApiException from neutron.tests import base +class DhcpMetadataBuilderTestCase(base.BaseTestCase): + + def setUp(self): + super(DhcpMetadataBuilderTestCase, self).setUp() + self.builder = mig_man.DhcpMetadataBuilder(mock.Mock(), mock.Mock()) + self.network_id = 'foo_network_id' + self.subnet_id = 'foo_subnet_id' + self.router_id = 'foo_router_id' + + def test_dhcp_agent_get_all(self): + expected = [] + self.builder.plugin.list_dhcp_agents_hosting_network.return_value = ( + {'agents': expected}) + agents = self.builder.dhcp_agent_get_all(mock.ANY, self.network_id) + self.assertEqual(expected, agents) + + def test_dhcp_port_get_all(self): + expected = [] + self.builder.plugin.get_ports.return_value = expected + ports = self.builder.dhcp_port_get_all(mock.ANY, self.network_id) + self.assertEqual(expected, ports) + + def test_router_id_get(self): + port = { + 'device_id': self.router_id, + 'network_id': self.network_id, + 'fixed_ips': [{'subnet_id': self.subnet_id}] + } + subnet = { + 'id': self.subnet_id, + 'network_id': self.network_id + } + self.builder.plugin.get_ports.return_value = [port] + result = self.builder.router_id_get(context, subnet) + self.assertEqual(self.router_id, result) + + def test_router_id_get_none_subnet(self): + self.assertIsNone(self.builder.router_id_get(mock.ANY, None)) + + def test_metadata_deallocate(self): + self.builder.metadata_deallocate( + mock.ANY, self.router_id, self.subnet_id) + self.assertTrue(self.builder.plugin.remove_router_interface.call_count) + + def test_metadata_allocate(self): + self.builder.metadata_allocate( + mock.ANY, self.router_id, self.subnet_id) + self.assertTrue(self.builder.plugin.add_router_interface.call_count) + + def test_dhcp_deallocate(self): + agents = [{'id': 'foo_agent_id'}] + ports = [{'id': 'foo_port_id'}] + self.builder.dhcp_deallocate(mock.ANY, self.network_id, agents, ports) + self.assertTrue( + self.builder.plugin.remove_network_from_dhcp_agent.call_count) + self.assertTrue(self.builder.plugin.delete_port.call_count) + + def _test_dhcp_allocate(self, subnet, expected_notify_count): + with mock.patch.object(mig_man.nsx, 'handle_network_dhcp_access') as f: + self.builder.dhcp_allocate(mock.ANY, self.network_id, subnet) + self.assertTrue(f.call_count) + self.assertEqual(expected_notify_count, + self.builder.notifier.notify.call_count) + + def test_dhcp_allocate(self): + subnet = {'network_id': self.network_id, 'id': self.subnet_id} + self._test_dhcp_allocate(subnet, 2) + + def test_dhcp_allocate_none_subnet(self): + self._test_dhcp_allocate(None, 0) + + +class MigrationManagerTestCase(base.BaseTestCase): + + def setUp(self): + super(MigrationManagerTestCase, self).setUp() + self.manager = mig_man.MigrationManager(mock.Mock(), + mock.Mock(), + mock.Mock()) + self.network_id = 'foo_network_id' + 
self.router_id = 'foo_router_id' + self.subnet_id = 'foo_subnet_id' + self.mock_builder_p = mock.patch.object(self.manager, 'builder') + self.mock_builder = self.mock_builder_p.start() + self.addCleanup(self.mock_builder_p.stop) + + def _test_validate(self, lsn_exists=False, ext_net=False, subnets=None): + network = {'router:external': ext_net} + self.manager.manager.lsn_exists.return_value = lsn_exists + self.manager.plugin.get_network.return_value = network + self.manager.plugin.get_subnets.return_value = subnets + result = self.manager.validate(mock.ANY, self.network_id) + if len(subnets): + self.assertEqual(subnets[0], result) + else: + self.assertIsNone(result) + + def test_validate_no_subnets(self): + self._test_validate(subnets=[]) + + def test_validate_with_one_subnet(self): + self._test_validate(subnets=[{'cidr': '0.0.0.0/0'}]) + + def test_validate_raise_conflict_many_subnets(self): + self.assertRaises(p_exc.LsnMigrationConflict, + self._test_validate, + subnets=[{'id': 'sub1'}, {'id': 'sub2'}]) + + def test_validate_raise_conflict_lsn_exists(self): + self.assertRaises(p_exc.LsnMigrationConflict, + self._test_validate, + lsn_exists=True) + + def test_validate_raise_badrequest_external_net(self): + self.assertRaises(n_exc.BadRequest, + self._test_validate, + ext_net=True) + + def test_validate_raise_badrequest_metadata_net(self): + self.assertRaises(n_exc.BadRequest, + self._test_validate, + ext_net=False, + subnets=[{'cidr': rpc.METADATA_SUBNET_CIDR}]) + + def _test_migrate(self, router, subnet, expected_calls): + self.mock_builder.router_id_get.return_value = router + self.manager.migrate(mock.ANY, self.network_id, subnet) + # testing the exact the order of calls is important + self.assertEqual(expected_calls, self.mock_builder.mock_calls) + + def test_migrate(self): + subnet = { + 'id': self.subnet_id, + 'network_id': self.network_id + } + call_sequence = [ + mock.call.router_id_get(mock.ANY, subnet), + mock.call.metadata_deallocate( + mock.ANY, self.router_id, self.subnet_id), + mock.call.dhcp_agent_get_all(mock.ANY, self.network_id), + mock.call.dhcp_port_get_all(mock.ANY, self.network_id), + mock.call.dhcp_deallocate( + mock.ANY, self.network_id, mock.ANY, mock.ANY), + mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet), + mock.call.metadata_allocate( + mock.ANY, self.router_id, self.subnet_id) + ] + self._test_migrate(self.router_id, subnet, call_sequence) + + def test_migrate_no_router_uplink(self): + subnet = { + 'id': self.subnet_id, + 'network_id': self.network_id + } + call_sequence = [ + mock.call.router_id_get(mock.ANY, subnet), + mock.call.dhcp_agent_get_all(mock.ANY, self.network_id), + mock.call.dhcp_port_get_all(mock.ANY, self.network_id), + mock.call.dhcp_deallocate( + mock.ANY, self.network_id, mock.ANY, mock.ANY), + mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet), + ] + self._test_migrate(None, subnet, call_sequence) + + def test_migrate_no_subnet(self): + call_sequence = [ + mock.call.router_id_get(mock.ANY, None), + mock.call.dhcp_allocate(mock.ANY, self.network_id, None), + ] + self._test_migrate(None, None, call_sequence) + + def _test_report(self, lsn_attrs, expected): + self.manager.manager.lsn_port_get.return_value = lsn_attrs + report = self.manager.report(mock.ANY, self.network_id, self.subnet_id) + self.assertEqual(expected, report) + + def test_report_for_lsn(self): + self._test_report(('foo_lsn_id', 'foo_lsn_port_id'), + {'ports': ['foo_lsn_port_id'], + 'services': ['foo_lsn_id'], 'type': 'lsn'}) + + def 
test_report_for_lsn_without_lsn_port(self): + self._test_report(('foo_lsn_id', None), + {'ports': [], + 'services': ['foo_lsn_id'], 'type': 'lsn'}) + + def _test_report_for_lsn_without_subnet(self, validated_subnet): + with mock.patch.object(self.manager, 'validate', + return_value=validated_subnet): + self.manager.manager.lsn_port_get.return_value = ( + ('foo_lsn_id', 'foo_lsn_port_id')) + report = self.manager.report(context, self.network_id) + expected = { + 'ports': ['foo_lsn_port_id'] if validated_subnet else [], + 'services': ['foo_lsn_id'], 'type': 'lsn' + } + self.assertEqual(expected, report) + + def test_report_for_lsn_without_subnet_subnet_found(self): + self._test_report_for_lsn_without_subnet({'id': self.subnet_id}) + + def test_report_for_lsn_without_subnet_subnet_not_found(self): + self.manager.manager.lsn_get.return_value = 'foo_lsn_id' + self._test_report_for_lsn_without_subnet(None) + + def test_report_for_dhcp_agent(self): + self.manager.manager.lsn_port_get.return_value = (None, None) + self.mock_builder.dhcp_agent_get_all.return_value = ( + [{'id': 'foo_agent_id'}]) + self.mock_builder.dhcp_port_get_all.return_value = ( + [{'id': 'foo_dhcp_port_id'}]) + result = self.manager.report(mock.ANY, self.network_id, self.subnet_id) + expected = { + 'ports': ['foo_dhcp_port_id'], + 'services': ['foo_agent_id'], + 'type': 'agent' + } + self.assertEqual(expected, result) + + class LsnManagerTestCase(base.BaseTestCase): def setUp(self): @@ -37,11 +262,11 @@ class LsnManagerTestCase(base.BaseTestCase): self.mac = 'aa:bb:cc:dd:ee:ff' self.lsn_port_id = 'foo_lsn_port_id' self.tenant_id = 'foo_tenant_id' - self.manager = nvp.LsnManager(mock.Mock()) - self.mock_lsn_api_p = mock.patch.object(nvp, 'lsn_api') + self.manager = lsn_man.LsnManager(mock.Mock()) + self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api') self.mock_lsn_api = self.mock_lsn_api_p.start() - nvp.register_dhcp_opts(cfg) - nvp.register_metadata_opts(cfg) + nsx.register_dhcp_opts(cfg) + nsx.register_metadata_opts(cfg) self.addCleanup(cfg.CONF.reset) self.addCleanup(self.mock_lsn_api_p.stop) @@ -121,7 +346,7 @@ class LsnManagerTestCase(base.BaseTestCase): def _test_lsn_delete_by_network_with_exc(self, exc): self.mock_lsn_api.lsn_for_network_get.side_effect = exc - with mock.patch.object(nvp.LOG, 'warn') as l: + with mock.patch.object(lsn_man.LOG, 'warn') as l: self.manager.lsn_delete_by_network(mock.ANY, self.net_id) self.assertEqual(1, l.call_count) @@ -195,7 +420,7 @@ class LsnManagerTestCase(base.BaseTestCase): def _test_lsn_port_delete_with_exc(self, exc): self.mock_lsn_api.lsn_port_delete.side_effect = exc - with mock.patch.object(nvp.LOG, 'warn') as l: + with mock.patch.object(lsn_man.LOG, 'warn') as l: self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count) self.assertEqual(1, l.call_count) @@ -210,7 +435,7 @@ class LsnManagerTestCase(base.BaseTestCase): self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id with mock.patch.object( self.manager, 'lsn_get', return_value=self.lsn_id): - with mock.patch.object(nvp.nvplib, 'get_port_by_neutron_tag'): + with mock.patch.object(lsn_man.nsxlib, 'get_port_by_neutron_tag'): expected = self.manager.lsn_port_dhcp_setup( mock.ANY, mock.ANY, mock.ANY, mock.ANY, subnet_config=sub) self.assertEqual( @@ -228,7 +453,7 @@ class LsnManagerTestCase(base.BaseTestCase): self.assertEqual(1, f.call_count) def test_lsn_port_dhcp_setup_with_not_found(self): - with mock.patch.object(nvp.nvplib, 
'get_port_by_neutron_tag') as f: + with mock.patch.object(lsn_man.nsxlib, 'get_port_by_neutron_tag') as f: f.side_effect = n_exc.NotFound self.assertRaises(p_exc.PortConfigurationError, self.manager.lsn_port_dhcp_setup, @@ -237,7 +462,7 @@ class LsnManagerTestCase(base.BaseTestCase): def test_lsn_port_dhcp_setup_with_conflict(self): self.mock_lsn_api.lsn_port_plug_network.side_effect = ( p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id)) - with mock.patch.object(nvp.nvplib, 'get_port_by_neutron_tag'): + with mock.patch.object(lsn_man.nsxlib, 'get_port_by_neutron_tag'): with mock.patch.object(self.manager, 'lsn_port_delete') as g: self.assertRaises(p_exc.PortConfigurationError, self.manager.lsn_port_dhcp_setup, @@ -333,7 +558,7 @@ class LsnManagerTestCase(base.BaseTestCase): 'network_id': self.net_id, 'tenant_id': self.tenant_id } - with mock.patch.object(nvp.nvplib, 'create_lport') as f: + with mock.patch.object(lsn_man.nsxlib, 'create_lport') as f: f.return_value = {'uuid': self.port_id} self.manager.lsn_port_metadata_setup(mock.ANY, self.lsn_id, subnet) self.assertEqual(1, self.mock_lsn_api.lsn_port_create.call_count) @@ -347,7 +572,7 @@ class LsnManagerTestCase(base.BaseTestCase): 'network_id': self.net_id, 'tenant_id': self.tenant_id } - with mock.patch.object(nvp.nvplib, 'create_lport') as f: + with mock.patch.object(lsn_man.nsxlib, 'create_lport') as f: f.side_effect = n_exc.NotFound self.assertRaises(p_exc.PortConfigurationError, self.manager.lsn_port_metadata_setup, @@ -360,8 +585,8 @@ class LsnManagerTestCase(base.BaseTestCase): 'network_id': self.net_id, 'tenant_id': self.tenant_id } - with mock.patch.object(nvp.nvplib, 'create_lport') as f: - with mock.patch.object(nvp.nvplib, 'delete_port') as g: + with mock.patch.object(lsn_man.nsxlib, 'create_lport') as f: + with mock.patch.object(lsn_man.nsxlib, 'delete_port') as g: f.return_value = {'uuid': self.port_id} self.mock_lsn_api.lsn_port_plug_network.side_effect = ( p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id)) @@ -385,14 +610,14 @@ class LsnManagerTestCase(base.BaseTestCase): self.lsn_id, self.lsn_port_id, 1) def test_lsn_port_dispose_meta_mac(self): - self.mac = nvp.METADATA_MAC - with mock.patch.object(nvp.nvplib, 'get_port_by_neutron_tag') as f: - with mock.patch.object(nvp.nvplib, 'delete_port') as g: + self.mac = constants.METADATA_MAC + with mock.patch.object(lsn_man.nsxlib, 'get_port_by_neutron_tag') as f: + with mock.patch.object(lsn_man.nsxlib, 'delete_port') as g: f.return_value = {'uuid': self.port_id} self._test_lsn_port_dispose_with_values( self.lsn_id, self.lsn_port_id, 1) f.assert_called_once_with( - mock.ANY, self.net_id, nvp.METADATA_PORT_ID) + mock.ANY, self.net_id, constants.METADATA_PORT_ID) g.assert_called_once_with(mock.ANY, self.net_id, self.port_id) def test_lsn_port_dispose_lsn_not_found(self): @@ -403,7 +628,7 @@ class LsnManagerTestCase(base.BaseTestCase): def test_lsn_port_dispose_api_error(self): self.mock_lsn_api.lsn_port_delete.side_effect = NvpApiException - with mock.patch.object(nvp.LOG, 'warn') as l: + with mock.patch.object(lsn_man.LOG, 'warn') as l: self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac) self.assertEqual(1, l.call_count) @@ -455,11 +680,188 @@ class LsnManagerTestCase(base.BaseTestCase): mock.ANY, mock.ANY, mock.ANY, mock.ANY) +class PersistentLsnManagerTestCase(base.BaseTestCase): + + def setUp(self): + super(PersistentLsnManagerTestCase, self).setUp() + self.net_id = 'foo_network_id' + self.sub_id = 'foo_subnet_id' + self.port_id = 'foo_port_id' + self.lsn_id = 
'foo_lsn_id' + self.mac = 'aa:bb:cc:dd:ee:ff' + self.lsn_port_id = 'foo_lsn_port_id' + self.tenant_id = 'foo_tenant_id' + db.configure_db() + nsx.register_dhcp_opts(cfg) + nsx.register_metadata_opts(cfg) + lsn_man.register_lsn_opts(cfg) + self.manager = lsn_man.PersistentLsnManager(mock.Mock()) + self.context = context.get_admin_context() + self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api') + self.mock_lsn_api = self.mock_lsn_api_p.start() + self.addCleanup(cfg.CONF.reset) + self.addCleanup(self.mock_lsn_api_p.stop) + self.addCleanup(db.clear_db) + + def test_lsn_get(self): + lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) + result = self.manager.lsn_get(self.context, self.net_id) + self.assertEqual(self.lsn_id, result) + + def test_lsn_get_raise_not_found(self): + self.assertRaises(p_exc.LsnNotFound, + self.manager.lsn_get, self.context, self.net_id) + + def test_lsn_get_silent_not_found(self): + result = self.manager.lsn_get( + self.context, self.net_id, raise_on_err=False) + self.assertIsNone(result) + + def test_lsn_get_sync_on_missing(self): + cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN') + self.manager = lsn_man.PersistentLsnManager(mock.Mock()) + with mock.patch.object(self.manager, 'lsn_save') as f: + self.manager.lsn_get(self.context, self.net_id, raise_on_err=True) + self.assertTrue(self.mock_lsn_api.lsn_for_network_get.call_count) + self.assertTrue(f.call_count) + + def test_lsn_save(self): + self.manager.lsn_save(self.context, self.net_id, self.lsn_id) + result = self.manager.lsn_get(self.context, self.net_id) + self.assertEqual(self.lsn_id, result) + + def test_lsn_create(self): + self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id + with mock.patch.object(self.manager, 'lsn_save') as f: + result = self.manager.lsn_create(self.context, self.net_id) + self.assertTrue( + self.mock_lsn_api.lsn_for_network_create.call_count) + self.assertTrue(f.call_count) + self.assertEqual(self.lsn_id, result) + + def test_lsn_create_failure(self): + with mock.patch.object( + self.manager, 'lsn_save', + side_effect=p_exc.NvpPluginException(err_msg='')): + self.assertRaises(p_exc.NvpPluginException, + self.manager.lsn_create, + self.context, self.net_id) + self.assertTrue(self.mock_lsn_api.lsn_delete.call_count) + + def test_lsn_delete(self): + self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id + self.manager.lsn_create(self.context, self.net_id) + self.manager.lsn_delete(self.context, self.lsn_id) + self.assertIsNone(self.manager.lsn_get( + self.context, self.net_id, raise_on_err=False)) + + def test_lsn_delete_not_existent(self): + self.manager.lsn_delete(self.context, self.lsn_id) + self.assertTrue(self.mock_lsn_api.lsn_delete.call_count) + + def test_lsn_port_get(self): + lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) + lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id, + self.sub_id, self.mac, self.lsn_id) + res = self.manager.lsn_port_get(self.context, self.net_id, self.sub_id) + self.assertEqual((self.lsn_id, self.lsn_port_id), res) + + def test_lsn_port_get_raise_not_found(self): + self.assertRaises(p_exc.LsnPortNotFound, + self.manager.lsn_port_get, + self.context, self.net_id, self.sub_id) + + def test_lsn_port_get_silent_not_found(self): + result = self.manager.lsn_port_get( + self.context, self.net_id, self.sub_id, raise_on_err=False) + self.assertEqual((None, None), result) + + def test_lsn_port_get_sync_on_missing(self): + return + cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN') + 
self.manager = lsn_man.PersistentLsnManager(mock.Mock()) + self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id + self.mock_lsn_api.lsn_port_by_subnet_get.return_value = ( + self.lsn_id, self.lsn_port_id) + with mock.patch.object(self.manager, 'lsn_save') as f: + with mock.patch.object(self.manager, 'lsn_port_save') as g: + self.manager.lsn_port_get( + self.context, self.net_id, self.sub_id) + self.assertTrue( + self.mock_lsn_api.lsn_port_by_subnet_get.call_count) + self.assertTrue( + self.mock_lsn_api.lsn_port_info_get.call_count) + self.assertTrue(f.call_count) + self.assertTrue(g.call_count) + + def test_lsn_port_get_by_mac(self): + lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) + lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id, + self.sub_id, self.mac, self.lsn_id) + res = self.manager.lsn_port_get_by_mac( + self.context, self.net_id, self.mac) + self.assertEqual((self.lsn_id, self.lsn_port_id), res) + + def test_lsn_port_get_by_mac_raise_not_found(self): + self.assertRaises(p_exc.LsnPortNotFound, + self.manager.lsn_port_get_by_mac, + self.context, self.net_id, self.sub_id) + + def test_lsn_port_get_by_mac_silent_not_found(self): + result = self.manager.lsn_port_get_by_mac( + self.context, self.net_id, self.sub_id, raise_on_err=False) + self.assertEqual((None, None), result) + + def test_lsn_port_create(self): + lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) + self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id + subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac} + with mock.patch.object(self.manager, 'lsn_port_save') as f: + result = self.manager.lsn_port_create( + self.context, self.net_id, subnet) + self.assertTrue( + self.mock_lsn_api.lsn_port_create.call_count) + self.assertTrue(f.call_count) + self.assertEqual(self.lsn_port_id, result) + + def test_lsn_port_create_failure(self): + subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac} + with mock.patch.object( + self.manager, 'lsn_port_save', + side_effect=p_exc.NvpPluginException(err_msg='')): + self.assertRaises(p_exc.NvpPluginException, + self.manager.lsn_port_create, + self.context, self.net_id, subnet) + self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count) + + def test_lsn_port_delete(self): + lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) + lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id, + self.sub_id, self.mac, self.lsn_id) + self.manager.lsn_port_delete( + self.context, self.lsn_id, self.lsn_port_id) + self.assertEqual((None, None), self.manager.lsn_port_get( + self.context, self.lsn_id, self.sub_id, raise_on_err=False)) + + def test_lsn_port_delete_not_existent(self): + self.manager.lsn_port_delete( + self.context, self.lsn_id, self.lsn_port_id) + self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count) + + def test_lsn_port_save(self): + self.manager.lsn_save(self.context, self.net_id, self.lsn_id) + self.manager.lsn_port_save(self.context, self.lsn_port_id, + self.sub_id, self.mac, self.lsn_id) + result = self.manager.lsn_port_get( + self.context, self.net_id, self.sub_id, raise_on_err=False) + self.assertEqual((self.lsn_id, self.lsn_port_id), result) + + class DhcpAgentNotifyAPITestCase(base.BaseTestCase): def setUp(self): super(DhcpAgentNotifyAPITestCase, self).setUp() - self.notifier = nvp.DhcpAgentNotifyAPI(mock.Mock(), mock.Mock()) + self.notifier = nsx.DhcpAgentNotifyAPI(mock.Mock(), mock.Mock()) self.plugin = self.notifier.plugin self.lsn_manager = self.notifier.lsn_manager @@ -626,7 +1028,7 @@ class 
DhcpAgentNotifyAPITestCase(base.BaseTestCase): self._test_subnet_create(False) def test_subnet_create_raise_port_config_error(self): - with mock.patch.object(nvp.db_base_plugin_v2.NeutronDbPluginV2, + with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2, 'delete_port') as d: self._test_subnet_create( True, @@ -673,7 +1075,7 @@ class DhcpAgentNotifyAPITestCase(base.BaseTestCase): entity_id=subnet['id'])) self.notifier.plugin.get_ports.return_value = dhcp_port count = 0 if dhcp_port is None else 1 - with mock.patch.object(nvp, 'handle_port_dhcp_access') as h: + with mock.patch.object(nsx, 'handle_port_dhcp_access') as h: self.notifier.notify( mock.ANY, {'subnet': subnet}, 'subnet.update.end') self.assertEqual(count, h.call_count) @@ -723,7 +1125,7 @@ class DhcpTestCase(base.BaseTestCase): def test_handle_create_network(self): network = {'id': 'foo_network_id'} - nvp.handle_network_dhcp_access( + nsx.handle_network_dhcp_access( self.plugin, mock.ANY, network, 'create_network') self.plugin.lsn_manager.lsn_create.assert_called_once_with( mock.ANY, network['id']) @@ -732,7 +1134,7 @@ class DhcpTestCase(base.BaseTestCase): network_id = 'foo_network_id' self.plugin.lsn_manager.lsn_delete_by_network.return_value = ( 'foo_lsn_id') - nvp.handle_network_dhcp_access( + nsx.handle_network_dhcp_access( self.plugin, mock.ANY, network_id, 'delete_network') self.plugin.lsn_manager.lsn_delete_by_network.assert_called_once_with( mock.ANY, 'foo_network_id') @@ -756,7 +1158,7 @@ class DhcpTestCase(base.BaseTestCase): } self.plugin.get_subnet.return_value = subnet if exc is None: - nvp.handle_port_dhcp_access( + nsx.handle_port_dhcp_access( self.plugin, mock.ANY, port, 'create_port') (self.plugin.lsn_manager.lsn_port_dhcp_setup. assert_called_once_with(mock.ANY, port['network_id'], @@ -764,7 +1166,7 @@ class DhcpTestCase(base.BaseTestCase): else: self.plugin.lsn_manager.lsn_port_dhcp_setup.side_effect = exc self.assertRaises(n_exc.NeutronException, - nvp.handle_port_dhcp_access, + nsx.handle_port_dhcp_access, self.plugin, mock.ANY, port, 'create_port') def test_handle_create_dhcp_owner_port(self): @@ -784,7 +1186,7 @@ class DhcpTestCase(base.BaseTestCase): 'fixed_ips': [], 'mac_address': 'aa:bb:cc:dd:ee:ff' } - nvp.handle_port_dhcp_access(self.plugin, mock.ANY, port, 'delete_port') + nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, 'delete_port') self.plugin.lsn_manager.lsn_port_dispose.assert_called_once_with( mock.ANY, port['network_id'], port['mac_address']) @@ -802,7 +1204,7 @@ class DhcpTestCase(base.BaseTestCase): 'mac_address': 'aa:bb:cc:dd:ee:ff' } self.plugin.get_subnet.return_value = {'enable_dhcp': True} - nvp.handle_port_dhcp_access(self.plugin, mock.ANY, port, action) + nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action) handler.assert_called_once_with( mock.ANY, port['network_id'], 'foo_subnet_id', expected_data) @@ -824,7 +1226,7 @@ class DhcpTestCase(base.BaseTestCase): 'ip_address': '1.2.3.4'}] } self.plugin.get_subnet.return_value = {'enable_dhcp': False} - nvp.handle_port_dhcp_access(self.plugin, mock.ANY, port, action) + nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action) self.assertEqual(0, handler.call_count) def test_handle_create_user_port_disabled_dhcp(self): @@ -842,7 +1244,7 @@ class DhcpTestCase(base.BaseTestCase): 'network_id': 'foo_network_id', 'fixed_ips': [] } - nvp.handle_port_dhcp_access(self.plugin, mock.ANY, port, action) + nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action) self.assertEqual(0, handler.call_count) def 
test_handle_create_user_port_no_fixed_ips(self): @@ -869,7 +1271,7 @@ class MetadataTestCase(base.BaseTestCase): 'device_id': dev_id, 'fixed_ips': ips or [] } - nvp.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY) + nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY) self.assertFalse( self.plugin.lsn_manager.lsn_port_meta_host_add.call_count) self.assertFalse( @@ -884,7 +1286,7 @@ class MetadataTestCase(base.BaseTestCase): 'fixed_ips': [{'subnet_id': 'foo_subnet'}] } self.plugin.get_network.return_value = {'router:external': True} - nvp.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY) + nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY) self.assertFalse( self.plugin.lsn_manager.lsn_port_meta_host_add.call_count) self.assertFalse( @@ -930,10 +1332,10 @@ class MetadataTestCase(base.BaseTestCase): if raise_exc: mock_func.side_effect = p_exc.PortConfigurationError( lsn_id='foo_lsn_id', net_id='foo_net_id', port_id=None) - with mock.patch.object(nvp.db_base_plugin_v2.NeutronDbPluginV2, + with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2, 'delete_port') as d: self.assertRaises(p_exc.PortConfigurationError, - nvp.handle_port_metadata_access, + nsx.handle_port_metadata_access, self.plugin, mock.ANY, port, is_delete=is_delete) if not is_delete: @@ -941,7 +1343,7 @@ class MetadataTestCase(base.BaseTestCase): else: self.assertFalse(d.call_count) else: - nvp.handle_port_metadata_access( + nsx.handle_port_metadata_access( self.plugin, mock.ANY, port, is_delete=is_delete) mock_func.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, meta) @@ -971,17 +1373,17 @@ class MetadataTestCase(base.BaseTestCase): if not is_port_found: self.plugin.get_port.side_effect = n_exc.NotFound if raise_exc: - with mock.patch.object(nvp.l3_db.L3_NAT_db_mixin, + with mock.patch.object(nsx.l3_db.L3_NAT_db_mixin, 'remove_router_interface') as d: mock_func.side_effect = p_exc.NvpPluginException(err_msg='') self.assertRaises(p_exc.NvpPluginException, - nvp.handle_router_metadata_access, + nsx.handle_router_metadata_access, self.plugin, mock.ANY, 'foo_router_id', interface) d.assert_called_once_with(mock.ANY, mock.ANY, 'foo_router_id', interface) else: - nvp.handle_router_metadata_access( + nsx.handle_router_metadata_access( self.plugin, mock.ANY, 'foo_router_id', interface) mock_func.assert_called_once_with( mock.ANY, subnet['id'], is_port_found) diff --git a/neutron/tests/unit/nicira/test_lsn_db.py b/neutron/tests/unit/nicira/test_lsn_db.py new file mode 100644 index 000000000..f84cb7b07 --- /dev/null +++ b/neutron/tests/unit/nicira/test_lsn_db.py @@ -0,0 +1,103 @@ +# Copyright 2014 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy import orm + +from neutron import context +from neutron.db import api as db +from neutron.plugins.nicira.common import exceptions as p_exc +from neutron.plugins.nicira.dbexts import lsn_db +from neutron.tests import base + + +class LSNTestCase(base.BaseTestCase): + + def setUp(self): + super(LSNTestCase, self).setUp() + db.configure_db() + self.ctx = context.get_admin_context() + self.addCleanup(db.clear_db) + self.net_id = 'foo_network_id' + self.lsn_id = 'foo_lsn_id' + self.lsn_port_id = 'foo_port_id' + self.subnet_id = 'foo_subnet_id' + self.mac_addr = 'aa:bb:cc:dd:ee:ff' + + def test_lsn_add(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn = (self.ctx.session.query(lsn_db.Lsn). + filter_by(lsn_id=self.lsn_id).one()) + self.assertEqual(self.lsn_id, lsn.lsn_id) + + def test_lsn_remove(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn_db.lsn_remove(self.ctx, self.lsn_id) + q = self.ctx.session.query(lsn_db.Lsn).filter_by(lsn_id=self.lsn_id) + self.assertRaises(orm.exc.NoResultFound, q.one) + + def test_lsn_remove_for_network(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn_db.lsn_remove_for_network(self.ctx, self.net_id) + q = self.ctx.session.query(lsn_db.Lsn).filter_by(lsn_id=self.lsn_id) + self.assertRaises(orm.exc.NoResultFound, q.one) + + def test_lsn_get_for_network(self): + result = lsn_db.lsn_get_for_network(self.ctx, self.net_id, + raise_on_err=False) + self.assertIsNone(result) + + def test_lsn_get_for_network_raise_not_found(self): + self.assertRaises(p_exc.LsnNotFound, + lsn_db.lsn_get_for_network, + self.ctx, self.net_id) + + def test_lsn_port_add(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id, + self.subnet_id, self.mac_addr, self.lsn_id) + result = (self.ctx.session.query(lsn_db.LsnPort). + filter_by(lsn_port_id=self.lsn_port_id).one()) + self.assertEqual(self.lsn_port_id, result.lsn_port_id) + + def test_lsn_port_get_for_mac(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id, + self.subnet_id, self.mac_addr, self.lsn_id) + result = lsn_db.lsn_port_get_for_mac(self.ctx, self.mac_addr) + self.assertEqual(self.mac_addr, result.mac_addr) + + def test_lsn_port_get_for_mac_raise_not_found(self): + self.assertRaises(p_exc.LsnPortNotFound, + lsn_db.lsn_port_get_for_mac, + self.ctx, self.mac_addr) + + def test_lsn_port_get_for_subnet(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id, + self.subnet_id, self.mac_addr, self.lsn_id) + result = lsn_db.lsn_port_get_for_subnet(self.ctx, self.subnet_id) + self.assertEqual(self.subnet_id, result.sub_id) + + def test_lsn_port_get_for_subnet_raise_not_found(self): + self.assertRaises(p_exc.LsnPortNotFound, + lsn_db.lsn_port_get_for_subnet, + self.ctx, self.mac_addr) + + def test_lsn_port_remove(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn_db.lsn_port_remove(self.ctx, self.lsn_port_id) + q = (self.ctx.session.query(lsn_db.LsnPort). + filter_by(lsn_port_id=self.lsn_port_id)) + self.assertRaises(orm.exc.NoResultFound, q.one) diff --git a/neutron/tests/unit/nicira/test_lsn_lib.py b/neutron/tests/unit/nicira/test_lsn_lib.py index 86daa39aa..855a25533 100644 --- a/neutron/tests/unit/nicira/test_lsn_lib.py +++ b/neutron/tests/unit/nicira/test_lsn_lib.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2013 VMware, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -199,6 +197,30 @@ class LSNTestCase(base.BaseTestCase): lsnlib._lsn_port_get, self.cluster, "lsn_id", None) + def test_lsn_port_info_get(self): + self.mock_request.return_value = { + "tags": [ + {"scope": "n_mac_address", "tag": "fa:16:3e:27:fd:a0"}, + {"scope": "n_subnet_id", "tag": "foo_subnet_id"}, + ], + "mac_address": "aa:bb:cc:dd:ee:ff", + "ip_address": "0.0.0.0/0", + "uuid": "foo_lsn_port_id" + } + result = lsnlib.lsn_port_info_get( + self.cluster, 'foo_lsn_id', 'foo_lsn_port_id') + self.mock_request.assert_called_once_with( + 'GET', '/ws.v1/lservices-node/foo_lsn_id/lport/foo_lsn_port_id', + cluster=self.cluster) + self.assertIn('subnet_id', result) + self.assertIn('mac_address', result) + + def test_lsn_port_info_get_raise_not_found(self): + self.mock_request.side_effect = exceptions.NotFound + self.assertRaises(exceptions.NotFound, + lsnlib.lsn_port_info_get, + self.cluster, mock.ANY, mock.ANY) + def test_lsn_port_plug_network(self): lsn_id = "foo_lsn_id" lsn_port_id = "foo_lsn_port_id" diff --git a/setup.cfg b/setup.cfg index 4e3a998a8..c12b50088 100644 --- a/setup.cfg +++ b/setup.cfg @@ -93,6 +93,7 @@ console_scripts = neutron-nec-agent = neutron.plugins.nec.agent.nec_neutron_agent:main neutron-netns-cleanup = neutron.agent.netns_cleanup_util:main neutron-ns-metadata-proxy = neutron.agent.metadata.namespace_proxy:main + neutron-nsx-manage = neutron.plugins.nicira.shell:main neutron-openvswitch-agent = neutron.plugins.openvswitch.agent.ovs_neutron_agent:main neutron-ovs-cleanup = neutron.agent.ovs_cleanup_util:main neutron-ryu-agent = neutron.plugins.ryu.agent.ryu_neutron_agent:main -- 2.45.2