import hashlib
import logging
+from sqlalchemy.orm import exc as sa_exc
import webob.exc
from quantum.api.v2 import attributes as attr
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
+from quantum.db import l3_db
+from quantum.db import models_v2
from quantum.db import portsecurity_db
# NOTE: quota_db cannot be removed, it is for db model
from quantum.db import quota_db
from quantum.db import securitygroups_db
+from quantum.extensions import l3
from quantum.extensions import portsecurity as psec
from quantum.extensions import providernet as pnet
from quantum.extensions import securitygroup as ext_sg
'nova_zone_id':
nvp_conf[cluster_name].nova_zone_id,
'nvp_controller_connection':
- nvp_conf[cluster_name].nvp_controller_connection, })
- LOG.debug(_("Cluster options: %s"), clusters_options)
+ nvp_conf[cluster_name].nvp_controller_connection,
+ 'default_l3_gw_service_uuid':
+ nvp_conf[cluster_name].default_l3_gw_service_uuid})
+ LOG.debug(_("Cluster options:%s"), clusters_options)
return nvp_options, clusters_options
class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
+ l3_db.L3_NAT_db_mixin,
portsecurity_db.PortSecurityDbMixin,
securitygroups_db.SecurityGroupDbMixin,
nvp_sec.NVPSecurityGroups):
"""
supported_extension_aliases = ["provider", "quotas", "port-security",
- "security-group"]
+ "router", "security-group"]
__native_bulk_support = True
# Default controller cluster
+ # Map nova zones to cluster for easy retrieval
+ novazone_cluster_map = {}
+ # Default controller cluster (to be used when nova zone id is unspecified)
default_cluster = None
provider_network_view = "extension:provider_network:view"
nvplib.LOG.setLevel(loglevel)
NvpApiClient.LOG.setLevel(loglevel)
+ # Routines for managing logical ports in NVP
+ self._port_drivers = {
+ 'create': {l3_db.DEVICE_OWNER_ROUTER_GW:
+ self._nvp_create_ext_gw_port,
+ l3_db.DEVICE_OWNER_ROUTER_INTF:
+ self._nvp_create_port,
+ l3_db.DEVICE_OWNER_FLOATINGIP:
+ self._nvp_create_fip_port,
+ 'default': self._nvp_create_port},
+ 'delete': {l3_db.DEVICE_OWNER_ROUTER_GW:
+ self._nvp_delete_ext_gw_port,
+ l3_db.DEVICE_OWNER_ROUTER_INTF:
+ self._nvp_delete_router_port,
+ l3_db.DEVICE_OWNER_FLOATINGIP:
+ self._nvp_delete_fip_port,
+ 'default': self._nvp_delete_port}
+ }
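+        # For example, create_port() resolves its driver as
+        #   self._port_drivers['create'].get(port_data['device_owner'],
+        #       self._port_drivers['create']['default'])
+        # so a router gateway port is handled by _nvp_create_ext_gw_port
+        # while a plain VIF port falls through to _nvp_create_port.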
+
self.nvp_opts, self.clusters_opts = parse_config()
self.clusters = {}
for c_opts in self.clusters_opts:
try:
args.extend([c_opts['default_tz_uuid'],
c_opts['nvp_cluster_uuid'],
- c_opts['nova_zone_id']])
+ c_opts['nova_zone_id'],
+ c_opts['default_l3_gw_service_uuid']])
cluster.add_controller(*args)
except Exception:
LOG.exception(_("Invalid connection parameters for "
# Set up RPC interface for DHCP agent
self.setup_rpc()
+ def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None):
+ """ Build ip_addresses data structure for logical router port
+
+ No need to perform validation on IPs - this has already been
+ done in the l3_db mixin class
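+
+        For example, a fixed ip entry {'subnet_id': <uuid>, 'ip_address':
+        '10.0.0.2'} on a subnet with cidr '10.0.0.0/24' yields the prefix
+        '10.0.0.2/24' (addresses here are purely illustrative).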
+ """
+ ip_addresses = []
+ for ip in fixed_ips:
+ if not subnet_ids or (ip['subnet_id'] in subnet_ids):
+ subnet = self._get_subnet(context, ip['subnet_id'])
+ ip_prefix = '%s/%s' % (ip['ip_address'],
+ subnet['cidr'].split('/')[1])
+ ip_addresses.append(ip_prefix)
+ return ip_addresses
+
+ def _create_and_attach_router_port(self, cluster, context,
+ router_id, port_data,
+ attachment_type, attachment,
+ subnet_ids=None):
+ # Use a fake IP address if gateway port is not 'real'
+ ip_addresses = (port_data.get('fake_ext_gw') and
+ ['0.0.0.0/31'] or
+ self._build_ip_address_list(context,
+ port_data['fixed_ips'],
+ subnet_ids))
+ try:
+ lrouter_port = nvplib.create_router_lport(
+ cluster, router_id, port_data.get('tenant_id', 'fake'),
+ port_data.get('id', 'fake'), port_data.get('name', 'fake'),
+ port_data.get('admin_state_up', True), ip_addresses)
+ LOG.debug(_("Created NVP router port:%s"), lrouter_port['uuid'])
+ except NvpApiClient.NvpApiException:
+ LOG.exception(_("Unable to create port on NVP logical router %s"),
+ router_id)
+            raise nvp_exc.NvpPluginException(
+                err_msg=(_("Unable to create logical router port for "
+                           "quantum port id %(port_id)s on router "
+                           "%(router_id)s") %
+                         {'port_id': port_data.get('id'),
+                          'router_id': router_id}))
+ try:
+ # Add a L3 gateway attachment
+ # TODO(Salvatore-Orlando): Allow per router specification of
+ # l3 gw service uuid as well as per-tenant specification
+ nvplib.plug_router_port_attachment(cluster, router_id,
+ lrouter_port['uuid'],
+ attachment,
+ attachment_type)
+ LOG.debug(_("Attached %(att)s to NVP router port %(port)s"),
+ {'att': attachment, 'port': lrouter_port['uuid']})
+ except NvpApiClient.NvpApiException:
+ # Must remove NVP logical port
+ nvplib.delete_router_lport(cluster, router_id,
+ lrouter_port['uuid'])
+ LOG.exception(_("Unable to plug attachment in NVP logical "
+ "router port %(r_port_id)s, associated with "
+ "Quantum %(q_port_id)s"),
+ {'r_port_id': lrouter_port['uuid'],
+ 'q_port_id': port_data.get('id')})
+ raise nvp_exc.NvpPluginException(
+ err_msg=(_("Unable to plug attachment in router port "
+ "%(r_port_id)s for quantum port id %(q_port_id)s "
+ "on router %(router_id)s") %
+ {'r_port_id': lrouter_port['uuid'],
+ 'q_port_id': port_data.get('id'),
+ 'router_id': router_id}))
+ return lrouter_port
+
+ def _get_port_by_device_id(self, context, device_id, device_owner):
+ """ Retrieve ports associated with a specific device id.
+
+ Used for retrieving all quantum ports attached to a given router.
+ """
+ port_qry = context.session.query(models_v2.Port)
+ return port_qry.filter_by(
+ device_id=device_id,
+            device_owner=device_owner).all()
+
+ def _find_router_subnets_cidrs(self, context, router_id):
+ """ Retrieve subnets attached to the specified router """
+ ports = self._get_port_by_device_id(context, router_id,
+ l3_db.DEVICE_OWNER_ROUTER_INTF)
+ # No need to check for overlapping CIDRs
+ cidrs = []
+ for port in ports:
+ for ip in port.get('fixed_ips', []):
+ cidrs.append(self._get_subnet(context,
+ ip.subnet_id).cidr)
+ return cidrs
+
+ def _nvp_create_port(self, context, port_data):
+ """ Driver for creating a logical switch port on NVP platform """
+        # FIXME(salvatore-orlando): On the NVP platform we do not really have
+        # external networks. So if a user tries to create a "regular" VIF
+        # port on an external network we are unable to actually create it.
+        # However, in order not to break unit tests, we still need to
+        # create the DB object and return success
+ if self._network_is_external(context, port_data['network_id']):
+            LOG.error(_("NVP plugin does not support regular VIF ports on "
+                        "external networks. Port %s will be down."),
+                      port_data['id'])
+ # No need to actually update the DB state - the default is down
+ return port_data
+ network = self._get_network(context, port_data['network_id'])
+ network_binding = nicira_db.get_network_binding(
+ context.session, port_data['network_id'])
+ max_ports = self.nvp_opts.max_lp_per_overlay_ls
+ allow_extra_lswitches = False
+ if (network_binding and
+ network_binding.binding_type in (NetworkTypes.FLAT,
+ NetworkTypes.VLAN)):
+ max_ports = self.nvp_opts.max_lp_per_bridged_ls
+ allow_extra_lswitches = True
+ try:
+ cluster = self._find_target_cluster(port_data)
+ selected_lswitch = self._handle_lswitch_selection(
+ cluster, network, network_binding, max_ports,
+ allow_extra_lswitches)
+ lswitch_uuid = selected_lswitch['uuid']
+ lport = nvplib.create_lport(cluster,
+ lswitch_uuid,
+ port_data['tenant_id'],
+ port_data['id'],
+ port_data['name'],
+ port_data['device_id'],
+ port_data['admin_state_up'],
+ port_data['mac_address'],
+ port_data['fixed_ips'],
+ port_data[psec.PORTSECURITY],
+ port_data[ext_sg.SECURITYGROUPS])
+ nicira_db.add_quantum_nvp_port_mapping(
+ context.session, port_data['id'], lport['uuid'])
+ d_owner = port_data['device_owner']
+            if d_owner not in (l3_db.DEVICE_OWNER_ROUTER_GW,
+                               l3_db.DEVICE_OWNER_ROUTER_INTF):
+ nvplib.plug_interface(cluster, lswitch_uuid,
+ lport['uuid'], "VifAttachment",
+ port_data['id'])
+ LOG.debug(_("_nvp_create_port completed for port %(port_name)s "
+ "on network %(net_id)s. The new port id is "
+ "%(port_id)s. NVP port id is %(nvp_port_id)s"),
+ {'port_name': port_data['name'],
+ 'net_id': port_data['network_id'],
+ 'port_id': port_data['id'],
+ 'nvp_port_id': lport['uuid']})
+        except Exception:
+            # Failed to create the port on NVP; the caller will remove
+            # the port from the quantum DB
+            LOG.exception(_("An exception occurred while plugging "
+                            "the interface"))
+            raise
+
+ def _nvp_delete_port(self, context, port_data):
+ # FIXME(salvatore-orlando): On the NVP platform we do not really have
+ # external networks. So deleting regular ports from external networks
+ # does not make sense. However we cannot raise as this would break
+ # unit tests.
+ if self._network_is_external(context, port_data['network_id']):
+            LOG.error(_("NVP plugin does not support regular VIF ports on "
+                        "external networks. Port %s will be down."),
+                      port_data['id'])
+ return
+
+        nvp_port_id = nicira_db.get_nvp_port_id(context.session,
+                                                port_data['id'])
+        if nvp_port_id is None:
+            raise q_exc.PortNotFound(port_id=port_data['id'])
+ # TODO(bgh): if this is a bridged network and the lswitch we just got
+ # back will have zero ports after the delete we should garbage collect
+ # the lswitch.
+        nvplib.delete_port(self.default_cluster,
+                           port_data['network_id'],
+                           nvp_port_id)
+ LOG.debug(_("_nvp_delete_port completed for port %(port_id)s "
+ "on network %(net_id)s"),
+ {'port_id': port_data['id'],
+ 'net_id': port_data['network_id']})
+
+ def _nvp_delete_router_port(self, context, port_data):
+ # Delete logical router port
+ lrouter_id = port_data['device_id']
+ nvp_port_id = nicira_db.get_nvp_port_id(context.session,
+ port_data['id'])
+ if not nvp_port_id:
+ raise q_exc.PortNotFound(port_id=port_data['id'])
+
+ try:
+ nvplib.delete_peer_router_lport(self.default_cluster,
+ lrouter_id,
+ port_data['network_id'],
+ nvp_port_id)
+ except (NvpApiClient.NvpApiException, NvpApiClient.ResourceNotFound):
+            # Do not raise because the issue might well be that the
+            # router has already been deleted, so there would be nothing
+            # to do here
+ LOG.exception(_("Ignoring exception as this means the peer "
+ "for port '%s' has already been deleted."),
+ nvp_port_id)
+
+ # Delete logical switch port
+ self._nvp_delete_port(context, port_data)
+
+ def _find_router_gw_port(self, context, port_data):
+ router_id = port_data['device_id']
+ cluster = self._find_target_cluster(port_data)
+ if not router_id:
+            raise q_exc.BadRequest(
+                resource='port',
+                msg=_("device_id field must be populated in order to "
+                      "create an external gateway port for network %s")
+                % port_data['network_id'])
+
+ lr_port = nvplib.find_router_gw_port(context, cluster, router_id)
+ if not lr_port:
+ raise nvp_exc.NvpPluginException(
+ err_msg=(_("The gateway port for the router %s "
+ "was not found on the NVP backend")
+ % router_id))
+ return lr_port
+
+ def _nvp_create_ext_gw_port(self, context, port_data):
+ """ Driver for creating an external gateway port on NVP platform """
+        # TODO(salvatore-orlando): Handle NVP resource
+        # rollback when something does not go as expected
+ lr_port = self._find_router_gw_port(context, port_data)
+ ip_addresses = self._build_ip_address_list(context,
+ port_data['fixed_ips'])
+        # This operation actually always updates an NVP logical port
+        # instead of creating one. This is because the gateway port
+        # is created at the same time as the NVP logical router, otherwise
+        # the fabric status of the NVP router would be down.
+        # admin_status should always be up for the gateway port
+        # regardless of what the user specifies in quantum
+ cluster = self._find_target_cluster(port_data)
+ router_id = port_data['device_id']
+ nvplib.update_router_lport(cluster,
+ router_id,
+ lr_port['uuid'],
+ port_data['tenant_id'],
+ port_data['id'],
+ port_data['name'],
+ True,
+ ip_addresses)
+ # Set the SNAT rule for each subnet (only first IP)
+ for cidr in self._find_router_subnets_cidrs(context, router_id):
+ nvplib.create_lrouter_snat_rule(
+ cluster, router_id,
+ ip_addresses[0].split('/')[0],
+ ip_addresses[0].split('/')[0],
+ source_ip_addresses=cidr)
+
+ LOG.debug(_("_nvp_create_ext_gw_port completed on external network "
+ "%(ext_net_id)s, attached to router:%(router_id)s. "
+ "NVP port id is %(nvp_port_id)s"),
+ {'ext_net_id': port_data['network_id'],
+ 'router_id': router_id,
+ 'nvp_port_id': lr_port['uuid']})
+
+ def _nvp_delete_ext_gw_port(self, context, port_data):
+ lr_port = self._find_router_gw_port(context, port_data)
+        # TODO(salvatore-orlando): Handle NVP resource
+        # rollback when something does not go as expected
+        try:
+            # Delete is actually never a real delete, otherwise the NVP
+            # logical router would stop working
+ cluster = self._find_target_cluster(port_data)
+ router_id = port_data['device_id']
+ nvplib.update_router_lport(cluster,
+ router_id,
+ lr_port['uuid'],
+ port_data['tenant_id'],
+ port_data['id'],
+ port_data['name'],
+ True,
+ ['0.0.0.0/31'])
+ # Delete the SNAT rule for each subnet
+ for cidr in self._find_router_subnets_cidrs(context, router_id):
+ nvplib.delete_nat_rules_by_match(
+ cluster, router_id, "SourceNatRule",
+ max_num_expected=1, min_num_expected=1,
+ source_ip_addresses=cidr)
+
+ except NvpApiClient.ResourceNotFound:
+ raise nvp_exc.NvpPluginException(
+ err_msg=_("Logical router resource %s not found "
+ "on NVP platform") % router_id)
+ except NvpApiClient.NvpApiException:
+ raise nvp_exc.NvpPluginException(
+                err_msg=_("Unable to update logical router "
+                          "on NVP Platform"))
+ LOG.debug(_("_nvp_delete_ext_gw_port completed on external network "
+ "%(ext_net_id)s, attached to router:%(router_id)s"),
+ {'ext_net_id': port_data['network_id'],
+ 'router_id': router_id})
+
+ def _nvp_create_fip_port(self, context, port_data):
+ # As we do not create ports for floating IPs in NVP,
+ # this is a no-op driver
+ pass
+
+ def _nvp_delete_fip_port(self, context, port_data):
+ # As we do not create ports for floating IPs in NVP,
+ # this is a no-op driver
+ pass
+
def _extend_fault_map(self):
""" Extends the Quantum Fault Map
"logical network %s"), network.id)
raise nvp_exc.NvpNoMorePortsException(network=network.id)
+ def _ensure_metadata_host_route(self, context, fixed_ip_data,
+ is_delete=False):
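+        # Keep the subnet host route for the metadata server
+        # (destination 169.254.169.254/32) pointing at the DHCP port's
+        # address: e.g. a DHCP port with fixed ip 10.0.0.2 (illustrative)
+        # results in a route 169.254.169.254/32 via 10.0.0.2.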
+ subnet = self._get_subnet(context, fixed_ip_data['subnet_id'])
+ metadata_routes = [r for r in subnet.routes
+ if r['destination'] == '169.254.169.254/32']
+ if metadata_routes:
+ # We should have only a single metadata route at any time
+ # because the route logic forbids two routes with the same
+ # destination. Update next hop with the provided IP address
+ if not is_delete:
+ metadata_routes[0].nexthop = fixed_ip_data['ip_address']
+ else:
+ context.session.delete(metadata_routes[0])
+ else:
+ # add the metadata route
+ route = models_v2.Route(subnet_id=subnet.id,
+ destination='169.254.169.254/32',
+ nexthop=fixed_ip_data['ip_address'])
+ context.session.add(route)
+
def setup_rpc(self):
# RPC support for dhcp
self.topic = topics.PLUGIN
return networks
def create_network(self, context, network):
- net_data = network['network'].copy()
+ net_data = network['network']
tenant_id = self._get_tenant_id_for_create(context, net_data)
self._ensure_default_security_group(context, tenant_id)
# Process the provider network extension
"supported by this plugin. Ignoring setting for "
"network %s"), net_data.get('name', '<unknown>'))
target_cluster = self._find_target_cluster(net_data)
- nvp_binding_type = net_data.get(pnet.NETWORK_TYPE)
- if nvp_binding_type in ('flat', 'vlan'):
- nvp_binding_type = 'bridge'
- lswitch = nvplib.create_lswitch(
- target_cluster, tenant_id, net_data.get('name'),
- nvp_binding_type,
- net_data.get(pnet.PHYSICAL_NETWORK),
- net_data.get(pnet.SEGMENTATION_ID))
- network['network']['id'] = lswitch['uuid']
+ external = net_data.get(l3.EXTERNAL)
+        if not attr.is_attr_set(external) or not external:
+ nvp_binding_type = net_data.get(pnet.NETWORK_TYPE)
+ if nvp_binding_type in ('flat', 'vlan'):
+ nvp_binding_type = 'bridge'
+ lswitch = nvplib.create_lswitch(
+ target_cluster, tenant_id, net_data.get('name'),
+ nvp_binding_type,
+ net_data.get(pnet.PHYSICAL_NETWORK),
+ net_data.get(pnet.SEGMENTATION_ID))
+ net_data['id'] = lswitch['uuid']
with context.session.begin(subtransactions=True):
new_net = super(NvpPluginV2, self).create_network(context,
network)
- self._process_network_create_port_security(context,
- network['network'])
+ # Ensure there's an id in net_data
+ net_data['id'] = new_net['id']
+ self._process_network_create_port_security(context, net_data)
+ # DB Operations for setting the network as external
+ self._process_l3_create(context, net_data, new_net['id'])
if net_data.get(pnet.NETWORK_TYPE):
net_binding = nicira_db.add_network_binding(
context.session, new_net['id'],
self._extend_network_dict_provider(context, new_net,
net_binding)
self._extend_network_port_security_dict(context, new_net)
+ self._extend_network_dict_l3(context, new_net)
return new_net
def delete_network(self, context, id):
- super(NvpPluginV2, self).delete_network(context, id)
+ """
+ Deletes the network with the specified network identifier
+ belonging to the specified tenant.
- # FIXME(salvatore-orlando): Failures here might lead NVP
- # and quantum state to diverge
- pairs = self._get_lswitch_cluster_pairs(id, context.tenant_id)
- for (cluster, switches) in pairs:
- nvplib.delete_networks(cluster, id, switches)
+ :returns: None
+ :raises: exception.NetworkInUse
+ :raises: exception.NetworkNotFound
+ """
+ external = self._network_is_external(context, id)
+        # Before deleting ports, ensure the peer of an NVP logical
+        # port with a patch attachment is removed too
+ port_filter = {'network_id': [id],
+ 'device_owner': ['network:router_interface']}
+ router_iface_ports = self.get_ports(context, filters=port_filter)
+ for port in router_iface_ports:
+ nvp_port_id = nicira_db.get_nvp_port_id(context.session,
+ port['id'])
+ if nvp_port_id:
+ port['nvp_port_id'] = nvp_port_id
+ else:
+                LOG.warning(_("An NVP lport identifier was not found for "
+                              "quantum port '%s'"), port['id'])
+
+ super(NvpPluginV2, self).delete_network(context, id)
+ # clean up network owned ports
+ for port in router_iface_ports:
+ try:
+ if 'nvp_port_id' in port:
+ nvplib.delete_peer_router_lport(self.default_cluster,
+ port['device_id'],
+ port['network_id'],
+ port['nvp_port_id'])
+ except (TypeError, KeyError,
+ NvpApiClient.NvpApiException,
+ NvpApiClient.ResourceNotFound):
+                # Do not raise because the issue might well be that the
+                # router has already been deleted, so there would be nothing
+                # to do here
+                LOG.warning(_("Ignoring exception as this means the peer "
+                              "for port '%s' has already been deleted."),
+                            port['id'])
+
+ # Do not go to NVP for external networks
+ if not external:
+ # FIXME(salvatore-orlando): Failures here might lead NVP
+ # and quantum state to diverge
+ pairs = self._get_lswitch_cluster_pairs(id, context.tenant_id)
+ for (cluster, switches) in pairs:
+ nvplib.delete_networks(cluster, id, switches)
LOG.debug(_("delete_network completed for tenant: %s"),
context.tenant_id)
return pairs
def get_network(self, context, id, fields=None):
+ """
+ Retrieves all attributes of the network, NOT including
+ the ports of that network.
+
+        :returns: a mapping with the following signature:
+                    {'id': UUID representing the network.
+                     'name': Human-readable name identifying the network.
+                     'tenant_id': Owner of network. Only admin users
+                                  can specify a tenant_id other than
+                                  their own.
+                     'admin_state_up': Sets admin state of network. If down,
+                                       network does not forward packets.
+                     'status': Indicates whether network is currently
+                               operational (values are "ACTIVE", "DOWN",
+                               "BUILD", and "ERROR").
+ 'subnets': Subnets associated with this network. Plan
+ to allow fully specified subnets as part of
+ network create.
+ }
+
+ :raises: exception.NetworkNotFound
+ :raises: exception.QuantumException
+ """
with context.session.begin(subtransactions=True):
+            # go to the plugin DB and fetch the network
network = self._get_network(context, id)
+ # if the network is external, do not go to NVP
+ if not self._network_is_external(context, id):
+ # verify the fabric status of the corresponding
+ # logical switch(es) in nvp
+ try:
+                # FIXME(salvatore-orlando): This is not going to work
+                # unless we store the nova_id in the database once we
+                # enable multiple clusters
+ cluster = self._find_target_cluster(network)
+ lswitches = nvplib.get_lswitches(cluster, id)
+ nvp_net_status = constants.NET_STATUS_ACTIVE
+ quantum_status = network.status
+ for lswitch in lswitches:
+ relations = lswitch.get('_relations')
+ if relations:
+ lswitch_status = relations.get(
+ 'LogicalSwitchStatus')
+ # FIXME(salvatore-orlando): Being unable to fetch
+ # logical switch status should be an exception.
+ if (lswitch_status and
+ not lswitch_status.get('fabric_status',
+ None)):
+ nvp_net_status = constants.NET_STATUS_DOWN
+ break
+ LOG.debug(_("Current network status:%(nvp_net_status)s; "
+ "Status in Quantum DB:%(quantum_status)s"),
+ locals())
+ if nvp_net_status != network.status:
+ # update the network status
+ network.status = nvp_net_status
+ except Exception:
+ err_msg = _("Unable to get logical switches")
+ LOG.exception(err_msg)
+ raise nvp_exc.NvpPluginException(err_msg=err_msg)
+ # Don't do field selection here otherwise we won't be able
+ # to add provider networks fields
net_result = self._make_network_dict(network, None)
self._extend_network_dict_provider(context, net_result)
self._extend_network_port_security_dict(context, net_result)
-
- # verify the fabric status of the corresponding
- # logical switch(es) in nvp
- try:
- # FIXME(salvatore-orlando): This is not going to work unless
- # nova_id is stored in db once multiple clusters are enabled
- cluster = self._find_target_cluster(network)
- # Returns multiple lswitches if provider network.
- lswitches = nvplib.get_lswitches(cluster, id)
- for lswitch in lswitches:
- lswitch_status = (lswitch['_relations']['LogicalSwitchStatus']
- ['fabric_status'])
- if not lswitch_status:
- net_result['status'] = constants.NET_STATUS_DOWN
- break
- else:
- net_result['status'] = constants.NET_STATUS_ACTIVE
- except Exception:
- err_msg = _("Unable to get lswitches")
- LOG.exception(err_msg)
- raise nvp_exc.NvpPluginException(err_msg=err_msg)
-
- # Don't do field selection here otherwise we won't be able
- # to add provider networks fields
+ self._extend_network_dict_l3(context, net_result)
return self._fields(net_result, fields)
def get_networks(self, context, filters=None, fields=None):
- nvp_lswitches = []
+ nvp_lswitches = {}
+ filters = filters or {}
with context.session.begin(subtransactions=True):
quantum_lswitches = (
super(NvpPluginV2, self).get_networks(context, filters))
for net in quantum_lswitches:
self._extend_network_dict_provider(context, net)
self._extend_network_port_security_dict(context, net)
-
- if context.is_admin and not filters.get("tenant_id"):
+ self._extend_network_dict_l3(context, net)
+ quantum_lswitches = self._filter_nets_l3(context,
+ quantum_lswitches,
+ filters)
+        tenant_ids = filters.get('tenant_id')
+ filter_fmt = "&tag=%s&tag_scope=os_tid"
+ if context.is_admin and not tenant_ids:
tenant_filter = ""
- elif filters.get("tenant_id"):
- tenant_filter = ""
- for tenant in filters.get("tenant_id"):
- tenant_filter += "&tag=%s&tag_scope=os_tid" % tenant
else:
- tenant_filter = "&tag=%s&tag_scope=os_tid" % context.tenant_id
+ tenant_ids = tenant_ids or [context.tenant_id]
+ tenant_filter = ''.join(filter_fmt % tid for tid in tenant_ids)
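+            # e.g. tenant ids ['t1', 't2'] (illustrative) yield the query
+            # fragment "&tag=t1&tag_scope=os_tid&tag=t2&tag_scope=os_tid"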
lswitch_filters = "uuid,display_name,fabric_status,tags"
lswitch_url_path = (
for c in self.clusters.itervalues():
res = nvplib.get_all_query_pages(
lswitch_url_path, c)
-
- nvp_lswitches.extend(res)
+ nvp_lswitches.update(dict(
+ (ls['uuid'], ls) for ls in res))
except Exception:
err_msg = _("Unable to get logical switches")
LOG.exception(err_msg)
raise nvp_exc.NvpPluginException(err_msg=err_msg)
- # TODO (Aaron) This can be optimized
- if filters.get("id"):
- filtered_lswitches = []
- for nvp_lswitch in nvp_lswitches:
- for id in filters.get("id"):
- if id == nvp_lswitch['uuid']:
- filtered_lswitches.append(nvp_lswitch)
- nvp_lswitches = filtered_lswitches
+ if filters.get('id'):
+ nvp_lswitches = dict(
+ (uuid, ls) for (uuid, ls) in nvp_lswitches.iteritems()
+ if uuid in set(filters['id']))
for quantum_lswitch in quantum_lswitches:
- for nvp_lswitch in nvp_lswitches:
- # TODO(salvatore-orlando): watch out for "extended" lswitches
- if nvp_lswitch['uuid'] == quantum_lswitch["id"]:
- if (nvp_lswitch["_relations"]["LogicalSwitchStatus"]
- ["fabric_status"]):
- quantum_lswitch["status"] = constants.NET_STATUS_ACTIVE
- else:
- quantum_lswitch["status"] = constants.NET_STATUS_DOWN
- quantum_lswitch["name"] = nvp_lswitch["display_name"]
- nvp_lswitches.remove(nvp_lswitch)
- break
- else:
+ # Skip external networks as they do not exist in NVP
+ if quantum_lswitch[l3.EXTERNAL]:
+ continue
+ elif quantum_lswitch['id'] not in nvp_lswitches:
raise nvp_exc.NvpOutOfSyncException()
+ # TODO(salvatore-orlando): be careful about "extended"
+ # logical switches
+ ls = nvp_lswitches.pop(quantum_lswitch['id'])
+ if (ls["_relations"]["LogicalSwitchStatus"]["fabric_status"]):
+ quantum_lswitch["status"] = constants.NET_STATUS_ACTIVE
+ else:
+ quantum_lswitch["status"] = constants.NET_STATUS_DOWN
+
# do not make the case in which switches are found in NVP
# but not in Quantum catastrophic.
if len(nvp_lswitches):
"networks are not "
"supported."))
with context.session.begin(subtransactions=True):
- quantum_db = super(NvpPluginV2, self).update_network(
- context, id, network)
+ net = super(NvpPluginV2, self).update_network(context, id, network)
if psec.PORTSECURITY in network['network']:
self._update_network_security_binding(
context, id, network['network'][psec.PORTSECURITY])
- self._extend_network_port_security_dict(
- context, quantum_db)
- return quantum_db
+ self._extend_network_port_security_dict(context, net)
+ self._process_l3_update(context, network['network'], id)
+ self._extend_network_dict_provider(context, net)
+ self._extend_network_dict_l3(context, net)
+ return net
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
for quantum_lport in quantum_lports:
self._extend_port_port_security_dict(context, quantum_lport)
self._extend_port_dict_security_group(context, quantum_lport)
+        if (filters and filters.get('network_id') and
+                self._network_is_external(context,
+                                          filters['network_id'][0])):
+ # Do not perform check on NVP platform
+ return quantum_lports
vm_filter = ""
tenant_filter = ""
lports = []
for quantum_lport in quantum_lports:
+            # if a quantum port is not found in NVP, this might be because
+            # the port is not mapped to a logical switch, e.g. a floating
+            # ip or a router gateway port
+ if quantum_lport['device_owner'] in (l3_db.DEVICE_OWNER_FLOATINGIP,
+ l3_db.DEVICE_OWNER_ROUTER_GW):
+ lports.append(quantum_lport)
+ continue
try:
quantum_lport["admin_state_up"] = (
nvp_lports[quantum_lport["id"]]["admin_status_enabled"])
- quantum_lport["name"] = (
- nvp_lports[quantum_lport["id"]]["display_name"])
-
if (nvp_lports[quantum_lport["id"]]
["_relations"]
["LogicalPortStatus"]
self.port_security_enabled_create)
port_data = port['port']
with context.session.begin(subtransactions=True):
+            # TODO(salvatore-orlando): verify whether subtransactions can
+            # help us avoid multiple operations on the db. This might also
+            # allow us to use the same identifier for the NVP and the
+            # Quantum port
+            # Set admin_state_up False since the port has not been created
+            # in NVP yet; the requested state is restored after creation
+            requested_admin_state = port["port"]["admin_state_up"]
+            port["port"]["admin_state_up"] = False
+
# First we allocate port in quantum database
quantum_db = super(NvpPluginV2, self).create_port(context, port)
+            # If we have just created a dhcp port, and metadata requests
+            # are forwarded there, we need to verify that the appropriate
+            # host route is in place
+ if (cfg.CONF.metadata_dhcp_host_route and
+ (quantum_db.get('device_owner') ==
+ constants.DEVICE_OWNER_DHCP)):
+                if quantum_db.get('fixed_ips'):
+                    self._ensure_metadata_host_route(
+                        context, quantum_db['fixed_ips'][0])
# Update fields obtained from quantum db (eg: MAC address)
port["port"].update(quantum_db)
-
# port security extension checks
(port_security, has_ip) = self._determine_port_security_and_has_ip(
context, port_data)
context, quantum_db['id'], port_data[ext_sg.SECURITYGROUPS])
# provider networking extension checks
# Fetch the network and network binding from Quantum db
- network = self._get_network(context, port_data['network_id'])
- network_binding = nicira_db.get_network_binding(
- context.session, port_data['network_id'])
- max_ports = self.nvp_opts.max_lp_per_overlay_ls
- allow_extra_lswitches = False
- if (network_binding and
- network_binding.binding_type in (NetworkTypes.FLAT,
- NetworkTypes.VLAN)):
- max_ports = self.nvp_opts.max_lp_per_bridged_ls
- allow_extra_lswitches = True
try:
- q_net_id = port_data['network_id']
- cluster = self._find_target_cluster(port_data)
- selected_lswitch = self._handle_lswitch_selection(
- cluster, network, network_binding, max_ports,
- allow_extra_lswitches)
- lswitch_uuid = selected_lswitch['uuid']
- lport = nvplib.create_lport(cluster,
- lswitch_uuid,
- port_data['tenant_id'],
- port_data['id'],
- port_data['name'],
- port_data['device_id'],
- port_data['admin_state_up'],
- port_data['mac_address'],
- port_data['fixed_ips'],
- port_data[psec.PORTSECURITY],
- port_data[ext_sg.SECURITYGROUPS])
- # Get NVP ls uuid for quantum network
- nvplib.plug_interface(cluster, selected_lswitch['uuid'],
- lport['uuid'], "VifAttachment",
- port_data['id'])
- except nvp_exc.NvpNoMorePortsException as e:
- LOG.error(_("Number of available ports for network %s "
- "exhausted"), port_data['network_id'])
- raise e
- except Exception:
- # failed to create port in NVP delete port from quantum_db
+ port_data = port['port'].copy()
+ port_data['admin_state_up'] = requested_admin_state
+ port_create_func = self._port_drivers['create'].get(
+ port_data['device_owner'],
+ self._port_drivers['create']['default'])
+
+ port_create_func(context, port_data)
+ except Exception as e:
+ # failed to create port in NVP - Delete port from quantum_db
# FIXME (arosen) or the plugin_interface call failed in which
# case we need to garbage collect the left over port in nvp.
- err_msg = _("An exception occured while plugging the interface"
- " in NVP for port %s") % port_data['id']
+            err_msg = _("An exception occurred while plugging the "
+                        "interface in NVP for port %s") % port_data['id']
LOG.exception(err_msg)
- raise nvp_exc.NvpPluginException(err_desc=err_msg)
+ try:
+ super(NvpPluginV2, self).delete_port(context,
+ port['port']['id'])
+ except q_exc.PortNotFound:
+ LOG.warning(_("The delete port operation failed for %s. "
+ "This means the port was already deleted"),
+ port['port']['id'])
+ raise e
LOG.debug(_("create_port completed on NVP for tenant "
"%(tenant_id)s: (%(id)s)"), port_data)
def update_port(self, context, id, port):
self._enforce_set_auth(context, port,
self.port_security_enabled_update)
- tenant_id = self._get_tenant_id_for_create(context, port)
delete_security_groups = self._check_update_deletes_security_groups(
port)
has_security_groups = self._check_update_has_security_groups(port)
+
with context.session.begin(subtransactions=True):
ret_port = super(NvpPluginV2, self).update_port(
context, id, port)
ret_port.update(port['port'])
tenant_id = self._get_tenant_id_for_create(context, ret_port)
+ # TODO(salvatore-orlando): We might need transaction management
+ # but the change for metadata support should not be too disruptive
+ fixed_ip_data = port['port'].get('fixed_ips')
+ if (cfg.CONF.metadata_dhcp_host_route and
+ ret_port.get('device_owner') == constants.DEVICE_OWNER_DHCP
+ and fixed_ip_data):
+ self._ensure_metadata_host_route(context,
+ fixed_ip_data[0],
+ is_delete=True)
+
# populate port_security setting
if psec.PORTSECURITY not in port['port']:
ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
context, id, ret_port[psec.PORTSECURITY])
self._extend_port_port_security_dict(context, ret_port)
self._extend_port_dict_security_group(context, ret_port)
- port_nvp, cluster = (
- nvplib.get_port_by_quantum_tag(self.clusters.itervalues(),
- ret_port["network_id"], id))
LOG.debug(_("Update port request: %s"), port)
- nvplib.update_port(cluster, ret_port['network_id'],
- port_nvp['uuid'], id, tenant_id,
+ nvp_port_id = nicira_db.get_nvp_port_id(context.session, id)
+ nvplib.update_port(self.default_cluster,
+ ret_port['network_id'],
+ nvp_port_id, id, tenant_id,
ret_port['name'], ret_port['device_id'],
ret_port['admin_state_up'],
ret_port['mac_address'],
# the status.
try:
ret_port['status'] = nvplib.get_port_status(
- cluster, ret_port['network_id'], port_nvp['uuid'])
+ self.default_cluster, ret_port['network_id'], nvp_port_id)
except:
- LOG.warn(_("Unable to retrieve port status for: %s."),
- port_nvp['uuid'])
+ LOG.warn(_("Unable to retrieve port status for:%s."), nvp_port_id)
return ret_port
- def delete_port(self, context, id):
- # TODO(salvatore-orlando): pass only actual cluster
- port, cluster = nvplib.get_port_by_quantum_tag(
- self.clusters.itervalues(), '*', id)
- if port is None:
- raise q_exc.PortNotFound(port_id=id)
- # TODO(bgh): if this is a bridged network and the lswitch we just got
- # back will have zero ports after the delete we should garbage collect
- # the lswitch.
- nvplib.delete_port(cluster, port)
-
- LOG.debug(_("delete_port() completed for tenant: %s"),
- context.tenant_id)
- return super(NvpPluginV2, self).delete_port(context, id)
+ def delete_port(self, context, id, l3_port_check=True):
+        # if needed, check to see if this is a port owned by
+        # an L3 router. If so, we should prevent deletion here
+ if l3_port_check:
+ self.prevent_l3_port_deletion(context, id)
+ quantum_db_port = self._get_port(context, id)
+ port_delete_func = self._port_drivers['delete'].get(
+ quantum_db_port.device_owner,
+ self._port_drivers['delete']['default'])
+
+ port_delete_func(context, quantum_db_port)
+ self.disassociate_floatingips(context, id)
+ with context.session.begin(subtransactions=True):
+ if (cfg.CONF.metadata_dhcp_host_route and
+ quantum_db_port.device_owner == constants.DEVICE_OWNER_DHCP):
+ self._ensure_metadata_host_route(
+ context, quantum_db_port.fixed_ips[0], is_delete=True)
+ super(NvpPluginV2, self).delete_port(context, id)
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
- quantum_db = super(NvpPluginV2, self).get_port(context, id, fields)
- self._extend_port_port_security_dict(context, quantum_db)
- self._extend_port_dict_security_group(context, quantum_db)
-
- #TODO: pass only the appropriate cluster here
- #Look for port in all lswitches
- port, cluster = (
- nvplib.get_port_by_quantum_tag(self.clusters.itervalues(),
- "*", id))
-
- quantum_db["admin_state_up"] = port["admin_status_enabled"]
- if port["_relations"]["LogicalPortStatus"]["fabric_status_up"]:
- quantum_db["status"] = constants.PORT_STATUS_ACTIVE
+ quantum_db_port = super(NvpPluginV2, self).get_port(context,
+ id, fields)
+ self._extend_port_port_security_dict(context, quantum_db_port)
+ self._extend_port_dict_security_group(context, quantum_db_port)
+
+ if self._network_is_external(context,
+ quantum_db_port['network_id']):
+ return quantum_db_port
+
+ nvp_id = nicira_db.get_nvp_port_id(context.session, id)
+            # TODO: pass the appropriate cluster here
+ port = nvplib.get_logical_port_status(
+ self.default_cluster, quantum_db_port['network_id'], nvp_id)
+ quantum_db_port["admin_state_up"] = port["admin_status_enabled"]
+ if port["fabric_status_up"]:
+ quantum_db_port["status"] = constants.PORT_STATUS_ACTIVE
+ else:
+ quantum_db_port["status"] = constants.PORT_STATUS_DOWN
+ return quantum_db_port
+
+ def create_router(self, context, router):
+ # NOTE(salvatore-orlando): We completely override this method in
+ # order to be able to use the NVP ID as Quantum ID
+ # TODO(salvatore-orlando): Propose upstream patch for allowing
+ # 3rd parties to specify IDs as we do with l2 plugin
+ r = router['router']
+ has_gw_info = False
+ tenant_id = self._get_tenant_id_for_create(context, r)
+ # default value to set - nvp wants it (even if we don't have it)
+ nexthop = '1.1.1.1'
+ try:
+            # if external gateway info is set, then configure nexthop to
+            # default external gateway
+            if r.get('external_gateway_info'):
+ has_gw_info = True
+ gw_info = r['external_gateway_info']
+ del r['external_gateway_info']
+ # The following DB read will be performed again when updating
+ # gateway info. This is not great, but still better than
+ # creating NVP router here and updating it later
+                network_id = gw_info.get('network_id')
+ if network_id:
+ ext_net = self._get_network(context, network_id)
+ if not self._network_is_external(context, network_id):
+ msg = (_("Network '%s' is not a valid external "
+ "network") % network_id)
+ raise q_exc.BadRequest(resource='router', msg=msg)
+ if len(ext_net.subnets):
+ ext_subnet = ext_net.subnets[0]
+ nexthop = ext_subnet.gateway_ip
+ cluster = self._find_target_cluster(router)
+ lrouter = nvplib.create_lrouter(cluster, tenant_id,
+ router['router']['name'],
+ nexthop)
+            # Use the NVP identifier for the Quantum resource
+ router['router']['id'] = lrouter['uuid']
+ except NvpApiClient.NvpApiException:
+ raise nvp_exc.NvpPluginException(
+ err_msg=_("Unable to create logical router on NVP Platform"))
+ # Create the port here - and update it later if we have gw_info
+ self._create_and_attach_router_port(cluster,
+ context,
+ lrouter['uuid'],
+ {'fake_ext_gw': True},
+ "L3GatewayAttachment",
+ cluster.default_l3_gw_service_uuid)
+
+ with context.session.begin(subtransactions=True):
+ router_db = l3_db.Router(id=lrouter['uuid'],
+ tenant_id=tenant_id,
+ name=r['name'],
+ admin_state_up=r['admin_state_up'],
+ status="ACTIVE")
+ context.session.add(router_db)
+ if has_gw_info:
+ self._update_router_gw_info(context, router_db['id'], gw_info)
+ return self._make_router_dict(router_db)
+
+ def update_router(self, context, id, router):
+ try:
+ # Either nexthop is updated or should be kept as it was before
+ r = router['router']
+ nexthop = None
+            if r.get('external_gateway_info'):
+ gw_info = r['external_gateway_info']
+ # The following DB read will be performed again when updating
+ # gateway info. This is not great, but still better than
+ # creating NVP router here and updating it later
+                network_id = gw_info.get('network_id')
+ if network_id:
+ ext_net = self._get_network(context, network_id)
+ if not self._network_is_external(context, network_id):
+ msg = (_("Network '%s' is not a valid external "
+ "network") % network_id)
+ raise q_exc.BadRequest(resource='router', msg=msg)
+ if len(ext_net.subnets):
+ ext_subnet = ext_net.subnets[0]
+ nexthop = ext_subnet.gateway_ip
+ cluster = self._find_target_cluster(router)
+ nvplib.update_lrouter(cluster, id,
+ router['router'].get('name'), nexthop)
+ except NvpApiClient.ResourceNotFound:
+ raise nvp_exc.NvpPluginException(
+ err_msg=_("Logical router %s not found on NVP Platform") % id)
+ except NvpApiClient.NvpApiException:
+ raise nvp_exc.NvpPluginException(
+ err_msg=_("Unable to update logical router on NVP Platform"))
+ return super(NvpPluginV2, self).update_router(context, id, router)
+
+ def delete_router(self, context, id):
+ with context.session.begin(subtransactions=True):
+ super(NvpPluginV2, self).delete_router(context, id)
+ # If removal is successful in Quantum it should be so on
+ # the NVP platform too - otherwise the transaction should
+ # be automatically aborted
+ # TODO(salvatore-orlando): Extend the object models in order to
+ # allow an extra field for storing the cluster information
+ # together with the resource
+ try:
+ nvplib.delete_lrouter(self.default_cluster, id)
+ except NvpApiClient.ResourceNotFound:
+ raise nvp_exc.NvpPluginException(
+ err_msg=(_("Logical router '%s' not found "
+ "on NVP Platform") % id))
+ except NvpApiClient.NvpApiException:
+ raise nvp_exc.NvpPluginException(
+                err_msg=(_("Unable to delete logical router "
+                           "on NVP Platform")))
+
+ def get_router(self, context, id, fields=None):
+ router = self._get_router(context, id)
+ try:
+ # FIXME(salvatore-orlando): We need to
+ # find the appropriate cluster!
+ cluster = self.default_cluster
+ lrouter = nvplib.get_lrouter(cluster, id)
+ router_op_status = constants.NET_STATUS_DOWN
+ relations = lrouter.get('_relations')
+ if relations:
+ lrouter_status = relations.get('LogicalRouterStatus')
+ # FIXME(salvatore-orlando): Being unable to fetch the
+ # logical router status should be an exception.
+ if lrouter_status:
+ router_op_status = (lrouter_status.get('fabric_status')
+ and constants.NET_STATUS_ACTIVE or
+ constants.NET_STATUS_DOWN)
+            LOG.debug(_("Current router status:%(router_status)s; "
+                        "Status in Quantum DB:%(db_router_status)s"),
+ {'router_status': router_op_status,
+ 'db_router_status': router.status})
+ if router_op_status != router.status:
+                # update the router status
+ with context.session.begin(subtransactions=True):
+ router.status = router_op_status
+ except NvpApiClient.NvpApiException:
+ err_msg = _("Unable to get logical router")
+ LOG.exception(err_msg)
+ raise nvp_exc.NvpPluginException(err_msg=err_msg)
+ return self._make_router_dict(router, fields)
+
+ def get_routers(self, context, filters=None, fields=None):
+ router_query = self._apply_filters_to_query(
+ self._model_query(context, l3_db.Router),
+ l3_db.Router, filters)
+ routers = router_query.all()
+        # Query routers on NVP for updating operational status
+        filters = filters or {}
+        if context.is_admin and not filters.get("tenant_id"):
+ tenant_id = None
+ elif 'tenant_id' in filters:
+ tenant_id = filters.get('tenant_id')[0]
+ del filters['tenant_id']
+ else:
+ tenant_id = context.tenant_id
+ try:
+ nvp_lrouters = nvplib.get_lrouters(self.default_cluster,
+ tenant_id,
+ fields)
+ except NvpApiClient.NvpApiException:
+ err_msg = _("Unable to get logical routers from NVP controller")
+ LOG.exception(err_msg)
+ raise nvp_exc.NvpPluginException(err_msg=err_msg)
+
+ nvp_lrouters_dict = {}
+ for nvp_lrouter in nvp_lrouters:
+ nvp_lrouters_dict[nvp_lrouter['uuid']] = nvp_lrouter
+ for router in routers:
+ nvp_lrouter = nvp_lrouters_dict.get(router['id'])
+ if nvp_lrouter:
+ if (nvp_lrouter["_relations"]["LogicalRouterStatus"]
+ ["fabric_status"]):
+ router.status = constants.NET_STATUS_ACTIVE
+ else:
+ router.status = constants.NET_STATUS_DOWN
+ nvp_lrouters.remove(nvp_lrouter)
+
+ # do not make the case in which routers are found in NVP
+ # but not in Quantum catastrophic.
+ if len(nvp_lrouters):
+ LOG.warning(_("Found %s logical routers not bound "
+ "to Quantum routers. Quantum and NVP are "
+ "potentially out of sync"), len(nvp_lrouters))
+
+ return [self._make_router_dict(router, fields)
+ for router in routers]
+
+ def add_router_interface(self, context, router_id, interface_info):
+ router_iface_info = super(NvpPluginV2, self).add_router_interface(
+ context, router_id, interface_info)
+        # If the above operation succeeded, router_iface_info contains a
+        # reference to a logical switch port
+ port_id = router_iface_info['port_id']
+ subnet_id = router_iface_info['subnet_id']
+ # Add port to the logical router as well
+ # TODO(salvatore-orlando): Identify the appropriate cluster, instead
+ # of always defaulting to self.default_cluster
+ cluster = self.default_cluster
+ # The owner of the router port is always the same as the owner of the
+ # router. Use tenant_id from the port instead of fetching more records
+ # from the Quantum database
+ port = self._get_port(context, port_id)
+ # Find the NVP port corresponding to quantum port_id
+ results = nvplib.query_lswitch_lports(
+ cluster, '*',
+ filters={'tag': port_id, 'tag_scope': 'q_port_id'})
+ if len(results):
+ ls_port = results[0]
else:
- quantum_db["status"] = constants.PORT_STATUS_DOWN
+ raise nvp_exc.NvpPluginException(
+ err_msg=(_("The port %(port_id)s, connected to the router "
+ "%(router_id)s was not found on the NVP backend.")
+ % locals()))
+
+ # Create logical router port and patch attachment
+ self._create_and_attach_router_port(
+ cluster, context, router_id, port,
+ "PatchAttachment", ls_port['uuid'],
+ subnet_ids=[subnet_id])
+
+ # If there is an external gateway we need to configure the SNAT rule.
+ # Fetch router from DB
+ router = self._get_router(context, router_id)
+ gw_port = router.gw_port
+ if gw_port:
+            # There is a chance gw_port might have multiple IPs
+            # In that case we will consider only the first one
+ if gw_port.get('fixed_ips'):
+ snat_ip = gw_port['fixed_ips'][0]['ip_address']
+ subnet = self._get_subnet(context, subnet_id)
+ nvplib.create_lrouter_snat_rule(
+ cluster, router_id, snat_ip, snat_ip,
+ source_ip_addresses=subnet['cidr'])
+
+ LOG.debug(_("Add_router_interface completed for subnet:%(subnet_id)s "
+ "and router:%(router_id)s"),
+ {'subnet_id': subnet_id, 'router_id': router_id})
+ return router_iface_info
+
+ def remove_router_interface(self, context, router_id, interface_info):
+ # TODO(salvatore-orlando): Usual thing about cluster selection
+ cluster = self.default_cluster
+        # The code below is duplicated from the base class, but it comes
+        # in handy as we need to retrieve the router port id before
+        # removing the port
+ subnet = None
+ subnet_id = None
+ if 'port_id' in interface_info:
+ port_id = interface_info['port_id']
+            # find subnet_id - it is needed for removing the SNAT rule
+ port = self._get_port(context, port_id)
+ if port.get('fixed_ips'):
+ subnet_id = port['fixed_ips'][0]['subnet_id']
+ elif 'subnet_id' in interface_info:
+ subnet_id = interface_info['subnet_id']
+ subnet = self._get_subnet(context, subnet_id)
+ rport_qry = context.session.query(models_v2.Port)
+ ports = rport_qry.filter_by(
+ device_id=router_id,
+ device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
+ network_id=subnet['network_id']).all()
+ for p in ports:
+ if p['fixed_ips'][0]['subnet_id'] == subnet_id:
+ port_id = p['id']
+ break
+ results = nvplib.query_lswitch_lports(
+ cluster, '*', relations="LogicalPortAttachment",
+ filters={'tag': port_id, 'tag_scope': 'q_port_id'})
+ if len(results):
+ lport = results[0]
+ attachment_data = lport['_relations'].get('LogicalPortAttachment')
+ lrouter_port_id = (attachment_data and
+ attachment_data.get('peer_port_uuid'))
+        else:
+            lrouter_port_id = None
+            LOG.warning(_("The port %(port_id)s, connected to the router "
+                          "%(router_id)s was not found on the NVP backend"),
+                        locals())
+ # Finally remove the data from the Quantum DB
+ # This will also destroy the port on the logical switch
+ super(NvpPluginV2, self).remove_router_interface(context,
+ router_id,
+ interface_info)
+ # Destroy router port (no need to unplug the attachment)
+        # FIXME(salvatore-orlando): In case of failures in the Quantum plugin
+        # this might leave a dangling port. We perform the operation here
+        # to leverage validation performed in the base class
+ if not lrouter_port_id:
+            LOG.warning(_("Unable to find NVP logical router port for "
+                          "Quantum port id:%s. Was this port ever "
+                          "paired with a logical router?"), port_id)
+ return
+ try:
+ if not subnet:
+ subnet = self._get_subnet(context, subnet_id)
+ router = self._get_router(context, router_id)
+ # Remove SNAT rule if external gateway is configured
+ if router.gw_port:
+ nvplib.delete_nat_rules_by_match(
+ cluster, router_id, "SourceNatRule",
+ max_num_expected=1, min_num_expected=1,
+ source_ip_addresses=subnet['cidr'])
+ nvplib.delete_router_lport(cluster, router_id, lrouter_port_id)
+ except NvpApiClient.ResourceNotFound:
+            raise nvp_exc.NvpPluginException(
+                err_msg=(_("Logical router port resource %s not found "
+                           "on NVP platform") % lrouter_port_id))
+ except NvpApiClient.NvpApiException:
+ raise nvp_exc.NvpPluginException(
+                err_msg=(_("Unable to update logical router "
+                           "on NVP Platform")))
+
+ def _retrieve_and_delete_nat_rules(self, floating_ip_address,
+ internal_ip, router_id,
+ min_num_rules_expected=0):
+        # TODO(salvatore-orlando): Multiple cluster support
+ cluster = self.default_cluster
+ try:
+ nvplib.delete_nat_rules_by_match(
+ cluster, router_id, "DestinationNatRule",
+ max_num_expected=1,
+ min_num_expected=min_num_rules_expected,
+ destination_ip_addresses=floating_ip_address)
+
+ # Remove SNAT rule associated with the single fixed_ip
+ # to floating ip
+ nvplib.delete_nat_rules_by_match(
+ cluster, router_id, "SourceNatRule",
+ max_num_expected=1,
+ min_num_expected=min_num_rules_expected,
+ source_ip_addresses=internal_ip)
+ except NvpApiClient.NvpApiException:
+ LOG.exception(_("An error occurred while removing NAT rules "
+ "on the NVP platform for floating ip:%s"),
+ floating_ip_address)
+ raise
+ except nvp_exc.NvpNatRuleMismatch:
+ # Do not surface to the user
+ LOG.warning(_("An incorrect number of matching NAT rules "
+ "was found on the NVP platform"))
+
+ def _remove_floatingip_address(self, context, fip_db):
+ # Remove floating IP address from logical router port
+ # Fetch logical port of router's external gateway
+ router_id = fip_db.router_id
+ nvp_gw_port_id = nvplib.find_router_gw_port(
+ context, self.default_cluster, router_id)['uuid']
+ ext_quantum_port_db = self._get_port(context.elevated(),
+ fip_db.floating_port_id)
+ nvp_floating_ips = self._build_ip_address_list(
+ context.elevated(), ext_quantum_port_db['fixed_ips'])
+ nvplib.update_lrouter_port_ips(self.default_cluster,
+ router_id,
+ nvp_gw_port_id,
+ ips_to_add=[],
+ ips_to_remove=nvp_floating_ips)
+
+ def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
+ """ Update floating IP association data.
+
+ Overrides method from base class.
+ The method is augmented for creating NAT rules in the process.
- LOG.debug(_("Port details for tenant %(tenant_id)s: %(quantum_db)s"),
- {'tenant_id': context.tenant_id, 'quantum_db': quantum_db})
- return quantum_db
+ """
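+        # In short: on association this method creates a DNAT rule
+        # (floating ip -> internal ip), a SNAT rule (internal ip ->
+        # floating ip) and adds the floating ip to the router gateway
+        # port; on disassociation the rules and the address are removed.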
+        if fip.get('fixed_ip_address') and not fip.get('port_id'):
+ msg = _("fixed_ip_address cannot be specified without a port_id")
+ raise q_exc.BadRequest(resource='floatingip', msg=msg)
+ port_id = internal_ip = router_id = None
+ if 'port_id' in fip and fip['port_id']:
+ port_qry = context.session.query(l3_db.FloatingIP)
+ try:
+ port_qry.filter_by(fixed_port_id=fip['port_id']).one()
+ raise l3.FloatingIPPortAlreadyAssociated(
+ port_id=fip['port_id'])
+ except sa_exc.NoResultFound:
+ pass
+ port_id, internal_ip, router_id = self.get_assoc_data(
+ context,
+ fip,
+ floatingip_db['floating_network_id'])
+
+ cluster = self._find_target_cluster(fip)
+ floating_ip = floatingip_db['floating_ip_address']
+ # Retrieve and delete existing NAT rules, if any
+ if not router_id and floatingip_db.get('fixed_port_id'):
+ # This happens if we're disassociating. Need to explicitly
+ # find the router serving this floating IP
+ tmp_fip = fip.copy()
+ tmp_fip['port_id'] = floatingip_db['fixed_port_id']
+ _pid, internal_ip, router_id = self.get_assoc_data(
+ context, tmp_fip, floatingip_db['floating_network_id'])
+ # If there's no association router_id will be None
+ if router_id:
+ self._retrieve_and_delete_nat_rules(floating_ip,
+ internal_ip,
+ router_id)
+ # Fetch logical port of router's external gateway
+ nvp_gw_port_id = nvplib.find_router_gw_port(
+ context, self.default_cluster, router_id)['uuid']
+ nvp_floating_ips = self._build_ip_address_list(
+ context.elevated(), external_port['fixed_ips'])
+ LOG.debug(_("Address list for NVP logical router "
+ "port:%s"), nvp_floating_ips)
+ # Re-create NAT rules only if a port id is specified
+ if 'port_id' in fip and fip['port_id']:
+ try:
+ # Create new NAT rules
+ nvplib.create_lrouter_dnat_rule(
+ cluster, router_id, internal_ip, internal_ip,
+ destination_ip_addresses=floating_ip)
+                # setup the snat rule so that the source ip of packets
+                # sent from the fixed ip is the floating ip itself
+ nvplib.create_lrouter_snat_rule(
+ cluster, router_id, floating_ip, floating_ip,
+ source_ip_addresses=internal_ip)
+ # Add Floating IP address to router_port
+ nvplib.update_lrouter_port_ips(cluster,
+ router_id,
+ nvp_gw_port_id,
+ ips_to_add=nvp_floating_ips,
+ ips_to_remove=[])
+            except NvpApiClient.NvpApiException:
+                msg = (_("An error occurred while creating NAT "
+                         "rules on the NVP platform for floating "
+                         "ip:%(floating_ip)s mapped to "
+                         "internal ip:%(internal_ip)s") %
+                       {'floating_ip': floating_ip,
+                        'internal_ip': internal_ip})
+                LOG.exception(msg)
+                raise nvp_exc.NvpPluginException(err_msg=msg)
+ elif floatingip_db['fixed_port_id']:
+ # This is a disassociation.
+ # Remove floating IP address from logical router port
+ nvplib.update_lrouter_port_ips(cluster,
+ router_id,
+ nvp_gw_port_id,
+ ips_to_add=[],
+ ips_to_remove=nvp_floating_ips)
+
+ floatingip_db.update({'fixed_ip_address': internal_ip,
+ 'fixed_port_id': port_id,
+ 'router_id': router_id})
+
+ def delete_floatingip(self, context, id):
+ fip_db = self._get_floatingip(context, id)
+ # Check whether the floating ip is associated or not
+ if fip_db.fixed_port_id:
+ self._retrieve_and_delete_nat_rules(fip_db.floating_ip_address,
+ fip_db.fixed_ip_address,
+ fip_db.router_id,
+ min_num_rules_expected=1)
+ # Remove floating IP address from logical router port
+ self._remove_floatingip_address(context, fip_db)
+ return super(NvpPluginV2, self).delete_floatingip(context, id)
+
+ def disassociate_floatingips(self, context, port_id):
+ try:
+ fip_qry = context.session.query(l3_db.FloatingIP)
+ fip_db = fip_qry.filter_by(fixed_port_id=port_id).one()
+ self._retrieve_and_delete_nat_rules(fip_db.floating_ip_address,
+ fip_db.fixed_ip_address,
+ fip_db.router_id,
+ min_num_rules_expected=1)
+ self._remove_floatingip_address(context, fip_db)
+ except sa_exc.NoResultFound:
+ LOG.debug(_("The port '%s' is not associated with floating IPs"),
+ port_id)
+ super(NvpPluginV2, self).disassociate_floatingips(context, port_id)
def get_plugin_version(self):
return PLUGIN_VERSION
from copy import copy
import hashlib
-import itertools
import json
import logging
# no quantum-specific logic in it
from quantum.common import constants
from quantum.common import exceptions as exception
+from quantum.plugins.nicira.nicira_nvp_plugin.common import (
+ exceptions as nvp_exc)
from quantum.plugins.nicira.nicira_nvp_plugin import NvpApiClient
URI_PREFIX = "/ws.v1"
# Resources exposed by NVP API
LSWITCH_RESOURCE = "lswitch"
-LPORT_RESOURCE = "lport"
+LSWITCHPORT_RESOURCE = "lport-%s" % LSWITCH_RESOURCE
+LROUTER_RESOURCE = "lrouter"
+LROUTERPORT_RESOURCE = "lport-%s" % LROUTER_RESOURCE
+LROUTERNAT_RESOURCE = "nat-lrouter"
+
+# Constants for NAT rules
+MATCH_KEYS = ["destination_ip_addresses", "destination_port_max",
+ "destination_port_min", "source_ip_addresses",
+ "source_port_max", "source_port_min", "protocol"]
+
+SNAT_KEYS = ["to_src_port_min", "to_src_port_max", "to_src_ip_min",
+ "to_src_ip_max"]
+
+DNAT_KEYS = ["to_dst_port", "to_dst_ip_min", "to_dst_ip_max"]
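+# A NAT rule match is expressed as keyword arguments drawn from
+# MATCH_KEYS, e.g. source_ip_addresses="10.0.0.0/24" (illustrative
+# cidr) selects the SNAT rule created for that subnet.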
+
LOCAL_LOGGING = False
if LOCAL_LOGGING:
resource_id=None,
parent_resource_id=None,
fields=None,
- relations=None, filters=None):
- # TODO(salvatore-orlando): This is ugly. do something more clever
- # and aovid the if statement
- if resource == LPORT_RESOURCE:
- res_path = ("%s/%s/%s" % (LSWITCH_RESOURCE,
- parent_resource_id,
- resource) +
- (resource_id and "/%s" % resource_id or ''))
- else:
- res_path = resource + (resource_id and
- "/%s" % resource_id or '')
-
+ relations=None, filters=None, is_attachment=False):
+ resources = resource.split('-')
+ res_path = resources[0] + (resource_id and "/%s" % resource_id or '')
+ if len(resources) > 1:
+ # There is also a parent resource to account for in the uri
+ res_path = "%s/%s/%s" % (resources[1],
+ parent_resource_id,
+ res_path)
+ if is_attachment:
+ res_path = "%s/attachment" % res_path
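+    # For example, LROUTERPORT_RESOURCE ("lport-lrouter") with
+    # resource_id 'p1' and parent_resource_id 'r1' (illustrative ids)
+    # yields "lrouter/r1/lport/p1"; is_attachment=True then appends
+    # "/attachment" to the path.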
params = []
params.append(fields and "fields=%s" % fields)
params.append(relations and "relations=%s" % relations)
if filters:
params.extend(['%s=%s' % (k, v) for (k, v) in filters.iteritems()])
uri_path = "%s/%s" % (URI_PREFIX, res_path)
- query_string = reduce(lambda x, y: "%s&%s" % (x, y),
- itertools.ifilter(lambda x: x is not None, params),
- "")
- if query_string:
- uri_path += "?%s" % query_string
+    non_empty_params = [x for x in params if x]
+    if non_empty_params:
+        uri_path += "?%s" % '&'.join(non_empty_params)
return uri_path
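The refactored helper derives the path from a naming convention instead of per-resource branching: a resource name like "lport-lswitch" means an lport nested under an lswitch, and is_attachment appends the attachment sub-resource. A standalone sketch of the mapping, simplified to ignore fields, relations, and filters:

def build_uri_path_sketch(resource, resource_id=None,
                          parent_resource_id=None, is_attachment=False):
    # Mirrors _build_uri_path: "lport-lrouter" -> lrouter/<parent>/lport
    resources = resource.split('-')
    res_path = resources[0] + (resource_id and "/%s" % resource_id or '')
    if len(resources) > 1:
        res_path = "%s/%s/%s" % (resources[1], parent_resource_id, res_path)
    if is_attachment:
        res_path = "%s/attachment" % res_path
    return "/ws.v1/%s" % res_path

assert build_uri_path_sketch("lrouter", "r1") == "/ws.v1/lrouter/r1"
assert (build_uri_path_sketch("lport-lrouter", "p1", "r1", True) ==
        "/ws.v1/lrouter/r1/lport/p1/attachment")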
return obj
+def create_lrouter(cluster, tenant_id, display_name, nexthop):
+ """ Create a NVP logical router on the specified cluster.
+
+ :param cluster: The target NVP cluster
+    :param tenant_id: Identifier of the OpenStack tenant for which
+ the logical router is being created
+ :param display_name: Descriptive name of this logical router
+ :param nexthop: External gateway IP address for the logical router
+ :raise NvpApiException: if there is a problem while communicating
+ with the NVP controller
+ """
+ tags = [{"tag": tenant_id, "scope": "os_tid"}]
+ lrouter_obj = {
+ "display_name": display_name,
+ "tags": tags,
+ "routing_config": {
+ "default_route_next_hop": {
+ "gateway_ip_address": nexthop,
+ "type": "RouterNextHop"
+ },
+ "type": "SingleDefaultRouteImplicitRoutingConfig"
+ },
+ "type": "LogicalRouterConfig"
+ }
+ try:
+ return json.loads(do_single_request("POST",
+ _build_uri_path(LROUTER_RESOURCE),
+ json.dumps(lrouter_obj),
+ cluster=cluster))
+ except NvpApiClient.NvpApiException:
+ # just log and re-raise - let the caller handle it
+ LOG.exception(_("An exception occured while communicating with "
+ "the NVP controller for cluster:%s"), cluster.name)
+ raise
+
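A hedged usage sketch for the function above; the cluster handle is an assumption (the plugin resolves it from its configuration), and the router name and nexthop are illustrative:

# Hypothetical caller; create_lrouter raises NvpApiException on failure,
# letting the plugin roll back its own DB record.
def _sketch_create_router(cluster, tenant_id):
    lrouter = create_lrouter(cluster, tenant_id, 'router-demo',
                             '192.168.0.1')
    # NVP returns the full LogicalRouterConfig; the uuid identifies the
    # router in every later nvplib call.
    return lrouter['uuid']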
+
+def delete_lrouter(cluster, lrouter_id):
+ try:
+ do_single_request("DELETE",
+ _build_uri_path(LROUTER_RESOURCE,
+ resource_id=lrouter_id),
+ cluster=cluster)
+ except NvpApiClient.NvpApiException:
+ # just log and re-raise - let the caller handle it
+ LOG.exception(_("An exception occured while communicating with "
+ "the NVP controller for cluster:%s"), cluster.name)
+ raise
+
+
+def get_lrouter(cluster, lrouter_id):
+ try:
+ return json.loads(do_single_request("GET",
+ _build_uri_path(LROUTER_RESOURCE,
+ resource_id=lrouter_id,
+ relations='LogicalRouterStatus'),
+ cluster=cluster))
+ except NvpApiClient.NvpApiException:
+ # just log and re-raise - let the caller handle it
+ LOG.exception(_("An exception occured while communicating with "
+ "the NVP controller for cluster:%s"), cluster.name)
+ raise
+
+
+def get_lrouters(cluster, tenant_id, fields=None, filters=None):
+ actual_filters = {}
+ if filters:
+ actual_filters.update(filters)
+ if tenant_id:
+ actual_filters['tag'] = tenant_id
+ actual_filters['tag_scope'] = 'os_tid'
+ lrouter_fields = "uuid,display_name,fabric_status,tags"
+ return get_all_query_pages(
+ _build_uri_path(LROUTER_RESOURCE,
+ fields=lrouter_fields,
+ relations='LogicalRouterStatus',
+ filters=actual_filters),
+ cluster)
+
+
+def update_lrouter(cluster, lrouter_id, display_name, nexthop):
+ lrouter_obj = get_lrouter(cluster, lrouter_id)
+ if not display_name and not nexthop:
+ # Nothing to update
+ return lrouter_obj
+    # Using 'or' here is terser than an explicit check on display_name
+ lrouter_obj["display_name"] = display_name or lrouter_obj["display_name"]
+ if nexthop:
+ nh_element = lrouter_obj["routing_config"].get(
+ "default_route_next_hop")
+ if nh_element:
+ nh_element["gateway_ip_address"] = nexthop
+ try:
+ return json.loads(do_single_request("PUT",
+ _build_uri_path(LROUTER_RESOURCE,
+ resource_id=lrouter_id),
+ json.dumps(lrouter_obj),
+ cluster=cluster))
+ except NvpApiClient.NvpApiException:
+ # just log and re-raise - let the caller handle it
+ LOG.exception(_("An exception occured while communicating with "
+ "the NVP controller for cluster:%s"), cluster.name)
+ raise
+
+
def get_all_networks(cluster, tenant_id, networks):
"""Append the quantum network uuids we can find in the given cluster to
"networks"
raise exception.QuantumException()
-def query_ports(cluster, network, relations=None, fields="*", filters=None):
- uri = "/ws.v1/lswitch/" + network + "/lport?"
- if relations:
- uri += "relations=%s" % relations
- uri += "&fields=%s" % fields
+def query_lswitch_lports(cluster, ls_uuid, fields="*",
+ filters=None, relations=None):
+    # Rename the 'attachment' filter to NVP's 'attachment_vif_uuid'
if filters and "attachment" in filters:
- uri += "&attachment_vif_uuid=%s" % filters["attachment"]
+ filters['attachment_vif_uuid'] = filters["attachment"]
+ del filters['attachment']
+ uri = _build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=ls_uuid,
+ fields=fields, filters=filters, relations=relations)
try:
resp_obj = do_single_request("GET", uri, cluster=cluster)
- except NvpApiClient.ResourceNotFound as e:
- LOG.error(_("Network not found, Error: %s"), str(e))
- raise exception.NetworkNotFound(net_id=network)
- except NvpApiClient.NvpApiException as e:
- raise exception.QuantumException()
+ except NvpApiClient.ResourceNotFound:
+ LOG.exception(_("Logical switch: %s not found"), ls_uuid)
+ raise
+ except NvpApiClient.NvpApiException:
+ LOG.exception(_("An error occurred while querying logical ports on "
+ "the NVP platform"))
+ raise
return json.loads(resp_obj)["results"]
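The 'attachment' filter is the Quantum-side spelling; the NVP query parameter is attachment_vif_uuid, hence the rewrite before building the URI. A standalone illustration with a made-up VIF uuid:

filters = {'attachment': 'vif-0000'}
if filters and 'attachment' in filters:
    filters['attachment_vif_uuid'] = filters.pop('attachment')
assert filters == {'attachment_vif_uuid': 'vif-0000'}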
-def delete_port(cluster, port):
+def query_lrouter_lports(cluster, lr_uuid, fields="*",
+ filters=None, relations=None):
+ uri = _build_uri_path(LROUTERPORT_RESOURCE, parent_resource_id=lr_uuid,
+ fields=fields, filters=filters, relations=relations)
try:
- do_single_request("DELETE", port['_href'], cluster=cluster)
+ resp_obj = do_single_request("GET", uri, cluster=cluster)
+ except NvpApiClient.ResourceNotFound:
+ LOG.exception(_("Logical router: %s not found"), lr_uuid)
+ raise
+ except NvpApiClient.NvpApiException:
+ LOG.exception(_("An error occured while querying logical router "
+ "ports on the NVP platfom"))
+ raise
+ return json.loads(resp_obj)["results"]
+
+
+def delete_port(cluster, switch, port):
+ uri = "/ws.v1/lswitch/" + switch + "/lport/" + port
+ try:
+ do_single_request("DELETE", uri, cluster=cluster)
except NvpApiClient.ResourceNotFound as e:
LOG.error(_("Port or Network not found, Error: %s"), str(e))
raise exception.PortNotFound(port_id=port['uuid'])
raise exception.QuantumException()
-def get_port_by_quantum_tag(clusters, lswitch, quantum_tag):
- """Return (url, cluster_id) of port or raises ResourceNotFound
- """
- query = ("/ws.v1/lswitch/%s/lport?fields=admin_status_enabled,"
- "fabric_status_up,uuid&tag=%s&tag_scope=q_port_id"
- "&relations=LogicalPortStatus" % (lswitch, quantum_tag))
-
- LOG.debug(_("Looking for port with q_tag '%(quantum_tag)s' "
- "on: %(lswitch)s"),
- locals())
- for c in clusters:
- try:
- res_obj = do_single_request('GET', query, cluster=c)
- except Exception:
- continue
- res = json.loads(res_obj)
- if len(res["results"]) == 1:
- return (res["results"][0], c)
-
- LOG.error(_("Port or Network not found"))
- raise exception.PortNotFound(port_id=quantum_tag, net_id=lswitch)
+def get_logical_port_status(cluster, switch, port):
+ query = ("/ws.v1/lswitch/" + switch + "/lport/"
+ + port + "?relations=LogicalPortStatus")
+ try:
+ res_obj = do_single_request('GET', query, cluster=cluster)
+ except NvpApiClient.ResourceNotFound as e:
+ LOG.error(_("Port or Network not found, Error: %s"), str(e))
+ raise exception.PortNotFound(port_id=port, net_id=switch)
+ except NvpApiClient.NvpApiException as e:
+ raise exception.QuantumException()
+ res = json.loads(res_obj)
+ # copy over admin_status_enabled
+ res["_relations"]["LogicalPortStatus"]["admin_status_enabled"] = (
+ res["admin_status_enabled"])
+ return res["_relations"]["LogicalPortStatus"]
def get_port_by_display_name(clusters, lswitch, display_name):
_configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles)
- path = _build_uri_path(LPORT_RESOURCE, parent_resource_id=lswitch_uuid)
+ path = _build_uri_path(LSWITCHPORT_RESOURCE,
+ parent_resource_id=lswitch_uuid)
try:
resp_obj = do_single_request("POST", path,
json.dumps(lport_obj),
return result
+def create_router_lport(cluster, lrouter_uuid, tenant_id, quantum_port_id,
+ display_name, admin_status_enabled, ip_addresses):
+ """ Creates a logical port on the assigned logical router """
+ tags = [dict(scope='os_tid', tag=tenant_id),
+ dict(scope='q_port_id', tag=quantum_port_id)]
+ lport_obj = dict(
+ admin_status_enabled=admin_status_enabled,
+ display_name=display_name,
+ tags=tags,
+ ip_addresses=ip_addresses,
+ type="LogicalRouterPortConfig"
+ )
+ path = _build_uri_path(LROUTERPORT_RESOURCE,
+ parent_resource_id=lrouter_uuid)
+ try:
+ resp_obj = do_single_request("POST", path,
+ json.dumps(lport_obj),
+ cluster=cluster)
+ except NvpApiClient.ResourceNotFound as e:
+ LOG.error(_("Logical router not found, Error: %s"), str(e))
+ raise
+
+ result = json.loads(resp_obj)
+ LOG.debug(_("Created logical port %(lport_uuid)s on "
+ "logical router %(lrouter_uuid)s"),
+ {'lport_uuid': result['uuid'],
+ 'lrouter_uuid': lrouter_uuid})
+ return result
+
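A hedged usage sketch for create_router_lport; the cluster and uuids are assumptions. Note that ip_addresses entries are CIDR-style "address/prefix" strings:

# Hypothetical call wiring a Quantum router interface port into NVP.
def _sketch_create_router_intf_port(cluster, lrouter_uuid, q_port_id):
    lport = create_router_lport(cluster, lrouter_uuid, 'tenant-1',
                                q_port_id, 'qr-demo', True,
                                ['10.0.0.1/24'])
    return lport['uuid']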
+
+def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid,
+ tenant_id, quantum_port_id, display_name,
+ admin_status_enabled, ip_addresses):
+ """ Updates a logical port on the assigned logical router """
+ lport_obj = dict(
+ admin_status_enabled=admin_status_enabled,
+ display_name=display_name,
+ tags=[dict(scope='os_tid', tag=tenant_id),
+ dict(scope='q_port_id', tag=quantum_port_id)],
+ ip_addresses=ip_addresses,
+ type="LogicalRouterPortConfig"
+ )
+ # Do not pass null items to NVP
+ for key in lport_obj.keys():
+ if lport_obj[key] is None:
+ del lport_obj[key]
+ path = _build_uri_path(LROUTERPORT_RESOURCE,
+ lrouter_port_uuid,
+ parent_resource_id=lrouter_uuid)
+ try:
+ resp_obj = do_single_request("PUT", path,
+ json.dumps(lport_obj),
+ cluster=cluster)
+ except NvpApiClient.ResourceNotFound as e:
+ LOG.error(_("Logical router or router port not found, "
+ "Error: %s"), str(e))
+ raise
+
+ result = json.loads(resp_obj)
+ LOG.debug(_("Updated logical port %(lport_uuid)s on "
+ "logical router %(lrouter_uuid)s"),
+ {'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid})
+ return result
+
+
+def delete_router_lport(cluster, lrouter_uuid, lport_uuid):
+ """ Creates a logical port on the assigned logical router """
+ path = _build_uri_path(LROUTERPORT_RESOURCE, lport_uuid, lrouter_uuid)
+ try:
+ do_single_request("DELETE", path, cluster=cluster)
+ except NvpApiClient.ResourceNotFound as e:
+ LOG.error(_("Logical router not found, Error: %s"), str(e))
+ raise
+ LOG.debug(_("Delete logical router port %(lport_uuid)s on "
+ "logical router %(lrouter_uuid)s"),
+ {'lport_uuid': lport_uuid,
+ 'lrouter_uuid': lrouter_uuid})
+
+
+def delete_peer_router_lport(cluster, lr_uuid, ls_uuid, lp_uuid):
+ nvp_port = get_port(cluster, ls_uuid, lp_uuid,
+ relations="LogicalPortAttachment")
+ try:
+ relations = nvp_port.get('_relations')
+ if relations:
+ att_data = relations.get('LogicalPortAttachment')
+ if att_data:
+ lrp_uuid = att_data.get('peer_port_uuid')
+ if lrp_uuid:
+ delete_router_lport(cluster, lr_uuid, lrp_uuid)
+ except (NvpApiClient.NvpApiException, NvpApiClient.ResourceNotFound):
+ LOG.exception(_("Unable to fetch and delete peer logical "
+ "router port for logical switch port:%s"),
+ lp_uuid)
+ raise
+
+
+def find_router_gw_port(context, cluster, router_id):
+ """ Retrieves the external gateway port for a NVP logical router """
+
+ # Find the uuid of nvp ext gw logical router port
+ # TODO(salvatore-orlando): Consider storing it in Quantum DB
+ results = query_lrouter_lports(
+ cluster, router_id,
+ filters={'attachment_gwsvc_uuid': cluster.default_l3_gw_service_uuid})
+    if results:
+ # Return logical router port
+ return results[0]
+
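Because the gateway port uuid is not persisted in the Quantum DB (see the TODO above), callers must handle the None case when no port carries the gateway-service attachment. A hedged sketch; context, cluster, and router_id are assumptions:

def _sketch_get_gw_port_uuid(context, cluster, router_id):
    gw_port = find_router_gw_port(context, cluster, router_id)
    # None means the router has no external gateway plugged yet
    return gw_port and gw_port['uuid']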
+
+def plug_router_port_attachment(cluster, router_id, port_id,
+ attachment_uuid, nvp_attachment_type):
+ """Attach a router port to the given attachment.
+ Current attachment types:
+ - PatchAttachment [-> logical switch port uuid]
+ - L3GatewayAttachment [-> L3GatewayService uuid]
+ """
+ uri = _build_uri_path(LROUTERPORT_RESOURCE, port_id, router_id,
+ is_attachment=True)
+ attach_obj = {}
+ attach_obj["type"] = nvp_attachment_type
+ if nvp_attachment_type == "PatchAttachment":
+ attach_obj["peer_port_uuid"] = attachment_uuid
+ elif nvp_attachment_type == "L3GatewayAttachment":
+ attach_obj["l3_gateway_service_uuid"] = attachment_uuid
+ else:
+ raise Exception(_("Invalid NVP attachment type '%s'"),
+ nvp_attachment_type)
+ try:
+ resp_obj = do_single_request(
+ "PUT", uri, json.dumps(attach_obj), cluster=cluster)
+ except NvpApiClient.ResourceNotFound as e:
+ LOG.exception(_("Router Port not found, Error: %s"), str(e))
+ raise
+    except NvpApiClient.Conflict:
+        LOG.exception(_("Conflict while setting router port attachment"))
+        raise
+    except NvpApiClient.NvpApiException:
+ LOG.exception(_("Unable to plug attachment into logical router port"))
+ raise
+ result = json.loads(resp_obj)
+ return result
+
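The two attachment types produce bodies that differ only in one field; both are PUT to the port's /attachment sub-resource. Illustrative payloads with hypothetical uuids:

patch_attachment = {
    "type": "PatchAttachment",
    "peer_port_uuid": "lswitch-lport-uuid",
}
l3_gw_attachment = {
    "type": "L3GatewayAttachment",
    "l3_gateway_service_uuid": "l3-gw-service-uuid",
}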
+
def get_port_status(cluster, lswitch_id, port_id):
"""Retrieve the operational status of the port"""
try:
'logical_port_ingress_rules': []}
update_security_group_rules(cluster, rsp['uuid'], rules)
- LOG.debug("Created Security Profile: %s" % rsp)
+ LOG.debug(_("Created Security Profile: %s"), rsp)
return rsp
except NvpApiClient.NvpApiException as e:
LOG.error(format_exception("Unknown", e, locals()))
raise exception.QuantumException()
- LOG.debug("Updated Security Profile: %s" % rsp)
+ LOG.debug(_("Updated Security Profile: %s"), rsp)
return rsp
except NvpApiClient.NvpApiException as e:
LOG.error(format_exception("Unknown", e, locals()))
raise exception.QuantumException()
+
+
+def _create_nat_match_obj(**kwargs):
+ nat_match_obj = {'ethertype': 'IPv4'}
+ delta = set(kwargs.keys()) - set(MATCH_KEYS)
+ if delta:
+ raise Exception(_("Invalid keys for NAT match: %s"), delta)
+ nat_match_obj.update(kwargs)
+ return nat_match_obj
+
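Validation is a plain set difference against MATCH_KEYS: any keyword not in the whitelist aborts rule creation before NVP is contacted. A standalone check, with MATCH_KEYS repeated so the snippet runs on its own:

MATCH_KEYS = ["destination_ip_addresses", "destination_port_max",
              "destination_port_min", "source_ip_addresses",
              "source_port_max", "source_port_min", "protocol"]
kwargs = {"source_ip_addresses": "10.0.0.0/24", "bogus_key": 1}
delta = set(kwargs.keys()) - set(MATCH_KEYS)
assert delta == set(["bogus_key"])  # _create_nat_match_obj would raise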
+
+def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj):
+ LOG.debug(_("Creating NAT rule: %s"), nat_rule_obj)
+ uri = _build_uri_path(LROUTERNAT_RESOURCE, parent_resource_id=router_id)
+ try:
+ resp = do_single_request("POST", uri, json.dumps(nat_rule_obj),
+ cluster=cluster)
+ except NvpApiClient.ResourceNotFound:
+ LOG.exception(_("NVP Logical Router %s not found"), router_id)
+ raise
+ except NvpApiClient.NvpApiException:
+ LOG.exception(_("An error occurred while creating the NAT rule "
+ "on the NVP platform"))
+ raise
+ rule = json.loads(resp)
+ return rule
+
+
+def create_lrouter_snat_rule(cluster, router_id,
+ min_src_ip, max_src_ip, **kwargs):
+
+ nat_match_obj = _create_nat_match_obj(**kwargs)
+ nat_rule_obj = {
+ "to_source_ip_address_min": min_src_ip,
+ "to_source_ip_address_max": max_src_ip,
+ "type": "SourceNatRule",
+ "match": nat_match_obj
+ }
+ return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)
+
+
+def create_lrouter_dnat_rule(cluster, router_id, to_min_dst_ip,
+ to_max_dst_ip, to_dst_port=None, **kwargs):
+
+ nat_match_obj = _create_nat_match_obj(**kwargs)
+ nat_rule_obj = {
+ "to_destination_ip_address_min": to_min_dst_ip,
+ "to_destination_ip_address_max": to_max_dst_ip,
+ "type": "DestinationNatRule",
+ "match": nat_match_obj
+ }
+ if to_dst_port:
+ nat_rule_obj['to_destination_port'] = to_dst_port
+ return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)
+
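For a floating IP association the plugin needs a DNAT rule translating the floating address to the fixed one; pinning min and max to the same value targets a single IP. A hedged sketch with illustrative addresses; cluster and router_id are assumptions:

def _sketch_fip_dnat(cluster, router_id):
    return create_lrouter_dnat_rule(
        cluster, router_id,
        to_min_dst_ip='10.0.0.3',                # fixed IP (translate to)
        to_max_dst_ip='10.0.0.3',
        destination_ip_addresses='172.24.4.2')   # floating IP (match on)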
+
+def delete_nat_rules_by_match(cluster, router_id, rule_type,
+ max_num_expected,
+ min_num_expected=0,
+ **kwargs):
+    # Find and remove NAT rules matching all the given criteria
+ nat_rules = query_nat_rules(cluster, router_id)
+ to_delete_ids = []
+ for r in nat_rules:
+ if (r['type'] != rule_type):
+ continue
+
+ for key, value in kwargs.iteritems():
+ if not (key in r['match'] and r['match'][key] == value):
+ break
+ else:
+ to_delete_ids.append(r['uuid'])
+    if not (min_num_expected <= len(to_delete_ids) <= max_num_expected):
+ raise nvp_exc.NvpNatRuleMismatch(actual_rules=len(to_delete_ids),
+ min_rules=min_num_expected,
+ max_rules=max_num_expected)
+
+ for rule_id in to_delete_ids:
+ delete_router_nat_rule(cluster, router_id, rule_id)
+
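The matching relies on Python's for/else: the else clause runs only when the inner loop finishes without break, i.e. every criterion in kwargs matched the rule. A standalone illustration:

rule = {'match': {'destination_ip_addresses': '172.24.4.2',
                  'ethertype': 'IPv4'}}
criteria = {'destination_ip_addresses': '172.24.4.2'}
for key, value in criteria.items():
    if not (key in rule['match'] and rule['match'][key] == value):
        matched = False
        break
else:
    matched = True  # reached only if no criterion failed
assert matched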
+
+def delete_router_nat_rule(cluster, router_id, rule_id):
+ uri = _build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id)
+ try:
+ do_single_request("DELETE", uri, cluster=cluster)
+ except NvpApiClient.NvpApiException:
+ LOG.exception(_("An error occurred while removing NAT rule "
+ "'%(nat_rule_uuid)s' for logical "
+ "router '%(lrouter_uuid)s'"),
+ {'nat_rule_uuid': rule_id, 'lrouter_uuid': router_id})
+ raise
+
+
+def get_router_nat_rule(cluster, tenant_id, router_id, rule_id):
+ uri = _build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id)
+ try:
+ resp = do_single_request("GET", uri, cluster=cluster)
+ except NvpApiClient.ResourceNotFound:
+ LOG.exception(_("NAT rule %s not found"), rule_id)
+ raise
+ except NvpApiClient.NvpApiException:
+ LOG.exception(_("An error occured while retrieving NAT rule '%s'"
+ "from NVP platform"), rule_id)
+ raise
+ res = json.loads(resp)
+ return res
+
+
+def query_nat_rules(cluster, router_id, fields="*", filters=None):
+ uri = _build_uri_path(LROUTERNAT_RESOURCE, parent_resource_id=router_id,
+ fields=fields, filters=filters)
+ try:
+ resp = do_single_request("GET", uri, cluster=cluster)
+ except NvpApiClient.ResourceNotFound:
+ LOG.exception(_("NVP Logical Router '%s' not found"), router_id)
+ raise
+ except NvpApiClient.NvpApiException:
+ LOG.exception(_("An error occured while retrieving NAT rules for "
+ "NVP logical router '%s'"), router_id)
+ raise
+ res = json.loads(resp)
+ return res["results"]
+
+
+# NOTE(salvatore-orlando): The following FIXME applies in general to
+# each operation on list attributes.
+# FIXME(salvatore-orlando): need a lock around the list of IPs on an iface
+def update_lrouter_port_ips(cluster, lrouter_id, lport_id,
+ ips_to_add, ips_to_remove):
+ uri = _build_uri_path(LROUTERPORT_RESOURCE, lport_id, lrouter_id)
+ try:
+ port = json.loads(do_single_request("GET", uri, cluster=cluster))
+        # TODO(salvatore-orlando): Enforce that ips_to_add does not
+        # intersect ips_to_remove
+ ip_address_set = set(port['ip_addresses'])
+ ip_address_set = ip_address_set - set(ips_to_remove)
+ ip_address_set = ip_address_set | set(ips_to_add)
+ # Set is not JSON serializable - convert to list
+ port['ip_addresses'] = list(ip_address_set)
+ do_single_request("PUT", uri, json.dumps(port), cluster=cluster)
+ except NvpApiClient.ResourceNotFound as e:
+ msg = (_("Router Port %(lport_id)s not found on router "
+ "%(lrouter_id)s") % locals())
+ LOG.exception(msg)
+ raise nvp_exc.NvpPluginException(err_desc=msg)
+ except NvpApiClient.NvpApiException as e:
+ msg = _("An exception occurred while updating IP addresses on a "
+ "router logical port:%s") % str(e)
+ LOG.exception(msg)
+ raise nvp_exc.NvpPluginException(err_desc=msg)
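The read-modify-write uses set arithmetic, so adding an address twice or removing an absent one is harmless; as the FIXME notes, the sequence is not atomic. A standalone illustration:

current = set(['10.0.0.1/24', '192.168.1.1/24'])
to_remove = set(['192.168.1.1/24'])
to_add = set(['172.24.4.2/24', '10.0.0.1/24'])  # duplicate add is a no-op
result = (current - to_remove) | to_add
assert result == set(['10.0.0.1/24', '172.24.4.2/24'])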
# under the License.
import json
-import logging
import urlparse
+from quantum.openstack.common import log as logging
from quantum.openstack.common import uuidutils
-LOG = logging.getLogger("fake_nvpapiclient")
-LOG.setLevel(logging.DEBUG)
+LOG = logging.getLogger(__name__)
class FakeClient:
+ LSWITCH_RESOURCE = 'lswitch'
+ LPORT_RESOURCE = 'lport'
+ LROUTER_RESOURCE = 'lrouter'
+ NAT_RESOURCE = 'nat'
+ SECPROF_RESOURCE = 'securityprofile'
+ LSWITCH_STATUS = 'lswitchstatus'
+ LROUTER_STATUS = 'lrouterstatus'
+ LSWITCH_LPORT_RESOURCE = 'lswitch_lport'
+ LROUTER_LPORT_RESOURCE = 'lrouter_lport'
+ LROUTER_NAT_RESOURCE = 'lrouter_nat'
+ LSWITCH_LPORT_STATUS = 'lswitch_lportstatus'
+ LSWITCH_LPORT_ATT = 'lswitch_lportattachment'
+ LROUTER_LPORT_STATUS = 'lrouter_lportstatus'
+ LROUTER_LPORT_ATT = 'lrouter_lportattachment'
+
+ RESOURCES = [LSWITCH_RESOURCE, LROUTER_RESOURCE,
+ LPORT_RESOURCE, NAT_RESOURCE, SECPROF_RESOURCE]
+
FAKE_GET_RESPONSES = {
- "lswitch": "fake_get_lswitch.json",
- "lport": "fake_get_lport.json",
- "lportstatus": "fake_get_lport_status.json"
+ LSWITCH_RESOURCE: "fake_get_lswitch.json",
+ LSWITCH_LPORT_RESOURCE: "fake_get_lswitch_lport.json",
+ LSWITCH_LPORT_STATUS: "fake_get_lswitch_lport_status.json",
+ LSWITCH_LPORT_ATT: "fake_get_lswitch_lport_att.json",
+ LROUTER_RESOURCE: "fake_get_lrouter.json",
+ LROUTER_LPORT_RESOURCE: "fake_get_lrouter_lport.json",
+ LROUTER_LPORT_STATUS: "fake_get_lrouter_lport_status.json",
+ LROUTER_LPORT_ATT: "fake_get_lrouter_lport_att.json",
+ LROUTER_STATUS: "fake_get_lrouter_status.json",
+ LROUTER_NAT_RESOURCE: "fake_get_lrouter_nat.json"
}
FAKE_POST_RESPONSES = {
- "lswitch": "fake_post_lswitch.json",
- "lport": "fake_post_lport.json",
- "securityprofile": "fake_post_security_profile.json"
+ LSWITCH_RESOURCE: "fake_post_lswitch.json",
+ LROUTER_RESOURCE: "fake_post_lrouter.json",
+ LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
+ LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
+ LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
+ SECPROF_RESOURCE: "fake_post_security_profile.json"
}
FAKE_PUT_RESPONSES = {
- "lswitch": "fake_post_lswitch.json",
- "lport": "fake_post_lport.json",
- "securityprofile": "fake_post_security_profile.json"
+ LSWITCH_RESOURCE: "fake_post_lswitch.json",
+ LROUTER_RESOURCE: "fake_post_lrouter.json",
+ LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
+ LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
+ LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
+ LSWITCH_LPORT_ATT: "fake_put_lswitch_lport_att.json",
+ LROUTER_LPORT_ATT: "fake_put_lrouter_lport_att.json",
+ SECPROF_RESOURCE: "fake_post_security_profile.json"
+ }
+
+ MANAGED_RELATIONS = {
+ LSWITCH_RESOURCE: [],
+ LROUTER_RESOURCE: [],
+ LSWITCH_LPORT_RESOURCE: ['LogicalPortAttachment'],
+ LROUTER_LPORT_RESOURCE: ['LogicalPortAttachment'],
}
_fake_lswitch_dict = {}
- _fake_lport_dict = {}
- _fake_lportstatus_dict = {}
+ _fake_lrouter_dict = {}
+ _fake_lswitch_lport_dict = {}
+ _fake_lrouter_lport_dict = {}
+ _fake_lrouter_nat_dict = {}
+ _fake_lswitch_lportstatus_dict = {}
+ _fake_lrouter_lportstatus_dict = {}
_fake_securityprofile_dict = {}
def __init__(self, fake_files_path):
fake_lswitch['lport_count'] = 0
return fake_lswitch
- def _add_lport(self, body, ls_uuid):
+ def _add_lrouter(self, body):
+ fake_lrouter = json.loads(body)
+ fake_lrouter['uuid'] = uuidutils.generate_uuid()
+ self._fake_lrouter_dict[fake_lrouter['uuid']] = fake_lrouter
+ fake_lrouter['tenant_id'] = self._get_tag(fake_lrouter, 'os_tid')
+ fake_lrouter['lport_count'] = 0
+ default_nexthop = fake_lrouter['routing_config'].get(
+ 'default_route_next_hop')
+ fake_lrouter['default_next_hop'] = default_nexthop.get(
+ 'gateway_ip_address', '0.0.0.0')
+ return fake_lrouter
+
+ def _add_lswitch_lport(self, body, ls_uuid):
fake_lport = json.loads(body)
- fake_lport['uuid'] = uuidutils.generate_uuid()
+ new_uuid = uuidutils.generate_uuid()
+ fake_lport['uuid'] = new_uuid
# put the tenant_id and the ls_uuid in the main dict
# for simplifying templating
fake_lport['ls_uuid'] = ls_uuid
fake_lport['quantum_port_id'] = self._get_tag(fake_lport,
'q_port_id')
fake_lport['quantum_device_id'] = self._get_tag(fake_lport, 'vm_id')
- self._fake_lport_dict[fake_lport['uuid']] = fake_lport
+ self._fake_lswitch_lport_dict[fake_lport['uuid']] = fake_lport
fake_lswitch = self._fake_lswitch_dict[ls_uuid]
fake_lswitch['lport_count'] += 1
fake_lport_status['ls_uuid'] = fake_lswitch['uuid']
fake_lport_status['ls_name'] = fake_lswitch['display_name']
fake_lport_status['ls_zone_uuid'] = fake_lswitch['zone_uuid']
- self._fake_lportstatus_dict[fake_lport['uuid']] = fake_lport_status
+ self._fake_lswitch_lportstatus_dict[new_uuid] = fake_lport_status
+ return fake_lport
+
+ def _add_lrouter_lport(self, body, lr_uuid):
+ fake_lport = json.loads(body)
+ new_uuid = uuidutils.generate_uuid()
+ fake_lport['uuid'] = new_uuid
+        # put the tenant_id and the lr_uuid in the main dict
+        # for simplifying templating
+ fake_lport['lr_uuid'] = lr_uuid
+ fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
+ fake_lport['quantum_port_id'] = self._get_tag(fake_lport,
+ 'q_port_id')
+ # replace ip_address with its json dump
+ if 'ip_addresses' in fake_lport:
+ ip_addresses_json = json.dumps(fake_lport['ip_addresses'])
+ fake_lport['ip_addresses_json'] = ip_addresses_json
+ self._fake_lrouter_lport_dict[fake_lport['uuid']] = fake_lport
+ fake_lrouter = self._fake_lrouter_dict[lr_uuid]
+ fake_lrouter['lport_count'] += 1
+ fake_lport_status = fake_lport.copy()
+ fake_lport_status['lr_tenant_id'] = fake_lrouter['tenant_id']
+ fake_lport_status['lr_uuid'] = fake_lrouter['uuid']
+ fake_lport_status['lr_name'] = fake_lrouter['display_name']
+ self._fake_lrouter_lportstatus_dict[new_uuid] = fake_lport_status
return fake_lport
def _add_securityprofile(self, body):
fake_securityprofile)
return fake_securityprofile
+ def _add_lrouter_nat(self, body, lr_uuid):
+ fake_nat = json.loads(body)
+ new_uuid = uuidutils.generate_uuid()
+ fake_nat['uuid'] = new_uuid
+ fake_nat['lr_uuid'] = lr_uuid
+ self._fake_lrouter_nat_dict[fake_nat['uuid']] = fake_nat
+ if 'match' in fake_nat:
+ match_json = json.dumps(fake_nat['match'])
+ fake_nat['match_json'] = match_json
+ return fake_nat
+
+ def _build_relation(self, src, dst, resource_type, relation):
+        if relation not in self.MANAGED_RELATIONS[resource_type]:
+ return # Relation is not desired in output
+        if '_relations' not in src or not src['_relations'].get(relation):
+ return # Item does not have relation
+ relation_data = src['_relations'].get(relation)
+        dst_relations = dst.get('_relations', {})
+        dst_relations[relation] = relation_data
+        # write back so dst gains '_relations' even when it was missing
+        dst['_relations'] = dst_relations
+
+ def _fill_attachment(self, att_data, ls_uuid=None,
+ lr_uuid=None, lp_uuid=None):
+ new_data = att_data.copy()
+ for k in ('ls_uuid', 'lr_uuid', 'lp_uuid'):
+ if locals().get(k):
+ new_data[k] = locals()[k]
+
+ def populate_field(field_name):
+ if field_name in att_data:
+ new_data['%s_field' % field_name] = ('"%s" : "%s",'
+ % (field_name,
+ att_data[field_name]))
+ del new_data[field_name]
+ else:
+ new_data['%s_field' % field_name] = ""
+
+ for field in ['vif_uuid', 'peer_port_href', 'peer_port_uuid']:
+ populate_field(field)
+ return new_data
+
def _get_resource_type(self, path):
- uri_split = path.split('/')
- resource_type = ('status' in uri_split and
- 'lport' in uri_split and 'lportstatus'
- or 'lport' in uri_split and 'lport'
- or 'lswitch' in uri_split and 'lswitch' or
- 'security-profile' in uri_split and 'securityprofile')
- switch_uuid = ('lswitch' in uri_split and
- len(uri_split) > 3 and uri_split[3])
- port_uuid = ('lport' in uri_split and
- len(uri_split) > 5 and uri_split[5])
- securityprofile_uuid = ('security-profile' in uri_split and
- len(uri_split) > 3 and uri_split[3])
- return (resource_type, switch_uuid, port_uuid, securityprofile_uuid)
+ """
+ Identifies resource type and relevant uuids in the uri
+
+ /ws.v1/lswitch/xxx
+ /ws.v1/lswitch/xxx/status
+ /ws.v1/lswitch/xxx/lport/yyy
+ /ws.v1/lswitch/xxx/lport/yyy/status
+ /ws.v1/lrouter/zzz
+ /ws.v1/lrouter/zzz/status
+ /ws.v1/lrouter/zzz/lport/www
+ /ws.v1/lrouter/zzz/lport/www/status
+ """
+        # The path starts with '/', so discard the empty leading token
+ uri_split = path.split('/')[1:]
+ # parse uri_split backwards
+ suffix = ""
+ idx = len(uri_split) - 1
+ if 'status' in uri_split[idx]:
+ suffix = "status"
+ idx = idx - 1
+ elif 'attachment' in uri_split[idx]:
+ suffix = "attachment"
+ idx = idx - 1
+        # then check if we have a uuid
+ uuids = []
+ if uri_split[idx].replace('-', '') not in self.RESOURCES:
+ uuids.append(uri_split[idx])
+ idx = idx - 1
+ resource_type = "%s%s" % (uri_split[idx], suffix)
+ if idx > 1:
+ uuids.insert(0, uri_split[idx - 1])
+ resource_type = "%s_%s" % (uri_split[idx - 2], resource_type)
+ return (resource_type.replace('-', ''), uuids)
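The parser walks the path right-to-left: an optional status or attachment suffix, then any uuids, then the resource names, producing a composite type plus the uuids in parent-first order. Hand-traced expectations for two of the documented paths; fake_client is assumed to be a FakeClient instance:

# fake_client._get_resource_type('/ws.v1/lrouter/zzz')
#     -> ('lrouter', ['zzz'])
# fake_client._get_resource_type('/ws.v1/lrouter/zzz/lport/www/attachment')
#     -> ('lrouter_lportattachment', ['zzz', 'www'])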
def _list(self, resource_type, response_file,
- switch_uuid=None, query=None):
+ parent_uuid=None, query=None, relations=None):
(tag_filter, attr_filter) = self._get_filters(query)
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
response_template = f.read()
res_dict = getattr(self, '_fake_%s_dict' % resource_type)
- if switch_uuid == "*":
- switch_uuid = None
+ if parent_uuid == '*':
+ parent_uuid = None
def _attr_match(res_uuid):
if not attr_filter:
for x in res_dict[res_uuid]['tags']])
def _lswitch_match(res_uuid):
- if (not switch_uuid or
- res_dict[res_uuid].get('ls_uuid') == switch_uuid):
+            # verify that the switch exists
+            if parent_uuid and parent_uuid not in self._fake_lswitch_dict:
+                raise Exception(_("lswitch:%s not found") % parent_uuid)
+ if (not parent_uuid
+ or res_dict[res_uuid].get('ls_uuid') == parent_uuid):
+ return True
+ return False
+
+ def _lrouter_match(res_uuid):
+            # verify that the router exists
+            if parent_uuid and parent_uuid not in self._fake_lrouter_dict:
+                raise Exception(_("lrouter:%s not found") % parent_uuid)
+ if (not parent_uuid or
+ res_dict[res_uuid].get('lr_uuid') == parent_uuid):
return True
return False
+
+ def _build_item(resource):
+ item = json.loads(response_template % resource)
+ if relations:
+ for relation in relations:
+ self._build_relation(resource, item,
+ resource_type, relation)
+ return item
+
for item in res_dict.itervalues():
if 'tags' in item:
item['tags_json'] = json.dumps(item['tags'])
- items = [json.loads(response_template % res_dict[res_uuid])
+ if resource_type in (self.LSWITCH_LPORT_RESOURCE,
+ self.LSWITCH_LPORT_ATT,
+ self.LSWITCH_LPORT_STATUS):
+ parent_func = _lswitch_match
+ elif resource_type in (self.LROUTER_LPORT_RESOURCE,
+ self.LROUTER_LPORT_ATT,
+ self.LROUTER_NAT_RESOURCE,
+ self.LROUTER_LPORT_STATUS):
+ parent_func = _lrouter_match
+ else:
+ parent_func = lambda x: True
+
+ items = [_build_item(res_dict[res_uuid])
for res_uuid in res_dict
- if (_lswitch_match(res_uuid) and
+ if (parent_func(res_uuid) and
_tag_match(res_uuid) and
_attr_match(res_uuid))]
'result_count': len(items)})
def _show(self, resource_type, response_file,
- switch_uuid, port_uuid=None):
- target_uuid = port_uuid or switch_uuid
+ uuid1, uuid2=None, relations=None):
+ target_uuid = uuid2 or uuid1
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
response_template = f.read()
res_dict = getattr(self, '_fake_%s_dict' % resource_type)
def handle_get(self, url):
#TODO(salvatore-orlando): handle field selection
parsedurl = urlparse.urlparse(url)
- (res_type, s_uuid, p_uuid, sec_uuid) = self._get_resource_type(
- parsedurl.path)
+ (res_type, uuids) = self._get_resource_type(parsedurl.path)
+ relations = urlparse.parse_qs(parsedurl.query).get('relations')
response_file = self.FAKE_GET_RESPONSES.get(res_type)
if not response_file:
raise Exception("resource not found")
- if res_type == 'lport':
- if p_uuid:
- return self._show(res_type, response_file, s_uuid, p_uuid)
+ if 'lport' in res_type or 'nat' in res_type:
+ if len(uuids) > 1:
+ return self._show(res_type, response_file, uuids[0],
+ uuids[1], relations=relations)
else:
- return self._list(res_type, response_file, s_uuid,
- query=parsedurl.query)
- elif res_type == 'lportstatus':
- return self._show(res_type, response_file, s_uuid, p_uuid)
- elif res_type == 'lswitch':
- if s_uuid:
- return self._show(res_type, response_file, s_uuid)
+ return self._list(res_type, response_file, uuids[0],
+ query=parsedurl.query, relations=relations)
+ elif ('lswitch' in res_type or 'lrouter' in res_type
+ or self.SECPROF_RESOURCE in res_type):
+ if len(uuids) > 0:
+ return self._show(res_type, response_file, uuids[0],
+ relations=relations)
else:
return self._list(res_type, response_file,
- query=parsedurl.query)
+ query=parsedurl.query,
+ relations=relations)
else:
raise Exception("unknown resource:%s" % res_type)
def handle_post(self, url, body):
parsedurl = urlparse.urlparse(url)
- (res_type, s_uuid, _p, sec_uuid) = self._get_resource_type(
- parsedurl.path)
+ (res_type, uuids) = self._get_resource_type(parsedurl.path)
response_file = self.FAKE_POST_RESPONSES.get(res_type)
if not response_file:
raise Exception("resource not found")
response_template = f.read()
add_resource = getattr(self, '_add_%s' % res_type)
args = [body]
- if s_uuid:
- args.append(s_uuid)
+        if uuids:
+ args.append(uuids[0])
response = response_template % add_resource(*args)
return response
def handle_put(self, url, body):
parsedurl = urlparse.urlparse(url)
- (res_type, s_uuid, p_uuid, sec_uuid) = self._get_resource_type(
- parsedurl.path)
- target_uuid = p_uuid or s_uuid or sec_uuid
+ (res_type, uuids) = self._get_resource_type(parsedurl.path)
response_file = self.FAKE_PUT_RESPONSES.get(res_type)
if not response_file:
raise Exception("resource not found")
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
response_template = f.read()
+ # Manage attachment operations
+ is_attachment = False
+ if res_type.endswith('attachment'):
+ is_attachment = True
+ res_type = res_type[:res_type.index('attachment')]
res_dict = getattr(self, '_fake_%s_dict' % res_type)
- resource = res_dict[target_uuid]
- resource.update(json.loads(body))
- response = response_template % resource
+ resource = res_dict[uuids[-1]]
+ if not is_attachment:
+ resource.update(json.loads(body))
+ else:
+ relations = resource.get("_relations")
+ if not relations:
+ relations = {}
+ relations['LogicalPortAttachment'] = json.loads(body)
+ resource['_relations'] = relations
+ body_2 = json.loads(body)
+ if body_2['type'] == "PatchAttachment":
+                # Mirror the patch attachment on the peer port too
+ if self.LROUTER_RESOURCE in res_type:
+ res_type_2 = res_type.replace(self.LROUTER_RESOURCE,
+ self.LSWITCH_RESOURCE)
+ elif self.LSWITCH_RESOURCE in res_type:
+ res_type_2 = res_type.replace(self.LSWITCH_RESOURCE,
+ self.LROUTER_RESOURCE)
+ res_dict_2 = getattr(self, '_fake_%s_dict' % res_type_2)
+ body_2['peer_port_uuid'] = uuids[-1]
+ resource_2 = res_dict_2[json.loads(body)['peer_port_uuid']]
+ relations_2 = resource_2.get("_relations")
+ if not relations_2:
+ relations_2 = {}
+ relations_2['LogicalPortAttachment'] = body_2
+ resource_2['_relations'] = relations_2
+ elif body_2['type'] == "L3GatewayAttachment":
+ resource['attachment_gwsvc_uuid'] = (
+ body_2['l3_gateway_service_uuid'])
+ if not is_attachment:
+ response = response_template % resource
+ else:
+ if res_type == self.LROUTER_LPORT_RESOURCE:
+ lr_uuid = uuids[0]
+ ls_uuid = None
+ elif res_type == self.LSWITCH_LPORT_RESOURCE:
+ ls_uuid = uuids[0]
+ lr_uuid = None
+ lp_uuid = uuids[1]
+ response = response_template % self._fill_attachment(
+ json.loads(body), ls_uuid, lr_uuid, lp_uuid)
return response
def handle_delete(self, url):
parsedurl = urlparse.urlparse(url)
- (res_type, s_uuid, p_uuid, sec_uuid) = self._get_resource_type(
- parsedurl.path)
- target_uuid = p_uuid or s_uuid or sec_uuid
+ (res_type, uuids) = self._get_resource_type(parsedurl.path)
response_file = self.FAKE_PUT_RESPONSES.get(res_type)
if not response_file:
raise Exception("resource not found")
res_dict = getattr(self, '_fake_%s_dict' % res_type)
- del res_dict[target_uuid]
+ del res_dict[uuids[-1]]
return ""
def fake_request(self, *args, **kwargs):
def reset_all(self):
self._fake_lswitch_dict.clear()
- self._fake_lport_dict.clear()
- self._fake_lportstatus_dict.clear()
+ self._fake_lrouter_dict.clear()
+ self._fake_lswitch_lport_dict.clear()
+ self._fake_lrouter_lport_dict.clear()
+ self._fake_lswitch_lportstatus_dict.clear()
+ self._fake_lrouter_lportstatus_dict.clear()