From 724f4d63c8e686fcf1d2eb91b40bcbf43b5a705f Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Wed, 16 Jan 2013 10:06:52 -0800 Subject: [PATCH] Add NVP Security group support Implements blueprint security-groups-nvp Change-Id: Idfa7a756c7a2845e9aa9e7de4c7bceeec94b036f --- quantum/common/constants.py | 3 + .../versions/3cb5d900c5de_security_groups.py | 3 +- .../nicira/nicira_nvp_plugin/QuantumPlugin.py | 214 ++++++++++++++++-- .../common/securitygroups.py | 124 ++++++++++ .../nicira/nicira_nvp_plugin/nvplib.py | 158 ++++++++++++- .../etc/fake_post_security_profile.json | 10 + .../tests/unit/nicira/fake_nvpapiclient.py | 42 +++- .../tests/unit/nicira/test_nicira_plugin.py | 52 ++++- .../unit/test_extension_security_group.py | 28 ++- 9 files changed, 595 insertions(+), 39 deletions(-) create mode 100644 quantum/plugins/nicira/nicira_nvp_plugin/common/securitygroups.py create mode 100644 quantum/tests/unit/nicira/etc/fake_post_security_profile.json diff --git a/quantum/common/constants.py b/quantum/common/constants.py index 5ff6a740c..8f661f0f4 100644 --- a/quantum/common/constants.py +++ b/quantum/common/constants.py @@ -34,6 +34,9 @@ INTERFACE_KEY = '_interfaces' IPv4 = 'IPv4' IPv6 = 'IPv6' +UDP_PROTOCOL = 17 +DHCP_RESPONSE_PORT = 68 + EXT_NS = '_extension_ns' XML_NS_V20 = 'http://openstack.org/quantum/api/v2.0' XSI_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance" diff --git a/quantum/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py b/quantum/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py index 889735b17..d14a5391a 100644 --- a/quantum/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py +++ b/quantum/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py @@ -30,7 +30,8 @@ down_revision = '48b6f43f7471' # Change to ['*'] if this migration applies to all plugins migration_for_plugins = [ - 'quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2' + 'quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2', + 'quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2' ] from alembic import op diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/QuantumPlugin.py b/quantum/plugins/nicira/nicira_nvp_plugin/QuantumPlugin.py index 2eb4d2779..af0e03915 100644 --- a/quantum/plugins/nicira/nicira_nvp_plugin/QuantumPlugin.py +++ b/quantum/plugins/nicira/nicira_nvp_plugin/QuantumPlugin.py @@ -25,7 +25,7 @@ import logging import webob.exc -from quantum.api.v2 import attributes +from quantum.api.v2 import attributes as attr from quantum.api.v2 import base from quantum.common import constants from quantum.common import exceptions as q_exc @@ -37,10 +37,14 @@ from quantum.db import dhcp_rpc_base from quantum.db import portsecurity_db # NOTE: quota_db cannot be removed, it is for db model from quantum.db import quota_db +from quantum.db import securitygroups_db from quantum.extensions import portsecurity as psec from quantum.extensions import providernet as pnet +from quantum.extensions import securitygroup as ext_sg from quantum.openstack.common import cfg from quantum.openstack.common import rpc +from quantum.plugins.nicira.nicira_nvp_plugin.common import (securitygroups + as nvp_sec) from quantum import policy from quantum.plugins.nicira.nicira_nvp_plugin.common import config from quantum.plugins.nicira.nicira_nvp_plugin.common import (exceptions @@ -108,13 +112,18 @@ class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin): class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2, - 
portsecurity_db.PortSecurityDbMixin): + portsecurity_db.PortSecurityDbMixin, + securitygroups_db.SecurityGroupDbMixin, + nvp_sec.NVPSecurityGroups): """ NvpPluginV2 is a Quantum plugin that provides L2 Virtual Network functionality using NVP. """ - supported_extension_aliases = ["provider", "quotas", "port-security"] + supported_extension_aliases = ["provider", "quotas", "port-security", + "security-group"] + __native_bulk_support = True + # Default controller cluster default_cluster = None @@ -236,9 +245,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2, network_type = attrs.get(pnet.NETWORK_TYPE) physical_network = attrs.get(pnet.PHYSICAL_NETWORK) segmentation_id = attrs.get(pnet.SEGMENTATION_ID) - network_type_set = attributes.is_attr_set(network_type) - physical_network_set = attributes.is_attr_set(physical_network) - segmentation_id_set = attributes.is_attr_set(segmentation_id) + network_type_set = attr.is_attr_set(network_type) + physical_network_set = attr.is_attr_set(physical_network) + segmentation_id_set = attr.is_attr_set(segmentation_id) if not (network_type_set or physical_network_set or segmentation_id_set): return @@ -345,18 +354,19 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2, def create_network(self, context, network): net_data = network['network'].copy() + tenant_id = self._get_tenant_id_for_create(context, net_data) + self._ensure_default_security_group(context, tenant_id) # Process the provider network extension self._handle_provider_create(context, net_data) # Replace ATTR_NOT_SPECIFIED with None before sending to NVP - for attr, value in network['network'].iteritems(): - if value is attributes.ATTR_NOT_SPECIFIED: - net_data[attr] = None + for key, value in network['network'].iteritems(): + if value is attr.ATTR_NOT_SPECIFIED: + net_data[key] = None # FIXME(arosen) implement admin_state_up = False in NVP if net_data['admin_state_up'] is False: LOG.warning(_("Network with admin_state_up=False are not yet " "supported by this plugin. Ignoring setting for " "network %s"), net_data.get('name', '')) - tenant_id = self._get_tenant_id_for_create(context, net_data) target_cluster = self._find_target_cluster(net_data) nvp_binding_type = net_data.get(pnet.NETWORK_TYPE) if nvp_binding_type in ('flat', 'vlan'): @@ -544,6 +554,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2, context, filters) for quantum_lport in quantum_lports: self._extend_port_port_security_dict(context, quantum_lport) + self._extend_port_dict_security_group(context, quantum_lport) vm_filter = "" tenant_filter = "" @@ -638,7 +649,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2, # ATTR_NOT_SPECIFIED is for the case where a port is created on a # shared network that is not owned by the tenant. # TODO(arosen) fix policy engine to do this for us automatically. 
- if attributes.is_attr_set(port['port'].get(psec.PORTSECURITY)): + if attr.is_attr_set(port['port'].get(psec.PORTSECURITY)): self._enforce_set_auth(context, port, self.port_security_enabled_create) port_data = port['port'] @@ -653,6 +664,15 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2, context, port_data) port_data[psec.PORTSECURITY] = port_security self._process_port_security_create(context, port_data) + # security group extension checks + if port_security and has_ip: + self._ensure_default_security_group_on_port(context, port) + elif attr.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)): + raise psec.PortSecurityAndIPRequiredForSecurityGroups() + port_data[ext_sg.SECURITYGROUPS] = ( + self._get_security_groups_on_port(context, port)) + self._process_port_create_security_group( + context, quantum_db['id'], port_data[ext_sg.SECURITYGROUPS]) # provider networking extension checks # Fetch the network and network binding from Quantum db network = self._get_network(context, port_data['network_id']) @@ -681,7 +701,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2, port_data['admin_state_up'], port_data['mac_address'], port_data['fixed_ips'], - port_data[psec.PORTSECURITY]) + port_data[psec.PORTSECURITY], + port_data[ext_sg.SECURITYGROUPS]) # Get NVP ls uuid for quantum network nvplib.plug_interface(cluster, selected_lswitch['uuid'], lport['uuid'], "VifAttachment", @@ -703,27 +724,55 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2, "%(tenant_id)s: (%(id)s)"), port_data) self._extend_port_port_security_dict(context, port_data) + self._extend_port_dict_security_group(context, port_data) return port_data def update_port(self, context, id, port): self._enforce_set_auth(context, port, self.port_security_enabled_update) tenant_id = self._get_tenant_id_for_create(context, port) + delete_security_groups = self._check_update_deletes_security_groups( + port) + has_security_groups = self._check_update_has_security_groups(port) with context.session.begin(subtransactions=True): ret_port = super(NvpPluginV2, self).update_port( context, id, port) # copy values over ret_port.update(port['port']) + tenant_id = self._get_tenant_id_for_create(context, ret_port) - # Handle port security - if psec.PORTSECURITY in port['port']: - self._update_port_security_binding( - context, id, ret_port[psec.PORTSECURITY]) - # populate with value - else: + # populate port_security setting + if psec.PORTSECURITY not in port['port']: ret_port[psec.PORTSECURITY] = self._get_port_security_binding( context, id) + has_ip = self._ip_on_port(ret_port) + # checks if security groups were updated adding/modifying + # security groups, port security is set and port has ip + if not (has_ip and ret_port[psec.PORTSECURITY]): + if has_security_groups: + raise psec.PortSecurityAndIPRequiredForSecurityGroups() + # Update did not have security groups passed in. Check + # that port does not have any security groups already on it. + filters = {'port_id': [id]} + security_groups = ( + super(NvpPluginV2, self)._get_port_security_group_bindings( + context, filters) + ) + if security_groups and not delete_security_groups: + raise psec.PortSecurityPortHasSecurityGroup() + + if (delete_security_groups or has_security_groups): + # delete the port binding and read it with the new rules. 
+ self._delete_port_security_group_bindings(context, id) + sgids = self._get_security_groups_on_port(context, port) + self._process_port_create_security_group(context, id, sgids) + + if psec.PORTSECURITY in port['port']: + self._update_port_security_binding( + context, id, ret_port[psec.PORTSECURITY]) + self._extend_port_port_security_dict(context, ret_port) + self._extend_port_dict_security_group(context, ret_port) port_nvp, cluster = ( nvplib.get_port_by_quantum_tag(self.clusters.itervalues(), ret_port["network_id"], id)) @@ -734,7 +783,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2, ret_port['admin_state_up'], ret_port['mac_address'], ret_port['fixed_ips'], - ret_port[psec.PORTSECURITY]) + ret_port[psec.PORTSECURITY], + ret_port[ext_sg.SECURITYGROUPS]) # Update the port status from nvp. If we fail here hide it since # the port was successfully updated but we were not able to retrieve @@ -763,7 +813,10 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2, return super(NvpPluginV2, self).delete_port(context, id) def get_port(self, context, id, fields=None): - quantum_db = super(NvpPluginV2, self).get_port(context, id, fields) + with context.session.begin(subtransactions=True): + quantum_db = super(NvpPluginV2, self).get_port(context, id, fields) + self._extend_port_port_security_dict(context, quantum_db) + self._extend_port_dict_security_group(context, quantum_db) #TODO: pass only the appropriate cluster here #Look for port in all lswitches @@ -783,3 +836,124 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2, def get_plugin_version(self): return PLUGIN_VERSION + + def create_security_group(self, context, security_group, default_sg=False): + """Create security group. + If default_sg is true that means a we are creating a default security + group and we don't need to check if one exists. + """ + s = security_group.get('security_group') + if cfg.CONF.SECURITYGROUP.proxy_mode: + if not context.is_admin: + raise ext_sg.SecurityGroupProxyModeNotAdmin() + elif not s.get('external_id'): + raise ext_sg.SecurityGroupProxyMode() + elif s.get('external_id'): + raise ext_sg.SecurityGroupNotProxyMode() + + tenant_id = self._get_tenant_id_for_create(context, s) + if not default_sg and not cfg.CONF.SECURITYGROUP.proxy_mode: + self._ensure_default_security_group(context, tenant_id, + security_group) + if s.get('external_id'): + filters = {'external_id': [s.get('external_id')]} + security_groups = super(NvpPluginV2, self).get_security_groups( + context, filters=filters) + if security_groups: + raise ext_sg.SecurityGroupAlreadyExists( + name=s.get('name', ''), external_id=s.get('external_id')) + nvp_secgroup = nvplib.create_security_profile(self.default_cluster, + tenant_id, s) + security_group['security_group']['id'] = nvp_secgroup['uuid'] + return super(NvpPluginV2, self).create_security_group( + context, security_group, default_sg) + + def delete_security_group(self, context, security_group_id): + """Delete a security group + :param security_group_id: security group rule to remove. 
+        """
+        if (cfg.CONF.SECURITYGROUP.proxy_mode and not context.is_admin):
+            raise ext_sg.SecurityGroupProxyModeNotAdmin()
+
+        with context.session.begin(subtransactions=True):
+            security_group = super(NvpPluginV2, self).get_security_group(
+                context, security_group_id)
+            if not security_group:
+                raise ext_sg.SecurityGroupNotFound(id=security_group_id)
+
+            if security_group['name'] == 'default':
+                raise ext_sg.SecurityGroupCannotRemoveDefault()
+
+            filters = {'security_group_id': [security_group['id']]}
+            if super(NvpPluginV2, self)._get_port_security_group_bindings(
+                    context, filters):
+                raise ext_sg.SecurityGroupInUse(id=security_group['id'])
+            nvplib.delete_security_profile(self.default_cluster,
+                                           security_group['id'])
+            return super(NvpPluginV2, self).delete_security_group(
+                context, security_group_id)
+
+    def create_security_group_rule(self, context, security_group_rule):
+        """Create a single security group rule."""
+        bulk_rule = {'security_group_rules': [security_group_rule]}
+        return self.create_security_group_rule_bulk(context, bulk_rule)[0]
+
+    def create_security_group_rule_bulk(self, context, security_group_rule):
+        """Create security group rules.
+        :param security_group_rule: list of rules to create
+        """
+        s = security_group_rule.get('security_group_rules')
+        tenant_id = self._get_tenant_id_for_create(context, s)
+
+        # TODO(arosen) is there any way we could avoid having the update of
+        # the security group rules in nvp outside of this transaction?
+        with context.session.begin(subtransactions=True):
+            self._ensure_default_security_group(context, tenant_id)
+            security_group_id = self._validate_security_group_rules(
+                context, security_group_rule)
+
+            # Check to make sure security group exists and retrieve
+            # security_group['id'] needed in case it only has an external_id
+            security_group = super(NvpPluginV2, self).get_security_group(
+                context, security_group_id)
+
+            if not security_group:
+                raise ext_sg.SecurityGroupNotFound(id=security_group_id)
+            # Check for duplicate rules
+            self._check_for_duplicate_rules(context, s)
+            # Gather all the existing security group rules since we need all
+            # of them to PUT to NVP.
+            combined_rules = self._merge_security_group_rules_with_current(
+                context, s, security_group['id'])
+            nvplib.update_security_group_rules(self.default_cluster,
+                                               security_group['id'],
+                                               combined_rules)
+            return super(
+                NvpPluginV2, self).create_security_group_rule_bulk_native(
+                    context, security_group_rule)
+
+    def delete_security_group_rule(self, context, sgrid):
+        """Delete a security group rule.
+        :param sgrid: security group rule id to remove.
+ """ + if (cfg.CONF.SECURITYGROUP.proxy_mode and not context.is_admin): + raise ext_sg.SecurityGroupProxyModeNotAdmin() + + with context.session.begin(subtransactions=True): + # determine security profile id + security_group_rule = ( + super(NvpPluginV2, self).get_security_group_rule( + context, sgrid)) + if not security_group_rule: + raise ext_sg.SecurityGroupRuleNotFound(id=sgrid) + + sgid = security_group_rule['security_group_id'] + current_rules = self._get_security_group_rules_nvp_format( + context, sgid, True) + + self._remove_security_group_with_id_and_id_field( + current_rules, sgrid) + nvplib.update_security_group_rules( + self.default_cluster, sgid, current_rules) + return super(NvpPluginV2, self).delete_security_group_rule(context, + sgrid) diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/common/securitygroups.py b/quantum/plugins/nicira/nicira_nvp_plugin/common/securitygroups.py new file mode 100644 index 000000000..d5b7805bf --- /dev/null +++ b/quantum/plugins/nicira/nicira_nvp_plugin/common/securitygroups.py @@ -0,0 +1,124 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Nicira, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless equired by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Aaron Rosen, Nicira Networks, Inc. + +from quantum.extensions import securitygroup as ext_sg + +# Protocol number look up for supported protocols +protocol_num_look_up = {'tcp': 6, 'icmp': 1, 'udp': 17} + + +class NVPSecurityGroups(object): + + def _convert_to_nvp_rule(self, rule, with_id=False): + """Converts Quantum API security group rule to NVP API.""" + nvp_rule = {} + params = ['source_ip_prefix', 'protocol', + 'source_group_id', 'port_range_min', + 'port_range_max', 'ethertype'] + if with_id: + params.append('id') + + for param in params: + value = rule.get(param) + if param not in rule: + nvp_rule[param] = value + elif not value: + pass + elif param == 'source_ip_prefix': + nvp_rule['ip_prefix'] = rule['source_ip_prefix'] + elif param == 'source_group_id': + nvp_rule['profile_uuid'] = rule['source_group_id'] + elif param == 'protocol': + nvp_rule['protocol'] = protocol_num_look_up[rule['protocol']] + else: + nvp_rule[param] = value + return nvp_rule + + def _convert_to_nvp_rules(self, rules, with_id=False): + """Converts a list of Quantum API security group rules to NVP API.""" + nvp_rules = {'logical_port_ingress_rules': [], + 'logical_port_egress_rules': []} + for direction in ['logical_port_ingress_rules', + 'logical_port_egress_rules']: + for rule in rules[direction]: + nvp_rules[direction].append( + self._convert_to_nvp_rule(rule, with_id)) + return nvp_rules + + def _get_security_group_rules_nvp_format(self, context, security_group_id, + with_id=False): + """Query quantum db for security group rules. If external_id is + provided the external_id will also be returned. 
+        """
+        fields = ['source_ip_prefix', 'source_group_id', 'protocol',
+                  'port_range_min', 'port_range_max', 'ethertype']
+        if with_id:
+            fields.append('id')
+
+        filters = {'security_group_id': [security_group_id],
+                   'direction': ['ingress']}
+        ingress_rules = self.get_security_group_rules(context, filters, fields)
+        filters = {'security_group_id': [security_group_id],
+                   'direction': ['egress']}
+        egress_rules = self.get_security_group_rules(context, filters, fields)
+        rules = {'logical_port_ingress_rules': egress_rules,
+                 'logical_port_egress_rules': ingress_rules}
+        return self._convert_to_nvp_rules(rules, with_id)
+
+    def _get_profile_uuid(self, context, source_group_id):
+        """Return the NVP profile id for nova's security group id."""
+        security_group = self.get_security_group(context, source_group_id)
+        if not security_group:
+            raise ext_sg.SecurityGroupNotFound(id=source_group_id)
+        return security_group['id']
+
+    def _merge_security_group_rules_with_current(self, context, new_rules,
+                                                 security_group_id):
+        merged_rules = self._get_security_group_rules_nvp_format(
+            context, security_group_id)
+        for new_rule in new_rules:
+            rule = new_rule['security_group_rule']
+            rule['security_group_id'] = security_group_id
+            if rule.get('source_group_id'):
+                rule['source_group_id'] = self._get_profile_uuid(
+                    context, rule['source_group_id'])
+            if rule['direction'] == 'ingress':
+                merged_rules['logical_port_egress_rules'].append(
+                    self._convert_to_nvp_rule(rule))
+            elif rule['direction'] == 'egress':
+                merged_rules['logical_port_ingress_rules'].append(
+                    self._convert_to_nvp_rule(rule))
+        return merged_rules
+
+    def _remove_security_group_with_id_and_id_field(self, rules, rule_id):
+        """This function receives all of the current rules associated with a
+        security group and then removes the rule that matches rule_id. In
+        addition it removes the id field from each rule dict since that
+        should not be passed to nvp.
+ """ + for rule_direction in rules.values(): + item_to_remove = None + for port_rule in rule_direction: + if port_rule['id'] == rule_id: + item_to_remove = port_rule + else: + # remove key from dictionary for NVP + del port_rule['id'] + if item_to_remove: + rule_direction.remove(item_to_remove) diff --git a/quantum/plugins/nicira/nicira_nvp_plugin/nvplib.py b/quantum/plugins/nicira/nicira_nvp_plugin/nvplib.py index 431da9a59..b38056e74 100644 --- a/quantum/plugins/nicira/nicira_nvp_plugin/nvplib.py +++ b/quantum/plugins/nicira/nicira_nvp_plugin/nvplib.py @@ -417,7 +417,7 @@ def get_port(cluster, network, port, relations=None): def _configure_extensions(lport_obj, mac_address, fixed_ips, - port_security_enabled): + port_security_enabled, security_profiles): lport_obj['allowed_address_pairs'] = [] if port_security_enabled: for fixed_ip in fixed_ips: @@ -430,11 +430,13 @@ def _configure_extensions(lport_obj, mac_address, fixed_ips, lport_obj["allowed_address_pairs"].append( {"mac_address": mac_address, "ip_address": "0.0.0.0"}) + lport_obj['security_profiles'] = list(security_profiles or []) def update_port(cluster, lswitch_uuid, lport_uuid, quantum_port_id, tenant_id, display_name, device_id, admin_status_enabled, - mac_address=None, fixed_ips=None, port_security_enabled=None): + mac_address=None, fixed_ips=None, port_security_enabled=None, + security_profiles=None): # device_id can be longer than 40 so we rehash it hashed_device_id = hashlib.sha1(device_id).hexdigest() @@ -446,7 +448,7 @@ def update_port(cluster, lswitch_uuid, lport_uuid, quantum_port_id, tenant_id, dict(scope='vm_id', tag=hashed_device_id)]) _configure_extensions(lport_obj, mac_address, fixed_ips, - port_security_enabled) + port_security_enabled, security_profiles) path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid try: @@ -465,7 +467,8 @@ def update_port(cluster, lswitch_uuid, lport_uuid, quantum_port_id, tenant_id, def create_lport(cluster, lswitch_uuid, tenant_id, quantum_port_id, display_name, device_id, admin_status_enabled, - mac_address=None, fixed_ips=None, port_security_enabled=None): + mac_address=None, fixed_ips=None, port_security_enabled=None, + security_profiles=None): """ Creates a logical port on the assigned logical switch """ # device_id can be longer than 40 so we rehash it hashed_device_id = hashlib.sha1(device_id).hexdigest() @@ -478,7 +481,7 @@ def create_lport(cluster, lswitch_uuid, tenant_id, quantum_port_id, ) _configure_extensions(lport_obj, mac_address, fixed_ips, - port_security_enabled) + port_security_enabled, security_profiles) path = _build_uri_path(LPORT_RESOURCE, parent_resource_id=lswitch_uuid) try: @@ -538,3 +541,148 @@ def plug_interface(cluster, lswitch_id, port, type, attachment=None): result = json.dumps(resp_obj) return result + +#------------------------------------------------------------------------------ +# Security Profile convenience functions. +#------------------------------------------------------------------------------ +EXT_SECURITY_PROFILE_ID_SCOPE = 'nova_spid' +TENANT_ID_SCOPE = 'os_tid' + + +def format_exception(etype, e, execption_locals, request=None): + """Consistent formatting for exceptions. + :param etype: a string describing the exception type. + :param e: the exception. + :param request: the request object. + :param execption_locals: calling context local variable dict. + :returns: a formatted string. + """ + msg = ["Error. %s exception: %s." 
+           % (etype, e)]
+    if request:
+        msg.append("request=[%s]" % request)
+        if request.body:
+            msg.append("request.body=[%s]" % str(request.body))
+    l = dict((k, v) for k, v in execption_locals.items() if k != 'request')
+    msg.append("locals=[%s]" % str(l))
+    return ' '.join(msg)
+
+
+def do_request(*args, **kwargs):
+    """Convenience function that wraps do_single_request.
+
+    :param args: a list of positional arguments.
+    :param kwargs: a dict of keyword arguments.
+    :returns: the result of do_single_request loaded into a python object
+        or None."""
+    res = do_single_request(*args, **kwargs)
+    if res:
+        return json.loads(res)
+    return res
+
+
+def mk_body(**kwargs):
+    """Convenience function that creates and dumps a dictionary to a string.
+
+    :param kwargs: the key/value pairs to be dumped into a json string.
+    :returns: a json string."""
+    return json.dumps(kwargs, ensure_ascii=False)
+
+
+def set_tenant_id_tag(tenant_id, taglist=None):
+    """Convenience function to add a tenant_id tag to a taglist.
+
+    :param tenant_id: the tenant_id to set.
+    :param taglist: the taglist to append to (or None).
+    :returns: a new taglist that includes the old taglist with the new
+        tenant_id tag set."""
+    new_taglist = []
+    if taglist:
+        new_taglist = [x for x in taglist if x['scope'] != TENANT_ID_SCOPE]
+    new_taglist.append(dict(scope=TENANT_ID_SCOPE, tag=tenant_id))
+    return new_taglist
+
+
+def set_ext_security_profile_id_tag(external_id, taglist=None):
+    """Convenience function to add a spid tag to a taglist.
+
+    :param external_id: the security_profile id from nova.
+    :param taglist: the taglist to append to (or None).
+    :returns: a new taglist that includes the old taglist with the new
+        spid tag set."""
+    new_taglist = []
+    if taglist:
+        new_taglist = [x for x in taglist if x['scope'] !=
+                       EXT_SECURITY_PROFILE_ID_SCOPE]
+    if external_id:
+        new_taglist.append(dict(scope=EXT_SECURITY_PROFILE_ID_SCOPE,
+                                tag=str(external_id)))
+    return new_taglist
+
+
+# -----------------------------------------------------------------------------
+# Security Group API Calls
+# -----------------------------------------------------------------------------
+def create_security_profile(cluster, tenant_id, security_profile):
+    path = "/ws.v1/security-profile"
+    tags = set_tenant_id_tag(tenant_id)
+    tags = set_ext_security_profile_id_tag(
+        security_profile.get('external_id'), tags)
+    # Allow all DHCP responses in.
+    dhcp = {'logical_port_egress_rules': [{'ethertype': 'IPv4',
+                                           'protocol': 17,
+                                           'port_range_min': 68,
+                                           'port_range_max': 68,
+                                           'ip_prefix': '0.0.0.0/0'}],
+            'logical_port_ingress_rules': []}
+    try:
+        body = mk_body(
+            tags=tags, display_name=security_profile.get('name'),
+            logical_port_ingress_rules=dhcp['logical_port_ingress_rules'],
+            logical_port_egress_rules=dhcp['logical_port_egress_rules'])
+        rsp = do_request("POST", path, body, cluster=cluster)
+    except NvpApiClient.NvpApiException as e:
+        LOG.error(format_exception("Unknown", e, locals()))
+        raise exception.QuantumException()
+    if security_profile.get('name') == 'default':
+        # If the security group is the default, allow IP traffic between
+        # members of the same security profile.
+ rules = {'logical_port_egress_rules': [{'ethertype': 'IPv4', + 'profile_uuid': rsp['uuid']}, + {'ethertype': 'IPv6', + 'profile_uuid': rsp['uuid']}], + 'logical_port_ingress_rules': []} + + update_security_group_rules(cluster, rsp['uuid'], rules) + LOG.debug("Created Security Profile: %s" % rsp) + return rsp + + +def update_security_group_rules(cluster, spid, rules): + path = "/ws.v1/security-profile/%s" % spid + + # Allow all dhcp responses in + rules['logical_port_egress_rules'].append( + {'ethertype': 'IPv4', 'protocol': constants.UDP_PROTOCOL, + 'port_range_min': constants.DHCP_RESPONSE_PORT, + 'port_range_max': constants.DHCP_RESPONSE_PORT, + 'ip_prefix': '0.0.0.0/0'}) + try: + body = mk_body( + logical_port_ingress_rules=rules['logical_port_ingress_rules'], + logical_port_egress_rules=rules['logical_port_egress_rules']) + rsp = do_request("PUT", path, body, cluster=cluster) + except NvpApiClient.NvpApiException as e: + LOG.error(format_exception("Unknown", e, locals())) + raise exception.QuantumException() + LOG.debug("Updated Security Profile: %s" % rsp) + return rsp + + +def delete_security_profile(cluster, spid): + path = "/ws.v1/security-profile/%s" % spid + + try: + do_request("DELETE", path, cluster=cluster) + except NvpApiClient.NvpApiException as e: + LOG.error(format_exception("Unknown", e, locals())) + raise exception.QuantumException() diff --git a/quantum/tests/unit/nicira/etc/fake_post_security_profile.json b/quantum/tests/unit/nicira/etc/fake_post_security_profile.json new file mode 100644 index 000000000..594da3310 --- /dev/null +++ b/quantum/tests/unit/nicira/etc/fake_post_security_profile.json @@ -0,0 +1,10 @@ +{ + "display_name": "%(display_name)s", + "_href": "/ws.v1/security-profile/%(uuid)s", + "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}, + {"scope": "nova_spid", "tag": "%(nova_spid)s"}], + "logical_port_egress_rules": [], + "_schema": "/ws.v1/schema/SecurityProfileConfig", + "logical_port_ingress_rules": [], + "uuid": "%(uuid)s" +} diff --git a/quantum/tests/unit/nicira/fake_nvpapiclient.py b/quantum/tests/unit/nicira/fake_nvpapiclient.py index b538ec140..8664391e2 100644 --- a/quantum/tests/unit/nicira/fake_nvpapiclient.py +++ b/quantum/tests/unit/nicira/fake_nvpapiclient.py @@ -35,17 +35,20 @@ class FakeClient: FAKE_POST_RESPONSES = { "lswitch": "fake_post_lswitch.json", - "lport": "fake_post_lport.json" + "lport": "fake_post_lport.json", + "securityprofile": "fake_post_security_profile.json" } FAKE_PUT_RESPONSES = { "lswitch": "fake_post_lswitch.json", - "lport": "fake_post_lport.json" + "lport": "fake_post_lport.json", + "securityprofile": "fake_post_security_profile.json" } _fake_lswitch_dict = {} _fake_lport_dict = {} _fake_lportstatus_dict = {} + _fake_securityprofile_dict = {} def __init__(self, fake_files_path): self.fake_files_path = fake_files_path @@ -102,17 +105,32 @@ class FakeClient: self._fake_lportstatus_dict[fake_lport['uuid']] = fake_lport_status return fake_lport + def _add_securityprofile(self, body): + fake_securityprofile = json.loads(body) + fake_securityprofile['uuid'] = uuidutils.generate_uuid() + fake_securityprofile['tenant_id'] = self._get_tag( + fake_securityprofile, 'os_tid') + + fake_securityprofile['nova_spid'] = self._get_tag(fake_securityprofile, + 'nova_spid') + self._fake_securityprofile_dict[fake_securityprofile['uuid']] = ( + fake_securityprofile) + return fake_securityprofile + def _get_resource_type(self, path): uri_split = path.split('/') resource_type = ('status' in uri_split and 'lport' in uri_split and 
'lportstatus' or 'lport' in uri_split and 'lport' - or 'lswitch' in uri_split and 'lswitch') + or 'lswitch' in uri_split and 'lswitch' or + 'security-profile' in uri_split and 'securityprofile') switch_uuid = ('lswitch' in uri_split and len(uri_split) > 3 and uri_split[3]) port_uuid = ('lport' in uri_split and len(uri_split) > 5 and uri_split[5]) - return (resource_type, switch_uuid, port_uuid) + securityprofile_uuid = ('security-profile' in uri_split and + len(uri_split) > 3 and uri_split[3]) + return (resource_type, switch_uuid, port_uuid, securityprofile_uuid) def _list(self, resource_type, response_file, switch_uuid=None, query=None): @@ -176,7 +194,8 @@ class FakeClient: def handle_get(self, url): #TODO(salvatore-orlando): handle field selection parsedurl = urlparse.urlparse(url) - (res_type, s_uuid, p_uuid) = self._get_resource_type(parsedurl.path) + (res_type, s_uuid, p_uuid, sec_uuid) = self._get_resource_type( + parsedurl.path) response_file = self.FAKE_GET_RESPONSES.get(res_type) if not response_file: raise Exception("resource not found") @@ -199,7 +218,8 @@ class FakeClient: def handle_post(self, url, body): parsedurl = urlparse.urlparse(url) - (res_type, s_uuid, _p) = self._get_resource_type(parsedurl.path) + (res_type, s_uuid, _p, sec_uuid) = self._get_resource_type( + parsedurl.path) response_file = self.FAKE_POST_RESPONSES.get(res_type) if not response_file: raise Exception("resource not found") @@ -214,8 +234,9 @@ class FakeClient: def handle_put(self, url, body): parsedurl = urlparse.urlparse(url) - (res_type, s_uuid, p_uuid) = self._get_resource_type(parsedurl.path) - target_uuid = p_uuid or s_uuid + (res_type, s_uuid, p_uuid, sec_uuid) = self._get_resource_type( + parsedurl.path) + target_uuid = p_uuid or s_uuid or sec_uuid response_file = self.FAKE_PUT_RESPONSES.get(res_type) if not response_file: raise Exception("resource not found") @@ -229,8 +250,9 @@ class FakeClient: def handle_delete(self, url): parsedurl = urlparse.urlparse(url) - (res_type, s_uuid, p_uuid) = self._get_resource_type(parsedurl.path) - target_uuid = p_uuid or s_uuid + (res_type, s_uuid, p_uuid, sec_uuid) = self._get_resource_type( + parsedurl.path) + target_uuid = p_uuid or s_uuid or sec_uuid response_file = self.FAKE_PUT_RESPONSES.get(res_type) if not response_file: raise Exception("resource not found") diff --git a/quantum/tests/unit/nicira/test_nicira_plugin.py b/quantum/tests/unit/nicira/test_nicira_plugin.py index c20beebbc..2c04cba32 100644 --- a/quantum/tests/unit/nicira/test_nicira_plugin.py +++ b/quantum/tests/unit/nicira/test_nicira_plugin.py @@ -22,13 +22,14 @@ import webob.exc import quantum.common.test_lib as test_lib from quantum import context from quantum.extensions import providernet as pnet +from quantum.extensions import securitygroup as secgrp from quantum import manager from quantum.openstack.common import cfg from quantum.plugins.nicira.nicira_nvp_plugin import nvplib from quantum.tests.unit.nicira import fake_nvpapiclient import quantum.tests.unit.test_db_plugin as test_plugin import quantum.tests.unit.test_extension_portsecurity as psec - +import quantum.tests.unit.test_extension_security_group as ext_sg LOG = logging.getLogger(__name__) NICIRA_PKG_PATH = 'quantum.plugins.nicira.nicira_nvp_plugin' @@ -120,6 +121,24 @@ class TestNiciraPortsV2(test_plugin.TestPortsV2, NiciraPluginV2TestCase): net['network']['id']) self.assertEqual(len(ls), 2) + def test_update_port_delete_ip(self): + # This test case overrides the default because the nvp plugin + # implements 
port_security/security groups and it is not allowed + # to remove an ip address from a port unless the security group + # is first removed. + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + data = {'port': {'admin_state_up': False, + 'fixed_ips': [], + secgrp.SECURITYGROUPS: []}} + req = self.new_update_request('ports', + data, port['port']['id']) + res = self.deserialize('json', req.get_response(self.api)) + self.assertEqual(res['port']['admin_state_up'], + data['port']['admin_state_up']) + self.assertEqual(res['port']['fixed_ips'], + data['port']['fixed_ips']) + class TestNiciraNetworksV2(test_plugin.TestNetworksV2, NiciraPluginV2TestCase): @@ -185,3 +204,34 @@ class NiciraPortSecurityTestCase(psec.PortSecurityDBTestCase): class TestNiciraPortSecurity(psec.TestPortSecurity, NiciraPortSecurityTestCase): pass + + +class NiciraSecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase): + + _plugin_name = ('%s.QuantumPlugin.NvpPluginV2' % NICIRA_PKG_PATH) + + def setUp(self): + etc_path = os.path.join(os.path.dirname(__file__), 'etc') + test_lib.test_config['config_files'] = [os.path.join(etc_path, + 'nvp.ini.test')] + # mock nvp api client + fc = fake_nvpapiclient.FakeClient(etc_path) + self.mock_nvpapi = mock.patch('%s.NvpApiClient.NVPApiHelper' + % NICIRA_PKG_PATH, autospec=True) + instance = self.mock_nvpapi.start() + instance.return_value.login.return_value = "the_cookie" + + def _fake_request(*args, **kwargs): + return fc.fake_request(*args, **kwargs) + + instance.return_value.request.side_effect = _fake_request + super(NiciraSecurityGroupsTestCase, self).setUp(self._plugin_name) + + def tearDown(self): + super(NiciraSecurityGroupsTestCase, self).tearDown() + self.mock_nvpapi.stop() + + +class TestNiciraSecurityGroup(ext_sg.TestSecurityGroups, + NiciraSecurityGroupsTestCase): + pass diff --git a/quantum/tests/unit/test_extension_security_group.py b/quantum/tests/unit/test_extension_security_group.py index 1ee83ce25..cc70b1f54 100644 --- a/quantum/tests/unit/test_extension_security_group.py +++ b/quantum/tests/unit/test_extension_security_group.py @@ -1,5 +1,5 @@ # Copyright (c) 2012 OpenStack, LLC. -# + # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -19,6 +19,7 @@ import os import mock import webob.exc +from quantum.api.v2 import attributes as attr from quantum.common.test_lib import test_config from quantum import context from quantum.db import db_base_plugin_v2 @@ -174,7 +175,7 @@ class SecurityGroupTestPlugin(db_base_plugin_v2.QuantumDbPluginV2, def create_port(self, context, port): tenant_id = self._get_tenant_id_for_create(context, port['port']) default_sg = self._ensure_default_security_group(context, tenant_id) - if not port['port'].get(ext_sg.SECURITYGROUPS): + if not attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)): port['port'][ext_sg.SECURITYGROUPS] = [default_sg] session = context.session with session.begin(subtransactions=True): @@ -207,6 +208,13 @@ class SecurityGroupTestPlugin(db_base_plugin_v2.QuantumDbPluginV2, return super(SecurityGroupTestPlugin, self).create_network(context, network) + def get_ports(self, context, filters=None, fields=None): + quantum_lports = super(SecurityGroupTestPlugin, self).get_ports( + context, filters) + for quantum_lport in quantum_lports: + self._extend_port_dict_security_group(context, quantum_lport) + return quantum_lports + class SecurityGroupDBTestCase(SecurityGroupsTestCase): def setUp(self, plugin=None): @@ -215,6 +223,10 @@ class SecurityGroupDBTestCase(SecurityGroupsTestCase): test_config['extension_manager'] = ext_mgr super(SecurityGroupDBTestCase, self).setUp(plugin) + def tearDown(self): + del test_config['plugin_name_v2'] + super(SecurityGroupDBTestCase, self).tearDown() + class TestSecurityGroups(SecurityGroupDBTestCase): def test_create_security_group(self): @@ -649,6 +661,18 @@ class TestSecurityGroups(SecurityGroupDBTestCase): self.deserialize(self.fmt, res) self.assertEqual(res.status_int, 400) + def test_list_ports_security_group(self): + with self.network() as n: + with self.subnet(n): + res = self._create_port(self.fmt, n['network']['id']) + self.deserialize(self.fmt, res) + res = self.new_list_request('ports') + ports = self.deserialize(self.fmt, + res.get_response(self.api)) + port = ports['ports'][0] + self.assertEquals(len(port[ext_sg.SECURITYGROUPS]), 1) + self._delete('ports', port['id']) + def test_update_port_with_security_group(self): with self.network() as n: with self.subnet(n): -- 2.45.2