--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""initial port security
+
+Revision ID: 1149d7de0cfa
+Revises: 1b693c095aa3
+Create Date: 2013-01-22 14:05:20.696502
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '1149d7de0cfa'
+down_revision = '1b693c095aa3'
+
+# Change to ['*'] if this migration applies to all plugins
+
+migration_for_plugins = [
+ 'quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2'
+]
+
+from alembic import op
+import sqlalchemy as sa
+
+from quantum.db import migration
+
+
+def upgrade(active_plugin=None, options=None):
+ if not migration.should_run(active_plugin, migration_for_plugins):
+ return
+
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('networksecuritybindings',
+ sa.Column('network_id', sa.String(length=36),
+ nullable=False),
+ sa.Column('port_security_enabled', sa.Boolean(),
+ nullable=False),
+ sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('network_id'))
+ op.create_table('portsecuritybindings',
+ sa.Column('port_id', sa.String(length=36),
+ nullable=False),
+ sa.Column('port_security_enabled', sa.Boolean(),
+ nullable=False),
+ sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('port_id'))
+ ### end Alembic commands ###
+
+
+def downgrade(active_plugin=None, options=None):
+ if not migration.should_run(active_plugin, migration_for_plugins):
+ return
+
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('portsecuritybindings')
+ op.drop_table('networksecuritybindings')
+ ### end Alembic commands ###
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
+from quantum.db import portsecurity_db
# NOTE: quota_db cannot be removed, it is for db model
from quantum.db import quota_db
+from quantum.extensions import portsecurity as psec
from quantum.extensions import providernet as pnet
from quantum.openstack.common import cfg
from quantum.openstack.common import rpc
return q_rpc.PluginRpcDispatcher([self])
-class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
+class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
+ portsecurity_db.PortSecurityDbMixin):
"""
NvpPluginV2 is a Quantum plugin that provides L2 Virtual Network
functionality using NVP.
"""
- supported_extension_aliases = ["provider", "quotas"]
+ supported_extension_aliases = ["provider", "quotas", "port-security"]
# Default controller cluster
default_cluster = None
+ provider_network_view = "extension:provider_network:view"
+ provider_network_set = "extension:provider_network:set"
+ port_security_enabled_create = "create_port:port_security_enabled"
+ port_security_enabled_update = "update_port:port_security_enabled"
+
def __init__(self, loglevel=None):
if loglevel:
logging.basicConfig(level=loglevel)
else:
return self.default_cluster
- def _check_provider_view_auth(self, context, network):
- return policy.check(context,
- "extension:provider_network:view",
- network)
+ def _check_view_auth(self, context, resource, action):
+ return policy.check(context, action, resource)
- def _enforce_provider_set_auth(self, context, network):
- return policy.enforce(context,
- "extension:provider_network:set",
- network)
+ def _enforce_set_auth(self, context, resource, action):
+ return policy.enforce(context, action, resource)
def _handle_provider_create(self, context, attrs):
# NOTE(salvatore-orlando): This method has been borrowed from
return
# Authorize before exposing plugin details to client
- self._enforce_provider_set_auth(context, attrs)
+ self._enforce_set_auth(context, attrs, self.provider_network_set)
err_msg = None
if not network_type_set:
err_msg = _("%s required") % pnet.NETWORK_TYPE
# which should be specified in physical_network
def _extend_network_dict_provider(self, context, network, binding=None):
- if self._check_provider_view_auth(context, network):
+ if self._check_view_auth(context, network, self.provider_network_view):
if not binding:
binding = nicira_db.get_network_binding(context.session,
network['id'])
with context.session.begin(subtransactions=True):
new_net = super(NvpPluginV2, self).create_network(context,
network)
+ self._process_network_create_port_security(context,
+ network['network'])
if net_data.get(pnet.NETWORK_TYPE):
net_binding = nicira_db.add_network_binding(
context.session, new_net['id'],
net_data.get(pnet.SEGMENTATION_ID))
self._extend_network_dict_provider(context, new_net,
net_binding)
+ self._extend_network_port_security_dict(context, new_net)
return new_net
def delete_network(self, context, id):
network = self._get_network(context, id)
net_result = self._make_network_dict(network, None)
self._extend_network_dict_provider(context, net_result)
+ self._extend_network_port_security_dict(context, net_result)
+
# verify the fabric status of the corresponding
# logical switch(es) in nvp
try:
super(NvpPluginV2, self).get_networks(context, filters))
for net in quantum_lswitches:
self._extend_network_dict_provider(context, net)
+ self._extend_network_port_security_dict(context, net)
if context.is_admin and not filters.get("tenant_id"):
tenant_filter = ""
raise q_exc.NotImplementedError(_("admin_state_up=False "
"networks are not "
"supported."))
- return super(NvpPluginV2, self).update_network(context, id, network)
+ with context.session.begin(subtransactions=True):
+ quantum_db = super(NvpPluginV2, self).update_network(
+ context, id, network)
+ if psec.PORTSECURITY in network['network']:
+ self._update_network_security_binding(
+ context, id, network['network'][psec.PORTSECURITY])
+ self._extend_network_port_security_dict(
+ context, quantum_db)
+ return quantum_db
def get_ports(self, context, filters=None, fields=None):
- quantum_lports = super(NvpPluginV2, self).get_ports(context, filters)
+ with context.session.begin(subtransactions=True):
+ quantum_lports = super(NvpPluginV2, self).get_ports(
+ context, filters)
+ for quantum_lport in quantum_lports:
+ self._extend_port_port_security_dict(context, quantum_lport)
+
vm_filter = ""
tenant_filter = ""
# This is used when calling delete_network. Quantum checks to see if
return lports
def create_port(self, context, port):
+ # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED
+ # then we pass the port to the policy engine. The reason why we don't
+ # pass the value to the policy engine when the port is
+ # ATTR_NOT_SPECIFIED is for the case where a port is created on a
+ # shared network that is not owned by the tenant.
+ # TODO(arosen) fix policy engine to do this for us automatically.
+ if attributes.is_attr_set(port['port'].get(psec.PORTSECURITY)):
+ self._enforce_set_auth(context, port,
+ self.port_security_enabled_create)
+ port_data = port['port']
with context.session.begin(subtransactions=True):
# First we allocate port in quantum database
quantum_db = super(NvpPluginV2, self).create_port(context, port)
# Update fields obtained from quantum db (eg: MAC address)
port["port"].update(quantum_db)
- port_data = port['port']
+
+ # port security extension checks
+ (port_security, has_ip) = self._determine_port_security_and_has_ip(
+ context, port_data)
+ port_data[psec.PORTSECURITY] = port_security
+ self._process_port_security_create(context, port_data)
+ # provider networking extension checks
# Fetch the network and network binding from Quantum db
network = self._get_network(context, port_data['network_id'])
network_binding = nicira_db.get_network_binding(
port_data['device_id'],
port_data['admin_state_up'],
port_data['mac_address'],
- port_data['fixed_ips'])
+ port_data['fixed_ips'],
+ port_data[psec.PORTSECURITY])
# Get NVP ls uuid for quantum network
nvplib.plug_interface(cluster, selected_lswitch['uuid'],
lport['uuid'], "VifAttachment",
LOG.debug(_("create_port completed on NVP for tenant "
"%(tenant_id)s: (%(id)s)"), port_data)
- return port_data
+ self._extend_port_port_security_dict(context, port_data)
+ return port_data
def update_port(self, context, id, port):
- params = {}
- port_quantum = super(NvpPluginV2, self).get_port(context, id)
- port_nvp, cluster = (
- nvplib.get_port_by_quantum_tag(self.clusters.itervalues(),
- port_quantum["network_id"], id))
- params["cluster"] = cluster
- params["port"] = port_quantum
- LOG.debug(_("Update port request: %s"), params)
- nvplib.update_port(port_quantum['network_id'],
- port_nvp['uuid'], **params)
- return super(NvpPluginV2, self).update_port(context, id, port)
+ self._enforce_set_auth(context, port,
+ self.port_security_enabled_update)
+ tenant_id = self._get_tenant_id_for_create(context, port)
+ with context.session.begin(subtransactions=True):
+ ret_port = super(NvpPluginV2, self).update_port(
+ context, id, port)
+ # copy values over
+ ret_port.update(port['port'])
+
+ # Handle port security
+ if psec.PORTSECURITY in port['port']:
+ self._update_port_security_binding(
+ context, id, ret_port[psec.PORTSECURITY])
+ # populate with value
+ else:
+ ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
+ context, id)
+
+ port_nvp, cluster = (
+ nvplib.get_port_by_quantum_tag(self.clusters.itervalues(),
+ ret_port["network_id"], id))
+ LOG.debug(_("Update port request: %s"), port)
+ nvplib.update_port(cluster, ret_port['network_id'],
+ port_nvp['uuid'], id, tenant_id,
+ ret_port['name'], ret_port['device_id'],
+ ret_port['admin_state_up'],
+ ret_port['mac_address'],
+ ret_port['fixed_ips'],
+ ret_port[psec.PORTSECURITY])
+
+ # Update the port status from nvp. If we fail here hide it since
+ # the port was successfully updated but we were not able to retrieve
+ # the status.
+ try:
+ ret_port['status'] = nvplib.get_port_status(
+ cluster, ret_port['network_id'], port_nvp['uuid'])
+ except:
+ LOG.warn(_("Unable to retrieve port status for: %s."),
+ port_nvp['uuid'])
+ return ret_port
def delete_port(self, context, id):
# TODO(salvatore-orlando): pass only actual cluster
raise exception.QuantumException()
if not resp_obj:
return []
- lswitches = json.loads(resp_obj)["results"]
networks_result = copy(networks)
return networks_result
for c in clusters:
try:
res_obj = do_single_request('GET', query, cluster=c)
- except Exception as e:
+ except Exception:
continue
res = json.loads(res_obj)
if len(res["results"]) == 1:
return port
-def update_port(network, port_id, **params):
- cluster = params["cluster"]
- lport_obj = {}
+def _configure_extensions(lport_obj, mac_address, fixed_ips,
+ port_security_enabled):
+ lport_obj['allowed_address_pairs'] = []
+ if port_security_enabled:
+ for fixed_ip in fixed_ips:
+ ip_address = fixed_ip.get('ip_address')
+ if ip_address:
+ lport_obj['allowed_address_pairs'].append(
+ {'mac_address': mac_address, 'ip_address': ip_address})
+ # add address pair allowing src_ip 0.0.0.0 to leave
+ # this is required for outgoing dhcp request
+ lport_obj["allowed_address_pairs"].append(
+ {"mac_address": mac_address,
+ "ip_address": "0.0.0.0"})
- admin_state_up = params['port'].get('admin_state_up')
- name = params["port"].get("name")
- device_id = params["port"].get("device_id")
- if admin_state_up:
- lport_obj["admin_status_enabled"] = admin_state_up
- if name:
- lport_obj["display_name"] = name
-
- if device_id:
- # device_id can be longer than 40 so we rehash it
- device_id = hashlib.sha1(device_id).hexdigest()
- lport_obj["tags"] = (
- [dict(scope='os_tid', tag=params["port"].get("tenant_id")),
- dict(scope='q_port_id', tag=params["port"]["id"]),
- dict(scope='vm_id', tag=device_id)])
-
- uri = "/ws.v1/lswitch/" + network + "/lport/" + port_id
+
+def update_port(cluster, lswitch_uuid, lport_uuid, quantum_port_id, tenant_id,
+ display_name, device_id, admin_status_enabled,
+ mac_address=None, fixed_ips=None, port_security_enabled=None):
+
+ # device_id can be longer than 40 so we rehash it
+ hashed_device_id = hashlib.sha1(device_id).hexdigest()
+ lport_obj = dict(
+ admin_status_enabled=admin_status_enabled,
+ display_name=display_name,
+ tags=[dict(scope='os_tid', tag=tenant_id),
+ dict(scope='q_port_id', tag=quantum_port_id),
+ dict(scope='vm_id', tag=hashed_device_id)])
+
+ _configure_extensions(lport_obj, mac_address, fixed_ips,
+ port_security_enabled)
+
+ path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid
try:
- resp_obj = do_single_request("PUT", uri, json.dumps(lport_obj),
+ resp_obj = do_single_request("PUT", path, json.dumps(lport_obj),
cluster=cluster)
except NvpApiClient.ResourceNotFound as e:
LOG.error(_("Port or Network not found, Error: %s"), str(e))
- raise exception.PortNotFound(port_id=port_id, net_id=network)
+ raise exception.PortNotFound(port_id=lport_uuid, net_id=lswitch_uuid)
except NvpApiClient.NvpApiException as e:
raise exception.QuantumException()
-
- obj = json.loads(resp_obj)
- obj["port-op-status"] = get_port_status(cluster, network, obj["uuid"])
- return obj
+ result = json.loads(resp_obj)
+    LOG.debug(_("Updated logical port %(result)s on logical switch %(uuid)s"),
+ {'result': result['uuid'], 'uuid': lswitch_uuid})
+ return result
def create_lport(cluster, lswitch_uuid, tenant_id, quantum_port_id,
display_name, device_id, admin_status_enabled,
- mac_address=None, fixed_ips=None):
+ mac_address=None, fixed_ips=None, port_security_enabled=None):
""" Creates a logical port on the assigned logical switch """
# device_id can be longer than 40 so we rehash it
hashed_device_id = hashlib.sha1(device_id).hexdigest()
dict(scope='q_port_id', tag=quantum_port_id),
dict(scope='vm_id', tag=hashed_device_id)],
)
+
+ _configure_extensions(lport_obj, mac_address, fixed_ips,
+ port_security_enabled)
+
path = _build_uri_path(LPORT_RESOURCE, parent_resource_id=lswitch_uuid)
try:
resp_obj = do_single_request("POST", path,