--- /dev/null
+[DATABASE]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# sql_connection = mysql://root:pass@127.0.0.1:3306/midonet_quantum
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main quantum server. (Leave it as is if the database runs on this host.)
+sql_connection = sqlite://
+# Number of database reconnection retries in the event connectivity is lost.
+# Set to -1 for an infinite retry count.
+# sql_max_retries = 10
+# Interval, in seconds, between reconnection attempts if the initial
+# connection to the database fails
+reconnect_interval = 2
+# Enable the use of eventlet's db_pool for MySQL. The flags sql_min_pool_size,
+# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
+# sql_dbpool_enable = False
+# Minimum number of SQL connections to keep open in a pool
+# sql_min_pool_size = 1
+# Maximum number of SQL connections to keep open in a pool
+# sql_max_pool_size = 5
+# Timeout in seconds before idle sql connections are reaped
+# sql_idle_timeout = 3600
+
+[MIDONET]
+# MidoNet API server URI
+# midonet_uri = http://localhost:8080/midonet-api
+
+# MidoNet admin username
+#username = admin
+
+# MidoNet admin password
+#password = passw0rd
+
+# ID of the project that the MidoNet admin user belongs to
+#project_id = 77777777-7777-7777-7777-777777777777
+
+# Virtual provider router ID
+#provider_router_id = 00112233-0011-0011-0011-001122334455
+
+# Virtual metadata router ID
+#metadata_router_id = ffeeddcc-ffee-ffee-ffee-ffeeddccbbaa
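+
+# Note: both provider_router_id and metadata_router_id must be set for the
+# plugin to start outside of its internal 'dev' mode (see
+# MidonetPluginV2.__init__ in plugin.py).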
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2012 Midokura Japan K.K.
+# Copyright (C) 2013 Midokura PTE LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2012 Midokura Japan K.K.
+# Copyright (C) 2013 Midokura PTE LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Tomoe Sugihara, Midokura Japan KK
+
+from oslo.config import cfg
+
+midonet_opts = [
+ cfg.StrOpt('midonet_uri', default='http://localhost:8080/midonet-api',
+ help=_('MidoNet API server URI.')),
+ cfg.StrOpt('username', default='admin',
+ help=_('MidoNet admin username.')),
+ cfg.StrOpt('password', default='passw0rd',
+ secret=True,
+ help=_('MidoNet admin password.')),
+ cfg.StrOpt('project_id',
+ default='77777777-7777-7777-7777-777777777777',
+               help=_('ID of the project that the MidoNet admin user '
+                      'belongs to.')),
+ cfg.StrOpt('provider_router_id',
+ default=None,
+ help=_('Virtual provider router ID.')),
+ cfg.StrOpt('metadata_router_id',
+ default=None,
+ help=_('Virtual metadata router ID.')),
+ cfg.StrOpt('mode',
+ default='dev',
+ help=_('Operational mode. Internal dev use only.'))
+]
+
+
+cfg.CONF.register_opts(midonet_opts, "MIDONET")
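+
+# The plugin reads these values through the "MIDONET" option group, e.g.:
+#
+#     midonet_conf = cfg.CONF.MIDONET
+#     midonet_uri = midonet_conf.midonet_uri
+#
+# (see MidonetPluginV2.__init__ in plugin.py)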
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2012 Midokura Japan K.K.
+# Copyright (C) 2013 Midokura PTE LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Tomoe Sugihara, Midokura Japan KK
+# @author: Ryu Ishimoto, Midokura Japan KK
+
+
+from quantum.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+PREFIX = 'OS_SG_'
+SUFFIX_IN = '_IN'
+SUFFIX_OUT = '_OUT'
+OS_ROUTER_IN_CHAIN_NAME_FORMAT = 'OS_ROUTER_IN_%s'
+OS_ROUTER_OUT_CHAIN_NAME_FORMAT = 'OS_ROUTER_OUT_%s'
+NAME_IDENTIFIABLE_PREFIX_LEN = len(PREFIX) + 36 # 36 = length of uuid
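+# Naming scheme used below: sg_label() yields 'OS_SG_<sg uuid>_<sg name>',
+# which is also used as the port group name, and chain_names() appends
+# '_IN'/'_OUT' for the two chains. The first NAME_IDENTIFIABLE_PREFIX_LEN
+# characters are enough to identify a security group regardless of its name.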
+
+
+def sg_label(sg_id, sg_name):
+ """Construct the security group ID used as chain identifier in MidoNet."""
+ return PREFIX + str(sg_id) + '_' + sg_name
+
+port_group_name = sg_label
+
+
+def chain_names(sg_id, sg_name):
+ """Get inbound and outbound chain names."""
+ prefix = sg_label(sg_id, sg_name)
+ in_chain_name = prefix + SUFFIX_IN
+ out_chain_name = prefix + SUFFIX_OUT
+ return {'in': in_chain_name, 'out': out_chain_name}
+
+
+class ChainManager(object):
+
+ def __init__(self, mido_api):
+ self.mido_api = mido_api
+
+ def create_for_sg(self, tenant_id, sg_id, sg_name):
+ """Create a new chain for security group.
+
+ Creating a security group creates a pair of chains in MidoNet, one for
+ inbound and the other for outbound.
+ """
+ LOG.debug(_("ChainManager.create_for_sg called: "
+ "tenant_id=%(tenant_id)s sg_id=%(sg_id)s "
+ "sg_name=%(sg_name)s "),
+ {'tenant_id': tenant_id, 'sg_id': sg_id, 'sg_name': sg_name})
+
+ cnames = chain_names(sg_id, sg_name)
+ self.mido_api.add_chain().tenant_id(tenant_id).name(
+ cnames['in']).create()
+ self.mido_api.add_chain().tenant_id(tenant_id).name(
+ cnames['out']).create()
+
+ def delete_for_sg(self, tenant_id, sg_id, sg_name):
+ """Delete a chain mapped to a security group.
+
+ Delete a SG means deleting all the chains (inbound and outbound)
+ associated with the SG in MidoNet.
+ """
+ LOG.debug(_("ChainManager.delete_for_sg called: "
+ "tenant_id=%(tenant_id)s sg_id=%(sg_id)s "
+ "sg_name=%(sg_name)s "),
+ {'tenant_id': tenant_id, 'sg_id': sg_id, 'sg_name': sg_name})
+
+ cnames = chain_names(sg_id, sg_name)
+ chains = self.mido_api.get_chains({'tenant_id': tenant_id})
+ for c in chains:
+ if c.get_name() == cnames['in'] or c.get_name() == cnames['out']:
+ LOG.debug(_('ChainManager.delete_for_sg: deleting chain=%r'),
+ c)
+ c.delete()
+
+ def get_router_chains(self, tenant_id, router_id):
+ """Get router chains.
+
+        Returns a dictionary of in/out chain resources keyed by 'in' and
+        'out' respectively, for the given tenant_id and router_id.
+ """
+ LOG.debug(_("ChainManager.get_router_chains called: "
+ "tenant_id=%(tenant_id)s router_id=%(router_id)s"),
+ {'tenant_id': tenant_id, 'router_id': router_id})
+
+ router_chain_names = self._get_router_chain_names(router_id)
+ chains = {}
+ for c in self.mido_api.get_chains({'tenant_id': tenant_id}):
+ if c.get_name() == router_chain_names['in']:
+ chains['in'] = c
+ elif c.get_name() == router_chain_names['out']:
+ chains['out'] = c
+ return chains
+
+ def create_router_chains(self, tenant_id, router_id):
+ """Create a new chain on a router.
+
+ Creates chains for the router and returns the same dictionary as
+ get_router_chains() returns.
+ """
+ LOG.debug(_("ChainManager.create_router_chains called: "
+ "tenant_id=%(tenant_id)s router_id=%(router_id)s"),
+ {'tenant_id': tenant_id, 'router_id': router_id})
+
+ chains = {}
+ router_chain_names = self._get_router_chain_names(router_id)
+ chains['in'] = self.mido_api.add_chain().tenant_id(tenant_id).name(
+ router_chain_names['in']).create()
+
+ chains['out'] = self.mido_api.add_chain().tenant_id(tenant_id).name(
+ router_chain_names['out']).create()
+ return chains
+
+ def get_sg_chains(self, tenant_id, sg_id):
+ """Get a list of chains mapped to a security group."""
+ LOG.debug(_("ChainManager.get_sg_chains called: "
+ "tenant_id=%(tenant_id)s sg_id=%(sg_id)s"),
+ {'tenant_id': tenant_id, 'sg_id': sg_id})
+
+ cnames = chain_names(sg_id, sg_name='')
+ chain_name_prefix_for_id = cnames['in'][:NAME_IDENTIFIABLE_PREFIX_LEN]
+ chains = {}
+
+ for c in self.mido_api.get_chains({'tenant_id': tenant_id}):
+ if c.get_name().startswith(chain_name_prefix_for_id):
+ if c.get_name().endswith(SUFFIX_IN):
+ chains['in'] = c
+ if c.get_name().endswith(SUFFIX_OUT):
+ chains['out'] = c
+ assert 'in' in chains
+ assert 'out' in chains
+ return chains
+
+ def _get_router_chain_names(self, router_id):
+ LOG.debug(_("ChainManager.get_router_chain_names called: "
+ "router_id=%(router_id)s"), {'router_id': router_id})
+
+ in_name = OS_ROUTER_IN_CHAIN_NAME_FORMAT % router_id
+ out_name = OS_ROUTER_OUT_CHAIN_NAME_FORMAT % router_id
+ router_chain_names = {'in': in_name, 'out': out_name}
+ return router_chain_names
+
+
+class PortGroupManager(object):
+
+ def __init__(self, mido_api):
+ self.mido_api = mido_api
+
+ def create(self, tenant_id, sg_id, sg_name):
+ LOG.debug(_("PortGroupManager.create called: "
+ "tenant_id=%(tenant_id)s sg_id=%(sg_id)s "
+ "sg_name=%(sg_name)s"),
+ {'tenant_id': tenant_id, 'sg_id': sg_id, 'sg_name': sg_name})
+ pg_name = port_group_name(sg_id, sg_name)
+ self.mido_api.add_port_group().tenant_id(tenant_id).name(
+ pg_name).create()
+
+ def delete(self, tenant_id, sg_id, sg_name):
+ LOG.debug(_("PortGroupManager.delete called: "
+ "tenant_id=%(tenant_id)s sg_id=%(sg_id)s "
+ "sg_name=%(sg_name)s"),
+ {'tenant_id': tenant_id, 'sg_id': sg_id, 'sg_name': sg_name})
+ pg_name = port_group_name(sg_id, sg_name)
+ pgs = self.mido_api.get_port_groups({'tenant_id': tenant_id})
+ for pg in pgs:
+ if pg.get_name() == pg_name:
+ LOG.debug(_("PortGroupManager.delete: deleting pg=%r"), pg)
+ pg.delete()
+
+ def get_for_sg(self, tenant_id, sg_id):
+ LOG.debug(_("PortGroupManager.get_for_sg called: "
+ "tenant_id=%(tenant_id)s sg_id=%(sg_id)s"),
+ {'tenant_id': tenant_id, 'sg_id': sg_id})
+
+ pg_name_prefix = port_group_name(
+ sg_id, sg_name='')[:NAME_IDENTIFIABLE_PREFIX_LEN]
+ port_groups = self.mido_api.get_port_groups({'tenant_id': tenant_id})
+ for pg in port_groups:
+ if pg.get_name().startswith(pg_name_prefix):
+ LOG.debug(_("PortGroupManager.get_for_sg exiting: pg=%r"), pg)
+ return pg
+ return None
+
+
+class RuleManager(object):
+
+ OS_SG_KEY = 'os_sg_rule_id'
+
+ def __init__(self, mido_api):
+ self.mido_api = mido_api
+ self.chain_manager = ChainManager(mido_api)
+ self.pg_manager = PortGroupManager(mido_api)
+
+ def _properties(self, os_sg_rule_id):
+ return {self.OS_SG_KEY: str(os_sg_rule_id)}
+
+ def create_for_sg_rule(self, rule):
+ LOG.debug(_("RuleManager.create_for_sg_rule called: rule=%r"), rule)
+
+ direction = rule['direction']
+ protocol = rule['protocol']
+ port_range_max = rule['port_range_max']
+ rule_id = rule['id']
+ ethertype = rule['ethertype']
+ security_group_id = rule['security_group_id']
+ source_group_id = rule['source_group_id']
+ source_ip_prefix = rule['source_ip_prefix'] # watch out. not validated
+ tenant_id = rule['tenant_id']
+ port_range_min = rule['port_range_min']
+ external_id = rule['external_id']
+
+ # construct a corresponding rule
+ tp_src_start = tp_src_end = None
+ tp_dst_start = tp_dst_end = None
+        nw_src_address = None
+        nw_src_length = None
+        nw_proto = None  # stays None when the protocol is not set
+        port_group_id = None
+
+ # handle source
+        if source_ip_prefix is not None:
+            nw_src_address, nw_src_length = source_ip_prefix.split('/')
+        elif source_group_id is not None:  # security group as a source
+ source_pg = self.pg_manager.get_for_sg(tenant_id, source_group_id)
+ port_group_id = source_pg.get_id()
+ else:
+ raise Exception(_("Don't know what to do with rule=%r"), rule)
+
+ # dst ports
+ tp_dst_start, tp_dst_end = port_range_min, port_range_max
+
+        # protocol (IANA protocol numbers)
+        if protocol == 'tcp':
+            nw_proto = 6
+        elif protocol == 'udp':
+            nw_proto = 17
+        elif protocol == 'icmp':
+            nw_proto = 1
+            # ICMP type and code are carried in the repurposed port range
+            # fields of the security group rule.
+            icmp_type = port_range_min
+            icmp_code = port_range_max
+
+ # translate -1(wildcard in OS) to midonet wildcard
+ if icmp_type == -1:
+ icmp_type = None
+ if icmp_code == -1:
+ icmp_code = None
+
+ # set data for midonet rule
+ tp_src_start = tp_src_end = icmp_type
+ tp_dst_start = tp_dst_end = icmp_code
+
+ chains = self.chain_manager.get_sg_chains(tenant_id, security_group_id)
+ chain = None
+ if direction == 'egress':
+ chain = chains['in']
+ elif direction == 'ingress':
+ chain = chains['out']
+ else:
+ raise Exception(_("Don't know what to do with rule=%r"), rule)
+
+ # create an accept rule
+ properties = self._properties(rule_id)
+ LOG.debug(_("RuleManager.create_for_sg_rule: adding accept rule "
+ "%(rule_id) in portgroup %(port_group_id)s"),
+ {'rule_id': rule_id, 'port_group_id': port_group_id})
+ chain.add_rule().port_group(port_group_id).type('accept').nw_proto(
+ nw_proto).nw_src_address(nw_src_address).nw_src_length(
+ nw_src_length).tp_src_start(tp_src_start).tp_src_end(
+ tp_src_end).tp_dst_start(tp_dst_start).tp_dst_end(
+ tp_dst_end).properties(properties).create()
+
+ def delete_for_sg_rule(self, rule):
+ LOG.debug(_("RuleManager.delete_for_sg_rule called: rule=%r"), rule)
+
+ tenant_id = rule['tenant_id']
+ security_group_id = rule['security_group_id']
+ rule_id = rule['id']
+
+ properties = self._properties(rule_id)
+ # search for the chains to find the rule to delete
+ chains = self.chain_manager.get_sg_chains(tenant_id, security_group_id)
+ for c in chains['in'], chains['out']:
+ rules = c.get_rules()
+ for r in rules:
+ if r.get_properties() == properties:
+ LOG.debug(_("RuleManager.delete_for_sg_rule: deleting "
+ "rule %r"), r)
+ r.delete()
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2012 Midokura Japan K.K.
+# Copyright (C) 2013 Midokura PTE LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Takaaki Suzuki, Midokura Japan KK
+# @author: Tomoe Sugihara, Midokura Japan KK
+# @author: Ryu Ishimoto, Midokura Japan KK
+
+from midonetclient import api
+from oslo.config import cfg
+from webob import exc as w_exc
+
+from quantum.common import exceptions as q_exc
+from quantum.common.utils import find_config_file
+from quantum.db import api as db
+from quantum.db import db_base_plugin_v2
+from quantum.db import l3_db
+from quantum.db import models_v2
+from quantum.db import securitygroups_db
+from quantum.extensions import securitygroup as ext_sg
+from quantum.openstack.common import log as logging
+from quantum.plugins.midonet import config
+from quantum.plugins.midonet import midonet_lib
+
+
+LOG = logging.getLogger(__name__)
+
+OS_TENANT_ROUTER_RULE_KEY = 'OS_TENANT_ROUTER_RULE'
+OS_FLOATING_IP_RULE_KEY = 'OS_FLOATING_IP'
+SNAT_RULE = 'SNAT'
+SNAT_RULE_PROPERTY = {OS_TENANT_ROUTER_RULE_KEY: SNAT_RULE}
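+# Rules created by the plugin carry these properties so that they can be
+# looked up and deleted later, e.g. when the router gateway or a floating IP
+# association is cleared.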
+
+
+class MidonetResourceNotFound(q_exc.NotFound):
+ message = _('MidoNet %(resource_type)s %(id)s could not be found')
+
+
+class MidonetPluginException(q_exc.QuantumException):
+ message = _("%(msg)s")
+
+
+class MidonetPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
+ l3_db.L3_NAT_db_mixin,
+ securitygroups_db.SecurityGroupDbMixin):
+
+ supported_extension_aliases = ['router', 'security-group']
+
+ def __init__(self):
+
+ # Read config values
+ midonet_conf = cfg.CONF.MIDONET
+ midonet_uri = midonet_conf.midonet_uri
+ admin_user = midonet_conf.username
+ admin_pass = midonet_conf.password
+ admin_project_id = midonet_conf.project_id
+ provider_router_id = midonet_conf.provider_router_id
+ metadata_router_id = midonet_conf.metadata_router_id
+ mode = midonet_conf.mode
+
+ self.mido_api = api.MidonetApi(midonet_uri, admin_user,
+ admin_pass,
+ project_id=admin_project_id)
+
+ # get MidoNet provider router and metadata router
+ if provider_router_id and metadata_router_id:
+ self.provider_router = self.mido_api.get_router(provider_router_id)
+ self.metadata_router = self.mido_api.get_router(metadata_router_id)
+
+ # for dev purpose only
+ elif mode == 'dev':
+            msg = _('No provider router or metadata router ID found, '
+                    'but skipping the check since the plugin is running '
+                    'in dev mode.')
+ LOG.debug(msg)
+ else:
+ msg = _('provider_router_id and metadata_router_id '
+ 'should be configured in the plugin config file')
+ LOG.exception(msg)
+ raise MidonetPluginException(msg=msg)
+
+ self.chain_manager = midonet_lib.ChainManager(self.mido_api)
+ self.pg_manager = midonet_lib.PortGroupManager(self.mido_api)
+ self.rule_manager = midonet_lib.RuleManager(self.mido_api)
+
+ db.configure_db()
+
+ def create_subnet(self, context, subnet):
+ """Create Quantum subnet.
+
+        Creates a Quantum subnet and a DHCP entry in the MidoNet bridge.
+ """
+ LOG.debug(_("MidonetPluginV2.create_subnet called: subnet=%r"), subnet)
+
+ if subnet['subnet']['ip_version'] == 6:
+ raise q_exc.NotImplementedError(
+ _("MidoNet doesn't support IPv6."))
+
+ net = super(MidonetPluginV2, self).get_network(
+ context, subnet['subnet']['network_id'], fields=None)
+ if net['subnets']:
+ raise q_exc.NotImplementedError(
+ _("MidoNet doesn't support multiple subnets "
+ "on the same network."))
+
+ session = context.session
+ with session.begin(subtransactions=True):
+ sn_entry = super(MidonetPluginV2, self).create_subnet(context,
+ subnet)
+ try:
+ bridge = self.mido_api.get_bridge(sn_entry['network_id'])
+ except w_exc.HTTPNotFound:
+ raise MidonetResourceNotFound(resource_type='Bridge',
+ id=sn_entry['network_id'])
+
+ gateway_ip = subnet['subnet']['gateway_ip']
+ network_address, prefix = subnet['subnet']['cidr'].split('/')
+ bridge.add_dhcp_subnet().default_gateway(gateway_ip).subnet_prefix(
+ network_address).subnet_length(prefix).create()
+
+ # If the network is external, link the bridge to MidoNet provider
+ # router
+ self._extend_network_dict_l3(context, net)
+ if net['router:external']:
+ gateway_ip = sn_entry['gateway_ip']
+ network_address, length = sn_entry['cidr'].split('/')
+
+            # create an interior port in the MidoNet provider router
+ in_port = self.provider_router.add_interior_port()
+ pr_port = in_port.port_address(gateway_ip).network_address(
+ network_address).network_length(length).create()
+
+            # create an interior port in the bridge, then link
+ # it to the provider router.
+ br_port = bridge.add_interior_port().create()
+ pr_port.link(br_port.get_id())
+
+ # add a route for the subnet in the provider router
+ self.provider_router.add_route().type(
+ 'Normal').src_network_addr('0.0.0.0').src_network_length(
+ 0).dst_network_addr(
+ network_address).dst_network_length(
+ length).weight(100).next_hop_port(
+ pr_port.get_id()).create()
+
+ LOG.debug(_("MidonetPluginV2.create_subnet exiting: sn_entry=%r"),
+ sn_entry)
+ return sn_entry
+
+ def get_subnet(self, context, id, fields=None):
+ """Get Quantum subnet.
+
+        Retrieves a Quantum subnet record and validates it against the DHCP
+        entry data stored in MidoNet.
+ """
+ LOG.debug(_("MidonetPluginV2.get_subnet called: id=%(id)s "
+ "fields=%(fields)s"), {'id': id, 'fields': fields})
+
+ qsubnet = super(MidonetPluginV2, self).get_subnet(context, id)
+ bridge_id = qsubnet['network_id']
+ try:
+ bridge = self.mido_api.get_bridge(bridge_id)
+ except w_exc.HTTPNotFound:
+ raise MidonetResourceNotFound(resource_type='Bridge',
+ id=bridge_id)
+
+ # get dhcp subnet data from MidoNet bridge.
+ dhcps = bridge.get_dhcp_subnets()
+ b_network_address = dhcps[0].get_subnet_prefix()
+ b_prefix = dhcps[0].get_subnet_length()
+
+ # Validate against quantum database.
+ network_address, prefix = qsubnet['cidr'].split('/')
+ if network_address != b_network_address or int(prefix) != b_prefix:
+ raise MidonetResourceNotFound(resource_type='DhcpSubnet',
+ id=qsubnet['cidr'])
+
+ LOG.debug(_("MidonetPluginV2.get_subnet exiting: qsubnet=%s"), qsubnet)
+ return qsubnet
+
+ def get_subnets(self, context, filters=None, fields=None):
+ """List Quantum subnets.
+
+ Retrieves Quantum subnets with some fields populated by the data
+ stored in MidoNet.
+ """
+ LOG.debug(_("MidonetPluginV2.get_subnets called: filters=%(filters)r, "
+ "fields=%(fields)r"),
+ {'filters': filters, 'fields': fields})
+ subnets = super(MidonetPluginV2, self).get_subnets(context, filters,
+ fields)
+ for sn in subnets:
+            if 'network_id' not in sn:
+ continue
+ try:
+ bridge = self.mido_api.get_bridge(sn['network_id'])
+ except w_exc.HTTPNotFound:
+ raise MidonetResourceNotFound(resource_type='Bridge',
+ id=sn['network_id'])
+
+ # TODO(tomoe): dedupe this part.
+ # get dhcp subnet data from MidoNet bridge.
+ dhcps = bridge.get_dhcp_subnets()
+ b_network_address = dhcps[0].get_subnet_prefix()
+ b_prefix = dhcps[0].get_subnet_length()
+
+ # Validate against quantum database.
+ if sn.get('cidr'):
+ network_address, prefix = sn['cidr'].split('/')
+ if network_address != b_network_address or int(
+ prefix) != b_prefix:
+ raise MidonetResourceNotFound(resource_type='DhcpSubnet',
+ id=sn['cidr'])
+
+ LOG.debug(_("MidonetPluginV2.create_subnet exiting"))
+ return subnets
+
+ def delete_subnet(self, context, id):
+ """Delete Quantum subnet.
+
+        Delete a Quantum subnet and the corresponding DHCP entry in the
+        MidoNet bridge.
+ """
+ LOG.debug(_("MidonetPluginV2.delete_subnet called: id=%s"), id)
+ subnet = super(MidonetPluginV2, self).get_subnet(context, id,
+ fields=None)
+ net = super(MidonetPluginV2, self).get_network(context,
+ subnet['network_id'],
+ fields=None)
+ bridge_id = subnet['network_id']
+ try:
+ bridge = self.mido_api.get_bridge(bridge_id)
+ except w_exc.HTTPNotFound:
+ raise MidonetResourceNotFound(resource_type='Bridge', id=bridge_id)
+
+ dhcp = bridge.get_dhcp_subnets()
+ dhcp[0].delete()
+
+ # If the network is external, clean up routes, links, ports.
+ self._extend_network_dict_l3(context, net)
+ if net['router:external']:
+ # Delete routes and unlink the router and the bridge.
+ routes = self.provider_router.get_routes()
+
+ bridge_ports_to_delete = []
+ for p in self.provider_router.get_peer_ports():
+ if p.get_device_id() == bridge.get_id():
+ bridge_ports_to_delete.append(p)
+
+ for p in bridge.get_peer_ports():
+ if p.get_device_id() == self.provider_router.get_id():
+                    # delete the routes going to the bridge
+ for r in routes:
+ if r.get_next_hop_port() == p.get_id():
+ r.delete()
+ p.unlink()
+ p.delete()
+
+            # delete the bridge ports that peered with the provider router
+            for p in bridge_ports_to_delete:
+                p.delete()
+
+ super(MidonetPluginV2, self).delete_subnet(context, id)
+ LOG.debug(_("MidonetPluginV2.delete_subnet exiting"))
+
+ def create_network(self, context, network):
+ """Create Quantum network.
+
+ Create a new Quantum network and its corresponding MidoNet bridge.
+ """
+ LOG.debug(_('MidonetPluginV2.create_network called: network=%r'),
+ network)
+
+ if network['network']['admin_state_up'] is False:
+            LOG.warning(_('Ignoring admin_state_up=False for network=%r. '
+                          'Overriding with True.'), network)
+ network['network']['admin_state_up'] = True
+
+ tenant_id = self._get_tenant_id_for_create(context, network['network'])
+
+ self._ensure_default_security_group(context, tenant_id)
+
+ session = context.session
+ with session.begin(subtransactions=True):
+ bridge = self.mido_api.add_bridge().name(
+ network['network']['name']).tenant_id(tenant_id).create()
+
+ # Set MidoNet bridge ID to the quantum DB entry
+ network['network']['id'] = bridge.get_id()
+ net = super(MidonetPluginV2, self).create_network(context, network)
+
+ # to handle l3 related data in DB
+ self._process_l3_create(context, network['network'], net['id'])
+ self._extend_network_dict_l3(context, net)
+ LOG.debug(_("MidonetPluginV2.create_network exiting: net=%r"), net)
+ return net
+
+ def update_network(self, context, id, network):
+ """Update Quantum network.
+
+ Update an existing Quantum network and its corresponding MidoNet
+ bridge.
+ """
+ LOG.debug(_("MidonetPluginV2.update_network called: id=%(id)r, "
+ "network=%(network)r"), {'id': id, 'network': network})
+
+ # Reject admin_state_up=False
+        if network['network'].get('admin_state_up') is False:
+ raise q_exc.NotImplementedError(_('admin_state_up=False '
+ 'networks are not '
+ 'supported.'))
+
+ session = context.session
+ with session.begin(subtransactions=True):
+ net = super(MidonetPluginV2, self).update_network(
+ context, id, network)
+ try:
+ bridge = self.mido_api.get_bridge(id)
+ except w_exc.HTTPNotFound:
+ raise MidonetResourceNotFound(resource_type='Bridge', id=id)
+ bridge.name(net['name']).update()
+
+ self._extend_network_dict_l3(context, net)
+ LOG.debug(_("MidonetPluginV2.update_network exiting: net=%r"), net)
+ return net
+
+ def get_network(self, context, id, fields=None):
+ """Get Quantum network.
+
+ Retrieves a Quantum network and its corresponding MidoNet bridge.
+ """
+ LOG.debug(_("MidonetPluginV2.get_network called: id=%(id)r, "
+ "fields=%(fields)r"), {'id': id, 'fields': fields})
+
+ # NOTE: Get network data with all fields (fields=None) for
+ # _extend_network_dict_l3() method, which needs 'id' field
+ qnet = super(MidonetPluginV2, self).get_network(context, id, None)
+ try:
+ self.mido_api.get_bridge(id)
+ except w_exc.HTTPNotFound:
+ raise MidonetResourceNotFound(resource_type='Bridge', id=id)
+
+ self._extend_network_dict_l3(context, qnet)
+ LOG.debug(_("MidonetPluginV2.get_network exiting: qnet=%r"), qnet)
+ return self._fields(qnet, fields)
+
+ def get_networks(self, context, filters=None, fields=None):
+ """List quantum networks and verify that all exist in MidoNet."""
+ LOG.debug(_("MidonetPluginV2.get_networks called: "
+ "filters=%(filters)r, fields=%(fields)r"),
+ {'filters': filters, 'fields': fields})
+
+ # NOTE: Get network data with all fields (fields=None) for
+ # _extend_network_dict_l3() method, which needs 'id' field
+ qnets = super(MidonetPluginV2, self).get_networks(context, filters,
+ None)
+ self.mido_api.get_bridges({'tenant_id': context.tenant_id})
+ for n in qnets:
+ try:
+ self.mido_api.get_bridge(n['id'])
+ except w_exc.HTTPNotFound:
+ raise MidonetResourceNotFound(resource_type='Bridge',
+ id=n['id'])
+ self._extend_network_dict_l3(context, n)
+
+ qnets = self._filter_nets_l3(context, qnets, filters)
+ return [self._fields(net, fields) for net in qnets]
+
+ def delete_network(self, context, id):
+ """Delete a network and its corresponding MidoNet bridge."""
+ LOG.debug(_("MidonetPluginV2.delete_network called: id=%r"), id)
+
+ self.mido_api.get_bridge(id).delete()
+ try:
+ super(MidonetPluginV2, self).delete_network(context, id)
+ except Exception:
+            LOG.error(_('Failed to delete the quantum db entry, but the '
+                        'MidoNet bridge=%r had already been deleted'), id)
+ raise
+
+ def create_port(self, context, port):
+ """Create a L2 port in Quantum/MidoNet."""
+ LOG.debug(_("MidonetPluginV2.create_port called: port=%r"), port)
+
+ is_compute_interface = False
+ port_data = port['port']
+ # get the bridge and create a port on it.
+ try:
+ bridge = self.mido_api.get_bridge(port_data['network_id'])
+ except w_exc.HTTPNotFound:
+ raise MidonetResourceNotFound(resource_type='Bridge',
+ id=port_data['network_id'])
+
+ device_owner = port_data['device_owner']
+
+        if device_owner.startswith('compute:') or device_owner == '':
+ is_compute_interface = True
+ bridge_port = bridge.add_exterior_port().create()
+ elif device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF:
+ bridge_port = bridge.add_interior_port().create()
+ elif (device_owner == l3_db.DEVICE_OWNER_ROUTER_GW or
+ device_owner == l3_db.DEVICE_OWNER_FLOATINGIP):
+
+ # This is a dummy port to make l3_db happy.
+ # This will not be used in MidoNet
+ bridge_port = bridge.add_interior_port().create()
+
+ if bridge_port:
+ # set midonet port id to quantum port id and create a DB record.
+ port_data['id'] = bridge_port.get_id()
+
+ session = context.session
+ with session.begin(subtransactions=True):
+ port_db_entry = super(MidonetPluginV2,
+ self).create_port(context, port)
+ self._extend_port_dict_security_group(context, port_db_entry)
+ if is_compute_interface:
+ # Create a DHCP entry if needed.
+ if 'ip_address' in (port_db_entry['fixed_ips'] or [{}])[0]:
+ # get ip and mac from DB record, assuming one IP address
+ # at most since we only support one subnet per network now.
+ fixed_ip = port_db_entry['fixed_ips'][0]['ip_address']
+ mac = port_db_entry['mac_address']
+ # create dhcp host entry under the bridge.
+ dhcp_subnets = bridge.get_dhcp_subnets()
+ if dhcp_subnets:
+ dhcp_subnets[0].add_dhcp_host().ip_addr(
+ fixed_ip).mac_addr(mac).create()
+ LOG.debug(_("MidonetPluginV2.create_port exiting: port_db_entry=%r"),
+ port_db_entry)
+ return port_db_entry
+
+ def update_port(self, context, id, port):
+ """Update port."""
+ LOG.debug(_("MidonetPluginV2.update_port called: id=%(id)s "
+ "port=%(port)r"), {'id': id, 'port': port})
+ return super(MidonetPluginV2, self).update_port(context, id, port)
+
+ def get_port(self, context, id, fields=None):
+ """Retrieve port."""
+ LOG.debug(_("MidonetPluginV2.get_port called: id=%(id)s "
+ "fields=%(fields)r"), {'id': id, 'fields': fields})
+
+ # get the quantum port from DB.
+ port_db_entry = super(MidonetPluginV2, self).get_port(context,
+ id, fields)
+ self._extend_port_dict_security_group(context, port_db_entry)
+
+ # verify that corresponding port exists in MidoNet.
+ try:
+ self.mido_api.get_port(id)
+ except w_exc.HTTPNotFound:
+ raise MidonetResourceNotFound(resource_type='Port', id=id)
+
+ LOG.debug(_("MidonetPluginV2.get_port exiting: port_db_entry=%r"),
+ port_db_entry)
+ return port_db_entry
+
+ def get_ports(self, context, filters=None, fields=None):
+ """List quantum ports and verify that they exist in MidoNet."""
+ LOG.debug(_("MidonetPluginV2.get_ports called: filters=%(filters)s "
+ "fields=%(fields)r"),
+ {'filters': filters, 'fields': fields})
+ ports_db_entry = super(MidonetPluginV2, self).get_ports(context,
+ filters,
+ fields)
+ if ports_db_entry:
+ try:
+ for port in ports_db_entry:
+ self.mido_api.get_port(port['id'])
+ self._extend_port_dict_security_group(context, port)
+ except w_exc.HTTPNotFound:
+ raise MidonetResourceNotFound(resource_type='Port',
+ id=port['id'])
+ return ports_db_entry
+
+ def delete_port(self, context, id, l3_port_check=True):
+ """Delete a quantum port and corresponding MidoNet bridge port."""
+ LOG.debug(_("MidonetPluginV2.delete_port called: id=%(id)s "
+ "l3_port_check=%(l3_port_check)r"),
+ {'id': id, 'l3_port_check': l3_port_check})
+ # if needed, check to see if this is a port owned by
+        # an l3 router. If so, we should prevent deletion.
+ if l3_port_check:
+ self.prevent_l3_port_deletion(context, id)
+
+ session = context.session
+ with session.begin(subtransactions=True):
+ port_db_entry = super(MidonetPluginV2, self).get_port(context,
+ id, None)
+ bridge = self.mido_api.get_bridge(port_db_entry['network_id'])
+ # Clean up dhcp host entry if needed.
+ if 'ip_address' in (port_db_entry['fixed_ips'] or [{}])[0]:
+ # get ip and mac from DB record.
+ ip = port_db_entry['fixed_ips'][0]['ip_address']
+ mac = port_db_entry['mac_address']
+
+                # delete the dhcp host entry under the bridge.
+ dhcp_subnets = bridge.get_dhcp_subnets()
+ if dhcp_subnets:
+ for dh in dhcp_subnets[0].get_dhcp_hosts():
+ if dh.get_mac_addr() == mac and dh.get_ip_addr() == ip:
+ dh.delete()
+
+ self.mido_api.get_port(id).delete()
+ return super(MidonetPluginV2, self).delete_port(context, id)
+
+ #
+ # L3 APIs.
+ #
+
+ def create_router(self, context, router):
+ LOG.debug(_("MidonetPluginV2.create_router called: router=%r"), router)
+
+ if router['router']['admin_state_up'] is False:
+            LOG.warning(_('Ignoring admin_state_up=False for router=%r. '
+                          'Overriding with True.'), router)
+ router['router']['admin_state_up'] = True
+
+ tenant_id = self._get_tenant_id_for_create(context, router['router'])
+ session = context.session
+ with session.begin(subtransactions=True):
+ mrouter = self.mido_api.add_router().name(
+ router['router']['name']).tenant_id(tenant_id).create()
+ qrouter = super(MidonetPluginV2, self).create_router(context,
+ router)
+
+ chains = self.chain_manager.create_router_chains(tenant_id,
+ mrouter.get_id())
+
+ # set chains to in/out filters
+ mrouter.inbound_filter_id(
+ chains['in'].get_id()).outbound_filter_id(
+ chains['out'].get_id()).update()
+
+ # get entry from the DB and update 'id' with MidoNet router id.
+ qrouter_entry = self._get_router(context, qrouter['id'])
+ qrouter['id'] = mrouter.get_id()
+ qrouter_entry.update(qrouter)
+
+ # link to metadata router
+ in_port = self.metadata_router.add_interior_port()
+ mdr_port = in_port.network_address('169.254.255.0').network_length(
+ 30).port_address('169.254.255.1').create()
+
+ tr_port = mrouter.add_interior_port().network_address(
+ '169.254.255.0').network_length(30).port_address(
+ '169.254.255.2').create()
+ mdr_port.link(tr_port.get_id())
+
+ # forward metadata traffic to metadata router
+ mrouter.add_route().type('Normal').src_network_addr(
+ '0.0.0.0').src_network_length(0).dst_network_addr(
+ '169.254.169.254').dst_network_length(32).weight(
+ 100).next_hop_port(tr_port.get_id()).create()
+
+ LOG.debug(_("MidonetPluginV2.create_router exiting: qrouter=%r"),
+ qrouter)
+ return qrouter
+
+ def update_router(self, context, id, router):
+ LOG.debug(_("MidonetPluginV2.update_router called: id=%(id)s "
+ "router=%(router)r"), router)
+
+ if router['router'].get('admin_state_up') is False:
+ raise q_exc.NotImplementedError(_('admin_state_up=False '
+ 'routers are not '
+ 'supported.'))
+
+ op_gateway_set = False
+ op_gateway_clear = False
+
+ # figure out which operation it is in
+ if ('external_gateway_info' in router['router'] and
+ 'network_id' in router['router']['external_gateway_info']):
+ op_gateway_set = True
+ elif ('external_gateway_info' in router['router'] and
+ router['router']['external_gateway_info'] == {}):
+ op_gateway_clear = True
+
+ session = context.session
+ with session.begin(subtransactions=True):
+
+ qrouter = super(MidonetPluginV2, self).update_router(context, id,
+ router)
+
+ changed_name = router['router'].get('name')
+ if changed_name:
+ self.mido_api.get_router(id).name(changed_name).update()
+
+ tenant_router = self.mido_api.get_router(id)
+ if op_gateway_set:
+ # find a qport with the network_id for the router
+ qports = super(MidonetPluginV2, self).get_ports(
+ context, {'device_id': [id],
+ 'device_owner': ['network:router_gateway']})
+ assert len(qports) == 1
+ qport = qports[0]
+ snat_ip = qport['fixed_ips'][0]['ip_address']
+
+ in_port = self.provider_router.add_interior_port()
+ pr_port = in_port.network_address(
+ '169.254.255.0').network_length(30).port_address(
+ '169.254.255.1').create()
+
+ # Create a port in the tenant router
+ tr_port = tenant_router.add_interior_port().network_address(
+ '169.254.255.0').network_length(30).port_address(
+ '169.254.255.2').create()
+
+ # Link them
+ pr_port.link(tr_port.get_id())
+
+ # Add a route for snat_ip to bring it down to tenant
+ self.provider_router.add_route().type(
+ 'Normal').src_network_addr('0.0.0.0').src_network_length(
+ 0).dst_network_addr(snat_ip).dst_network_length(
+ 32).weight(100).next_hop_port(
+ pr_port.get_id()).create()
+
+ # Add default route to uplink in the tenant router
+ tenant_router.add_route().type('Normal').src_network_addr(
+ '0.0.0.0').src_network_length(0).dst_network_addr(
+ '0.0.0.0').dst_network_length(0).weight(
+ 100).next_hop_port(tr_port.get_id()).create()
+
+ # ADD SNAT(masquerade) rules
+ chains = self.chain_manager.get_router_chains(
+ tenant_router.get_tenant_id(), tenant_router.get_id())
+
+ chains['in'].add_rule().nw_dst_address(snat_ip).nw_dst_length(
+ 32).type('rev_snat').flow_action('accept').in_ports(
+ [tr_port.get_id()]).properties(
+ SNAT_RULE_PROPERTY).position(1).create()
+
+ nat_targets = []
+ nat_targets.append(
+ {'addressFrom': snat_ip, 'addressTo': snat_ip,
+ 'portFrom': 1, 'portTo': 65535})
+
+ chains['out'].add_rule().type('snat').flow_action(
+ 'accept').nat_targets(nat_targets).out_ports(
+ [tr_port.get_id()]).properties(
+ SNAT_RULE_PROPERTY).position(1).create()
+
+ if op_gateway_clear:
+ # delete the port that is connected to provider router
+ for p in tenant_router.get_ports():
+ if p.get_port_address() == '169.254.255.2':
+ peer_port_id = p.get_peer_id()
+ p.unlink()
+ self.mido_api.get_port(peer_port_id).delete()
+ p.delete()
+
+ # delete default route
+ for r in tenant_router.get_routes():
+ if (r.get_dst_network_addr() == '0.0.0.0' and
+ r.get_dst_network_length() == 0):
+ r.delete()
+
+ # delete SNAT(masquerade) rules
+ chains = self.chain_manager.get_router_chains(
+ tenant_router.get_tenant_id(),
+ tenant_router.get_id())
+
+ for r in chains['in'].get_rules():
+ if OS_TENANT_ROUTER_RULE_KEY in r.get_properties():
+ if r.get_properties()[
+ OS_TENANT_ROUTER_RULE_KEY] == SNAT_RULE:
+ r.delete()
+
+ for r in chains['out'].get_rules():
+ if OS_TENANT_ROUTER_RULE_KEY in r.get_properties():
+ if r.get_properties()[
+ OS_TENANT_ROUTER_RULE_KEY] == SNAT_RULE:
+ r.delete()
+
+ LOG.debug(_("MidonetPluginV2.update_router exiting: qrouter=%r"),
+ qrouter)
+ return qrouter
+
+ def delete_router(self, context, id):
+ LOG.debug(_("MidonetPluginV2.delete_router called: id=%s"), id)
+
+ mrouter = self.mido_api.get_router(id)
+ tenant_id = mrouter.get_tenant_id()
+
+ # unlink from metadata router and delete the interior ports
+ # that connect metadata router and this router.
+ for pp in self.metadata_router.get_peer_ports():
+ if pp.get_device_id() == mrouter.get_id():
+ mdr_port = self.mido_api.get_port(pp.get_peer_id())
+ pp.unlink()
+ pp.delete()
+ mdr_port.delete()
+
+ # delete corresponding chains
+ chains = self.chain_manager.get_router_chains(tenant_id,
+ mrouter.get_id())
+ chains['in'].delete()
+ chains['out'].delete()
+
+ # delete the router
+ mrouter.delete()
+
+ result = super(MidonetPluginV2, self).delete_router(context, id)
+ LOG.debug(_("MidonetPluginV2.delete_router exiting: result=%s"),
+ result)
+ return result
+
+ def get_router(self, context, id, fields=None):
+ LOG.debug(_("MidonetPluginV2.get_router called: id=%(id)s "
+ "fields=%(fields)r"), {'id': id, 'fields': fields})
+ qrouter = super(MidonetPluginV2, self).get_router(context, id, fields)
+
+ try:
+ self.mido_api.get_router(id)
+ except w_exc.HTTPNotFound:
+ raise MidonetResourceNotFound(resource_type='Router', id=id)
+
+ LOG.debug(_("MidonetPluginV2.get_router exiting: qrouter=%r"),
+ qrouter)
+ return qrouter
+
+ def get_routers(self, context, filters=None, fields=None):
+ LOG.debug(_("MidonetPluginV2.get_routers called: filters=%(filters)s "
+ "fields=%(fields)r"),
+ {'filters': filters, 'fields': fields})
+
+ qrouters = super(MidonetPluginV2, self).get_routers(
+ context, filters, fields)
+ for qr in qrouters:
+ try:
+ self.mido_api.get_router(qr['id'])
+ except w_exc.HTTPNotFound:
+ raise MidonetResourceNotFound(resource_type='Router',
+ id=qr['id'])
+ return qrouters
+
+ def add_router_interface(self, context, router_id, interface_info):
+ LOG.debug(_("MidonetPluginV2.add_router_interface called: "
+ "router_id=%(router_id)s "
+ "interface_info=%(interface_info)r"),
+ {'router_id': router_id, 'interface_info': interface_info})
+
+ qport = super(MidonetPluginV2, self).add_router_interface(
+ context, router_id, interface_info)
+
+ # TODO(tomoe): handle a case with 'port' in interface_info
+ if 'subnet_id' in interface_info:
+ subnet_id = interface_info['subnet_id']
+ subnet = self._get_subnet(context, subnet_id)
+
+ gateway_ip = subnet['gateway_ip']
+ network_address, length = subnet['cidr'].split('/')
+
+ # Link the router and the bridge port.
+ mrouter = self.mido_api.get_router(router_id)
+ mrouter_port = mrouter.add_interior_port().port_address(
+ gateway_ip).network_address(
+ network_address).network_length(length).create()
+
+ mbridge_port = self.mido_api.get_port(qport['port_id'])
+ mrouter_port.link(mbridge_port.get_id())
+
+ # Add a route entry to the subnet
+ mrouter.add_route().type('Normal').src_network_addr(
+ '0.0.0.0').src_network_length(0).dst_network_addr(
+ network_address).dst_network_length(length).weight(
+ 100).next_hop_port(mrouter_port.get_id()).create()
+
+ # add a route for the subnet in metadata router; forward
+ # packets destined to the subnet to the tenant router
+ found = False
+ for pp in self.metadata_router.get_peer_ports():
+ if pp.get_device_id() == mrouter.get_id():
+ mdr_port_id = pp.get_peer_id()
+ found = True
+ assert found
+
+ self.metadata_router.add_route().type(
+ 'Normal').src_network_addr('0.0.0.0').src_network_length(
+ 0).dst_network_addr(network_address).dst_network_length(
+ length).weight(100).next_hop_port(mdr_port_id).create()
+
+ LOG.debug(_("MidonetPluginV2.add_router_interface exiting: "
+ "qport=%r"), qport)
+ return qport
+
+ def remove_router_interface(self, context, router_id, interface_info):
+ """Remove interior router ports."""
+ LOG.debug(_("MidonetPluginV2.remove_router_interface called: "
+ "router_id=%(router_id)s "
+ "interface_info=%(interface_info)r"),
+ {'router_id': router_id, 'interface_info': interface_info})
+ if 'port_id' in interface_info:
+
+ mbridge_port = self.mido_api.get_port(interface_info['port_id'])
+ subnet_id = self.get_port(context,
+ interface_info['port_id']
+ )['fixed_ips'][0]['subnet_id']
+
+ subnet = self._get_subnet(context, subnet_id)
+
+ if 'subnet_id' in interface_info:
+
+ subnet_id = interface_info['subnet_id']
+ subnet = self._get_subnet(context, subnet_id)
+ network_id = subnet['network_id']
+
+ # find a quantum port for the network
+ rport_qry = context.session.query(models_v2.Port)
+ ports = rport_qry.filter_by(
+ device_id=router_id,
+ device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
+ network_id=network_id).all()
+ network_port = None
+ for p in ports:
+ if p['fixed_ips'][0]['subnet_id'] == subnet_id:
+ network_port = p
+ break
+ assert network_port
+ mbridge_port = self.mido_api.get_port(network_port['id'])
+
+ # get network information from subnet data
+ network_addr, network_length = subnet['cidr'].split('/')
+ network_length = int(network_length)
+
+ # Unlink the router and the bridge.
+ mrouter = self.mido_api.get_router(router_id)
+ mrouter_port = self.mido_api.get_port(mbridge_port.get_peer_id())
+ mrouter_port.unlink()
+
+ # Delete the route for the subnet.
+ found = False
+ for r in mrouter.get_routes():
+ if r.get_next_hop_port() == mrouter_port.get_id():
+ r.delete()
+ found = True
+ #break # commented out due to issue#314
+ assert found
+
+ # delete the route for the subnet in the metadata router
+ found = False
+ for r in self.metadata_router.get_routes():
+ if (r.get_dst_network_addr() == network_addr and
+ r.get_dst_network_length() == network_length):
+ LOG.debug(_('Deleting route=%r ...'), r)
+ r.delete()
+ found = True
+ break
+ assert found
+
+ super(MidonetPluginV2, self).remove_router_interface(
+ context, router_id, interface_info)
+ LOG.debug(_("MidonetPluginV2.remove_router_interface exiting"))
+
+ def update_floatingip(self, context, id, floatingip):
+ LOG.debug(_("MidonetPluginV2.update_floatingip called: id=%(id)s "
+ "floatingip=%(floatingip)s "),
+ {'id': id, 'floatingip': floatingip})
+
+ session = context.session
+ with session.begin(subtransactions=True):
+ if floatingip['floatingip']['port_id']:
+ fip = super(MidonetPluginV2, self).update_floatingip(
+ context, id, floatingip)
+ router_id = fip['router_id']
+ floating_address = fip['floating_ip_address']
+ fixed_address = fip['fixed_ip_address']
+
+ tenant_router = self.mido_api.get_router(router_id)
+                # find the provider router port that is connected to the
+                # tenant router of the floating ip
+ for p in tenant_router.get_peer_ports():
+ if p.get_device_id() == self.provider_router.get_id():
+ pr_port = p
+
+ # get the tenant router port id connected to provider router
+ tr_port_id = pr_port.get_peer_id()
+
+ # add a route for the floating ip to bring it to the tenant
+ self.provider_router.add_route().type(
+ 'Normal').src_network_addr('0.0.0.0').src_network_length(
+ 0).dst_network_addr(
+ floating_address).dst_network_length(
+ 32).weight(100).next_hop_port(
+ pr_port.get_id()).create()
+
+ chains = self.chain_manager.get_router_chains(fip['tenant_id'],
+ fip['router_id'])
+ # add dnat/snat rule pair for the floating ip
+ nat_targets = []
+ nat_targets.append(
+ {'addressFrom': fixed_address, 'addressTo': fixed_address,
+ 'portFrom': 0, 'portTo': 0})
+
+ floating_property = {OS_FLOATING_IP_RULE_KEY: id}
+ chains['in'].add_rule().nw_dst_address(
+ floating_address).nw_dst_length(32).type(
+ 'dnat').flow_action('accept').nat_targets(
+ nat_targets).in_ports([tr_port_id]).position(
+ 1).properties(floating_property).create()
+
+ nat_targets = []
+ nat_targets.append(
+ {'addressFrom': floating_address,
+ 'addressTo': floating_address,
+ 'portFrom': 0,
+ 'portTo': 0})
+
+ chains['out'].add_rule().nw_src_address(
+ fixed_address).nw_src_length(32).type(
+ 'snat').flow_action('accept').nat_targets(
+ nat_targets).out_ports(
+ [tr_port_id]).position(1).properties(
+ floating_property).create()
+
+ # disassociate floating IP
+ elif floatingip['floatingip']['port_id'] is None:
+
+ fip = super(MidonetPluginV2, self).get_floatingip(context, id)
+
+ router_id = fip['router_id']
+ floating_address = fip['floating_ip_address']
+ fixed_address = fip['fixed_ip_address']
+
+ # delete the route for this floating ip
+ for r in self.provider_router.get_routes():
+ if (r.get_dst_network_addr() == floating_address and
+ r.get_dst_network_length() == 32):
+ r.delete()
+
+ # delete snat/dnat rule pair for this floating ip
+ chains = self.chain_manager.get_router_chains(fip['tenant_id'],
+ fip['router_id'])
+ LOG.debug(_('chains=%r'), chains)
+
+ for r in chains['in'].get_rules():
+ if OS_FLOATING_IP_RULE_KEY in r.get_properties():
+ if r.get_properties()[OS_FLOATING_IP_RULE_KEY] == id:
+ LOG.debug(_('deleting rule=%r'), r)
+ r.delete()
+ break
+
+ for r in chains['out'].get_rules():
+ if OS_FLOATING_IP_RULE_KEY in r.get_properties():
+ if r.get_properties()[OS_FLOATING_IP_RULE_KEY] == id:
+ LOG.debug(_('deleting rule=%r'), r)
+ r.delete()
+ break
+
+ super(MidonetPluginV2, self).update_floatingip(context, id,
+ floatingip)
+
+ LOG.debug(_("MidonetPluginV2.update_floating_ip exiting: fip=%s"), fip)
+ return fip
+
+ #
+ # Security groups supporting methods
+ #
+
+ def create_security_group(self, context, security_group, default_sg=False):
+ """Create chains for Quantum security group."""
+ LOG.debug(_("MidonetPluginV2.create_security_group called: "
+ "security_group=%(security_group)s "
+ "default_sg=%(default_sg)s "),
+ {'security_group': security_group, 'default_sg': default_sg})
+
+ sg = security_group.get('security_group')
+ tenant_id = self._get_tenant_id_for_create(context, sg)
+
+ with context.session.begin(subtransactions=True):
+ sg_db_entry = super(MidonetPluginV2, self).create_security_group(
+ context, security_group, default_sg)
+
+ # Create MidoNet chains and portgroup for the SG
+ sg_id = sg_db_entry['id']
+ sg_name = sg_db_entry['name']
+ self.chain_manager.create_for_sg(tenant_id, sg_id, sg_name)
+ self.pg_manager.create(tenant_id, sg_id, sg_name)
+
+ LOG.debug(_("MidonetPluginV2.create_security_group exiting: "
+ "sg_db_entry=%r"), sg_db_entry)
+ return sg_db_entry
+
+ def delete_security_group(self, context, id):
+ """Delete chains for Quantum security group."""
+ LOG.debug(_("MidonetPluginV2.delete_security_group called: id=%s"), id)
+
+ with context.session.begin(subtransactions=True):
+ sg_db_entry = super(MidonetPluginV2, self).get_security_group(
+ context, id)
+
+ if not sg_db_entry:
+ raise ext_sg.SecurityGroupNotFound(id=id)
+
+ sg_name = sg_db_entry['name']
+ sg_id = sg_db_entry['id']
+ tenant_id = sg_db_entry['tenant_id']
+
+ if sg_name == 'default':
+ raise ext_sg.SecurityGroupCannotRemoveDefault()
+
+ filters = {'security_group_id': [sg_id]}
+ if super(MidonetPluginV2, self)._get_port_security_group_bindings(
+ context, filters):
+ raise ext_sg.SecurityGroupInUse(id=sg_id)
+
+ # Delete MidoNet Chains and portgroup for the SG
+ self.chain_manager.delete_for_sg(tenant_id, sg_id, sg_name)
+ self.pg_manager.delete(tenant_id, sg_id, sg_name)
+
+ return super(MidonetPluginV2, self).delete_security_group(
+ context, id)
+
+ def get_security_groups(self, context, filters=None, fields=None):
+ LOG.debug(_("MidonetPluginV2.get_security_groups called: "
+ "filters=%(filters)r fields=%(fields)r"),
+ {'filters': filters, 'fields': fields})
+ return super(MidonetPluginV2, self).get_security_groups(
+ context, filters, fields)
+
+ def get_security_group(self, context, id, fields=None, tenant_id=None):
+ LOG.debug(_("MidonetPluginV2.get_security_group called: id=%(id)s "
+ "fields=%(fields)r tenant_id=%(tenant_id)s"),
+ {'id': id, 'fields': fields, 'tenant_id': tenant_id})
+ return super(MidonetPluginV2, self).get_security_group(context, id,
+ fields)
+
+ def create_security_group_rule(self, context, security_group_rule):
+ LOG.debug(_("MidonetPluginV2.create_security_group_rule called: "
+ "security_group_rule=%(security_group_rule)r"),
+ {'security_group_rule': security_group_rule})
+
+ with context.session.begin(subtransactions=True):
+ rule_db_entry = super(
+ MidonetPluginV2, self).create_security_group_rule(
+ context, security_group_rule)
+
+ self.rule_manager.create_for_sg_rule(rule_db_entry)
+ LOG.debug(_("MidonetPluginV2.create_security_group_rule exiting: "
+ "rule_db_entry=%r"), rule_db_entry)
+ return rule_db_entry
+
+ def delete_security_group_rule(self, context, sgrid):
+ LOG.debug(_("MidonetPluginV2.delete_security_group_rule called: "
+ "sgrid=%s"), sgrid)
+
+ with context.session.begin(subtransactions=True):
+ rule_db_entry = super(MidonetPluginV2,
+ self).get_security_group_rule(context, sgrid)
+
+ if not rule_db_entry:
+ raise ext_sg.SecurityGroupRuleNotFound(id=sgrid)
+
+ self.rule_manager.delete_for_sg_rule(rule_db_entry)
+ return super(MidonetPluginV2,
+ self).delete_security_group_rule(context, sgrid)
+
+ def get_security_group_rules(self, context, filters=None, fields=None):
+ LOG.debug(_("MidonetPluginV2.get_security_group_rules called: "
+ "filters=%(filters)r fields=%(fields)r"),
+ {'filters': filters, 'fields': fields})
+ return super(MidonetPluginV2, self).get_security_group_rules(
+ context, filters, fields)
+
+ def get_security_group_rule(self, context, id, fields=None):
+ LOG.debug(_("MidonetPluginV2.get_security_group_rule called: "
+ "id=%(id)s fields=%(fields)r"),
+ {'id': id, 'fields': fields})
+ return super(MidonetPluginV2, self).get_security_group_rule(
+ context, id, fields)
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2012 Midokura Japan K.K.
+# Copyright (C) 2013 Midokura PTE LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2012 Midokura Japan K.K.
+# Copyright (C) 2013 Midokura PTE LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Ryu Ishimoto, Midokura Japan KK
+# @author: Tomoe Sugihara, Midokura Japan KK
+
+import unittest2 as unittest
+import uuid
+
+import mock
+
+from quantum.plugins.midonet import midonet_lib
+
+
+class MidonetLibTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_api = mock.Mock()
+
+ def tearDown(self):
+ self.mock_api = None
+
+ def _create_mock_chains(self, sg_id, sg_name):
+ mock_in_chain = mock.Mock()
+ mock_in_chain.get_name.return_value = "OS_SG_%s_%s_IN" % (sg_id,
+ sg_name)
+ mock_out_chain = mock.Mock()
+ mock_out_chain.get_name.return_value = "OS_SG_%s_%s_OUT" % (sg_id,
+ sg_name)
+ return (mock_in_chain, mock_out_chain)
+
+ def _create_mock_router_chains(self, router_id):
+ mock_in_chain = mock.Mock()
+ mock_in_chain.get_name.return_value = "OS_ROUTER_IN_%s" % (router_id)
+
+ mock_out_chain = mock.Mock()
+ mock_out_chain.get_name.return_value = "OS_ROUTER_OUT_%s" % (router_id)
+ return (mock_in_chain, mock_out_chain)
+
+ def _create_mock_port_group(self, sg_id, sg_name):
+ mock_pg = mock.Mock()
+ mock_pg.get_name.return_value = "OS_SG_%s_%s" % (sg_id, sg_name)
+ return mock_pg
+
+ def _create_mock_rule(self, rule_id):
+ mock_rule = mock.Mock()
+ mock_rule.get_properties.return_value = {"os_sg_rule_id": rule_id}
+ return mock_rule
+
+
+class MidonetChainManagerTestCase(MidonetLibTestCase):
+
+ def setUp(self):
+ super(MidonetChainManagerTestCase, self).setUp()
+ self.mgr = midonet_lib.ChainManager(self.mock_api)
+
+ def tearDown(self):
+ self.mgr = None
+ super(MidonetChainManagerTestCase, self).tearDown()
+
+ def test_create_for_sg(self):
+ tenant_id = 'test_tenant'
+ sg_id = str(uuid.uuid4())
+ sg_name = 'test_sg_name'
+ calls = [mock.call.add_chain().tenant_id(tenant_id)]
+
+ self.mgr.create_for_sg(tenant_id, sg_id, sg_name)
+
+ self.mock_api.assert_has_calls(calls)
+
+ def test_delete_for_sg(self):
+ tenant_id = 'test_tenant'
+ sg_id = str(uuid.uuid4())
+ sg_name = 'test_sg_name'
+ in_chain, out_chain = self._create_mock_chains(sg_id, sg_name)
+
+ # Mock get_chains returned values
+ self.mock_api.get_chains.return_value = [in_chain, out_chain]
+
+ self.mgr.delete_for_sg(tenant_id, sg_id, sg_name)
+
+ self.mock_api.assert_has_calls(mock.call.get_chains(
+ {"tenant_id": tenant_id}))
+ in_chain.delete.assert_called_once_with()
+ out_chain.delete.assert_called_once_with()
+
+ def test_get_router_chains(self):
+ tenant_id = 'test_tenant'
+ router_id = str(uuid.uuid4())
+ in_chain, out_chain = self._create_mock_router_chains(router_id)
+
+ # Mock get_chains returned values
+ self.mock_api.get_chains.return_value = [in_chain, out_chain]
+
+ chains = self.mgr.get_router_chains(tenant_id, router_id)
+
+ self.mock_api.assert_has_calls(mock.call.get_chains(
+ {"tenant_id": tenant_id}))
+        self.assertEqual(len(chains), 2)
+        self.assertEqual(chains['in'], in_chain)
+        self.assertEqual(chains['out'], out_chain)
+
+ def test_create_router_chains(self):
+ tenant_id = 'test_tenant'
+ router_id = str(uuid.uuid4())
+ calls = [mock.call.add_chain().tenant_id(tenant_id)]
+
+ self.mgr.create_router_chains(tenant_id, router_id)
+
+ self.mock_api.assert_has_calls(calls)
+
+ def test_get_sg_chains(self):
+ tenant_id = 'test_tenant'
+ sg_id = str(uuid.uuid4())
+ in_chain, out_chain = self._create_mock_chains(sg_id, 'foo')
+
+ # Mock get_chains returned values
+ self.mock_api.get_chains.return_value = [in_chain, out_chain]
+
+ chains = self.mgr.get_sg_chains(tenant_id, sg_id)
+
+ self.mock_api.assert_has_calls(mock.call.get_chains(
+ {"tenant_id": tenant_id}))
+        self.assertEqual(len(chains), 2)
+        self.assertEqual(chains['in'], in_chain)
+        self.assertEqual(chains['out'], out_chain)
+
+
+class MidonetPortGroupManagerTestCase(MidonetLibTestCase):
+
+ def setUp(self):
+ super(MidonetPortGroupManagerTestCase, self).setUp()
+ self.mgr = midonet_lib.PortGroupManager(self.mock_api)
+
+ def tearDown(self):
+ self.mgr = None
+ super(MidonetPortGroupManagerTestCase, self).tearDown()
+
+ def test_create(self):
+ tenant_id = 'test_tenant'
+ sg_id = str(uuid.uuid4())
+ sg_name = 'test_sg'
+ pg_mock = self._create_mock_port_group(sg_id, sg_name)
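+ # Wire the add_port_group().tenant_id(...).name(...) builder chain to
+ # return the port group mock so that create() can be asserted on it.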
+ rv = self.mock_api.add_port_group.return_value.tenant_id.return_value
+ rv.name.return_value = pg_mock
+
+ self.mgr.create(tenant_id, sg_id, sg_name)
+
+ pg_mock.create.assert_called_once_with()
+
+ def test_delete(self):
+ tenant_id = 'test_tenant'
+ sg_id = str(uuid.uuid4())
+ sg_name = 'test_sg'
+ pg_mock1 = self._create_mock_port_group(sg_id, sg_name)
+ pg_mock2 = self._create_mock_port_group(sg_id, sg_name)
+ self.mock_api.get_port_groups.return_value = [pg_mock1, pg_mock2]
+
+ self.mgr.delete(tenant_id, sg_id, sg_name)
+
+ self.mock_api.assert_has_calls(mock.call.get_port_groups(
+ {"tenant_id": tenant_id}))
+ pg_mock1.delete.assert_called_once_with()
+ pg_mock2.delete.assert_called_once_with()
+
+ def test_get_for_sg(self):
+ tenant_id = 'test_tenant'
+ sg_id = str(uuid.uuid4())
+ pg_mock = self._create_mock_port_group(sg_id, 'foo')
+ self.mock_api.get_port_groups.return_value = [pg_mock]
+
+ pg = self.mgr.get_for_sg(tenant_id, sg_id)
+
+ self.assertEqual(pg, pg_mock)
+
+
+class MidonetRuleManagerTestCase(MidonetLibTestCase):
+
+ def setUp(self):
+ super(MidonetRuleManagerTestCase, self).setUp()
+ self.mgr = midonet_lib.RuleManager(self.mock_api)
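+ # Swap the collaborating managers for mocks so these tests do not
+ # depend on real chain or port group lookups.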
+ self.mgr.chain_manager = mock.Mock()
+ self.mgr.pg_manager = mock.Mock()
+
+ def tearDown(self):
+ self.mgr = None
+ super(MidonetRuleManagerTestCase, self).tearDown()
+
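+ # Build a security group rule dict in the shape consumed by
+ # RuleManager.create_for_sg_rule: ethertype 0x0800 is IPv4, and the
+ # defaults describe an egress TCP rule for all ports (1-65535) from
+ # 192.168.1.0/24.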
+ def _create_test_rule(self, tenant_id, sg_id, rule_id, direction="egress",
+ protocol="tcp", port_min=1, port_max=65535,
+ src_ip='192.168.1.0/24', src_group_id=None,
+ ethertype=0x0800):
+ return {"tenant_id": tenant_id, "security_group_id": sg_id,
+ "rule_id": rule_id, "direction": direction,
+ "protocol": protocol,
+ "source_ip_prefix": src_ip, "source_group_id": src_group_id,
+ "port_range_min": port_min, "port_range_max": port_max,
+ "ethertype": ethertype, "id": rule_id, "external_id": None}
+
+ def test_create_for_sg_rule(self):
+ tenant_id = 'test_tenant'
+ sg_id = str(uuid.uuid4())
+ rule_id = str(uuid.uuid4())
+ in_chain, out_chain = self._create_mock_chains(sg_id, 'foo')
+ self.mgr.chain_manager.get_sg_chains.return_value = {"in": in_chain,
+ "out": out_chain}
+ props = {"os_sg_rule_id": rule_id}
+ rule = self._create_test_rule(tenant_id, sg_id, rule_id)
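+ # The egress TCP rule should be translated into a builder chain on the
+ # inbound SG chain: nw_proto 6 (TCP), the source CIDR split into
+ # address and prefix length, and the rule ID stored as a property.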
+ calls = [mock.call.add_rule().port_group(None).type(
+ 'accept').nw_proto(6).nw_src_address(
+ '192.168.1.0').nw_src_length(24).tp_src_start(
+ None).tp_src_end(None).tp_dst_start(1).tp_dst_end(
+ 65535).properties(props).create()]
+
+ self.mgr.create_for_sg_rule(rule)
+
+ in_chain.assert_has_calls(calls)
+
+ def test_delete_for_sg_rule(self):
+ tenant_id = 'test_tenant'
+ sg_id = str(uuid.uuid4())
+ rule_id = str(uuid.uuid4())
+ in_chain, out_chain = self._create_mock_chains(sg_id, 'foo')
+ self.mgr.chain_manager.get_sg_chains.return_value = {"in": in_chain,
+ "out": out_chain}
+
+ # Mock the rules returned for each chain
+ mock_rule_in = self._create_mock_rule(rule_id)
+ mock_rule_out = self._create_mock_rule(rule_id)
+ in_chain.get_rules.return_value = [mock_rule_in]
+ out_chain.get_rules.return_value = [mock_rule_out]
+
+ rule = self._create_test_rule(tenant_id, sg_id, rule_id)
+ self.mgr.delete_for_sg_rule(rule)
+
+ mock_rule_in.delete.assert_called_once_with()
+ mock_rule_out.delete.assert_called_once_with()
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2012 Midokura Japan K.K.
+# Copyright (C) 2013 Midokura PTE LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Rossella Sblendido, Midokura Europe SARL
+# @author: Ryu Ishimoto, Midokura Japan KK
+# @author: Tomoe Sugihara, Midokura Japan KK
+
+import sys
+import uuid
+
+import mock
+from webob import exc as w_exc
+
+import quantum.common.test_lib as test_lib
+import quantum.tests.unit.midonet as midonet
+import quantum.tests.unit.test_db_plugin as test_plugin
+
+
+MIDOKURA_PKG_PATH = "quantum.plugins.midonet.plugin"
+
+# Need to mock the midonetclient module since the plugin will try to load it.
+sys.modules["midonetclient"] = mock.Mock()
+
+
+class MidonetPluginV2TestCase(test_plugin.QuantumDbPluginV2TestCase):
+
+ _plugin_name = ('%s.MidonetPluginV2' % MIDOKURA_PKG_PATH)
+
+ def setUp(self):
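+ # Patch the MidoNet client API class; the started patcher replaces the
+ # class itself, so self.instance.return_value stands in for the client
+ # instance that the plugin creates.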
+ self.mock_api = mock.patch('midonetclient.api.MidonetApi')
+ self.instance = self.mock_api.start()
+ super(MidonetPluginV2TestCase, self).setUp(self._plugin_name)
+
+ def tearDown(self):
+ super(MidonetPluginV2TestCase, self).tearDown()
+ self.mock_api.stop()
+
+ def _setup_bridge_mock(self, bridge_id=None, name='net'):
+ # Set up mocks needed for the parent network() method. Generate the
+ # bridge ID per call; a uuid.uuid4() default argument would be
+ # evaluated only once, when the class body is defined.
+ if bridge_id is None:
+ bridge_id = str(uuid.uuid4())
+ bridge = mock.Mock()
+ bridge.get_id.return_value = bridge_id
+ bridge.get_name.return_value = name
+
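+ # Wire the fluent builder chain so that
+ # add_bridge().name(...).tenant_id(...).create() returns the bridge.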
+ self.instance.return_value.add_bridge.return_value.name.return_value\
+ .tenant_id.return_value.create.return_value = bridge
+ self.instance.return_value.get_bridges.return_value = [bridge]
+ self.instance.return_value.get_bridge.return_value = bridge
+ return bridge
+
+ def _setup_subnet_mocks(self, subnet_id=None,
+ subnet_prefix='10.0.0.0', subnet_len=24):
+ # Set up mocks needed for the parent subnet() method; as above, the
+ # subnet ID default is generated per call.
+ if subnet_id is None:
+ subnet_id = str(uuid.uuid4())
+ bridge = self._setup_bridge_mock()
+ subnet = mock.Mock()
+ subnet.get_subnet_prefix.return_value = subnet_prefix
+ subnet.get_subnet_length.return_value = subnet_len
+ subnet.get_id.return_value = subnet_prefix + '/' + str(subnet_len)
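+ # add_dhcp_subnet().default_gateway(...).subnet_prefix(...)
+ # .subnet_length(...).create() on the bridge returns the subnet mock.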
+ bridge.add_dhcp_subnet.return_value.default_gateway\
+ .return_value.subnet_prefix.return_value.subnet_length\
+ .return_value.create.return_value = subnet
+ bridge.get_dhcp_subnets.return_value = [subnet]
+ return (bridge, subnet)
+
+ def _setup_port_mocks(self, port_id=None):
+ # Set up mocks needed for the parent port() method; the port ID is
+ # likewise generated per call.
+ if port_id is None:
+ port_id = str(uuid.uuid4())
+ bridge, subnet = self._setup_subnet_mocks()
+ port = mock.Mock()
+ port.get_id.return_value = port_id
+ self.instance.return_value.create_port.return_value = port
+ self.instance.return_value.get_port.return_value = port
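+ # Exterior bridge ports created via add_exterior_port().create()
+ # resolve to the port mock.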
+ bridge.add_exterior_port.return_value.create.return_value = (
+ port
+ )
+
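+ # DHCP host entries added through add_dhcp_host().ip_addr(...)
+ # .mac_addr(...).create() resolve to the dhcp_host mock.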
+ dhcp_host = mock.Mock()
+ rv1 = subnet.add_dhcp_host.return_value.ip_addr.return_value
+ rv1.mac_addr.return_value.create.return_value = dhcp_host
+
+ subnet.get_dhcp_hosts.return_value = [dhcp_host]
+ return (bridge, subnet, port, dhcp_host)
+
+
+class TestMidonetNetworksV2(test_plugin.TestNetworksV2,
+ MidonetPluginV2TestCase):
+
+ def test_create_network(self):
+ self._setup_bridge_mock()
+ super(TestMidonetNetworksV2, self).test_create_network()
+
+ def test_create_public_network(self):
+ self._setup_bridge_mock()
+ super(TestMidonetNetworksV2, self).test_create_public_network()
+
+ def test_create_public_network_no_admin_tenant(self):
+ self._setup_bridge_mock()
+ super(TestMidonetNetworksV2,
+ self).test_create_public_network_no_admin_tenant()
+
+ def test_update_network(self):
+ self._setup_bridge_mock()
+ super(TestMidonetNetworksV2, self).test_update_network()
+
+ def test_list_networks(self):
+ self._setup_bridge_mock()
+ with self.network(name='net1') as net1:
+ req = self.new_list_request('networks')
+ res = self.deserialize('json', req.get_response(self.api))
+ self.assertEqual(res['networks'][0]['name'],
+ net1['network']['name'])
+
+ def test_show_network(self):
+ self._setup_bridge_mock()
+ super(TestMidonetNetworksV2, self).test_show_network()
+
+ def test_update_shared_network_noadmin_returns_403(self):
+ self._setup_bridge_mock()
+ super(TestMidonetNetworksV2,
+ self).test_update_shared_network_noadmin_returns_403()
+
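+ # The parent tests overridden below with empty bodies appear to cover
+ # shared-network, bulk, filtering, and pagination behavior that these
+ # MidoNet mocks do not exercise, so they are skipped.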
+ def test_update_network_set_shared(self):
+ pass
+
+ def test_update_network_with_subnet_set_shared(self):
+ pass
+
+ def test_update_network_set_not_shared_single_tenant(self):
+ pass
+
+ def test_update_network_set_not_shared_other_tenant_returns_409(self):
+ pass
+
+ def test_update_network_set_not_shared_multi_tenants_returns_409(self):
+ pass
+
+ def test_update_network_set_not_shared_multi_tenants2_returns_409(self):
+ pass
+
+ def test_create_networks_bulk_native(self):
+ pass
+
+ def test_create_networks_bulk_native_quotas(self):
+ pass
+
+ def test_create_networks_bulk_tenants_and_quotas(self):
+ pass
+
+ def test_create_networks_bulk_tenants_and_quotas_fail(self):
+ pass
+
+ def test_create_networks_bulk_emulated(self):
+ pass
+
+ def test_create_networks_bulk_wrong_input(self):
+ pass
+
+ def test_create_networks_bulk_emulated_plugin_failure(self):
+ pass
+
+ def test_create_networks_bulk_native_plugin_failure(self):
+ pass
+
+ def test_list_networks_with_parameters(self):
+ pass
+
+ def test_list_networks_with_fields(self):
+ pass
+
+ def test_list_networks_with_parameters_invalid_values(self):
+ pass
+
+ def test_show_network_with_subnet(self):
+ pass
+
+ def test_invalid_admin_status(self):
+ pass
+
+ def test_list_networks_with_pagination_emulated(self):
+ pass
+
+ def test_list_networks_with_pagination_reverse_emulated(self):
+ pass
+
+ def test_list_networks_with_sort_emulated(self):
+ pass
+
+ def test_list_networks_without_pk_in_fields_pagination_emulated(self):
+ pass
+
+
+class TestMidonetSubnetsV2(test_plugin.TestSubnetsV2,
+ MidonetPluginV2TestCase):
+
+ def test_create_subnet(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet()
+
+ def test_create_two_subnets(self):
+ pass
+
+ def test_create_two_subnets_same_cidr_returns_400(self):
+ pass
+
+ def test_create_subnet_bad_V4_cidr(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_bad_V4_cidr()
+
+ def test_create_subnet_bad_V6_cidr(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_bad_V6_cidr()
+
+ def test_create_2_subnets_overlapping_cidr_allowed_returns_200(self):
+ pass
+
+ def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self):
+ pass
+
+ def test_create_subnets_bulk_native(self):
+ pass
+
+ def test_create_subnets_bulk_emulated(self):
+ pass
+
+ def test_create_subnets_bulk_emulated_plugin_failure(self):
+ pass
+
+ def test_create_subnets_bulk_native_plugin_failure(self):
+ pass
+
+ def test_delete_subnet(self):
+ _bridge, subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_delete_subnet()
+ subnet.delete.assert_called_once_with()
+
+ def test_delete_subnet_port_exists_owned_by_network(self):
+ _bridge, subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_delete_subnet_port_exists_owned_by_network()
+
+ def test_delete_subnet_port_exists_owned_by_other(self):
+ pass
+
+ def test_delete_network(self):
+ bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_delete_network()
+ bridge.delete.assert_called_once_with()
+
+ def test_create_subnet_bad_tenant(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_bad_tenant()
+
+ def test_create_subnet_bad_ip_version(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_bad_ip_version()
+
+ def test_create_subnet_bad_ip_version_null(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_create_subnet_bad_ip_version_null()
+
+ def test_create_subnet_bad_uuid(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_bad_uuid()
+
+ def test_create_subnet_bad_boolean(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_bad_boolean()
+
+ def test_create_subnet_bad_pools(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_bad_pools()
+
+ def test_create_subnet_bad_nameserver(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_bad_nameserver()
+
+ def test_create_subnet_bad_hostroutes(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_bad_hostroutes()
+
+ def test_create_subnet_defaults(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_defaults()
+
+ def test_create_subnet_gw_values(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_gw_values()
+
+ def test_create_force_subnet_gw_values(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_force_subnet_gw_values()
+
+ def test_create_subnet_with_allocation_pool(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_create_subnet_with_allocation_pool()
+
+ def test_create_subnet_with_none_gateway(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_create_subnet_with_none_gateway()
+
+ def test_create_subnet_with_none_gateway_fully_allocated(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_create_subnet_with_none_gateway_fully_allocated()
+
+ def test_subnet_with_allocation_range(self):
+ pass
+
+ def test_create_subnet_with_none_gateway_allocation_pool(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_create_subnet_with_none_gateway_allocation_pool()
+
+ def test_create_subnet_with_v6_allocation_pool(self):
+ pass
+
+ def test_create_subnet_with_large_allocation_pool(self):
+ pass
+
+ def test_create_subnet_multiple_allocation_pools(self):
+ pass
+
+ def test_create_subnet_with_dhcp_disabled(self):
+ pass
+
+ def test_create_subnet_default_gw_conflict_allocation_pool_returns_409(
+ self):
+ pass
+
+ def test_create_subnet_gateway_in_allocation_pool_returns_409(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self)\
+ .test_create_subnet_gateway_in_allocation_pool_returns_409()
+
+ def test_create_subnet_overlapping_allocation_pools_returns_409(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self)\
+ .test_create_subnet_overlapping_allocation_pools_returns_409()
+
+ def test_create_subnet_invalid_allocation_pool_returns_400(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_create_subnet_invalid_allocation_pool_returns_400()
+
+ def test_create_subnet_out_of_range_allocation_pool_returns_400(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self)\
+ .test_create_subnet_out_of_range_allocation_pool_returns_400()
+
+ def test_create_subnet_shared_returns_400(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_create_subnet_shared_returns_400()
+
+ def test_create_subnet_inconsistent_ipv6_cidrv4(self):
+ pass
+
+ def test_create_subnet_inconsistent_ipv4_cidrv6(self):
+ pass
+
+ def test_create_subnet_inconsistent_ipv4_gatewayv6(self):
+ pass
+
+ def test_create_subnet_inconsistent_ipv6_gatewayv4(self):
+ pass
+
+ def test_create_subnet_inconsistent_ipv6_dns_v4(self):
+ pass
+
+ def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self):
+ pass
+
+ def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self):
+ pass
+
+ def test_update_subnet(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_update_subnet()
+
+ def test_update_subnet_shared_returns_400(self):
+ self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_update_subnet_shared_returns_400()
+
+ def test_update_subnet_inconsistent_ipv4_gatewayv6(self):
+ pass
+
+ def test_update_subnet_inconsistent_ipv6_gatewayv4(self):
+ pass
+
+ def test_update_subnet_inconsistent_ipv4_dns_v6(self):
+ pass
+
+ def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self):
+ pass
+
+ def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self):
+ pass
+
+ def test_show_subnet(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_show_subnet()
+
+ def test_list_subnets(self):
+ pass
+
+ def test_list_subnets_shared(self):
+ pass
+
+ def test_list_subnets_with_parameter(self):
+ pass
+
+ def test_invalid_ip_version(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_invalid_ip_version()
+
+ def test_invalid_subnet(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_invalid_subnet()
+
+ def test_invalid_ip_address(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_invalid_ip_address()
+
+ def test_invalid_uuid(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_invalid_uuid()
+
+ def test_create_subnet_with_one_dns(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_with_one_dns()
+
+ def test_create_subnet_with_two_dns(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_create_subnet_with_two_dns()
+
+ def test_create_subnet_with_too_many_dns(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_create_subnet_with_too_many_dns()
+
+ def test_create_subnet_with_one_host_route(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_create_subnet_with_one_host_route()
+
+ def test_create_subnet_with_two_host_routes(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_create_subnet_with_two_host_routes()
+
+ def test_create_subnet_with_too_many_routes(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_create_subnet_with_too_many_routes()
+
+ def test_update_subnet_dns(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_update_subnet_dns()
+
+ def test_update_subnet_dns_to_None(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_update_subnet_dns_to_None()
+
+ def test_update_subnet_dns_with_too_many_entries(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_update_subnet_dns_with_too_many_entries()
+
+ def test_update_subnet_route(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_update_subnet_route()
+
+ def test_update_subnet_route_to_None(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_update_subnet_route_to_None()
+
+ def test_update_subnet_route_with_too_many_entries(self):
+ _bridge, _subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_update_subnet_route_with_too_many_entries()
+
+ def test_delete_subnet_with_dns(self):
+ _bridge, subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_delete_subnet_with_dns()
+ subnet.delete.assert_called_once_with()
+
+ def test_delete_subnet_with_route(self):
+ _bridge, subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2, self).test_delete_subnet_with_route()
+ subnet.delete.assert_called_once_with()
+
+ def test_delete_subnet_with_dns_and_route(self):
+ _bridge, subnet = self._setup_subnet_mocks()
+ super(TestMidonetSubnetsV2,
+ self).test_delete_subnet_with_dns_and_route()
+ subnet.delete.assert_called_once_with()
+
+ def test_update_subnet_gateway_in_allocation_pool_returns_409(self):
+ self._setup_port_mocks()
+ super(TestMidonetSubnetsV2, self)\
+ .test_update_subnet_gateway_in_allocation_pool_returns_409()
+
+ def test_list_subnets_with_pagination_emulated(self):
+ pass
+
+ def test_list_subnets_with_pagination_reverse_emulated(self):
+ pass
+
+ def test_list_subnets_with_sort_emulated(self):
+ pass
+
+
+class TestMidonetPortsV2(test_plugin.TestPortsV2,
+ MidonetPluginV2TestCase):
+
+ def test_create_port_json(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_create_port_json()
+
+ def test_create_port_bad_tenant(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_create_port_bad_tenant()
+
+ def test_create_port_public_network(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_create_port_public_network()
+
+ def test_create_port_public_network_with_ip(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2,
+ self).test_create_port_public_network_with_ip()
+
+ def test_create_ports_bulk_native(self):
+ pass
+
+ def test_create_ports_bulk_emulated(self):
+ pass
+
+ def test_create_ports_bulk_wrong_input(self):
+ pass
+
+ def test_create_ports_bulk_emulated_plugin_failure(self):
+ pass
+
+ def test_create_ports_bulk_native_plugin_failure(self):
+ pass
+
+ def test_list_ports(self):
+ pass
+
+ def test_list_ports_filtered_by_fixed_ip(self):
+ pass
+
+ def test_list_ports_public_network(self):
+ pass
+
+ def test_show_port(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_show_port()
+
+ def test_delete_port(self):
+ _bridge, _subnet, port, _dhcp = self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_delete_port()
+ port.delete.assert_called_once_with()
+
+ def test_delete_port_public_network(self):
+ _bridge, _subnet, port, _dhcp = self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_delete_port_public_network()
+ port.delete.assert_called_once_with()
+
+ def test_update_port(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_update_port()
+
+ def test_update_device_id_null(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_update_device_id_null()
+
+ def test_delete_network_if_port_exists(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_delete_network_if_port_exists()
+
+ def test_delete_network_port_exists_owned_by_network(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2,
+ self).test_delete_network_port_exists_owned_by_network()
+
+ def test_update_port_delete_ip(self):
+ pass
+
+ def test_no_more_port_exception(self):
+ pass
+
+ def test_update_port_update_ip(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_update_port_update_ip()
+
+ def test_update_port_update_ips(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_update_port_update_ips()
+
+ def test_update_port_add_additional_ip(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_update_port_add_additional_ip()
+
+ def test_requested_duplicate_mac(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_requested_duplicate_mac()
+
+ def test_mac_generation(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_mac_generation()
+
+ def test_mac_generation_4octet(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_mac_generation_4octet()
+
+ def test_bad_mac_format(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_bad_mac_format()
+
+ def test_mac_exhaustion(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_mac_exhaustion()
+
+ def test_requested_duplicate_ip(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_requested_duplicate_ip()
+
+ def test_requested_subnet_delete(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_requested_subnet_delete()
+
+ def test_requested_subnet_id(self):
+ pass
+
+ def test_requested_subnet_id_not_on_network(self):
+ pass
+
+ def test_overlapping_subnets(self):
+ pass
+
+ def test_requested_subnet_id_v4_and_v6(self):
+ pass
+
+ def test_range_allocation(self):
+ pass
+
+ def test_requested_invalid_fixed_ips(self):
+ pass
+
+ def test_invalid_ip(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_invalid_ip()
+
+ def test_requested_split(self):
+ pass
+
+ def test_duplicate_ips(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_duplicate_ips()
+
+ def test_fixed_ip_invalid_subnet_id(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_fixed_ip_invalid_subnet_id()
+
+ def test_fixed_ip_invalid_ip(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_fixed_ip_invalid_ip()
+
+ def test_requested_ips_only(self):
+ pass
+
+ def test_recycling(self):
+ pass
+
+ def test_invalid_admin_state(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_invalid_admin_state()
+
+ def test_invalid_mac_address(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_invalid_mac_address()
+
+ def test_default_allocation_expiration(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_default_allocation_expiration()
+
+ def test_update_fixed_ip_lease_expiration(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2,
+ self).test_update_fixed_ip_lease_expiration()
+
+ def test_port_delete_holds_ip(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_port_delete_holds_ip()
+
+ def test_update_fixed_ip_lease_expiration_invalid_address(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2,
+ self).test_update_fixed_ip_lease_expiration_invalid_address()
+
+ def test_hold_ip_address(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_hold_ip_address()
+
+ def test_recycle_held_ip_address(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_recycle_held_ip_address()
+
+ def test_recycle_expired_previously_run_within_context(self):
+ pass
+
+ def test_update_port_not_admin(self):
+ self._setup_port_mocks()
+ super(TestMidonetPortsV2, self).test_update_port_not_admin()
+
+ def test_list_ports_with_pagination_emulated(self):
+ pass
+
+ def test_list_ports_with_pagination_reverse_emulated(self):
+ pass
+
+ def test_list_ports_with_sort_emulated(self):
+ pass