+++ /dev/null
-# Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers
-
-This module provides a generic neutron plugin 'NeutronRestProxy' that
-translates neutron function calls into authenticated JSON REST requests
-to a set of redundant external network controllers.
-
-It also keeps a local persistent store of the neutron state that has been
-set up through that API.
-
-Currently, either the FloodLight OpenFlow controller or the Big Switch
-Networks controller can be configured as the external network controller
-for this plugin.
-
-For more details on this plugin, please refer to the following link:
-http://www.openflowhub.org/display/floodlightcontroller/Neutron+REST+Proxy+Plugin
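The proxied calls are ordinary authenticated JSON requests over HTTP(S). A
minimal sketch of the kind of request the plugin issues (Python 2 to match
the plugin's own stack; the `/networks` path and the credentials shown are
illustrative assumptions, not the exact wire protocol):

```python
import base64
import httplib
import json

# Hypothetical controller address and "user:password" credentials.
auth = base64.b64encode('admin:secret')
headers = {'Content-Type': 'application/json',
           'Authorization': 'Basic %s' % auth}
body = json.dumps({'network': {'id': 'net-1', 'name': 'demo'}})

conn = httplib.HTTPConnection('localhost', 8800, timeout=10)
conn.request('POST', '/networks', body, headers)
response = conn.getresponse()
print response.status, response.read()
conn.close()
```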
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
-import sys
-import time
-
import eventlet
eventlet.monkey_patch()
-from oslo_config import cfg
-import oslo_messaging
-from oslo_utils import excutils
-
-from neutron.agent.linux import ovs_lib
-from neutron.agent.linux import utils
-from neutron.agent import rpc as agent_rpc
-from neutron.agent import securitygroups_rpc as sg_rpc
-from neutron.common import config
-from neutron.common import topics
-from neutron import context as q_context
-from neutron.extensions import securitygroup as ext_sg
-from neutron.i18n import _LE
-from neutron.openstack.common import log
-from neutron.plugins.bigswitch import config as pl_config
-
-LOG = log.getLogger(__name__)
-
-
-class IVSBridge(ovs_lib.OVSBridge):
- '''
- This class does not provide OVS parity for IVS. It implements only the
- bare minimum necessary to use IVS with this agent.
- '''
- def run_vsctl(self, args, check_error=False):
- full_args = ["ivs-ctl"] + args
- try:
- return utils.execute(full_args, run_as_root=True)
- except Exception as e:
- with excutils.save_and_reraise_exception() as ctxt:
- LOG.error(_LE("Unable to execute %(cmd)s. "
- "Exception: %(exception)s"),
- {'cmd': full_args, 'exception': e})
- if not check_error:
- ctxt.reraise = False
-
- def get_vif_port_set(self):
- port_names = self.get_port_name_list()
- edge_ports = set(port_names)
- return edge_ports
-
- def get_vif_port_by_id(self, port_id):
- # IVS in nova uses a hybrid plug name: 'qvo' + first 14 chars of UUID
- name = 'qvo%s' % port_id[:14]
- if name in self.get_vif_port_set():
- return name
- return False
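# For example, the hybrid device name derived from a hypothetical port UUID:
port_id = 'f5de7cbb-1811-4e9c-8ded-0b7e1c3eb1d8'
assert 'qvo%s' % port_id[:14] == 'qvof5de7cbb-1811-'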
-
-
-class RestProxyAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
-
- target = oslo_messaging.Target(version='1.1')
-
- def __init__(self, integ_br, polling_interval, vs='ovs'):
- super(RestProxyAgent, self).__init__()
- self.polling_interval = polling_interval
- self._setup_rpc()
- self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
- self.sg_plugin_rpc)
- if vs == 'ivs':
- self.int_br = IVSBridge(integ_br)
- else:
- self.int_br = ovs_lib.OVSBridge(integ_br)
-
- def _setup_rpc(self):
- self.topic = topics.AGENT
- self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
- self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
- self.context = q_context.get_admin_context_without_session()
- self.endpoints = [self]
- consumers = [[topics.PORT, topics.UPDATE],
- [topics.SECURITY_GROUP, topics.UPDATE]]
- self.connection = agent_rpc.create_consumers(self.endpoints,
- self.topic,
- consumers)
-
- def port_update(self, context, **kwargs):
- LOG.debug("Port update received")
- port = kwargs.get('port')
- vif_port = self.int_br.get_vif_port_by_id(port['id'])
- if not vif_port:
- LOG.debug("Port %s is not present on this host.", port['id'])
- return
-
- LOG.debug("Port %s found. Refreshing firewall.", port['id'])
- if ext_sg.SECURITYGROUPS in port:
- self.sg_agent.refresh_firewall()
-
- def _update_ports(self, registered_ports):
- ports = self.int_br.get_vif_port_set()
- if ports == registered_ports:
- return
- added = ports - registered_ports
- removed = registered_ports - ports
- return {'current': ports,
- 'added': added,
- 'removed': removed}
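# The change detection above is plain set arithmetic; for instance:
registered = {'qvoaaaa', 'qvobbbb'}
current = {'qvobbbb', 'qvocccc'}
assert current - registered == {'qvocccc'}   # added
assert registered - current == {'qvoaaaa'}   # removed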
-
- def _process_devices_filter(self, port_info):
- if 'added' in port_info:
- self.sg_agent.prepare_devices_filter(port_info['added'])
- if 'removed' in port_info:
- self.sg_agent.remove_devices_filter(port_info['removed'])
-
- def daemon_loop(self):
- ports = set()
-
- while True:
- start = time.time()
- try:
- port_info = self._update_ports(ports)
- if port_info:
- LOG.debug("Agent loop has new device")
- self._process_devices_filter(port_info)
- ports = port_info['current']
- except Exception:
- LOG.exception(_LE("Error in agent event loop"))
-
- elapsed = max(time.time() - start, 0)
- if (elapsed < self.polling_interval):
- time.sleep(self.polling_interval - elapsed)
- else:
- LOG.debug("Loop iteration exceeded interval "
- "(%(polling_interval)s vs. %(elapsed)s)!",
- {'polling_interval': self.polling_interval,
- 'elapsed': elapsed})
-
-
-def main():
- config.init(sys.argv[1:])
- config.setup_logging()
- pl_config.register_config()
+from bsnstacklib.plugins.bigswitch.agent import restproxy_agent
- integ_br = cfg.CONF.RESTPROXYAGENT.integration_bridge
- polling_interval = cfg.CONF.RESTPROXYAGENT.polling_interval
- bsnagent = RestProxyAgent(integ_br, polling_interval,
- cfg.CONF.RESTPROXYAGENT.virtual_switch_type)
- bsnagent.daemon_loop()
- sys.exit(0)
if __name__ == "__main__":
- main()
+ restproxy_agent.main()
+++ /dev/null
-# Copyright 2014 Big Switch Networks, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-This module manages configuration options
-"""
-
-from oslo_config import cfg
-
-from neutron.common import utils
-from neutron.extensions import portbindings
-
-restproxy_opts = [
- cfg.ListOpt('servers', default=['localhost:8800'],
- help=_("A comma separated list of Big Switch or Floodlight "
- "servers and port numbers. The plugin proxies the "
- "requests to the Big Switch/Floodlight server, "
- "which performs the networking configuration. Only one"
- "server is needed per deployment, but you may wish to"
- "deploy multiple servers to support failover.")),
- cfg.StrOpt('server_auth', secret=True,
- help=_("The username and password for authenticating against "
- " the Big Switch or Floodlight controller.")),
- cfg.BoolOpt('server_ssl', default=True,
- help=_("If True, Use SSL when connecting to the Big Switch or "
- "Floodlight controller.")),
- cfg.BoolOpt('ssl_sticky', default=True,
- help=_("Trust and store the first certificate received for "
- "each controller address and use it to validate future "
- "connections to that address.")),
- cfg.BoolOpt('no_ssl_validation', default=False,
- help=_("Disables SSL certificate validation for controllers")),
- cfg.BoolOpt('cache_connections', default=True,
- help=_("Re-use HTTP/HTTPS connections to the controller.")),
- cfg.StrOpt('ssl_cert_directory',
- default='/etc/neutron/plugins/bigswitch/ssl',
- help=_("Directory containing ca_certs and host_certs "
- "certificate directories.")),
- cfg.BoolOpt('sync_data', default=False,
- help=_("Sync data on connect")),
- cfg.BoolOpt('auto_sync_on_failure', default=True,
- help=_("If neutron fails to create a resource because "
- "the backend controller doesn't know of a dependency, "
- "the plugin automatically triggers a full data "
- "synchronization to the controller.")),
- cfg.IntOpt('consistency_interval', default=60,
- help=_("Time between verifications that the backend controller "
- "database is consistent with Neutron. (0 to disable)")),
- cfg.IntOpt('server_timeout', default=10,
- help=_("Maximum number of seconds to wait for proxy request "
- "to connect and complete.")),
- cfg.IntOpt('thread_pool_size', default=4,
- help=_("Maximum number of threads to spawn to handle large "
- "volumes of port creations.")),
- cfg.StrOpt('neutron_id', default='neutron-' + utils.get_hostname(),
- deprecated_name='quantum_id',
- help=_("User defined identifier for this Neutron deployment")),
- cfg.BoolOpt('add_meta_server_route', default=True,
- help=_("Flag to decide if a route to the metadata server "
- "should be injected into the VM")),
-]
-router_opts = [
- cfg.MultiStrOpt('tenant_default_router_rule', default=['*:any:any:permit'],
- help=_("The default router rules installed in new tenant "
- "routers. Repeat the config option for each rule. "
- "Format is <tenant>:<source>:<destination>:<action>"
- "[:<nexthops>], where <nexthops> is an optional "
- "comma-separated list. Use '*' as the tenant to "
- "specify a default for all tenants.")),
- cfg.IntOpt('max_router_rules', default=200,
- help=_("Maximum number of router rules")),
-]
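# A sketch of how one such rule string decomposes (tenant ID and addresses
# are hypothetical; the L3 plugin performs the same split at runtime):
rule = 'tenant-1:10.0.0.0/8:external:permit:10.0.0.254,10.0.0.253'
tenant_id, source, destination, action, nexthops = rule.split(':')
parsed = {'source': source, 'destination': destination, 'action': action,
          'nexthops': [hop for hop in nexthops.split(',') if hop]}
assert parsed['nexthops'] == ['10.0.0.254', '10.0.0.253']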
-nova_opts = [
- cfg.StrOpt('vif_type', default='ovs',
- help=_("Virtual interface type to configure on "
- "Nova compute nodes")),
-]
-
-# Each VIF Type can have a list of nova host IDs that are fixed to that type
-for i in portbindings.VIF_TYPES:
- opt = cfg.ListOpt('node_override_vif_' + i, default=[],
- help=_("Nova compute nodes to manually set VIF "
- "type to %s") % i)
- nova_opts.append(opt)
-
-# Add the vif types for reference later
-nova_opts.append(cfg.ListOpt('vif_types',
- default=portbindings.VIF_TYPES,
- help=_('List of allowed vif_type values.')))
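# Conceptually, the per-host override lookup performed later by the plugin's
# _check_hostvif_override() reduces to this (host names hypothetical):
overrides = {'ivs': ['compute-7'], 'ovs': []}  # from node_override_vif_*

def vif_type_for(host, default='ovs'):
    for vif_type, hosts in overrides.items():
        if host in hosts:
            return vif_type
    return default

assert vif_type_for('compute-7') == 'ivs'
assert vif_type_for('compute-1') == 'ovs'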
-
-agent_opts = [
- cfg.StrOpt('integration_bridge', default='br-int',
- help=_('Name of integration bridge on compute '
- 'nodes used for security group insertion.')),
- cfg.IntOpt('polling_interval', default=5,
- help=_('Seconds between agent checks for port changes')),
- cfg.StrOpt('virtual_switch_type', default='ovs',
- help=_('Virtual switch type.'))
-]
-
-
-def register_config():
- cfg.CONF.register_opts(restproxy_opts, "RESTPROXY")
- cfg.CONF.register_opts(router_opts, "ROUTER")
- cfg.CONF.register_opts(nova_opts, "NOVA")
- cfg.CONF.register_opts(agent_opts, "RESTPROXYAGENT")
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import random
-import re
-import string
-import time
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_db.sqlalchemy import session
import sqlalchemy as sa
from neutron.db import model_base
-from neutron.i18n import _LI, _LW
-from neutron.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-# Maximum time in seconds to wait for a single record lock to be released
-# NOTE: The total time waiting may exceed this if there are multiple servers
-# waiting for the same lock
-MAX_LOCK_WAIT_TIME = 15
-
-
-def setup_db():
- '''Helper to register models for unit tests'''
- if HashHandler._FACADE is None:
- HashHandler._FACADE = session.EngineFacade.from_config(
- cfg.CONF, sqlite_fk=True)
- ConsistencyHash.metadata.create_all(
- HashHandler._FACADE.get_engine())
-
-
-def clear_db():
- '''Helper to unregister models and clear engine in unit tests'''
- if not HashHandler._FACADE:
- return
- ConsistencyHash.metadata.drop_all(HashHandler._FACADE.get_engine())
- HashHandler._FACADE = None
class ConsistencyHash(model_base.BASEV2):
hash_id = sa.Column(sa.String(255),
primary_key=True)
hash = sa.Column(sa.String(255), nullable=False)
-
-
-class HashHandler(object):
- '''
- A wrapper object to keep track of the session between the read
- and the update operations.
-
- This class needs an SQL engine completely independent of the main
- neutron connection so rollbacks from consistency hash operations don't
- affect the parent sessions.
- '''
- _FACADE = None
-
- def __init__(self, hash_id='1'):
- if HashHandler._FACADE is None:
- HashHandler._FACADE = session.EngineFacade.from_config(
- cfg.CONF, sqlite_fk=True)
- self.hash_id = hash_id
- self.session = HashHandler._FACADE.get_session(autocommit=True,
- expire_on_commit=False)
- self.random_lock_id = ''.join(random.choice(string.ascii_uppercase
- + string.digits)
- for _ in range(10))
- self.lock_marker = 'LOCKED_BY[%s]' % self.random_lock_id
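# The lock lives inside the stored hash value itself as a LOCKED_BY[...]
# prefix; a small illustration with a hypothetical lock ID and hash:
import re
record = 'LOCKED_BY[A1B2C3D4E5]0123456789abcdef'
assert re.findall(r"^LOCKED_BY\[(\w+)\]", record) == ['A1B2C3D4E5']
assert record.replace('LOCKED_BY[A1B2C3D4E5]', '') == '0123456789abcdef'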
-
- def _get_current_record(self):
- with self.session.begin(subtransactions=True):
- res = (self.session.query(ConsistencyHash).
- filter_by(hash_id=self.hash_id).first())
- if res:
- self.session.refresh(res) # make sure latest is loaded from db
- return res
-
- def _insert_empty_hash_with_lock(self):
- # try to insert a new hash, return False on conflict
- try:
- with self.session.begin(subtransactions=True):
- res = ConsistencyHash(hash_id=self.hash_id,
- hash=self.lock_marker)
- self.session.add(res)
- return True
- except db_exc.DBDuplicateEntry:
- # another server created a new record at the same time
- return False
-
- def _optimistic_update_hash_record(self, old_record, new_hash):
- # Optimistic update strategy. Returns True if successful, else False.
- query = sa.update(ConsistencyHash.__table__).values(hash=new_hash)
- query = query.where(ConsistencyHash.hash_id == old_record.hash_id)
- query = query.where(ConsistencyHash.hash == old_record.hash)
- with self._FACADE.get_engine().begin() as conn:
- result = conn.execute(query)
- # We need to check update row count in case another server is
- # doing this at the same time. Only one will succeed, the other will
- # not update any rows.
- return result.rowcount != 0
-
- def _get_lock_owner(self, record):
- matches = re.findall(r"^LOCKED_BY\[(\w+)\]", record)
- if not matches:
- return None
- return matches[0]
-
- def read_for_update(self):
- # An optimistic locking strategy with a timeout to avoid using a
- # consistency hash while another server is using it. This will
- # not return until a lock is acquired either normally or by stealing
- # it after an individual ID holds it for greater than
- # MAX_LOCK_WAIT_TIME.
- lock_wait_start = None
- last_lock_owner = None
- while True:
- res = self._get_current_record()
- if not res:
- # no current entry. try to insert to grab lock
- if not self._insert_empty_hash_with_lock():
- # A failed insert after missing current record means
- # a concurrent insert occurred. Start process over to
- # find the new record.
- LOG.debug("Concurrent record inserted. Retrying.")
- time.sleep(0.25)
- continue
- # The empty hash was successfully inserted with our lock
- return ''
-
- current_lock_owner = self._get_lock_owner(res.hash)
- if not current_lock_owner:
- # no current lock. attempt to lock
- new = self.lock_marker + res.hash
- if not self._optimistic_update_hash_record(res, new):
- # someone else beat us to it. restart process to wait
- # for new lock ID to be removed
- LOG.debug(
- "Failed to acquire lock. Restarting lock wait. "
- "Previous hash: %(prev)s. Attempted update: %(new)s",
- {'prev': res.hash, 'new': new})
- time.sleep(0.25)
- continue
- # successfully got the lock
- return res.hash
-
- LOG.debug("This request's lock ID is %(this)s. "
- "DB lock held by %(that)s",
- {'this': self.random_lock_id,
- 'that': current_lock_owner})
-
- if current_lock_owner == self.random_lock_id:
- # no change needed, we already have the table lock due to
- # previous read_for_update call.
- # return hash with lock tag stripped off for use in a header
- return res.hash.replace(self.lock_marker, '')
-
- if current_lock_owner != last_lock_owner:
- # The owner changed since the last iteration, but it
- # wasn't to us. Reset the counter. Log if not
- # first iteration.
- if lock_wait_start:
- LOG.debug("Lock owner changed from %(old)s to %(new)s "
- "while waiting to acquire it.",
- {'old': last_lock_owner,
- 'new': current_lock_owner})
- lock_wait_start = time.time()
- last_lock_owner = current_lock_owner
- if time.time() - lock_wait_start > MAX_LOCK_WAIT_TIME:
- # the lock has been held too long, steal it
- LOG.warning(_LW("Gave up waiting for consistency DB "
- "lock, trying to take it. "
- "Current hash is: %s"), res.hash)
- new_db_value = res.hash.replace(current_lock_owner,
- self.random_lock_id)
- if self._optimistic_update_hash_record(res, new_db_value):
- # lock stolen; return the hash with our marker stripped
- return new_db_value.replace(self.lock_marker, '')
- LOG.info(_LI("Failed to take lock. Another process updated "
- "the DB first."))
-
- def clear_lock(self):
- LOG.debug("Clearing hash record lock of id %s", self.random_lock_id)
- with self.session.begin(subtransactions=True):
- res = (self.session.query(ConsistencyHash).
- filter_by(hash_id=self.hash_id).first())
- if not res:
- LOG.warning(_LW("Hash record already gone, no lock to clear."))
- return
- if not res.hash.startswith(self.lock_marker):
- # frequent hits here mean this server holds locks longer
- # than MAX_LOCK_WAIT_TIME (i.e. it is too slow)
- LOG.warning(_LW("Another server already removed the lock. %s"),
- res.hash)
- return
- res.hash = res.hash.replace(self.lock_marker, '')
-
- def put_hash(self, hash):
- hash = hash or ''
- with self.session.begin(subtransactions=True):
- res = (self.session.query(ConsistencyHash).
- filter_by(hash_id=self.hash_id).first())
- if res:
- res.hash = hash
- else:
- conhash = ConsistencyHash(hash_id=self.hash_id, hash=hash)
- self.session.merge(conhash)
- LOG.debug("Consistency hash for group %(hash_id)s updated "
- "to %(hash)s", {'hash_id': self.hash_id, 'hash': hash})
+++ /dev/null
-# Copyright 2013, Big Switch Networks
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.api.v2 import attributes
-from neutron.i18n import _LW
-from neutron.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def get_port_hostid(context, port_id):
- # REVISIT(kevinbenton): this is a workaround to avoid portbindings_db
- # relational table generation until one of the functions is called.
- from neutron.db import portbindings_db
- with context.session.begin(subtransactions=True):
- query = context.session.query(portbindings_db.PortBindingPort)
- res = query.filter_by(port_id=port_id).first()
- if not res:
- return False
- return res.host
-
-
-def put_port_hostid(context, port_id, host):
- # REVISIT(kevinbenton): this is a workaround to avoid portbindings_db
- # relational table generation until one of the functions is called.
- from neutron.db import portbindings_db
- if not attributes.is_attr_set(host):
- LOG.warning(_LW("No host_id in port request to track port location."))
- return
- if port_id == '':
- LOG.warning(_LW("Received an empty port ID for host_id '%s'"), host)
- return
- if host == '':
- LOG.debug("Received an empty host_id for port '%s'", port_id)
- return
- LOG.debug("Logging port %(port)s on host_id %(host)s",
- {'port': port_id, 'host': host})
- with context.session.begin(subtransactions=True):
- location = portbindings_db.PortBindingPort(port_id=port_id, host=host)
- context.session.merge(location)
+++ /dev/null
-# Copyright 2013 Big Switch Networks, Inc.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.api.v2 import attributes as attr
-from neutron.common import exceptions as nexception
-from neutron.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-# Router Rules Exceptions
-class InvalidRouterRules(nexception.InvalidInput):
- message = _("Invalid format for router rules: %(rule)s, %(reason)s")
-
-
-class RulesExhausted(nexception.BadRequest):
- message = _("Unable to complete rules update for %(router_id)s. "
- "The number of rules exceeds the maximum %(quota)s.")
-
-
-def convert_to_valid_router_rules(data):
- """
- Validates and converts router rules to the appropriate data structure
- Example argument = [{'source': 'any', 'destination': 'any',
- 'action':'deny'},
- {'source': '1.1.1.1/32', 'destination': 'external',
- 'action':'permit',
- 'nexthops': ['1.1.1.254', '1.1.1.253']}
- ]
- """
- V4ANY = '0.0.0.0/0'
- CIDRALL = ['any', 'external']
- if not isinstance(data, list):
- emsg = _("Invalid data format for router rule: '%s'") % data
- LOG.debug(emsg)
- raise nexception.InvalidInput(error_message=emsg)
- _validate_uniquerules(data)
- rules = []
- expected_keys = ['source', 'destination', 'action']
- for rule in data:
- rule['nexthops'] = rule.get('nexthops', [])
- if not isinstance(rule['nexthops'], list):
- rule['nexthops'] = rule['nexthops'].split('+')
-
- src = V4ANY if rule['source'] in CIDRALL else rule['source']
- dst = V4ANY if rule['destination'] in CIDRALL else rule['destination']
-
- errors = [attr._verify_dict_keys(expected_keys, rule, False),
- attr._validate_subnet(dst),
- attr._validate_subnet(src),
- _validate_nexthops(rule['nexthops']),
- _validate_action(rule['action'])]
- errors = [m for m in errors if m]
- if errors:
- LOG.debug(errors)
- raise nexception.InvalidInput(error_message=errors)
- rules.append(rule)
- return rules
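# For illustration: 'any'/'external' are mapped to '0.0.0.0/0' only for
# validation; the stored rule keeps the symbolic value (a sketch assuming
# the neutron validators used above are importable):
rules = convert_to_valid_router_rules(
    [{'source': 'any', 'destination': '10.0.0.0/24', 'action': 'permit'}])
assert rules[0]['source'] == 'any'   # symbolic value preserved
assert rules[0]['nexthops'] == []    # defaulted when unspecified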
-
-
-def _validate_nexthops(nexthops):
- seen = []
- for ip in nexthops:
- msg = attr._validate_ip_address(ip)
- if ip in seen:
- msg = _("Duplicate nexthop in rule '%s'") % ip
- seen.append(ip)
- if msg:
- return msg
-
-
-def _validate_action(action):
- if action not in ['permit', 'deny']:
- return _("Action must be either permit or deny."
- " '%s' was provided") % action
-
-
-def _validate_uniquerules(rules):
- pairs = []
- for r in rules:
- if 'source' not in r or 'destination' not in r:
- continue
- pairs.append((r['source'], r['destination']))
-
- if len(set(pairs)) != len(pairs):
- error = _("Duplicate router rules (src,dst) found '%s'") % pairs
- LOG.debug(error)
- raise nexception.InvalidInput(error_message=error)
-
-
-class Routerrule(object):
-
- @classmethod
- def get_name(cls):
- return "Neutron Router Rule"
-
- @classmethod
- def get_alias(cls):
- return "router_rules"
-
- @classmethod
- def get_description(cls):
- return "Router rule configuration for L3 router"
-
- @classmethod
- def get_namespace(cls):
- return "http://docs.openstack.org/ext/neutron/routerrules/api/v1.0"
-
- @classmethod
- def get_updated(cls):
- return "2013-05-23T10:00:00-00:00"
-
- def get_extended_resources(self, version):
- if version == "2.0":
- return EXTENDED_ATTRIBUTES_2_0
- else:
- return {}
-
-# Attribute Map
-EXTENDED_ATTRIBUTES_2_0 = {
- 'routers': {
- 'router_rules': {'allow_post': False, 'allow_put': True,
- 'convert_to': convert_to_valid_router_rules,
- 'is_visible': True,
- 'default': attr.ATTR_NOT_SPECIFIED},
- }
-}
Big Switch core plugin.
"""
-from oslo_config import cfg
-from oslo_utils import excutils
+from bsnstacklib.plugins.bigswitch import l3_router_plugin
-from neutron.api import extensions as neutron_extensions
-from neutron.common import exceptions
-from neutron.common import log
-from neutron.db import l3_db
-from neutron.extensions import l3
-from neutron.i18n import _LE
-from neutron.openstack.common import log as logging
-from neutron.plugins.bigswitch import extensions
-from neutron.plugins.bigswitch import plugin as cplugin
-from neutron.plugins.bigswitch import routerrule_db
-from neutron.plugins.bigswitch import servermanager
-from neutron.plugins.common import constants
-# number of fields in a router rule string
-ROUTER_RULE_COMPONENT_COUNT = 5
-LOG = logging.getLogger(__name__)
-put_context_in_serverpool = cplugin.put_context_in_serverpool
-
-
-class L3RestProxy(cplugin.NeutronRestProxyV2Base,
- routerrule_db.RouterRule_db_mixin):
-
- supported_extension_aliases = ["router", "router_rules"]
-
- @staticmethod
- def get_plugin_type():
- return constants.L3_ROUTER_NAT
-
- @staticmethod
- def get_plugin_description():
- return _("L3 Router Service Plugin for Big Switch fabric")
-
- def __init__(self):
- # Include the Big Switch Extensions path in the api_extensions
- neutron_extensions.append_api_extensions_path(extensions.__path__)
- super(L3RestProxy, self).__init__()
- self.servers = servermanager.ServerPool.get_instance()
-
- @put_context_in_serverpool
- @log.log
- def create_router(self, context, router):
- self._warn_on_state_status(router['router'])
-
- tenant_id = self._get_tenant_id_for_create(context, router["router"])
-
- # set default router rules
- rules = self._get_tenant_default_router_rules(tenant_id)
- router['router']['router_rules'] = rules
-
- with context.session.begin(subtransactions=True):
- # create router in DB
- new_router = super(L3RestProxy, self).create_router(context,
- router)
- mapped_router = self._map_state_and_status(new_router)
- self.servers.rest_create_router(tenant_id, mapped_router)
-
- # return created router
- return new_router
-
- @put_context_in_serverpool
- @log.log
- def update_router(self, context, router_id, router):
- self._warn_on_state_status(router['router'])
-
- orig_router = super(L3RestProxy, self).get_router(context, router_id)
- tenant_id = orig_router["tenant_id"]
- with context.session.begin(subtransactions=True):
- new_router = super(L3RestProxy,
- self).update_router(context, router_id, router)
- router = self._map_state_and_status(new_router)
- # look up the network on this side to save an expensive query on
- # the backend controller.
- if router and router.get('external_gateway_info'):
- router['external_gateway_info']['network'] = self.get_network(
- context.elevated(),
- router['external_gateway_info']['network_id'])
- # update router on network controller
- self.servers.rest_update_router(tenant_id, router, router_id)
-
- # return updated router
- return new_router
-
- @put_context_in_serverpool
- @log.log
- def delete_router(self, context, router_id):
- with context.session.begin(subtransactions=True):
- orig_router = self._get_router(context, router_id)
- tenant_id = orig_router["tenant_id"]
-
- # Ensure that the router is not used
- router_filter = {'router_id': [router_id]}
- fips = self.get_floatingips_count(context.elevated(),
- filters=router_filter)
- if fips:
- raise l3.RouterInUse(router_id=router_id)
-
- device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF
- device_filter = {'device_id': [router_id],
- 'device_owner': [device_owner]}
- ports = self.get_ports_count(context.elevated(),
- filters=device_filter)
- if ports:
- raise l3.RouterInUse(router_id=router_id)
- super(L3RestProxy, self).delete_router(context, router_id)
-
- # delete from network controller
- self.servers.rest_delete_router(tenant_id, router_id)
-
- @put_context_in_serverpool
- @log.log
- def add_router_interface(self, context, router_id, interface_info):
- # Validate args
- router = self._get_router(context, router_id)
- tenant_id = router['tenant_id']
-
- with context.session.begin(subtransactions=True):
- # create interface in DB
- new_intf_info = super(L3RestProxy,
- self).add_router_interface(context,
- router_id,
- interface_info)
- port = self._get_port(context, new_intf_info['port_id'])
- net_id = port['network_id']
- subnet_id = new_intf_info['subnet_id']
- # we will use the port's network id as interface's id
- interface_id = net_id
- intf_details = self._get_router_intf_details(context,
- interface_id,
- subnet_id)
-
- # create interface on the network controller
- self.servers.rest_add_router_interface(tenant_id, router_id,
- intf_details)
- return new_intf_info
-
- @put_context_in_serverpool
- @log.log
- def remove_router_interface(self, context, router_id, interface_info):
- # Validate args
- router = self._get_router(context, router_id)
- tenant_id = router['tenant_id']
-
- # we will first get the interface identifier before deleting in the DB
- if not interface_info:
- msg = _("Either subnet_id or port_id must be specified")
- raise exceptions.BadRequest(resource='router', msg=msg)
- if 'port_id' in interface_info:
- port = self._get_port(context, interface_info['port_id'])
- interface_id = port['network_id']
- elif 'subnet_id' in interface_info:
- subnet = self._get_subnet(context, interface_info['subnet_id'])
- interface_id = subnet['network_id']
- else:
- msg = _("Either subnet_id or port_id must be specified")
- raise exceptions.BadRequest(resource='router', msg=msg)
-
- with context.session.begin(subtransactions=True):
- # remove router interface in DB
- del_ret = super(L3RestProxy,
- self).remove_router_interface(context,
- router_id,
- interface_info)
-
- # remove the interface on the network controller
- self.servers.rest_remove_router_interface(tenant_id, router_id,
- interface_id)
- return del_ret
-
- @put_context_in_serverpool
- @log.log
- def create_floatingip(self, context, floatingip):
- with context.session.begin(subtransactions=True):
- # create floatingip in DB
- new_fl_ip = super(L3RestProxy,
- self).create_floatingip(context, floatingip)
-
- # create floatingip on the network controller
- try:
- if 'floatingip' in self.servers.get_capabilities():
- self.servers.rest_create_floatingip(
- new_fl_ip['tenant_id'], new_fl_ip)
- else:
- self._send_floatingip_update(context)
- except servermanager.RemoteRestError as e:
- with excutils.save_and_reraise_exception():
- LOG.error(
- _LE("NeutronRestProxyV2: Unable to create remote "
- "floating IP: %s"), e)
- # return created floating IP
- return new_fl_ip
-
- @put_context_in_serverpool
- @log.log
- def update_floatingip(self, context, id, floatingip):
- with context.session.begin(subtransactions=True):
- # update floatingip in DB
- new_fl_ip = super(L3RestProxy,
- self).update_floatingip(context, id, floatingip)
-
- # update floating IP on the network controller
- # (or fall back to a full network update)
- if 'floatingip' in self.servers.get_capabilities():
- self.servers.rest_update_floatingip(new_fl_ip['tenant_id'],
- new_fl_ip, id)
- else:
- self._send_floatingip_update(context)
- return new_fl_ip
-
- @put_context_in_serverpool
- @log.log
- def delete_floatingip(self, context, id):
- with context.session.begin(subtransactions=True):
- # delete floating IP in DB
- old_fip = super(L3RestProxy, self).get_floatingip(context, id)
- super(L3RestProxy, self).delete_floatingip(context, id)
-
- # delete floating IP on the network controller
- # (or fall back to a full network update)
- if 'floatingip' in self.servers.get_capabilities():
- self.servers.rest_delete_floatingip(old_fip['tenant_id'], id)
- else:
- self._send_floatingip_update(context)
-
- @put_context_in_serverpool
- @log.log
- def disassociate_floatingips(self, context, port_id, do_notify=True):
- router_ids = super(L3RestProxy, self).disassociate_floatingips(
- context, port_id, do_notify=do_notify)
- self._send_floatingip_update(context)
- return router_ids
-
- # Overrides the l3_db method: the original calls self.delete_floatingip(),
- # which in turn calls self.delete_port(), which is locked with
- # 'bsn-port-barrier'
- @put_context_in_serverpool
- def delete_disassociated_floatingips(self, context, network_id):
- query = self._model_query(context, l3_db.FloatingIP)
- query = query.filter_by(floating_network_id=network_id,
- fixed_port_id=None,
- router_id=None)
- for fip in query:
- context.session.delete(fip)
- self._delete_port(context.elevated(), fip['floating_port_id'])
-
- def _send_floatingip_update(self, context):
- try:
- ext_net_id = self.get_external_network_id(context)
- if ext_net_id:
- # Use the elevated state of the context for the ext_net query
- admin_context = context.elevated()
- ext_net = super(L3RestProxy,
- self).get_network(admin_context, ext_net_id)
- # update external network on network controller
- self._send_update_network(ext_net, admin_context)
- except exceptions.TooManyExternalNetworks:
- # get_external_network_id raises an error when multiple external
- # networks are detected, which this plugin does not support
- LOG.error(_LE("NeutronRestProxyV2: too many external networks"))
-
- def _get_tenant_default_router_rules(self, tenant):
- rules = cfg.CONF.ROUTER.tenant_default_router_rule
- default_set = []
- tenant_set = []
- for rule in rules:
- items = rule.split(':')
- # put an empty string on the end if nexthops wasn't specified
- if len(items) < ROUTER_RULE_COMPONENT_COUNT:
- items.append('')
- try:
- (tenant_id, source, destination, action, nexthops) = items
- except ValueError:
- continue
- parsed_rule = {'source': source,
- 'destination': destination, 'action': action,
- 'nexthops': [hop for hop in nexthops.split(',')
- if hop]}
- if tenant_id == '*':
- default_set.append(parsed_rule)
- if tenant_id == tenant:
- tenant_set.append(parsed_rule)
- return tenant_set if tenant_set else default_set
+L3RestProxy = l3_router_plugin.L3RestProxy
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
-NeutronRestProxy provides a generic neutron plugin that translates all plugin
-function calls into equivalent authenticated REST calls to a set of redundant
-external network controllers. It also keeps a persistent store of all neutron
-state so the external controller(s) can be re-synced if required.
-
-The local state also allows for local responses and fast-fail semantics
-whenever a request can be answered from the persistent store alone.
-
-Network controller specific code is decoupled from this plugin and expected
-to reside on the controller itself (via the REST interface).
-
-This allows for:
- - independent authentication and redundancy schemes between neutron and the
- network controller
- - independent upgrade/development cycles between neutron and the
- controller, since proxy-code upgrades follow the neutron release cycle
- while controller-specific code upgrades follow the controller's own cycle
- - ability to sync the controller with neutron for independent recovery/reset
-
-The external REST API used by the proxy is the same API as defined for
-neutron (JSON subset) with some additional parameters (gateway on
-network-create and macaddr on port-attach), plus an additional PUT to do a
-bulk dump of all persistent data.
+See http://github.com/stackforge/networking-bigswitch for more information.
"""
+from bsnstacklib.plugins.bigswitch import plugin
-import copy
-import functools
-import httplib
-import re
-
-import eventlet
-from oslo_config import cfg
-import oslo_messaging
-from oslo_utils import importutils
-from sqlalchemy.orm import exc as sqlexc
-
-from neutron.agent import securitygroups_rpc as sg_rpc
-from neutron.api import extensions as neutron_extensions
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.api.rpc.handlers import dhcp_rpc
-from neutron.api.rpc.handlers import metadata_rpc
-from neutron.api.rpc.handlers import securitygroups_rpc
-from neutron.common import constants as const
-from neutron.common import exceptions
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.common import utils
-from neutron import context as qcontext
-from neutron.db import agents_db
-from neutron.db import agentschedulers_db
-from neutron.db import allowedaddresspairs_db as addr_pair_db
-from neutron.db import api as db
-from neutron.db import db_base_plugin_v2
-from neutron.db import external_net_db
-from neutron.db import extradhcpopt_db
-from neutron.db import l3_db
-from neutron.db import models_v2
-from neutron.db import securitygroups_db as sg_db
-from neutron.db import securitygroups_rpc_base as sg_db_rpc
-from neutron.extensions import allowedaddresspairs as addr_pair
-from neutron.extensions import external_net
-from neutron.extensions import extra_dhcp_opt as edo_ext
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.bigswitch import config as pl_config
-from neutron.plugins.bigswitch.db import porttracker_db
-from neutron.plugins.bigswitch import extensions
-from neutron.plugins.bigswitch import servermanager
-from neutron.plugins.bigswitch import version
-from neutron.plugins.common import constants as pconst
-
-LOG = logging.getLogger(__name__)
-
-SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin')
-METADATA_SERVER_IP = '169.254.169.254'
-
-
-class AgentNotifierApi(sg_rpc.SecurityGroupAgentRpcApiMixin):
-
- def __init__(self, topic):
- self.topic = topic
- target = oslo_messaging.Target(topic=topic, version='1.0')
- self.client = n_rpc.get_client(target)
-
- def port_update(self, context, port):
- topic_port_update = topics.get_topic_name(self.client.target.topic,
- topics.PORT, topics.UPDATE)
- cctxt = self.client.prepare(fanout=True, topic=topic_port_update)
- cctxt.cast(context, 'port_update', port=port)
-
-
-class SecurityGroupServerRpcMixin(sg_db_rpc.SecurityGroupServerRpcMixin):
-
- def get_port_from_device(self, device):
- port_id = re.sub(r"^%s" % const.TAP_DEVICE_PREFIX, "", device)
- port = self.get_port_and_sgs(port_id)
- if port:
- port['device'] = device
- return port
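# For example, the agent reports devices by their tap name, which embeds a
# port UUID fragment (hypothetical ID; 'tap' is const.TAP_DEVICE_PREFIX):
import re
device = 'tap1a2b3c4d-5e'
port_id = re.sub(r'^tap', '', device)
assert port_id == '1a2b3c4d-5e'  # then matched via Port.id.startswith()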
-
- def get_port_and_sgs(self, port_id):
- """Get port from database with security group info."""
-
- LOG.debug("get_port_and_sgs() called for port_id %s", port_id)
- session = db.get_session()
- sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
-
- with session.begin(subtransactions=True):
- query = session.query(
- models_v2.Port,
- sg_db.SecurityGroupPortBinding.security_group_id
- )
- query = query.outerjoin(sg_db.SecurityGroupPortBinding,
- models_v2.Port.id == sg_binding_port)
- query = query.filter(models_v2.Port.id.startswith(port_id))
- port_and_sgs = query.all()
- if not port_and_sgs:
- return
- port = port_and_sgs[0][0]
- plugin = manager.NeutronManager.get_plugin()
- port_dict = plugin._make_port_dict(port)
- port_dict['security_groups'] = [
- sg_id for port_, sg_id in port_and_sgs if sg_id]
- port_dict['security_group_rules'] = []
- port_dict['security_group_source_groups'] = []
- port_dict['fixed_ips'] = [ip['ip_address']
- for ip in port['fixed_ips']]
- return port_dict
-
-
-class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
- external_net_db.External_net_db_mixin):
-
- supported_extension_aliases = ["binding"]
- servers = None
-
- @property
- def l3_plugin(self):
- return manager.NeutronManager.get_service_plugins().get(
- pconst.L3_ROUTER_NAT)
-
- def _get_all_data(self, get_ports=True, get_floating_ips=True,
- get_routers=True):
- admin_context = qcontext.get_admin_context()
- networks = []
- # this method is used by the ML2 driver so it can't directly invoke
- # the self.get_(ports|networks) methods
- plugin = manager.NeutronManager.get_plugin()
- all_networks = plugin.get_networks(admin_context) or []
- for net in all_networks:
- mapped_network = self._get_mapped_network_with_subnets(net)
- flips_n_ports = mapped_network
- if get_floating_ips:
- flips_n_ports = self._get_network_with_floatingips(
- mapped_network)
-
- if get_ports:
- ports = []
- net_filter = {'network_id': [net.get('id')]}
- net_ports = plugin.get_ports(admin_context,
- filters=net_filter) or []
- for port in net_ports:
- mapped_port = self._map_state_and_status(port)
- mapped_port['attachment'] = {
- 'id': port.get('device_id'),
- 'mac': port.get('mac_address'),
- }
- mapped_port = self._extend_port_dict_binding(admin_context,
- mapped_port)
- ports.append(mapped_port)
- flips_n_ports['ports'] = ports
-
- if flips_n_ports:
- networks.append(flips_n_ports)
-
- data = {'networks': networks}
-
- if get_routers and self.l3_plugin:
- routers = []
- all_routers = self.l3_plugin.get_routers(admin_context) or []
- for router in all_routers:
- interfaces = []
- mapped_router = self._map_state_and_status(router)
- router_filter = {
- 'device_owner': [const.DEVICE_OWNER_ROUTER_INTF],
- 'device_id': [router.get('id')]
- }
- router_ports = self.get_ports(admin_context,
- filters=router_filter) or []
- for port in router_ports:
- net_id = port.get('network_id')
- subnet_id = port['fixed_ips'][0]['subnet_id']
- intf_details = self._get_router_intf_details(admin_context,
- net_id,
- subnet_id)
- interfaces.append(intf_details)
- mapped_router['interfaces'] = interfaces
-
- routers.append(mapped_router)
-
- data.update({'routers': routers})
- return data
-
- def _send_all_data(self, send_ports=True, send_floating_ips=True,
- send_routers=True, timeout=None,
- triggered_by_tenant=None):
- """Pushes all data to network ctrl (networks/ports, ports/attachments).
-
- This gives the controller an option to re-sync its persistent store
- with neutron's current view of that data.
- """
- data = self._get_all_data(send_ports, send_floating_ips, send_routers)
- data['triggered_by_tenant'] = triggered_by_tenant
- errstr = _("Unable to update remote topology: %s")
- return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH,
- data, errstr, timeout=timeout)
-
- def _get_network_with_floatingips(self, network, context=None):
- if context is None:
- context = qcontext.get_admin_context()
-
- net_id = network['id']
- net_filter = {'floating_network_id': [net_id]}
- if self.l3_plugin:
- fl_ips = self.l3_plugin.get_floatingips(context,
- filters=net_filter) or []
- network['floatingips'] = fl_ips
-
- return network
-
- def _get_all_subnets_json_for_network(self, net_id, context=None):
- if context is None:
- context = qcontext.get_admin_context()
- # start a sub-transaction to avoid breaking parent transactions
- with context.session.begin(subtransactions=True):
- subnets = self._get_subnets_by_network(context,
- net_id)
- subnets_details = []
- if subnets:
- for subnet in subnets:
- subnet_dict = self._make_subnet_dict(subnet)
- mapped_subnet = self._map_state_and_status(subnet_dict)
- subnets_details.append(mapped_subnet)
-
- return subnets_details
-
- def _get_mapped_network_with_subnets(self, network, context=None):
- # if context is not provided, admin context is used
- if context is None:
- context = qcontext.get_admin_context()
- network = self._map_state_and_status(network)
- subnets = self._get_all_subnets_json_for_network(network['id'],
- context)
- network['subnets'] = subnets
- for subnet in (subnets or []):
- if subnet['gateway_ip']:
- # FIX: For backward compatibility with wire protocol
- network['gateway'] = subnet['gateway_ip']
- break
- else:
- network['gateway'] = ''
- network[external_net.EXTERNAL] = self._network_is_external(
- context, network['id'])
- # include ML2 segmentation types
- network['segmentation_types'] = getattr(self, "segmentation_types", "")
- return network
-
- def _send_create_network(self, network, context=None):
- tenant_id = network['tenant_id']
- mapped_network = self._get_mapped_network_with_subnets(network,
- context)
- self.servers.rest_create_network(tenant_id, mapped_network)
-
- def _send_update_network(self, network, context=None):
- net_id = network['id']
- tenant_id = network['tenant_id']
- mapped_network = self._get_mapped_network_with_subnets(network,
- context)
- net_fl_ips = self._get_network_with_floatingips(mapped_network,
- context)
- self.servers.rest_update_network(tenant_id, net_id, net_fl_ips)
-
- def _send_delete_network(self, network, context=None):
- net_id = network['id']
- tenant_id = network['tenant_id']
- self.servers.rest_delete_network(tenant_id, net_id)
-
- def _map_state_and_status(self, resource):
- resource = copy.copy(resource)
-
- resource['state'] = ('UP' if resource.pop('admin_state_up',
- True) else 'DOWN')
- resource.pop('status', None)
-
- return resource
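# For example, the wire format collapses Neutron's admin_state_up/status
# pair into a single 'state' field; a standalone sketch of the mapping:
resource = {'id': 'net-1', 'admin_state_up': False, 'status': 'ACTIVE'}
resource['state'] = 'UP' if resource.pop('admin_state_up', True) else 'DOWN'
resource.pop('status', None)
assert resource == {'id': 'net-1', 'state': 'DOWN'}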
-
- def _warn_on_state_status(self, resource):
- if resource.get('admin_state_up', True) is False:
- LOG.warning(_LW("Setting admin_state_up=False is not supported "
- "in this plugin version. Ignoring setting for "
- "resource: %s"), resource)
-
- if 'status' in resource:
- if resource['status'] != const.NET_STATUS_ACTIVE:
- LOG.warning(_LW("Operational status is internally set by the "
- "plugin. Ignoring setting status=%s."),
- resource['status'])
-
- def _get_router_intf_details(self, context, intf_id, subnet_id):
-
- # we will use the network id as interface's id
- net_id = intf_id
- network = self.get_network(context, net_id)
- subnet = self.get_subnet(context, subnet_id)
- mapped_network = self._get_mapped_network_with_subnets(network)
- mapped_subnet = self._map_state_and_status(subnet)
-
- data = {
- 'id': intf_id,
- "network": mapped_network,
- "subnet": mapped_subnet
- }
-
- return data
-
- def _extend_port_dict_binding(self, context, port):
- cfg_vif_type = cfg.CONF.NOVA.vif_type.lower()
- if cfg_vif_type not in (portbindings.VIF_TYPE_OVS,
- portbindings.VIF_TYPE_IVS):
- LOG.warning(_LW("Unrecognized vif_type in configuration "
- "[%s]. Defaulting to ovs."),
- cfg_vif_type)
- cfg_vif_type = portbindings.VIF_TYPE_OVS
- # In ML2, the host_id is already populated
- if portbindings.HOST_ID in port:
- hostid = port[portbindings.HOST_ID]
- elif 'id' in port:
- hostid = porttracker_db.get_port_hostid(context, port['id'])
- else:
- hostid = None
- if hostid:
- port[portbindings.HOST_ID] = hostid
- override = self._check_hostvif_override(hostid)
- if override:
- cfg_vif_type = override
- port[portbindings.VIF_TYPE] = cfg_vif_type
-
- sg_enabled = sg_rpc.is_firewall_enabled()
- port[portbindings.VIF_DETAILS] = {
- # TODO(rkukura): Replace with new VIF security details
- portbindings.CAP_PORT_FILTER:
- 'security-group' in self.supported_extension_aliases,
- portbindings.OVS_HYBRID_PLUG: sg_enabled
- }
- return port
-
- def _check_hostvif_override(self, hostid):
- for v in cfg.CONF.NOVA.vif_types:
- if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []):
- return v
- return False
-
- def _get_port_net_tenantid(self, context, port):
- net = super(NeutronRestProxyV2Base,
- self).get_network(context, port["network_id"])
- return net['tenant_id']
-
- def async_port_create(self, tenant_id, net_id, port):
- try:
- self.servers.rest_create_port(tenant_id, net_id, port)
- except servermanager.RemoteRestError as e:
- # 404 should never be received on a port create unless
- # there are inconsistencies between the data in neutron
- # and the data in the backend.
- # Run a sync to get it consistent.
- if (cfg.CONF.RESTPROXY.auto_sync_on_failure and
- e.status == httplib.NOT_FOUND and
- servermanager.NXNETWORK in e.reason):
- LOG.error(_LE("Iconsistency with backend controller "
- "triggering full synchronization."))
- # args depend on if we are operating in ML2 driver
- # or as the full plugin
- topoargs = self.servers.get_topo_function_args
- self._send_all_data(
- send_ports=topoargs['get_ports'],
- send_floating_ips=topoargs['get_floating_ips'],
- send_routers=topoargs['get_routers'],
- triggered_by_tenant=tenant_id
- )
- # If the full sync worked, the port will be created
- # on the controller so it can be safely marked as active
- else:
- # Any errors that don't result in a successful auto-sync
- # require that the port be placed into the error state.
- LOG.error(
- _LE("NeutronRestProxyV2: Unable to create port: %s"), e)
- try:
- self._set_port_status(port['id'], const.PORT_STATUS_ERROR)
- except exceptions.PortNotFound:
- # If port is already gone from DB and there was an error
- # creating on the backend, everything is already consistent
- pass
- return
- new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP'
- else const.PORT_STATUS_DOWN)
- try:
- self._set_port_status(port['id'], new_status)
- except exceptions.PortNotFound:
- # This port was deleted before the create made it to the
- # controller, so the earlier delete request hit a port that did
- # not yet exist there; delete the late-created backend port now.
- self.servers.rest_delete_port(tenant_id, net_id, port['id'])
-
- # NOTE(kevinbenton): workaround for eventlet/mysql deadlock
- @utils.synchronized('bsn-port-barrier')
- def _set_port_status(self, port_id, status):
- session = db.get_session()
- try:
- port = session.query(models_v2.Port).filter_by(id=port_id).one()
- port['status'] = status
- session.flush()
- except sqlexc.NoResultFound:
- raise exceptions.PortNotFound(port_id=port_id)
-
-
-def put_context_in_serverpool(f):
- @functools.wraps(f)
- def wrapper(self, context, *args, **kwargs):
- # core plugin: context is top level object
- # ml2: keeps context in _plugin_context
- self.servers.set_context(getattr(context, '_plugin_context', context))
- return f(self, context, *args, **kwargs)
- return wrapper
-
-
-class NeutronRestProxyV2(NeutronRestProxyV2Base,
- addr_pair_db.AllowedAddressPairsMixin,
- extradhcpopt_db.ExtraDhcpOptMixin,
- agentschedulers_db.DhcpAgentSchedulerDbMixin,
- SecurityGroupServerRpcMixin):
-
- _supported_extension_aliases = ["external-net", "binding",
- "extra_dhcp_opt", "quotas",
- "dhcp_agent_scheduler", "agent",
- "security-group", "allowed-address-pairs"]
-
- @property
- def supported_extension_aliases(self):
- if not hasattr(self, '_aliases'):
- aliases = self._supported_extension_aliases[:]
- sg_rpc.disable_security_group_extension_by_config(aliases)
- self._aliases = aliases
- return self._aliases
-
- def __init__(self):
- super(NeutronRestProxyV2, self).__init__()
- LOG.info(_LI('NeutronRestProxy: Starting plugin. Version=%s'),
- version.version_string_with_vcs())
- pl_config.register_config()
- self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)
-
- # Include the Big Switch Extensions path in the api_extensions
- neutron_extensions.append_api_extensions_path(extensions.__path__)
-
- self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
-
- # init network ctrl connections
- self.servers = servermanager.ServerPool()
- self.servers.get_topo_function = self._get_all_data
- self.servers.get_topo_function_args = {'get_ports': True,
- 'get_floating_ips': True,
- 'get_routers': True}
-
- self.network_scheduler = importutils.import_object(
- cfg.CONF.network_scheduler_driver
- )
-
- # setup rpc for security and DHCP agents
- self._setup_rpc()
-
- if cfg.CONF.RESTPROXY.sync_data:
- self._send_all_data()
-
- self.start_periodic_dhcp_agent_status_check()
- LOG.debug("NeutronRestProxyV2: initialization done")
-
- def _setup_rpc(self):
- self.conn = n_rpc.create_connection(new=True)
- self.topic = topics.PLUGIN
- self.notifier = AgentNotifierApi(topics.AGENT)
- # init dhcp agent support
- self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
- self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
- self._dhcp_agent_notifier
- )
- self.endpoints = [securitygroups_rpc.SecurityGroupServerRpcCallback(),
- dhcp_rpc.DhcpRpcCallback(),
- agents_db.AgentExtRpcCallback(),
- metadata_rpc.MetadataRpcCallback()]
- self.conn.create_consumer(self.topic, self.endpoints,
- fanout=False)
- # Consume from all consumers in threads
- self.conn.consume_in_threads()
-
- @put_context_in_serverpool
- def create_network(self, context, network):
- """Create a network.
-
- Network represents an L2 network segment which can have a set of
- subnets and ports associated with it.
-
- :param context: neutron api request context
- :param network: dictionary describing the network
-
- :returns: a sequence of mappings with the following signature:
- {
- "id": UUID representing the network.
- "name": Human-readable name identifying the network.
- "tenant_id": Owner of network. NOTE: only admin user can specify
- a tenant_id other than its own.
- "admin_state_up": Sets admin state of network.
- If down, the network does not forward packets.
- "status": Indicates whether network is currently operational
- (values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
- "subnets": Subnets associated with this network.
- }
-
- :raises: RemoteRestError
- """
- LOG.debug("NeutronRestProxyV2: create_network() called")
-
- self._warn_on_state_status(network['network'])
-
- with context.session.begin(subtransactions=True):
- self._ensure_default_security_group(
- context,
- network['network']["tenant_id"]
- )
- # create network in DB
- new_net = super(NeutronRestProxyV2, self).create_network(context,
- network)
- self._process_l3_create(context, new_net, network['network'])
- # create network on the network controller
- self._send_create_network(new_net, context)
-
- # return created network
- return new_net
-
- @put_context_in_serverpool
- def update_network(self, context, net_id, network):
- """Updates the properties of a particular Virtual Network.
-
- :param context: neutron api request context
- :param net_id: uuid of the network to update
- :param network: dictionary describing the updates
-
- :returns: a sequence of mappings with the following signature:
- {
- "id": UUID representing the network.
- "name": Human-readable name identifying the network.
- "tenant_id": Owner of network. NOTE: only admin user can
- specify a tenant_id other than its own.
- "admin_state_up": Sets admin state of network.
- If down, the network does not forward packets.
- "status": Indicates whether network is currently operational
- (values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
- "subnets": Subnets associated with this network.
- }
-
- :raises: exceptions.NetworkNotFound
- :raises: RemoteRestError
- """
- LOG.debug("NeutronRestProxyV2.update_network() called")
-
- self._warn_on_state_status(network['network'])
-
- session = context.session
- with session.begin(subtransactions=True):
- new_net = super(NeutronRestProxyV2, self).update_network(
- context, net_id, network)
- self._process_l3_update(context, new_net, network['network'])
-
- # update network on network controller
- self._send_update_network(new_net, context)
- return new_net
-
- # NOTE(kevinbenton): workaround for eventlet/mysql deadlock
- @utils.synchronized('bsn-port-barrier')
- @put_context_in_serverpool
- def delete_network(self, context, net_id):
- """Delete a network.
- :param context: neutron api request context
- :param id: UUID representing the network to delete.
-
- :returns: None
-
- :raises: exceptions.NetworkInUse
- :raises: exceptions.NetworkNotFound
- :raises: RemoteRestError
- """
- LOG.debug("NeutronRestProxyV2: delete_network() called")
-
- # Validate args
- orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id)
- with context.session.begin(subtransactions=True):
- self._process_l3_delete(context, net_id)
- ret_val = super(NeutronRestProxyV2, self).delete_network(context,
- net_id)
- self._send_delete_network(orig_net, context)
- return ret_val
-
- @put_context_in_serverpool
- def create_port(self, context, port):
- """Create a port, which is a connection point of a device
- (e.g., a VM NIC) to attach an L2 Neutron network.
- :param context: neutron api request context
- :param port: dictionary describing the port
-
- :returns:
- {
- "id": uuid representing the port.
- "network_id": uuid of network.
- "tenant_id": tenant_id
- "mac_address": mac address to use on this port.
- "admin_state_up": Sets admin state of port. if down, port
- does not forward packets.
- "status": dicates whether port is currently operational
- (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
- "fixed_ips": list of subnet IDs and IP addresses to be used on
- this port
- "device_id": identifies the device (e.g., virtual server) using
- this port.
- }
-
- :raises: exceptions.NetworkNotFound
- :raises: exceptions.StateInvalid
- :raises: RemoteRestError
- """
- LOG.debug("NeutronRestProxyV2: create_port() called")
-
- # Update DB in new session so exceptions rollback changes
- with context.session.begin(subtransactions=True):
- self._ensure_default_security_group_on_port(context, port)
- sgids = self._get_security_groups_on_port(context, port)
- # Non-router port status is set to pending; it is then updated
- # after the async REST call completes. Router ports are synchronous.
- if port['port']['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
- port['port']['status'] = const.PORT_STATUS_ACTIVE
- else:
- port['port']['status'] = const.PORT_STATUS_BUILD
- dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
- new_port = super(NeutronRestProxyV2, self).create_port(context,
- port)
- self._process_port_create_security_group(context, new_port, sgids)
- if (portbindings.HOST_ID in port['port']
- and 'id' in new_port):
- host_id = port['port'][portbindings.HOST_ID]
- porttracker_db.put_port_hostid(context, new_port['id'],
- host_id)
- new_port[addr_pair.ADDRESS_PAIRS] = (
- self._process_create_allowed_address_pairs(
- context, new_port,
- port['port'].get(addr_pair.ADDRESS_PAIRS)))
- self._process_port_create_extra_dhcp_opts(context, new_port,
- dhcp_opts)
- new_port = self._extend_port_dict_binding(context, new_port)
- net = super(NeutronRestProxyV2,
- self).get_network(context, new_port["network_id"])
- if self.add_meta_server_route:
- if new_port['device_owner'] == const.DEVICE_OWNER_DHCP:
- destination = METADATA_SERVER_IP + '/32'
- self._add_host_route(context, destination, new_port)
-
- # create on network ctrl
- mapped_port = self._map_state_and_status(new_port)
- # ports have to be created synchronously when creating a router
- # port since adding router interfaces is a multi-call process
- if mapped_port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
- self.servers.rest_create_port(net["tenant_id"],
- new_port["network_id"],
- mapped_port)
- else:
- self.evpool.spawn_n(self.async_port_create, net["tenant_id"],
- new_port["network_id"], mapped_port)
- self.notify_security_groups_member_updated(context, new_port)
- return new_port
-
- def get_port(self, context, id, fields=None):
- with context.session.begin(subtransactions=True):
- port = super(NeutronRestProxyV2, self).get_port(context, id,
- fields)
- self._extend_port_dict_binding(context, port)
- return self._fields(port, fields)
-
- def get_ports(self, context, filters=None, fields=None):
- with context.session.begin(subtransactions=True):
- ports = super(NeutronRestProxyV2, self).get_ports(context, filters,
- fields)
- for port in ports:
- self._extend_port_dict_binding(context, port)
- return [self._fields(port, fields) for port in ports]
-
- @put_context_in_serverpool
- def update_port(self, context, port_id, port):
- """Update values of a port.
-
- :param context: neutron api request context
-        :param port_id: UUID representing the port to update.
- :param port: dictionary with keys indicating fields to update.
-
- :returns: a mapping sequence with the following signature:
- {
- "id": uuid representing the port.
- "network_id": uuid of network.
- "tenant_id": tenant_id
- "mac_address": mac address to use on this port.
- "admin_state_up": sets admin state of port. if down, port
- does not forward packets.
- "status": dicates whether port is currently operational
- (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
- "fixed_ips": list of subnet IDs and IP addresses to be used on
- this port
- "device_id": identifies the device (e.g., virtual server) using
- this port.
- }
-
- :raises: exceptions.StateInvalid
- :raises: exceptions.PortNotFound
- :raises: RemoteRestError
- """
- LOG.debug("NeutronRestProxyV2: update_port() called")
-
- self._warn_on_state_status(port['port'])
-
- # Validate Args
- orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)
- with context.session.begin(subtransactions=True):
- # Update DB
- new_port = super(NeutronRestProxyV2,
- self).update_port(context, port_id, port)
- ctrl_update_required = False
- if addr_pair.ADDRESS_PAIRS in port['port']:
- ctrl_update_required |= (
- self.update_address_pairs_on_port(context, port_id, port,
- orig_port, new_port))
- self._update_extra_dhcp_opts_on_port(context, port_id, port,
- new_port)
- old_host_id = porttracker_db.get_port_hostid(context,
- orig_port['id'])
- if (portbindings.HOST_ID in port['port']
- and 'id' in new_port):
- host_id = port['port'][portbindings.HOST_ID]
- porttracker_db.put_port_hostid(context, new_port['id'],
- host_id)
- if old_host_id != host_id:
- ctrl_update_required = True
-
- if (new_port.get("device_id") != orig_port.get("device_id") and
- orig_port.get("device_id")):
- ctrl_update_required = True
-
- if ctrl_update_required:
- # tenant_id must come from network in case network is shared
- net_tenant_id = self._get_port_net_tenantid(context, new_port)
- new_port = self._extend_port_dict_binding(context, new_port)
- mapped_port = self._map_state_and_status(new_port)
- self.servers.rest_update_port(net_tenant_id,
- new_port["network_id"],
- mapped_port)
- need_port_update_notify = self.update_security_group_on_port(
- context, port_id, port, orig_port, new_port)
- need_port_update_notify |= self.is_security_group_member_updated(
- context, orig_port, new_port)
-
- if need_port_update_notify:
- self.notifier.port_update(context, new_port)
-
- return new_port
-
- # NOTE(kevinbenton): workaround for eventlet/mysql deadlock
- @utils.synchronized('bsn-port-barrier')
- @put_context_in_serverpool
- def delete_port(self, context, port_id, l3_port_check=True):
- """Delete a port.
- :param context: neutron api request context
- :param id: UUID representing the port to delete.
-
- :raises: exceptions.PortInUse
- :raises: exceptions.PortNotFound
- :raises: exceptions.NetworkNotFound
- :raises: RemoteRestError
- """
- LOG.debug("NeutronRestProxyV2: delete_port() called")
-
-        # if needed, check to see if this is a port owned by
-        # an l3-router. If so, we should prevent deletion.
- if l3_port_check and self.l3_plugin:
- self.l3_plugin.prevent_l3_port_deletion(context, port_id)
- with context.session.begin(subtransactions=True):
- if self.l3_plugin:
- router_ids = self.l3_plugin.disassociate_floatingips(
- context, port_id, do_notify=False)
- port = super(NeutronRestProxyV2, self).get_port(context, port_id)
- # Tenant ID must come from network in case the network is shared
- tenid = self._get_port_net_tenantid(context, port)
- self._delete_port(context, port_id)
- self.servers.rest_delete_port(tenid, port['network_id'], port_id)
-
- if self.l3_plugin:
- # now that we've left db transaction, we are safe to notify
- self.l3_plugin.notify_routers_updated(context, router_ids)
-
- @put_context_in_serverpool
- def create_subnet(self, context, subnet):
- LOG.debug("NeutronRestProxyV2: create_subnet() called")
-
- self._warn_on_state_status(subnet['subnet'])
-
- with context.session.begin(subtransactions=True):
- # create subnet in DB
- new_subnet = super(NeutronRestProxyV2,
- self).create_subnet(context, subnet)
- net_id = new_subnet['network_id']
- orig_net = super(NeutronRestProxyV2,
- self).get_network(context, net_id)
- # update network on network controller
- self._send_update_network(orig_net, context)
- return new_subnet
-
- @put_context_in_serverpool
- def update_subnet(self, context, id, subnet):
- LOG.debug("NeutronRestProxyV2: update_subnet() called")
-
- self._warn_on_state_status(subnet['subnet'])
-
- with context.session.begin(subtransactions=True):
- # update subnet in DB
- new_subnet = super(NeutronRestProxyV2,
- self).update_subnet(context, id, subnet)
- net_id = new_subnet['network_id']
- orig_net = super(NeutronRestProxyV2,
- self).get_network(context, net_id)
- # update network on network controller
- self._send_update_network(orig_net, context)
- return new_subnet
-
- @put_context_in_serverpool
- def delete_subnet(self, context, id):
- LOG.debug("NeutronRestProxyV2: delete_subnet() called")
- orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id)
- net_id = orig_subnet['network_id']
- with context.session.begin(subtransactions=True):
- # delete subnet in DB
- super(NeutronRestProxyV2, self).delete_subnet(context, id)
- orig_net = super(NeutronRestProxyV2, self).get_network(context,
- net_id)
- # update network on network controller - exception will rollback
-            self._send_update_network(orig_net, context)
-
-    def _add_host_route(self, context, destination, port):
- subnet = {}
- for fixed_ip in port['fixed_ips']:
- subnet_id = fixed_ip['subnet_id']
- nexthop = fixed_ip['ip_address']
- subnet['host_routes'] = [{'destination': destination,
- 'nexthop': nexthop}]
- updated_subnet = self.update_subnet(context,
- subnet_id,
- {'subnet': subnet})
- payload = {'subnet': updated_subnet}
- self._dhcp_agent_notifier.notify(context, payload,
- 'subnet.update.end')
- LOG.debug("Adding host route: ")
- LOG.debug("Destination:%(dst)s nexthop:%(next)s",
- {'dst': destination, 'next': nexthop})
+NeutronRestProxyV2 = plugin.NeutronRestProxyV2
--- /dev/null
+bsnstacklib>=2015.1,<2015.2
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
import sqlalchemy as sa
from sqlalchemy import orm
-from neutron.db import l3_db
from neutron.db import model_base
-from neutron.openstack.common import log as logging
-from neutron.plugins.bigswitch.extensions import routerrule
-
-
-LOG = logging.getLogger(__name__)
class RouterRule(model_base.BASEV2):
    id = sa.Column(sa.Integer, primary_key=True)
    source = sa.Column(sa.String(64), nullable=False)
    destination = sa.Column(sa.String(64), nullable=False)
    nexthops = orm.relationship('NextHop', cascade='all,delete')
    action = sa.Column(sa.String(10), nullable=False)
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey('routers.id',
                                        ondelete="CASCADE"))


class NextHop(model_base.BASEV2):
    rule_id = sa.Column(sa.Integer,
                        sa.ForeignKey('routerrules.id',
                                      ondelete="CASCADE"),
                        primary_key=True)
    nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True)
-
-
-class RouterRule_db_mixin(l3_db.L3_NAT_db_mixin):
- """Mixin class to support route rule configuration on a router."""
- def update_router(self, context, id, router):
- r = router['router']
- with context.session.begin(subtransactions=True):
- router_db = self._get_router(context, id)
- if 'router_rules' in r:
- self._update_router_rules(context,
- router_db,
- r['router_rules'])
- updated = super(RouterRule_db_mixin, self).update_router(
- context, id, router)
- updated['router_rules'] = self._get_router_rules_by_router_id(
- context, id)
-
- return updated
-
- def create_router(self, context, router):
- r = router['router']
- with context.session.begin(subtransactions=True):
- router_db = super(RouterRule_db_mixin, self).create_router(
- context, router)
- if 'router_rules' in r:
- self._update_router_rules(context,
- router_db,
- r['router_rules'])
- else:
- LOG.debug('No rules in router')
- router_db['router_rules'] = self._get_router_rules_by_router_id(
- context, router_db['id'])
-
- return router_db
-
- def _update_router_rules(self, context, router, rules):
- if len(rules) > cfg.CONF.ROUTER.max_router_rules:
- raise routerrule.RulesExhausted(
- router_id=router['id'],
- quota=cfg.CONF.ROUTER.max_router_rules)
- del_context = context.session.query(RouterRule)
- del_context.filter_by(router_id=router['id']).delete()
- context.session.expunge_all()
- LOG.debug('Updating router rules to %s', rules)
- for rule in rules:
- router_rule = RouterRule(
- router_id=router['id'],
- destination=rule['destination'],
- source=rule['source'],
- action=rule['action'])
- router_rule.nexthops = [NextHop(nexthop=hop)
- for hop in rule['nexthops']]
- context.session.add(router_rule)
- context.session.flush()
-
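For reference, a hypothetical `router_rules` payload in the shape `_update_router_rules` consumes; the values are illustrative, and the permit-all entry mirrors the `*:any:any:permit` default from the sample config:

```python
# Illustrative only: each dict becomes one RouterRule row, and each entry in
# 'nexthops' becomes one NextHop row attached to it.
rules = [
    {'source': '10.1.0.0/24', 'destination': 'any',
     'action': 'deny', 'nexthops': []},
    {'source': 'any', 'destination': 'any',
     'action': 'permit', 'nexthops': ['10.1.0.254']},
]
body = {'router': {'router_rules': rules}}  # as passed to update_router()
```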
- def _make_router_rule_list(self, router_rules):
- ruleslist = []
- for rule in router_rules:
- hops = [hop['nexthop'] for hop in rule['nexthops']]
- ruleslist.append({'id': rule['id'],
- 'destination': rule['destination'],
- 'source': rule['source'],
- 'action': rule['action'],
- 'nexthops': hops})
- return ruleslist
-
- def _get_router_rules_by_router_id(self, context, id):
- query = context.session.query(RouterRule)
- router_rules = query.filter_by(router_id=id).all()
- return self._make_router_rule_list(router_rules)
-
- def get_router(self, context, id, fields=None):
- with context.session.begin(subtransactions=True):
- router = super(RouterRule_db_mixin, self).get_router(
- context, id, fields)
- router['router_rules'] = self._get_router_rules_by_router_id(
- context, id)
- return router
-
- def get_routers(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- with context.session.begin(subtransactions=True):
- routers = super(RouterRule_db_mixin, self).get_routers(
- context, filters, fields, sorts=sorts, limit=limit,
- marker=marker, page_reverse=page_reverse)
- for router in routers:
- router['router_rules'] = self._get_router_rules_by_router_id(
- context, router['id'])
- return routers
-
- def get_sync_data(self, context, router_ids=None, active=None):
- """Query routers and their related floating_ips, interfaces."""
- with context.session.begin(subtransactions=True):
- routers = super(RouterRule_db_mixin,
- self).get_sync_data(context, router_ids,
- active=active)
- for router in routers:
- router['router_rules'] = self._get_router_rules_by_router_id(
- context, router['id'])
- return routers
+++ /dev/null
-# Copyright 2014 Big Switch Networks, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-This module manages the HTTP and HTTPS connections to the backend controllers.
-
-The main class it provides for external use is ServerPool which manages a set
-of ServerProxy objects that correspond to individual backend controllers.
-
-The following functionality is handled by this module:
-- Translation of rest_* function calls to HTTP/HTTPS calls to the controllers
-- Automatic failover between controllers
-- SSL Certificate enforcement
-- HTTP Authentication
-
-"""
-import base64
-import httplib
-import os
-import socket
-import ssl
-import time
-import weakref
-
-import eventlet
-import eventlet.corolocal
-from oslo_config import cfg
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-
-from neutron.common import exceptions
-from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import log as logging
-from neutron.plugins.bigswitch.db import consistency_db as cdb
-
-LOG = logging.getLogger(__name__)
-
-# The following are used to invoke the API on the external controller
-CAPABILITIES_PATH = "/capabilities"
-NET_RESOURCE_PATH = "/tenants/%s/networks"
-PORT_RESOURCE_PATH = "/tenants/%s/networks/%s/ports"
-ROUTER_RESOURCE_PATH = "/tenants/%s/routers"
-ROUTER_INTF_OP_PATH = "/tenants/%s/routers/%s/interfaces"
-NETWORKS_PATH = "/tenants/%s/networks/%s"
-FLOATINGIPS_PATH = "/tenants/%s/floatingips/%s"
-PORTS_PATH = "/tenants/%s/networks/%s/ports/%s"
-ATTACHMENT_PATH = "/tenants/%s/networks/%s/ports/%s/attachment"
-ROUTERS_PATH = "/tenants/%s/routers/%s"
-ROUTER_INTF_PATH = "/tenants/%s/routers/%s/interfaces/%s"
-TOPOLOGY_PATH = "/topology"
-HEALTH_PATH = "/health"
-SWITCHES_PATH = "/switches/%s"
-SUCCESS_CODES = range(200, 207)
-FAILURE_CODES = [0, 301, 302, 303, 400, 401, 403, 404, 500, 501, 502, 503,
- 504, 505]
-BASE_URI = '/networkService/v1.1'
-ORCHESTRATION_SERVICE_ID = 'Neutron v2.0'
-HASH_MATCH_HEADER = 'X-BSN-BVS-HASH-MATCH'
-REQ_CONTEXT_HEADER = 'X-REQ-CONTEXT'
-# error messages
-NXNETWORK = 'NXVNS'
-HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT = 3
-HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL = 3
-
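The `*_PATH` templates above are plain `%`-format strings; `rest_call` prepends `BASE_URI` when building the request URI. A small sketch with illustrative IDs:

```python
tenant, net, port = 'tenant-1', 'net-1', 'port-1'
print(PORT_RESOURCE_PATH % (tenant, net))
# -> /tenants/tenant-1/networks/net-1/ports
print(ATTACHMENT_PATH % (tenant, net, port))
# -> /tenants/tenant-1/networks/net-1/ports/port-1/attachment
print(BASE_URI + NETWORKS_PATH % (tenant, net))
# -> /networkService/v1.1/tenants/tenant-1/networks/net-1
```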
-
-class RemoteRestError(exceptions.NeutronException):
- message = _("Error in REST call to remote network "
- "controller: %(reason)s")
- status = None
-
- def __init__(self, **kwargs):
- self.status = kwargs.pop('status', None)
- self.reason = kwargs.get('reason')
- super(RemoteRestError, self).__init__(**kwargs)
-
-
-class ServerProxy(object):
- """REST server proxy to a network controller."""
-
- def __init__(self, server, port, ssl, auth, neutron_id, timeout,
- base_uri, name, mypool, combined_cert):
- self.server = server
- self.port = port
- self.ssl = ssl
- self.base_uri = base_uri
- self.timeout = timeout
- self.name = name
- self.success_codes = SUCCESS_CODES
- self.auth = None
- self.neutron_id = neutron_id
- self.failed = False
- self.capabilities = []
- # enable server to reference parent pool
- self.mypool = mypool
- # cache connection here to avoid a SSL handshake for every connection
- self.currentconn = None
- if auth:
- self.auth = 'Basic ' + base64.encodestring(auth).strip()
- self.combined_cert = combined_cert
-
- def get_capabilities(self):
- try:
- body = self.rest_call('GET', CAPABILITIES_PATH)[2]
- self.capabilities = jsonutils.loads(body)
- except Exception:
- LOG.exception(_LE("Couldn't retrieve capabilities. "
- "Newer API calls won't be supported."))
- LOG.info(_LI("The following capabilities were received "
- "for %(server)s: %(cap)s"), {'server': self.server,
- 'cap': self.capabilities})
- return self.capabilities
-
- def rest_call(self, action, resource, data='', headers=None,
- timeout=False, reconnect=False, hash_handler=None):
- uri = self.base_uri + resource
- body = jsonutils.dumps(data)
- headers = headers or {}
- headers['Content-type'] = 'application/json'
- headers['Accept'] = 'application/json'
- headers['NeutronProxy-Agent'] = self.name
- headers['Instance-ID'] = self.neutron_id
- headers['Orchestration-Service-ID'] = ORCHESTRATION_SERVICE_ID
- if hash_handler:
- # this will be excluded on calls that don't need hashes
- # (e.g. topology sync, capability checks)
- headers[HASH_MATCH_HEADER] = hash_handler.read_for_update()
- else:
- hash_handler = cdb.HashHandler()
- if 'keep-alive' in self.capabilities:
- headers['Connection'] = 'keep-alive'
- else:
- reconnect = True
- if self.auth:
- headers['Authorization'] = self.auth
-
- LOG.debug("ServerProxy: server=%(server)s, port=%(port)d, "
- "ssl=%(ssl)r",
- {'server': self.server, 'port': self.port, 'ssl': self.ssl})
- LOG.debug("ServerProxy: resource=%(resource)s, data=%(data)r, "
- "headers=%(headers)r, action=%(action)s",
- {'resource': resource, 'data': data, 'headers': headers,
- 'action': action})
-
- # unspecified timeout is False because a timeout can be specified as
- # None to indicate no timeout.
- if timeout is False:
- timeout = self.timeout
-
- if timeout != self.timeout:
- # need a new connection if timeout has changed
- reconnect = True
-
- if not self.currentconn or reconnect:
- if self.currentconn:
- self.currentconn.close()
- if self.ssl:
- self.currentconn = HTTPSConnectionWithValidation(
- self.server, self.port, timeout=timeout)
- if self.currentconn is None:
- LOG.error(_LE('ServerProxy: Could not establish HTTPS '
- 'connection'))
- return 0, None, None, None
- self.currentconn.combined_cert = self.combined_cert
- else:
- self.currentconn = httplib.HTTPConnection(
- self.server, self.port, timeout=timeout)
- if self.currentconn is None:
- LOG.error(_LE('ServerProxy: Could not establish HTTP '
- 'connection'))
- return 0, None, None, None
-
- try:
- self.currentconn.request(action, uri, body, headers)
- response = self.currentconn.getresponse()
- respstr = response.read()
- respdata = respstr
- if response.status in self.success_codes:
- hash_value = response.getheader(HASH_MATCH_HEADER)
- # don't clear hash from DB if a hash header wasn't present
- if hash_value is not None:
- hash_handler.put_hash(hash_value)
- else:
- hash_handler.clear_lock()
- try:
- respdata = jsonutils.loads(respstr)
- except ValueError:
- # response was not JSON, ignore the exception
- pass
- else:
- # release lock so others don't have to wait for timeout
- hash_handler.clear_lock()
-
- ret = (response.status, response.reason, respstr, respdata)
- except httplib.HTTPException:
- # If we were using a cached connection, try again with a new one.
- with excutils.save_and_reraise_exception() as ctxt:
- self.currentconn.close()
- if reconnect:
- # if reconnect is true, this was on a fresh connection so
- # reraise since this server seems to be broken
- ctxt.reraise = True
- else:
- # if reconnect is false, it was a cached connection so
- # try one more time before re-raising
- ctxt.reraise = False
- return self.rest_call(action, resource, data, headers,
- timeout=timeout, reconnect=True)
- except (socket.timeout, socket.error) as e:
- self.currentconn.close()
- LOG.error(_LE('ServerProxy: %(action)s failure, %(e)r'),
- {'action': action, 'e': e})
- ret = 0, None, None, None
- LOG.debug("ServerProxy: status=%(status)d, reason=%(reason)r, "
- "ret=%(ret)s, data=%(data)r", {'status': ret[0],
- 'reason': ret[1],
- 'ret': ret[2],
- 'data': ret[3]})
- return ret
-
-
-class ServerPool(object):
-
- _instance = None
-
- @classmethod
- def get_instance(cls):
- if cls._instance:
- return cls._instance
- cls._instance = cls()
- return cls._instance
-
- def __init__(self, timeout=False,
- base_uri=BASE_URI, name='NeutronRestProxy'):
- LOG.debug("ServerPool: initializing")
-        # 'servers' is the list of network controller REST end-points
-        # (tried in the order specified until one succeeds, and sticky
-        # until the next failure). Use 'server_auth' to encode the api-key
- servers = cfg.CONF.RESTPROXY.servers
- self.auth = cfg.CONF.RESTPROXY.server_auth
- self.ssl = cfg.CONF.RESTPROXY.server_ssl
- self.neutron_id = cfg.CONF.RESTPROXY.neutron_id
- self.base_uri = base_uri
- self.name = name
- self.contexts = {}
- self.timeout = cfg.CONF.RESTPROXY.server_timeout
- self.always_reconnect = not cfg.CONF.RESTPROXY.cache_connections
- default_port = 8000
- if timeout is not False:
- self.timeout = timeout
-
- # Function to use to retrieve topology for consistency syncs.
- # Needs to be set by module that uses the servermanager.
- self.get_topo_function = None
- self.get_topo_function_args = {}
-
- if not servers:
- raise cfg.Error(_('Servers not defined. Aborting server manager.'))
- servers = [s if len(s.rsplit(':', 1)) == 2
- else "%s:%d" % (s, default_port)
- for s in servers]
- if any((len(spl) != 2 or not spl[1].isdigit())
- for spl in [sp.rsplit(':', 1)
- for sp in servers]):
- raise cfg.Error(_('Servers must be defined as <ip>:<port>. '
- 'Configuration was %s') % servers)
- self.servers = [
- self.server_proxy_for(server, int(port))
- for server, port in (s.rsplit(':', 1) for s in servers)
- ]
- eventlet.spawn(self._consistency_watchdog,
- cfg.CONF.RESTPROXY.consistency_interval)
- ServerPool._instance = self
- LOG.debug("ServerPool: initialization done")
-
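The server-list normalization in `__init__` is worth seeing in isolation: bare hosts pick up the default port, while explicit `host:port` pairs pass through unchanged. A standalone sketch:

```python
default_port = 8000
servers = ['192.168.3.1', '192.168.3.2:8443']  # illustrative values
servers = [s if len(s.rsplit(':', 1)) == 2 else "%s:%d" % (s, default_port)
           for s in servers]
print(servers)  # ['192.168.3.1:8000', '192.168.3.2:8443']
```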
- def set_context(self, context):
- # this context needs to be local to the greenthread
- # so concurrent requests don't use the wrong context.
- # Use a weakref so the context is garbage collected
- # after the plugin is done with it.
- ref = weakref.ref(context)
- self.contexts[eventlet.corolocal.get_ident()] = ref
-
- def get_context_ref(self):
- # Try to get the context cached for this thread. If one
- # doesn't exist or if it's been garbage collected, this will
- # just return None.
- try:
- return self.contexts[eventlet.corolocal.get_ident()]()
- except KeyError:
- return None
-
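The context-stashing pattern above, reduced to a standalone sketch: a dict keyed by eventlet's greenthread ident, holding weak references so a context can be collected as soon as the request that owns it finishes:

```python
import weakref

import eventlet.corolocal


class RequestContext(object):
    """Stand-in for the neutron request context (assumption)."""

contexts = {}
ctx = RequestContext()
contexts[eventlet.corolocal.get_ident()] = weakref.ref(ctx)

ref = contexts[eventlet.corolocal.get_ident()]()  # the context, or None if gone
print(ref is ctx)  # True while ctx is still strongly referenced
```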
- def get_capabilities(self):
- # lookup on first try
- try:
- return self.capabilities
- except AttributeError:
- # each server should return a list of capabilities it supports
- # e.g. ['floatingip']
- capabilities = [set(server.get_capabilities())
- for server in self.servers]
- # Pool only supports what all of the servers support
- self.capabilities = set.intersection(*capabilities)
- return self.capabilities
-
- def server_proxy_for(self, server, port):
- combined_cert = self._get_combined_cert_for_server(server, port)
- return ServerProxy(server, port, self.ssl, self.auth, self.neutron_id,
- self.timeout, self.base_uri, self.name, mypool=self,
- combined_cert=combined_cert)
-
- def _get_combined_cert_for_server(self, server, port):
- # The ssl library requires a combined file with all trusted certs
- # so we make one containing the trusted CAs and the corresponding
- # host cert for this server
- combined_cert = None
- if self.ssl and not cfg.CONF.RESTPROXY.no_ssl_validation:
- base_ssl = cfg.CONF.RESTPROXY.ssl_cert_directory
- host_dir = os.path.join(base_ssl, 'host_certs')
- ca_dir = os.path.join(base_ssl, 'ca_certs')
- combined_dir = os.path.join(base_ssl, 'combined')
- combined_cert = os.path.join(combined_dir, '%s.pem' % server)
- if not os.path.exists(base_ssl):
- raise cfg.Error(_('ssl_cert_directory [%s] does not exist. '
- 'Create it or disable ssl.') % base_ssl)
- for automake in [combined_dir, ca_dir, host_dir]:
- if not os.path.exists(automake):
- os.makedirs(automake)
-
- # get all CA certs
- certs = self._get_ca_cert_paths(ca_dir)
-
- # check for a host specific cert
- hcert, exists = self._get_host_cert_path(host_dir, server)
- if exists:
- certs.append(hcert)
- elif cfg.CONF.RESTPROXY.ssl_sticky:
- self._fetch_and_store_cert(server, port, hcert)
- certs.append(hcert)
- if not certs:
- raise cfg.Error(_('No certificates were found to verify '
- 'controller %s') % (server))
- self._combine_certs_to_file(certs, combined_cert)
- return combined_cert
-
- def _combine_certs_to_file(self, certs, cfile):
- '''
- Concatenates the contents of each certificate in a list of
- certificate paths to one combined location for use with ssl
- sockets.
- '''
- with open(cfile, 'w') as combined:
- for c in certs:
- with open(c, 'r') as cert_handle:
- combined.write(cert_handle.read())
-
- def _get_host_cert_path(self, host_dir, server):
- '''
- returns full path and boolean indicating existence
- '''
- hcert = os.path.join(host_dir, '%s.pem' % server)
- if os.path.exists(hcert):
- return hcert, True
- return hcert, False
-
-    def _get_ca_cert_paths(self, ca_dir):
-        # single comprehension keeps each name paired with the directory
-        # it was found in while walking ca_dir
-        certs = [os.path.join(root, name)
-                 for (root, dirs, files) in os.walk(ca_dir)
-                 for name in files
-                 if name.endswith('.pem')]
-        return certs
-
- def _fetch_and_store_cert(self, server, port, path):
- '''
- Grabs a certificate from a server and writes it to
- a given path.
- '''
- try:
- cert = ssl.get_server_certificate((server, port),
- ssl_version=ssl.PROTOCOL_TLSv1)
- except Exception as e:
- raise cfg.Error(_('Could not retrieve initial '
- 'certificate from controller %(server)s. '
- 'Error details: %(error)s') %
- {'server': server, 'error': e})
-
- LOG.warning(_LW("Storing to certificate for host %(server)s "
- "at %(path)s"), {'server': server,
- 'path': path})
- self._file_put_contents(path, cert)
-
- return cert
-
- def _file_put_contents(self, path, contents):
- # Simple method to write to file.
- # Created for easy Mocking
- with open(path, 'w') as handle:
- handle.write(contents)
-
- def server_failure(self, resp, ignore_codes=[]):
- """Define failure codes as required.
-
-        Note: We assume 301-303 are failures, and try the next server in
- the server pool.
- """
- return (resp[0] in FAILURE_CODES and resp[0] not in ignore_codes)
-
- def action_success(self, resp):
- """Defining success codes as required.
-
- Note: We assume any valid 2xx as being successful response.
- """
- return resp[0] in SUCCESS_CODES
-
- def rest_call(self, action, resource, data, headers, ignore_codes,
- timeout=False):
- context = self.get_context_ref()
- if context:
- # include the requesting context information if available
- cdict = context.to_dict()
- # remove the auth token so it's not present in debug logs on the
- # backend controller
- cdict.pop('auth_token', None)
- headers[REQ_CONTEXT_HEADER] = jsonutils.dumps(cdict)
- hash_handler = cdb.HashHandler()
- good_first = sorted(self.servers, key=lambda x: x.failed)
- first_response = None
- for active_server in good_first:
- for x in range(HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT + 1):
- ret = active_server.rest_call(action, resource, data, headers,
- timeout,
- reconnect=self.always_reconnect,
- hash_handler=hash_handler)
- if ret[0] != httplib.SERVICE_UNAVAILABLE:
- break
- time.sleep(HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL)
-
- # If inconsistent, do a full synchronization
- if ret[0] == httplib.CONFLICT:
- if not self.get_topo_function:
- raise cfg.Error(_('Server requires synchronization, '
- 'but no topology function was defined.'))
- data = self.get_topo_function(**self.get_topo_function_args)
- active_server.rest_call('PUT', TOPOLOGY_PATH, data,
- timeout=None)
- # Store the first response as the error to be bubbled up to the
- # user since it was a good server. Subsequent servers will most
- # likely be cluster slaves and won't have a useful error for the
- # user (e.g. 302 redirect to master)
- if not first_response:
- first_response = ret
- if not self.server_failure(ret, ignore_codes):
- active_server.failed = False
- return ret
- else:
- LOG.error(_LE('ServerProxy: %(action)s failure for servers: '
- '%(server)r Response: %(response)s'),
- {'action': action,
- 'server': (active_server.server,
- active_server.port),
- 'response': ret[3]})
- LOG.error(_LE("ServerProxy: Error details: status=%(status)d, "
- "reason=%(reason)r, ret=%(ret)s, data=%(data)r"),
- {'status': ret[0], 'reason': ret[1], 'ret': ret[2],
- 'data': ret[3]})
- active_server.failed = True
-
- # A failure on a delete means the object is gone from Neutron but not
- # from the controller. Set the consistency hash to a bad value to
- # trigger a sync on the next check.
- # NOTE: The hash must have a comma in it otherwise it will be ignored
- # by the backend.
- if action == 'DELETE':
- hash_handler.put_hash('INCONSISTENT,INCONSISTENT')
- # All servers failed, reset server list and try again next time
- LOG.error(_LE('ServerProxy: %(action)s failure for all servers: '
- '%(server)r'),
- {'action': action,
- 'server': tuple((s.server,
- s.port) for s in self.servers)})
- return first_response
-
- def rest_action(self, action, resource, data='', errstr='%s',
- ignore_codes=None, headers=None, timeout=False):
- """
-        Wrapper for rest_call that verifies success and raises a
-        RemoteRestError on failure with a provided error string.
-        By default, 404 errors on DELETE calls are ignored because
-        the target objects already do not exist on the backend.
- """
- ignore_codes = ignore_codes or []
- headers = headers or {}
- if not ignore_codes and action == 'DELETE':
- ignore_codes = [404]
- resp = self.rest_call(action, resource, data, headers, ignore_codes,
- timeout)
- if self.server_failure(resp, ignore_codes):
- LOG.error(errstr, resp[2])
- raise RemoteRestError(reason=resp[2], status=resp[0])
- if resp[0] in ignore_codes:
- LOG.info(_LI("NeutronRestProxyV2: Received and ignored error "
- "code %(code)s on %(action)s action to resource "
- "%(resource)s"),
- {'code': resp[2], 'action': action,
- 'resource': resource})
- return resp
-
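A sketch of the calling convention the `rest_*` helpers below all follow; the wrapper function and IDs here are hypothetical:

```python
def delete_port_example(pool, tenant_id, net_id, port_id):
    # pool is a ServerPool. A 404 on a DELETE is ignored by default; any
    # other code in FAILURE_CODES raises RemoteRestError carrying the
    # backend's response body as the reason.
    resource = PORTS_PATH % (tenant_id, net_id, port_id)
    return pool.rest_action('DELETE', resource,
                            errstr='Unable to delete remote port: %s')
```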
- def rest_create_router(self, tenant_id, router):
- resource = ROUTER_RESOURCE_PATH % tenant_id
- data = {"router": router}
- errstr = _("Unable to create remote router: %s")
- self.rest_action('POST', resource, data, errstr)
-
- def rest_update_router(self, tenant_id, router, router_id):
- resource = ROUTERS_PATH % (tenant_id, router_id)
- data = {"router": router}
- errstr = _("Unable to update remote router: %s")
- self.rest_action('PUT', resource, data, errstr)
-
- def rest_delete_router(self, tenant_id, router_id):
- resource = ROUTERS_PATH % (tenant_id, router_id)
- errstr = _("Unable to delete remote router: %s")
- self.rest_action('DELETE', resource, errstr=errstr)
-
- def rest_add_router_interface(self, tenant_id, router_id, intf_details):
- resource = ROUTER_INTF_OP_PATH % (tenant_id, router_id)
- data = {"interface": intf_details}
- errstr = _("Unable to add router interface: %s")
- self.rest_action('POST', resource, data, errstr)
-
- def rest_remove_router_interface(self, tenant_id, router_id, interface_id):
- resource = ROUTER_INTF_PATH % (tenant_id, router_id, interface_id)
- errstr = _("Unable to delete remote intf: %s")
- self.rest_action('DELETE', resource, errstr=errstr)
-
- def rest_create_network(self, tenant_id, network):
- resource = NET_RESOURCE_PATH % tenant_id
- data = {"network": network}
- errstr = _("Unable to create remote network: %s")
- self.rest_action('POST', resource, data, errstr)
-
- def rest_update_network(self, tenant_id, net_id, network):
- resource = NETWORKS_PATH % (tenant_id, net_id)
- data = {"network": network}
- errstr = _("Unable to update remote network: %s")
- self.rest_action('PUT', resource, data, errstr)
-
- def rest_delete_network(self, tenant_id, net_id):
- resource = NETWORKS_PATH % (tenant_id, net_id)
- errstr = _("Unable to update remote network: %s")
- self.rest_action('DELETE', resource, errstr=errstr)
-
- def rest_create_port(self, tenant_id, net_id, port):
- resource = ATTACHMENT_PATH % (tenant_id, net_id, port["id"])
- data = {"port": port}
- device_id = port.get("device_id")
- if not port["mac_address"] or not device_id:
- # controller only cares about ports attached to devices
- LOG.warning(_LW("No device MAC attached to port %s. "
- "Skipping notification to controller."),
- port["id"])
- return
- data["attachment"] = {"id": device_id,
- "mac": port["mac_address"]}
- errstr = _("Unable to create remote port: %s")
- self.rest_action('PUT', resource, data, errstr)
-
- def rest_delete_port(self, tenant_id, network_id, port_id):
- resource = ATTACHMENT_PATH % (tenant_id, network_id, port_id)
- errstr = _("Unable to delete remote port: %s")
- self.rest_action('DELETE', resource, errstr=errstr)
-
- def rest_update_port(self, tenant_id, net_id, port):
-        # Controller has no update operation for the port endpoint;
-        # the create PUT method replaces the existing attachment
- self.rest_create_port(tenant_id, net_id, port)
-
- def rest_create_floatingip(self, tenant_id, floatingip):
- resource = FLOATINGIPS_PATH % (tenant_id, floatingip['id'])
- errstr = _("Unable to create floating IP: %s")
- self.rest_action('PUT', resource, floatingip, errstr=errstr)
-
- def rest_update_floatingip(self, tenant_id, floatingip, oldid):
- resource = FLOATINGIPS_PATH % (tenant_id, oldid)
- errstr = _("Unable to update floating IP: %s")
- self.rest_action('PUT', resource, floatingip, errstr=errstr)
-
- def rest_delete_floatingip(self, tenant_id, oldid):
- resource = FLOATINGIPS_PATH % (tenant_id, oldid)
- errstr = _("Unable to delete floating IP: %s")
- self.rest_action('DELETE', resource, errstr=errstr)
-
- def rest_get_switch(self, switch_id):
- resource = SWITCHES_PATH % switch_id
- errstr = _("Unable to retrieve switch: %s")
- resp = self.rest_action('GET', resource, errstr=errstr,
- ignore_codes=[404])
- # return None if switch not found, else return switch info
- return None if resp[0] == 404 else resp[3]
-
- def _consistency_watchdog(self, polling_interval=60):
- if 'consistency' not in self.get_capabilities():
- LOG.warning(_LW("Backend server(s) do not support automated "
- "consitency checks."))
- return
- if not polling_interval:
- LOG.warning(_LW("Consistency watchdog disabled by polling "
- "interval setting of %s."), polling_interval)
- return
- while True:
- # If consistency is supported, all we have to do is make any
- # rest call and the consistency header will be added. If it
- # doesn't match, the backend will return a synchronization error
- # that will be handled by the rest_action.
- eventlet.sleep(polling_interval)
- try:
- self.rest_action('GET', HEALTH_PATH)
- except Exception:
- LOG.exception(_LE("Encountered an error checking controller "
- "health."))
-
-
-class HTTPSConnectionWithValidation(httplib.HTTPSConnection):
-
- # If combined_cert is None, the connection will continue without
- # any certificate validation.
- combined_cert = None
-
- def connect(self):
- sock = socket.create_connection((self.host, self.port),
- self.timeout, self.source_address)
- if self._tunnel_host:
- self.sock = sock
- self._tunnel()
-
- if self.combined_cert:
- self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
- cert_reqs=ssl.CERT_REQUIRED,
- ca_certs=self.combined_cert,
- ssl_version=ssl.PROTOCOL_TLSv1)
- else:
- self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
- cert_reqs=ssl.CERT_NONE,
- ssl_version=ssl.PROTOCOL_TLSv1)
+++ /dev/null
-#!/usr/bin/env python
-# Copyright 2012, Big Switch Networks, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Test server mocking a REST based network ctrl.
-
-Used for NeutronRestProxy tests
-"""
-from __future__ import print_function
-
-import re
-
-from oslo_serialization import jsonutils
-from six import moves
-from wsgiref import simple_server
-
-
-class TestNetworkCtrl(object):
-
- def __init__(self, host='', port=8000,
- default_status='404 Not Found',
- default_response='404 Not Found',
- debug=False):
- self.host = host
- self.port = port
- self.default_status = default_status
- self.default_response = default_response
- self.debug = debug
- self.debug_env = False
- self.debug_resp = False
- self.matches = []
-
- def match(self, prior, method_regexp, uri_regexp, handler, data=None,
- multi=True):
- """Add to the list of expected inputs.
-
-        Incoming requests are matched in priority order. For equal
-        priorities, the oldest registered match is tried first.
-
- :param prior: integer priority of this match (e.g. 100)
- :param method_regexp: regexp to match method (e.g. 'PUT|POST')
- :param uri_regexp: regexp to match uri (e.g. '/quantum/v?.?/')
- :param handler: function with signature:
- lambda(method, uri, body, **kwargs) : status, body
- where
- - method: HTTP method for this request
- - uri: URI for this HTTP request
- - body: body of this HTTP request
- - kwargs are:
- - data: data object that was in the match call
- - node: TestNetworkCtrl object itself
- - id: offset of the matching tuple
-            and the return value is:
- (status, body) where:
- - status: HTTP resp status (e.g. '200 OK').
- If None, use default_status
- - body: HTTP resp body. If None, use ''
- """
-        assert int(prior) == prior, 'Priority should be an integer'
-        assert prior >= 0, 'Priority should be >= 0'
-
- lo, hi = 0, len(self.matches)
- while lo < hi:
- mid = (lo + hi) // 2
- if prior < self.matches[mid][0]:
- hi = mid
- else:
- lo = mid + 1
- self.matches.insert(lo, (prior, method_regexp, uri_regexp, handler,
- data, multi))
-
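A hypothetical registration showing how `match` is used; the priorities, URIs, and handlers are illustrative (see also the `__main__` example at the bottom of this file):

```python
ctrl = TestNetworkCtrl(port=8899)

def port_handler(method, uri, body, **kwargs):
    # kwargs carries 'data', 'node' and 'id' as described in the docstring
    return '200 OK', '{"port": {}}'

# lower 'prior' values are tried first; multi=False drops the match after
# its first hit
ctrl.match(50, 'PUT|POST', '.*/ports', port_handler, multi=False)
ctrl.match(100, 'GET', '/health', lambda m, u, b, **k: ('200 OK', '{}'))
```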
- def remove_id(self, id_):
- assert id_ >= 0, 'remove_id: id < 0'
-        assert id_ < len(self.matches), 'remove_id: id >= len()'
- self.matches.pop(id_)
-
- def request_handler(self, method, uri, body):
- retstatus = self.default_status
- retbody = self.default_response
- for i in moves.xrange(len(self.matches)):
- (unused_prior, method_regexp, uri_regexp, handler, data,
- multi) = self.matches[i]
- if re.match(method_regexp, method) and re.match(uri_regexp, uri):
- kwargs = {
- 'data': data,
- 'node': self,
- 'id': i,
- }
- retstatus, retbody = handler(method, uri, body, **kwargs)
- if multi is False:
- self.remove_id(i)
- break
- if retbody is None:
- retbody = ''
- return (retstatus, retbody)
-
- def server(self):
- def app(environ, start_response):
- uri = environ['PATH_INFO']
- method = environ['REQUEST_METHOD']
- headers = [('Content-type', 'text/json')]
- content_len_str = environ['CONTENT_LENGTH']
-
- content_len = 0
- request_data = None
- if content_len_str:
- content_len = int(content_len_str)
- request_data = environ.get('wsgi.input').read(content_len)
- if request_data:
- try:
- request_data = jsonutils.loads(request_data)
- except Exception:
- # OK for it not to be json! Ignore it
- pass
-
- if self.debug:
- print('\n')
- if self.debug_env:
- print('environ:')
-                for (key, value) in sorted(environ.items()):
- print(' %16s : %s' % (key, value))
-
- print('%s %s' % (method, uri))
- if request_data:
- print('%s' %
- jsonutils.dumps(
- request_data, sort_keys=True, indent=4))
-
-            status, body = self.request_handler(method, uri, request_data)
- body_data = None
- if body:
- try:
- body_data = jsonutils.loads(body)
- except Exception:
- # OK for it not to be json! Ignore it
- pass
-
- start_response(status, headers)
- if self.debug:
- if self.debug_env:
- print('%s: %s' % ('Response',
- jsonutils.dumps(
- body_data, sort_keys=True, indent=4)))
- return body
- return simple_server.make_server(self.host, self.port, app)
-
- def run(self):
- print("Serving on port %d ..." % self.port)
- try:
- self.server().serve_forever()
- except KeyboardInterrupt:
- pass
-
-
-if __name__ == "__main__":
- import sys
-
- port = 8899
- if len(sys.argv) > 1:
- port = int(sys.argv[1])
-
- debug = False
- if len(sys.argv) > 2:
- if sys.argv[2].lower() in ['debug', 'true']:
- debug = True
-
- ctrl = TestNetworkCtrl(port=port,
- default_status='200 OK',
- default_response='{"status":"200 OK"}',
- debug=debug)
- ctrl.match(100, 'GET', '/test',
- lambda m, u, b, **k: ('200 OK', '["200 OK"]'))
- ctrl.run()
+++ /dev/null
-# Copyright 2013 Big Switch Networks, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-version_info = {'branch_nick': u'neutron/trunk',
- 'revision_id': u'1',
- 'revno': 0}
-
-
-NEUTRONRESTPROXY_VERSION = ['2013', '1', None]
-
-
-FINAL = False # This becomes true at Release Candidate time
+++ /dev/null
-#!/usr/bin/env python
-# Copyright 2012 OpenStack Foundation
-# Copyright 2012, Big Switch Networks, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Determine version of NeutronRestProxy plugin"""
-from __future__ import print_function
-
-from neutron.plugins.bigswitch import vcsversion
-
-
-YEAR, COUNT, REVISION = vcsversion.NEUTRONRESTPROXY_VERSION
-
-
-def canonical_version_string():
- return '.'.join(filter(None,
- vcsversion.NEUTRONRESTPROXY_VERSION))
-
-
-def version_string():
- if vcsversion.FINAL:
- return canonical_version_string()
- else:
- return '%s-dev' % (canonical_version_string(),)
-
-
-def vcs_version_string():
- return "%s:%s" % (vcsversion.version_info['branch_nick'],
- vcsversion.version_info['revision_id'])
-
-
-def version_string_with_vcs():
- return "%s-%s" % (canonical_version_string(), vcs_version_string())
-
-
-if __name__ == "__main__":
- print(version_string_with_vcs())
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import copy
-import datetime
-import httplib
+from bsnstacklib.plugins.ml2.drivers.mech_bigswitch import driver
-import eventlet
-from oslo_config import cfg
-from oslo_utils import excutils
-from oslo_utils import timeutils
-from neutron import context as ctx
-from neutron.extensions import portbindings
-from neutron.i18n import _LE, _LW
-from neutron.openstack.common import log
-from neutron.plugins.bigswitch import config as pl_config
-from neutron.plugins.bigswitch import plugin
-from neutron.plugins.bigswitch import servermanager
-from neutron.plugins.common import constants as pconst
-from neutron.plugins.ml2 import driver_api as api
-
-
-EXTERNAL_PORT_OWNER = 'neutron:external_port'
-LOG = log.getLogger(__name__)
-put_context_in_serverpool = plugin.put_context_in_serverpool
-
-# time in seconds to cache the vswitch existence response
-CACHE_VSWITCH_TIME = 60
-
-
-class BigSwitchMechanismDriver(plugin.NeutronRestProxyV2Base,
- api.MechanismDriver):
-
- """Mechanism Driver for Big Switch Networks Controller.
-
- This driver relays the network create, update, delete
- operations to the Big Switch Controller.
- """
-
- def initialize(self):
- LOG.debug('Initializing driver')
-
- # register plugin config opts
- pl_config.register_config()
- self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)
-
- # init network ctrl connections
- self.servers = servermanager.ServerPool()
- self.servers.get_topo_function = self._get_all_data
- self.servers.get_topo_function_args = {'get_ports': True,
- 'get_floating_ips': False,
- 'get_routers': False}
- self.segmentation_types = ', '.join(cfg.CONF.ml2.type_drivers)
- # Track hosts running IVS to avoid excessive calls to the backend
- self.ivs_host_cache = {}
-
- LOG.debug("Initialization done")
-
- @put_context_in_serverpool
- def create_network_postcommit(self, context):
- # create network on the network controller
- self._send_create_network(context.current)
-
- @put_context_in_serverpool
- def update_network_postcommit(self, context):
- # update network on the network controller
- self._send_update_network(context.current)
-
- @put_context_in_serverpool
- def delete_network_postcommit(self, context):
- # delete network on the network controller
- self._send_delete_network(context.current)
-
- @put_context_in_serverpool
- def create_port_postcommit(self, context):
- # create port on the network controller
- port = self._prepare_port_for_controller(context)
- if port:
- self.async_port_create(port["network"]["tenant_id"],
- port["network"]["id"], port)
-
- @put_context_in_serverpool
- def update_port_postcommit(self, context):
- # update port on the network controller
- port = self._prepare_port_for_controller(context)
- if port:
- try:
- self.async_port_create(port["network"]["tenant_id"],
- port["network"]["id"], port)
- except servermanager.RemoteRestError as e:
- with excutils.save_and_reraise_exception() as ctxt:
- if (cfg.CONF.RESTPROXY.auto_sync_on_failure and
- e.status == httplib.NOT_FOUND and
- servermanager.NXNETWORK in e.reason):
- ctxt.reraise = False
- LOG.error(_LE("Inconsistency with backend controller "
- "triggering full synchronization."))
- topoargs = self.servers.get_topo_function_args
- self._send_all_data(
- send_ports=topoargs['get_ports'],
- send_floating_ips=topoargs['get_floating_ips'],
- send_routers=topoargs['get_routers'],
- triggered_by_tenant=port["network"]["tenant_id"]
- )
-
- @put_context_in_serverpool
- def delete_port_postcommit(self, context):
- # delete port on the network controller
- port = context.current
- net = context.network.current
- self.servers.rest_delete_port(net["tenant_id"], net["id"], port['id'])
-
- def _prepare_port_for_controller(self, context):
- # make a copy so the context isn't changed for other drivers
- port = copy.deepcopy(context.current)
- net = context.network.current
- port['network'] = net
- port['bound_segment'] = context.top_bound_segment
- actx = ctx.get_admin_context()
- prepped_port = self._extend_port_dict_binding(actx, port)
- prepped_port = self._map_state_and_status(prepped_port)
- if (portbindings.HOST_ID not in prepped_port or
- prepped_port[portbindings.HOST_ID] == ''):
- LOG.warning(_LW("Ignoring port notification to controller because "
- "of missing host ID."))
- # in ML2, controller doesn't care about ports without
- # the host_id set
- return False
- return prepped_port
-
- def bind_port(self, context):
- """Marks ports as bound.
-
- Binds external ports and IVS ports.
- Fabric configuration will occur on the subsequent port update.
- Currently only vlan segments are supported.
- """
- if context.current['device_owner'] == EXTERNAL_PORT_OWNER:
- # TODO(kevinbenton): check controller to see if the port exists
- # so this driver can be run in parallel with others that add
- # support for external port bindings
- for segment in context.segments_to_bind:
- if segment[api.NETWORK_TYPE] == pconst.TYPE_VLAN:
- context.set_binding(
- segment[api.ID], portbindings.VIF_TYPE_BRIDGE,
- {portbindings.CAP_PORT_FILTER: False,
- portbindings.OVS_HYBRID_PLUG: False})
- return
-
- # IVS hosts will have a vswitch with the same name as the hostname
- if self.does_vswitch_exist(context.host):
- for segment in context.segments_to_bind:
- if segment[api.NETWORK_TYPE] == pconst.TYPE_VLAN:
- context.set_binding(
- segment[api.ID], portbindings.VIF_TYPE_IVS,
- {portbindings.CAP_PORT_FILTER: True,
- portbindings.OVS_HYBRID_PLUG: False})
-
- def does_vswitch_exist(self, host):
- """Check if Indigo vswitch exists with the given hostname.
-
- Returns True if switch exists on backend.
- Returns False if switch does not exist.
- Returns None if backend could not be reached.
- Caches response from backend.
- """
- try:
- return self._get_cached_vswitch_existence(host)
- except ValueError:
- # cache was empty for that switch or expired
- pass
-
- try:
- exists = bool(self.servers.rest_get_switch(host))
- except servermanager.RemoteRestError:
- # Connectivity or internal server error. Skip cache to try again on
- # next binding attempt
- return
- self.ivs_host_cache[host] = {
- 'timestamp': datetime.datetime.now(),
- 'exists': exists
- }
- return exists
-
- def _get_cached_vswitch_existence(self, host):
- """Returns cached existence. Old and non-cached raise ValueError."""
- entry = self.ivs_host_cache.get(host)
- if not entry:
- raise ValueError(_('No cache entry for host %s') % host)
- diff = timeutils.delta_seconds(entry['timestamp'],
- datetime.datetime.now())
- if diff > CACHE_VSWITCH_TIME:
- self.ivs_host_cache.pop(host)
- raise ValueError(_('Expired cache entry for host %s') % host)
- return entry['exists']
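The cache contract of the two methods above, as a standalone sketch: entries older than `CACHE_VSWITCH_TIME` (60 seconds) count as misses and trigger a fresh query to the backend; the host name and entry are illustrative:

```python
import datetime

CACHE_VSWITCH_TIME = 60  # seconds, mirroring the constant above
ivs_host_cache = {'compute-1': {'timestamp': datetime.datetime.now(),
                                'exists': True}}

entry = ivs_host_cache['compute-1']
age = (datetime.datetime.now() - entry['timestamp']).total_seconds()
if age > CACHE_VSWITCH_TIME:
    print('expired: re-query the controller')  # cache miss path
else:
    print('cached existence: %s' % entry['exists'])
```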
+BigSwitchMechanismDriver = driver.BigSwitchMechanismDriver
--- /dev/null
+bsnstacklib>=2015.1,<2015.2
+++ /dev/null
-# Test config file for quantum-proxy-plugin.
-[DEFAULT]
-service_plugins = bigswitch_l3
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/restproxy_quantum
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main quantum server. (Leave it as is if the database runs on this host.)
-connection = sqlite://
-# Number of database reconnection retries in the event connectivity is lost;
-# -1 implies an infinite retry count
-# max_retries = 10
-# Database reconnection interval in seconds - in event connectivity is lost
-retry_interval = 2
-
-[restproxy]
-# All configuration for this plugin is in section '[restproxy]'
-#
-# The following parameters are supported:
-# servers : <host:port>[,<host:port>]* (Error if not set)
-# serverauth : <username:password> (default: no auth)
-# serverssl : True | False (default: False)
-#
-servers=localhost:9000,localhost:8899
-serverssl=False
-#serverauth=username:password
-
-[nova]
-# Specify the VIF_TYPE that will be controlled on the Nova compute instances
-# options: ivs or ovs
-# default: ovs
-vif_type = ovs
-# Overrides for vif types based on nova compute node host IDs
-# Comma separated list of host IDs to fix to a specific VIF type
-node_override_vif_ivs = ivshost
-
-[router]
-# Specify the default router rules installed in newly created tenant routers
-# Specify multiple times for multiple rules
-# Use an * to specify default for all tenants
-# Default is any any allow for all tenants
-#tenant_default_router_rule=*:any:any:permit
-# Maximum number of rules that a single router may have
-max_router_rules=200
+++ /dev/null
-ca_certs directory for SSL unit tests
-No files will be generated here, but it should exist for the tests
+++ /dev/null
-combined certificates directory for SSL unit tests
-No files will be created here, but it should exist for the tests
+++ /dev/null
-host_certs directory for SSL unit tests
-No files will be created here, but it should exist for the tests
+++ /dev/null
-# Copyright 2013 Big Switch Networks, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils
-
-from neutron.openstack.common import log as logging
-from neutron.plugins.bigswitch import servermanager
-
-LOG = logging.getLogger(__name__)
-
-
-class HTTPResponseMock(object):
- status = 200
- reason = 'OK'
-
- def __init__(self, sock, debuglevel=0, strict=0, method=None,
- buffering=False):
- pass
-
- def read(self):
- return "{'status': '200 OK'}"
-
- def getheader(self, header):
- return None
-
-
-class HTTPResponseMock404(HTTPResponseMock):
- status = 404
- reason = 'Not Found'
-
- def read(self):
- return "{'status': '%s 404 Not Found'}" % servermanager.NXNETWORK
-
-
-class HTTPResponseMock500(HTTPResponseMock):
- status = 500
- reason = 'Internal Server Error'
-
- def __init__(self, sock, debuglevel=0, strict=0, method=None,
- buffering=False, errmsg='500 Internal Server Error'):
- self.errmsg = errmsg
-
- def read(self):
- return "{'status': '%s'}" % self.errmsg
-
-
-class HTTPConnectionMock(object):
-
- def __init__(self, server, port, timeout):
- self.response = None
- self.broken = False
- # Port 9000 is the broken server
- if port == 9000:
- self.broken = True
- errmsg = "This server is broken, please try another"
- self.response = HTTPResponseMock500(None, errmsg=errmsg)
-
- def request(self, action, uri, body, headers):
- LOG.debug("Request: action=%(action)s, uri=%(uri)r, "
- "body=%(body)s, headers=%(headers)s",
- {'action': action, 'uri': uri,
- 'body': body, 'headers': headers})
- if self.broken and "ExceptOnBadServer" in uri:
- raise Exception("Broken server got an unexpected request")
- if self.response:
- return
-
- # detachment may return 404 and plugin shouldn't die
- if uri.endswith('attachment') and action == 'DELETE':
- self.response = HTTPResponseMock404(None)
- else:
- self.response = HTTPResponseMock(None)
-
- # Port creations/updates must contain binding information
- if ('port' in uri and 'attachment' not in uri
- and 'binding' not in body and action in ('POST', 'PUT')):
- errmsg = "Port binding info missing in port request '%s'" % body
- self.response = HTTPResponseMock500(None, errmsg=errmsg)
- return
-
- return
-
- def getresponse(self):
- return self.response
-
- def close(self):
- pass
-
-
-class HTTPConnectionMock404(HTTPConnectionMock):
-
- def __init__(self, server, port, timeout):
- self.response = HTTPResponseMock404(None)
- self.broken = True
-
-
-class HTTPConnectionMock500(HTTPConnectionMock):
-
- def __init__(self, server, port, timeout):
- self.response = HTTPResponseMock500(None)
- self.broken = True
-
-
-class VerifyMultiTenantFloatingIP(HTTPConnectionMock):
-
- def request(self, action, uri, body, headers):
- # Only handle network update requests
- if 'network' in uri and 'tenant' in uri and 'ports' not in uri:
- req = jsonutils.loads(body)
- if 'network' not in req or 'floatingips' not in req['network']:
- msg = _("No floating IPs in request"
- "uri=%(uri)s, body=%(body)s") % {'uri': uri,
- 'body': body}
- raise Exception(msg)
- distinct_tenants = []
- for flip in req['network']['floatingips']:
- if flip['tenant_id'] not in distinct_tenants:
- distinct_tenants.append(flip['tenant_id'])
- if len(distinct_tenants) < 2:
- msg = _("Expected floating IPs from multiple tenants."
- "uri=%(uri)s, body=%(body)s") % {'uri': uri,
- 'body': body}
- raise Exception(msg)
- super(VerifyMultiTenantFloatingIP,
- self).request(action, uri, body, headers)
-
-
-class HTTPSMockBase(HTTPConnectionMock):
- expected_cert = ''
- combined_cert = None
-
- def __init__(self, host, port=None, key_file=None, cert_file=None,
- strict=None, timeout=None, source_address=None):
- self.host = host
- super(HTTPSMockBase, self).__init__(host, port, timeout)
-
- def request(self, method, url, body=None, headers={}):
- self.connect()
- super(HTTPSMockBase, self).request(method, url, body, headers)
-
-
-class HTTPSNoValidation(HTTPSMockBase):
-
- def connect(self):
- if self.combined_cert:
- raise Exception('combined_cert set on NoValidation')
-
-
-class HTTPSCAValidation(HTTPSMockBase):
- expected_cert = 'DUMMYCERTIFICATEAUTHORITY'
-
- def connect(self):
- contents = get_cert_contents(self.combined_cert)
- if self.expected_cert not in contents:
- raise Exception('No dummy CA cert in cert_file')
-
-
-class HTTPSHostValidation(HTTPSMockBase):
- expected_cert = 'DUMMYCERTFORHOST%s'
-
- def connect(self):
- contents = get_cert_contents(self.combined_cert)
- expected = self.expected_cert % self.host
- if expected not in contents:
-            raise Exception(_('No host cert for %(server)s in cert %(cert)s')
-                            % {'server': self.host, 'cert': contents})
-
-
-def get_cert_contents(path):
- raise Exception('METHOD MUST BE MOCKED FOR TEST')
+++ /dev/null
-# Copyright 2013 Big Switch Networks, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from neutron.tests.unit.bigswitch import test_base
-from neutron.tests.unit.openvswitch import test_agent_scheduler
-
-
-class BigSwitchDhcpAgentNotifierTestCase(
- test_agent_scheduler.OvsDhcpAgentNotifierTestCase,
- test_base.BigSwitchTestBase):
-
- plugin_str = ('%s.NeutronRestProxyV2' %
- test_base.RESTPROXY_PKG_PATH)
-
- def setUp(self):
- self.setup_config_files()
- self.setup_patches()
- super(BigSwitchDhcpAgentNotifierTestCase, self).setUp()
- self.setup_db()
- self.startHttpPatch()
+++ /dev/null
-# Copyright 2013 Big Switch Networks, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import mock
-from oslo_config import cfg
-
-import neutron.common.test_lib as test_lib
-from neutron.plugins.bigswitch import config
-from neutron.plugins.bigswitch.db import consistency_db
-from neutron.tests.unit.bigswitch import fake_server
-
-
-RESTPROXY_PKG_PATH = 'neutron.plugins.bigswitch.plugin'
-L3_RESTPROXY_PKG_PATH = 'neutron.plugins.bigswitch.l3_router_plugin'
-NOTIFIER = 'neutron.plugins.bigswitch.plugin.AgentNotifierApi'
-CERTFETCH = 'neutron.plugins.bigswitch.servermanager.ServerPool._fetch_cert'
-SERVER_MANAGER = 'neutron.plugins.bigswitch.servermanager'
-HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection'
-SPAWN = 'neutron.plugins.bigswitch.plugin.eventlet.GreenPool.spawn_n'
-CWATCH = SERVER_MANAGER + '.ServerPool._consistency_watchdog'
-
-
-class BigSwitchTestBase(object):
-
- _plugin_name = ('%s.NeutronRestProxyV2' % RESTPROXY_PKG_PATH)
- _l3_plugin_name = ('%s.L3RestProxy' % L3_RESTPROXY_PKG_PATH)
-
- def setup_config_files(self):
- etc_path = os.path.join(os.path.dirname(__file__), 'etc')
- test_lib.test_config['config_files'] = [os.path.join(etc_path,
- 'restproxy.ini.test')]
- self.addCleanup(cfg.CONF.reset)
- self.addCleanup(consistency_db.clear_db)
- config.register_config()
- # SSL is disabled here by default; only the SSL-specific tests enable it
- cfg.CONF.set_override('server_ssl', False, 'RESTPROXY')
- cfg.CONF.set_override('ssl_cert_directory',
- os.path.join(etc_path, 'ssl'), 'RESTPROXY')
- # The mock interferes with HTTP(S) connection caching
- cfg.CONF.set_override('cache_connections', False, 'RESTPROXY')
- cfg.CONF.set_override('service_plugins', ['bigswitch_l3'])
- cfg.CONF.set_override('add_meta_server_route', False, 'RESTPROXY')
-
- def setup_patches(self):
- self.dhcp_periodic_p = mock.patch(
- 'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
- 'start_periodic_dhcp_agent_status_check')
- self.patched_dhcp_periodic = self.dhcp_periodic_p.start()
- self.plugin_notifier_p = mock.patch(NOTIFIER)
- # prevent any greenthreads from spawning
- self.spawn_p = mock.patch(SPAWN, new=lambda *args, **kwargs: None)
- # prevent the consistency watchdog from starting
- self.watch_p = mock.patch(CWATCH, new=lambda *args, **kwargs: None)
- # disable exception logging so expected JSON parse errors don't clutter output
- self.log_exc_p = mock.patch(SERVER_MANAGER + ".LOG.exception",
- new=lambda *args, **kwargs: None)
- self.log_exc_p.start()
- self.plugin_notifier_p.start()
- self.spawn_p.start()
- self.watch_p.start()
-
- def startHttpPatch(self):
- self.httpPatch = mock.patch(HTTPCON,
- new=fake_server.HTTPConnectionMock)
- self.httpPatch.start()
-
- def setup_db(self):
- # set up the db engine and models for the consistency db
- consistency_db.setup_db()
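-
- # Expected call order for subclasses (a sketch; MyTestCase is a placeholder,
- # compare BigSwitchDhcpAgentNotifierTestCase above for a concrete example):
- #
- #   def setUp(self):
- #       self.setup_config_files()  # register options + test restproxy.ini
- #       self.setup_patches()       # stop greenthreads, watchdog, notifier
- #       super(MyTestCase, self).setUp()
- #       self.setup_db()            # consistency DB engine and models
- #       self.startHttpPatch()      # fake controller HTTP responses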
+++ /dev/null
-# Copyright 2014 Big Switch Networks, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import contextlib
-import mock
-
-from neutron.tests.unit.bigswitch import test_router_db
-
-PLUGIN = 'neutron.plugins.bigswitch.plugin'
-SERVERMANAGER = PLUGIN + '.servermanager'
-SERVERPOOL = SERVERMANAGER + '.ServerPool'
-SERVERRESTCALL = SERVERMANAGER + '.ServerProxy.rest_call'
-HTTPCON = SERVERMANAGER + '.httplib.HTTPConnection'
-
-
-class CapabilitiesTests(test_router_db.RouterDBTestBase):
-
- def test_floating_ip_capability(self):
- with contextlib.nested(
- mock.patch(SERVERRESTCALL,
- return_value=(200, None, '["floatingip"]', None)),
- mock.patch(SERVERPOOL + '.rest_create_floatingip',
- return_value=(200, None, None, None)),
- mock.patch(SERVERPOOL + '.rest_delete_floatingip',
- return_value=(200, None, None, None))
- ) as (mock_rest, mock_create, mock_delete):
- with self.floatingip_with_assoc() as fip:
- pass
- mock_create.assert_has_calls(
- [mock.call(fip['floatingip']['tenant_id'], fip['floatingip'])]
- )
- mock_delete.assert_has_calls(
- [mock.call(fip['floatingip']['tenant_id'],
- fip['floatingip']['id'])]
- )
-
- def test_floating_ip_capability_neg(self):
- with contextlib.nested(
- mock.patch(SERVERRESTCALL,
- return_value=(200, None, '[""]', None)),
- mock.patch(SERVERPOOL + '.rest_update_network',
- return_value=(200, None, None, None))
- ) as (mock_rest, mock_netupdate):
- with self.floatingip_with_assoc() as fip:
- pass
- updates = [call[0][2]['floatingips']
- for call in mock_netupdate.call_args_list]
- all_floats = [f['floating_ip_address']
- for floats in updates for f in floats]
- self.assertIn(fip['floatingip']['floating_ip_address'], all_floats)
-
- def test_keep_alive_capability(self):
- with mock.patch(
- SERVERRESTCALL, return_value=(200, None, '["keep-alive"]', None)
- ):
- # perform a task to cause capabilities to be retrieved
- with self.floatingip_with_assoc():
- pass
- # stop the default HTTP patch since we need a MagicMock
- self.httpPatch.stop()
- # now mock HTTP class instead of REST so we can see headers
- conmock = mock.patch(HTTPCON).start()
- instance = conmock.return_value
- instance.getresponse.return_value.getheader.return_value = 'HASHHEADER'
- with self.network():
- callheaders = instance.request.mock_calls[0][1][3]
- self.assertIn('Connection', callheaders)
- self.assertEqual(callheaders['Connection'], 'keep-alive')
+++ /dev/null
-# Copyright 2014 Big Switch Networks, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import mock
-from oslo_utils import importutils
-
-from neutron.tests import base
-
-OVSBRIDGE = 'neutron.agent.linux.ovs_lib.OVSBridge'
-PLUGINAPI = 'neutron.agent.rpc.PluginApi'
-CONTEXT = 'neutron.context'
-CONSUMERCREATE = 'neutron.agent.rpc.create_consumers'
-SGRPC = 'neutron.agent.securitygroups_rpc'
-SGAGENT = 'neutron.agent.securitygroups_rpc.SecurityGroupAgentRpc'
-AGENTMOD = 'neutron.plugins.bigswitch.agent.restproxy_agent'
-NEUTRONCFG = 'neutron.common.config'
-PLCONFIG = 'neutron.plugins.bigswitch.config'
-
-
-class BaseAgentTestCase(base.BaseTestCase):
-
- def setUp(self):
- super(BaseAgentTestCase, self).setUp()
- self.mod_agent = importutils.import_module(AGENTMOD)
-
-
-class TestRestProxyAgentOVS(BaseAgentTestCase):
- def setUp(self):
- super(TestRestProxyAgentOVS, self).setUp()
- self.plapi = mock.patch(PLUGINAPI).start()
- self.ovsbridge = mock.patch(OVSBRIDGE).start()
- self.context = mock.patch(CONTEXT).start()
- self.rpc = mock.patch(CONSUMERCREATE).start()
- self.sg_agent = mock.patch(SGAGENT).start()
- self.sg_rpc = mock.patch(SGRPC).start()
-
- def mock_agent(self):
- mock_context = mock.Mock(return_value='abc')
- self.context.get_admin_context_without_session = mock_context
- return self.mod_agent.RestProxyAgent('int-br', 2)
-
- def mock_port_update(self, **kwargs):
- agent = self.mock_agent()
- agent.port_update(mock.Mock(), **kwargs)
-
- def test_port_update(self):
- port = {'id': 1, 'security_groups': 'default'}
-
- with mock.patch.object(self.ovsbridge.return_value,
- 'get_vif_port_by_id',
- return_value=1) as get_vif:
- self.mock_port_update(port=port)
-
- get_vif.assert_called_once_with(1)
- self.sg_agent.assert_has_calls([
- mock.call().refresh_firewall()
- ])
-
- def test_port_update_not_vifport(self):
- port = {'id': 1, 'security_groups': 'default'}
-
- with mock.patch.object(self.ovsbridge.return_value,
- 'get_vif_port_by_id',
- return_value=0) as get_vif:
- self.mock_port_update(port=port)
-
- get_vif.assert_called_once_with(1)
- self.assertFalse(self.sg_agent.return_value.refresh_firewall.called)
-
- def test_port_update_without_secgroup(self):
- port = {'id': 1}
-
- with mock.patch.object(self.ovsbridge.return_value,
- 'get_vif_port_by_id',
- return_value=1) as get_vif:
- self.mock_port_update(port=port)
-
- get_vif.assert_called_once_with(1)
- self.assertFalse(self.sg_agent.return_value.refresh_firewall.called)
-
- def mock_update_ports(self, vif_port_set=None, registered_ports=None):
- with mock.patch.object(self.ovsbridge.return_value,
- 'get_vif_port_set',
- return_value=vif_port_set):
- agent = self.mock_agent()
- return agent._update_ports(registered_ports)
-
- def test_update_ports_unchanged(self):
- self.assertIsNone(self.mock_update_ports())
-
- def test_update_ports_changed(self):
- vif_port_set = set([1, 3])
- registered_ports = set([1, 2])
- expected = dict(current=vif_port_set,
- added=set([3]),
- removed=set([2]))
-
- actual = self.mock_update_ports(vif_port_set, registered_ports)
-
- self.assertEqual(expected, actual)
-
- def mock_process_devices_filter(self, port_info):
- agent = self.mock_agent()
- agent._process_devices_filter(port_info)
-
- def test_process_devices_filter_add(self):
- port_info = {'added': 1}
-
- self.mock_process_devices_filter(port_info)
-
- self.sg_agent.assert_has_calls([
- mock.call().prepare_devices_filter(1)
- ])
-
- def test_process_devices_filter_remove(self):
- port_info = {'removed': 2}
-
- self.mock_process_devices_filter(port_info)
-
- self.sg_agent.assert_has_calls([
- mock.call().remove_devices_filter(2)
- ])
-
- def test_process_devices_filter_both(self):
- port_info = {'added': 1, 'removed': 2}
-
- self.mock_process_devices_filter(port_info)
-
- self.sg_agent.assert_has_calls([
- mock.call().prepare_devices_filter(1),
- mock.call().remove_devices_filter(2)
- ])
-
- def test_process_devices_filter_none(self):
- port_info = {}
-
- self.mock_process_devices_filter(port_info)
-
- self.assertFalse(
- self.sg_agent.return_value.prepare_devices_filter.called)
- self.assertFalse(
- self.sg_agent.return_value.remove_devices_filter.called)
-
-
-class TestRestProxyAgent(BaseAgentTestCase):
- def mock_main(self):
- cfg_attrs = {'CONF.RESTPROXYAGENT.integration_bridge': 'integ_br',
- 'CONF.RESTPROXYAGENT.polling_interval': 5,
- 'CONF.RESTPROXYAGENT.virtual_switch_type': 'ovs'}
- with contextlib.nested(
- mock.patch(AGENTMOD + '.cfg', **cfg_attrs),
- mock.patch(AGENTMOD + '.config.init'),
- mock.patch(NEUTRONCFG),
- mock.patch(PLCONFIG),
- ) as (mock_conf, mock_init, mock_log_conf, mock_pluginconf):
- self.mod_agent.main()
-
- mock_log_conf.assert_has_calls([
- mock.call(mock_conf),
- ])
-
- def test_main(self):
- agent_attrs = {'daemon_loop.side_effect': SystemExit(0)}
- with mock.patch(AGENTMOD + '.RestProxyAgent',
- **agent_attrs) as mock_agent:
- self.assertRaises(SystemExit, self.mock_main)
-
- mock_agent.assert_has_calls([
- mock.call('integ_br', 5, 'ovs'),
- mock.call().daemon_loop()
- ])
+++ /dev/null
-# Copyright 2012 Big Switch Networks, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import contextlib
-import mock
-from oslo_config import cfg
-import webob.exc
-
-from neutron.common import constants
-from neutron import context
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.tests.unit import _test_extension_portbindings as test_bindings
-from neutron.tests.unit.bigswitch import fake_server
-from neutron.tests.unit.bigswitch import test_base
-from neutron.tests.unit import test_api_v2
-import neutron.tests.unit.test_db_plugin as test_plugin
-import neutron.tests.unit.test_extension_allowedaddresspairs as test_addr_pair
-
-patch = mock.patch
-HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection'
-
-
-class BigSwitchProxyPluginV2TestCase(test_base.BigSwitchTestBase,
- test_plugin.NeutronDbPluginV2TestCase):
-
- def setUp(self, plugin_name=None):
- if hasattr(self, 'HAS_PORT_FILTER'):
- cfg.CONF.set_override(
- 'enable_security_group', self.HAS_PORT_FILTER, 'SECURITYGROUP')
- self.setup_config_files()
- self.setup_patches()
- if plugin_name:
- self._plugin_name = plugin_name
- service_plugins = {'L3_ROUTER_NAT': self._l3_plugin_name}
- super(BigSwitchProxyPluginV2TestCase,
- self).setUp(self._plugin_name, service_plugins=service_plugins)
- self.setup_db()
- self.port_create_status = 'BUILD'
- self.startHttpPatch()
-
-
-class TestBigSwitchProxyBasicGet(test_plugin.TestBasicGet,
- BigSwitchProxyPluginV2TestCase):
-
- pass
-
-
-class TestBigSwitchProxyV2HTTPResponse(test_plugin.TestV2HTTPResponse,
- BigSwitchProxyPluginV2TestCase):
-
- def test_failover_memory(self):
- # first request causes failover so next shouldn't hit bad server
- with self.network() as net:
- kwargs = {'tenant_id': 'ExceptOnBadServer'}
- with self.network(**kwargs) as net:
- req = self.new_show_request('networks', net['network']['id'])
- res = req.get_response(self.api)
- self.assertEqual(res.status_int, 200)
-
-
-class TestBigSwitchProxyPortsV2(test_plugin.TestPortsV2,
- BigSwitchProxyPluginV2TestCase,
- test_bindings.PortBindingsTestCase):
-
- VIF_TYPE = portbindings.VIF_TYPE_OVS
- HAS_PORT_FILTER = False
-
- def setUp(self, plugin_name=None):
- super(TestBigSwitchProxyPortsV2,
- self).setUp(self._plugin_name)
-
- def test_get_ports_no_id(self):
- with self.port(name='test'):
- ports = manager.NeutronManager.get_plugin().get_ports(
- context.get_admin_context(), fields=['name'])
- self.assertEqual(['name'], ports[0].keys())
-
- def test_router_port_status_active(self):
- # router ports interfere with port auto-deletion, so auto-deletion
- # is disabled for this test
- with self.network() as net:
- with self.subnet(network=net) as sub:
- with self.port(
- subnet=sub,
- device_owner=constants.DEVICE_OWNER_ROUTER_INTF
- ) as port:
- # router ports should be immediately active
- self.assertEqual(port['port']['status'], 'ACTIVE')
-
- def test_update_port_status_build(self):
- # normal ports go into the pending build state for async creation
- with self.port() as port:
- self.assertEqual(port['port']['status'], 'BUILD')
- self.assertEqual(self.port_create_status, 'BUILD')
-
- def _get_ports(self, netid):
- return self.deserialize('json',
- self._list_ports('json', netid=netid))['ports']
-
- def test_rollback_for_port_create(self):
- plugin = manager.NeutronManager.get_plugin()
- with self.subnet() as s:
- # stop normal patch
- self.httpPatch.stop()
- # allow thread spawns for this test
- self.spawn_p.stop()
- kwargs = {'device_id': 'somedevid'}
- # put in a broken 'server'
- httpPatch = patch(HTTPCON, new=fake_server.HTTPConnectionMock500)
- httpPatch.start()
- with self.port(subnet=s, **kwargs):
- # wait for async port create request to finish
- plugin.evpool.waitall()
- # put good 'server' back in
- httpPatch.stop()
- self.httpPatch.start()
- ports = self._get_ports(s['subnet']['network_id'])
- # failure to create should result in port in error state
- self.assertEqual(ports[0]['status'], 'ERROR')
-
- def test_rollback_for_port_update(self):
- with self.network() as n:
- with self.port(network_id=n['network']['id'],
- device_id='66') as port:
- port = self._get_ports(n['network']['id'])[0]
- data = {'port': {'name': 'aNewName', 'device_id': '99'}}
- # stop normal patch
- self.httpPatch.stop()
- with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
- self.new_update_request(
- 'ports', data, port['id']).get_response(self.api)
- self.httpPatch.start()
- uport = self._get_ports(n['network']['id'])[0]
- # name should have stayed the same
- self.assertEqual(port['name'], uport['name'])
-
- def test_rollback_for_port_delete(self):
- with self.network() as n:
- with self.port(network_id=n['network']['id'],
- device_id='somedevid') as port:
- # stop normal patch
- self.httpPatch.stop()
- with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
- self._delete(
- 'ports',
- port['port']['id'],
- expected_code=webob.exc.HTTPInternalServerError.code)
- self.httpPatch.start()
- port = self._get_ports(n['network']['id'])[0]
- self.assertEqual('BUILD', port['status'])
-
- def test_correct_shared_net_tenant_id(self):
- # tenant_id in port requests should match network tenant_id instead
- # of port tenant_id
- def rest_port_op(self, ten_id, netid, port):
- if ten_id != 'SHARED':
- raise Exception('expecting tenant_id SHARED. got %s' % ten_id)
- with self.network(tenant_id='SHARED', shared=True) as net:
- with self.subnet(network=net) as sub:
- pref = 'neutron.plugins.bigswitch.servermanager.ServerPool.%s'
- tomock = [pref % 'rest_create_port',
- pref % 'rest_update_port',
- pref % 'rest_delete_port']
- patches = [patch(f, create=True, new=rest_port_op)
- for f in tomock]
- for restp in patches:
- restp.start()
- with self.port(subnet=sub, tenant_id='port-owner') as port:
- data = {'port': {'binding:host_id': 'someotherhost',
- 'device_id': 'override_dev'}}
- req = self.new_update_request('ports', data,
- port['port']['id'])
- res = req.get_response(self.api)
- self.assertEqual(res.status_int, 200)
-
- def test_create404_triggers_sync(self):
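- # a 404 from the backend on create means the controller has no record
- # of the object, so the plugin is expected to trigger a full resync via
- # _send_all_data for the triggering tenant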
- # allow the async port thread for this test
- self.spawn_p.stop()
- with contextlib.nested(
- self.subnet(),
- patch(HTTPCON, create=True,
- new=fake_server.HTTPConnectionMock404),
- patch(test_base.RESTPROXY_PKG_PATH
- + '.NeutronRestProxyV2._send_all_data')
- ) as (s, mock_http, mock_send_all):
- with self.port(subnet=s, device_id='somedevid') as p:
- # wait for the async port thread to finish
- plugin = manager.NeutronManager.get_plugin()
- plugin.evpool.waitall()
- call = mock.call(
- send_routers=True, send_ports=True, send_floating_ips=True,
- triggered_by_tenant=p['port']['tenant_id']
- )
- mock_send_all.assert_has_calls([call])
- self.spawn_p.start()
-
- def test_port_vif_details_default(self):
- kwargs = {'name': 'name', 'device_id': 'override_dev'}
- with self.port(**kwargs) as port:
- self.assertEqual(port['port']['binding:vif_type'],
- portbindings.VIF_TYPE_OVS)
-
- def test_port_vif_details_override(self):
- # ivshost is in the test config to override to IVS
- kwargs = {'name': 'name', 'binding:host_id': 'ivshost',
- 'device_id': 'override_dev',
- 'arg_list': ('binding:host_id',)}
- with self.port(**kwargs) as port:
- self.assertEqual(port['port']['binding:vif_type'],
- portbindings.VIF_TYPE_IVS)
- self._delete('ports', port['port']['id'])
- self._delete('networks', port['port']['network_id'])
- kwargs = {'name': 'name2', 'binding:host_id': 'someotherhost',
- 'device_id': 'other_dev'}
- with self.port(**kwargs) as port:
- self.assertEqual(port['port']['binding:vif_type'], self.VIF_TYPE)
-
- def test_port_move(self):
- # ivshost is in the test config to override to IVS
- kwargs = {'name': 'name', 'binding:host_id': 'ivshost',
- 'device_id': 'override_dev'}
- with self.port(**kwargs) as port:
- data = {'port': {'binding:host_id': 'someotherhost',
- 'device_id': 'override_dev'}}
- req = self.new_update_request('ports', data, port['port']['id'])
- res = self.deserialize(self.fmt, req.get_response(self.api))
- self.assertEqual(res['port']['binding:vif_type'], self.VIF_TYPE)
-
-
-class TestVifDifferentDefault(BigSwitchProxyPluginV2TestCase):
-
- def setup_config_files(self):
- super(TestVifDifferentDefault, self).setup_config_files()
- cfg.CONF.set_override('vif_type', 'ivs', 'NOVA')
-
- def test_default_viftype(self):
- with self.port() as port:
- self.assertEqual(port['port']['binding:vif_type'], 'ivs')
-
-
-class TestBigSwitchProxyNetworksV2(test_plugin.TestNetworksV2,
- BigSwitchProxyPluginV2TestCase):
-
- def _get_networks(self, tenant_id):
- ctx = context.Context('', tenant_id)
- return manager.NeutronManager.get_plugin().get_networks(ctx)
-
- def test_rollback_on_network_create(self):
- tid = test_api_v2._uuid()
- kwargs = {'tenant_id': tid}
- self.httpPatch.stop()
- with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
- self._create_network('json', 'netname', True, **kwargs)
- self.httpPatch.start()
- self.assertFalse(self._get_networks(tid))
-
- def test_rollback_on_network_update(self):
- with self.network() as n:
- data = {'network': {'name': 'aNewName'}}
- self.httpPatch.stop()
- with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
- self.new_update_request(
- 'networks', data, n['network']['id']
- ).get_response(self.api)
- self.httpPatch.start()
- updatedn = self._get_networks(n['network']['tenant_id'])[0]
- # name should have stayed the same due to failure
- self.assertEqual(n['network']['name'], updatedn['name'])
-
- def test_rollback_on_network_delete(self):
- with self.network() as n:
- self.httpPatch.stop()
- with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
- self._delete(
- 'networks', n['network']['id'],
- expected_code=webob.exc.HTTPInternalServerError.code)
- self.httpPatch.start()
- # network should still exist in db
- self.assertEqual(n['network']['id'],
- self._get_networks(n['network']['tenant_id']
- )[0]['id'])
-
- def test_notify_on_security_group_change(self):
- plugin = manager.NeutronManager.get_plugin()
- with self.port() as p:
- with contextlib.nested(
- mock.patch.object(plugin, 'notifier'),
- mock.patch.object(plugin, 'is_security_group_member_updated',
- return_value=True)
- ) as (n_mock, s_mock):
- # any port update should trigger a notification due to s_mock
- data = {'port': {'name': 'aNewName'}}
- self.new_update_request(
- 'ports', data, p['port']['id']).get_response(self.api)
- self.assertTrue(n_mock.port_update.called)
-
-
-class TestBigSwitchProxySubnetsV2(test_plugin.TestSubnetsV2,
- BigSwitchProxyPluginV2TestCase):
-
- pass
-
-
-class TestBigSwitchProxySync(BigSwitchProxyPluginV2TestCase):
-
- def test_send_data(self):
- plugin_obj = manager.NeutronManager.get_plugin()
- result = plugin_obj._send_all_data()
- self.assertEqual(result[0], 200)
-
-
-class TestBigSwitchAddressPairs(test_addr_pair.TestAllowedAddressPairs,
- BigSwitchProxyPluginV2TestCase):
- pass
+++ /dev/null
-# Copyright 2013 Big Switch Networks, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# Adapted from neutron.tests.unit.test_l3_plugin
-
-import contextlib
-import copy
-
-import mock
-from oslo_config import cfg
-from six import moves
-from webob import exc
-
-from neutron.common import test_lib
-from neutron import context
-from neutron.extensions import l3
-from neutron import manager
-from neutron.openstack.common import uuidutils
-from neutron.plugins.bigswitch.extensions import routerrule
-from neutron.tests.unit.bigswitch import fake_server
-from neutron.tests.unit.bigswitch import test_base
-from neutron.tests.unit import test_api_v2
-from neutron.tests.unit import test_extension_extradhcpopts as test_extradhcp
-from neutron.tests.unit import test_l3_plugin
-
-
-HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection'
-_uuid = uuidutils.generate_uuid
-
-
-class RouterRulesTestExtensionManager(object):
-
- def get_resources(self):
- l3.RESOURCE_ATTRIBUTE_MAP['routers'].update(
- routerrule.EXTENDED_ATTRIBUTES_2_0['routers'])
- return l3.L3.get_resources()
-
- def get_actions(self):
- return []
-
- def get_request_extensions(self):
- return []
-
-
-class DHCPOptsTestCase(test_base.BigSwitchTestBase,
- test_extradhcp.TestExtraDhcpOpt):
-
- def setUp(self, plugin=None):
- self.setup_patches()
- self.setup_config_files()
- super(test_extradhcp.ExtraDhcpOptDBTestCase,
- self).setUp(plugin=self._plugin_name)
- self.setup_db()
- self.startHttpPatch()
-
-
-class RouterDBTestBase(test_base.BigSwitchTestBase,
- test_l3_plugin.L3BaseForIntTests,
- test_l3_plugin.L3NatTestCaseMixin):
-
- mock_rescheduling = False
-
- def setUp(self):
- self.setup_patches()
- self.setup_config_files()
- ext_mgr = RouterRulesTestExtensionManager()
- service_plugins = {'L3_ROUTER_NAT': self._l3_plugin_name}
- super(RouterDBTestBase, self).setUp(plugin=self._plugin_name,
- ext_mgr=ext_mgr,
- service_plugins=service_plugins)
- self.setup_db()
- cfg.CONF.set_default('allow_overlapping_ips', False)
- self.plugin_obj = manager.NeutronManager.get_service_plugins().get(
- 'L3_ROUTER_NAT')
- self.startHttpPatch()
-
- def tearDown(self):
- super(RouterDBTestBase, self).tearDown()
- del test_lib.test_config['config_files']
-
-
-class RouterDBTestCase(RouterDBTestBase,
- test_l3_plugin.L3NatDBIntTestCase):
-
- def test_router_remove_router_interface_wrong_subnet_returns_400(self):
- with self.router() as r:
- with self.subnet() as s:
- with self.subnet(cidr='10.0.10.0/24') as s1:
- with self.port(subnet=s1) as p:
- self._router_interface_action('add',
- r['router']['id'],
- None,
- p['port']['id'])
- self._router_interface_action('remove',
- r['router']['id'],
- s['subnet']['id'],
- p['port']['id'],
- exc.HTTPBadRequest.code)
- # remove properly to clean up
- self._router_interface_action('remove',
- r['router']['id'],
- None,
- p['port']['id'])
-
- def test_router_remove_router_interface_wrong_port_returns_404(self):
- with self.router() as r:
- with self.subnet() as s:
- with self.port(subnet=s) as p:
- self._router_interface_action('add',
- r['router']['id'],
- None,
- p['port']['id'])
- # create another port for testing failure case
- res = self._create_port('json', p['port']['network_id'])
- p2 = self.deserialize('json', res)
- self._router_interface_action('remove',
- r['router']['id'],
- None,
- p2['port']['id'],
- exc.HTTPNotFound.code)
- # remove correct interface to cleanup
- self._router_interface_action('remove',
- r['router']['id'],
- None,
- p['port']['id'])
- # remove extra port created
- self._delete('ports', p2['port']['id'])
-
- def test_add_network_to_ext_gw_backend_body(self):
- plugin_obj = manager.NeutronManager.get_plugin()
- with contextlib.nested(
- self.network(), self.router()
- ) as (n1, r1):
- with self.subnet(network=n1, cidr='10.10.10.10/24') as s1:
- self._set_net_external(s1['subnet']['network_id'])
- with mock.patch.object(plugin_obj.servers,
- 'rest_update_router') as upmock:
- self._add_external_gateway_to_router(r1['router']['id'],
- n1['network']['id'])
- router_body = upmock.mock_calls[0][1][1]
- self.assertEqual(
- plugin_obj.get_network(context.get_admin_context(),
- n1['network']['id']),
- router_body['external_gateway_info']['network'])
-
- def test_multi_tenant_flip_allocation(self):
- tenant1_id = _uuid()
- tenant2_id = _uuid()
- with contextlib.nested(
- self.network(tenant_id=tenant1_id),
- self.network(tenant_id=tenant2_id)) as (n1, n2):
- with contextlib.nested(
- self.subnet(network=n1, cidr='11.0.0.0/24'),
- self.subnet(network=n2, cidr='12.0.0.0/24'),
- self.subnet(cidr='13.0.0.0/24')) as (s1, s2, psub):
- with contextlib.nested(
- self.router(tenant_id=tenant1_id),
- self.router(tenant_id=tenant2_id),
- self.port(subnet=s1, tenant_id=tenant1_id),
- self.port(subnet=s2, tenant_id=tenant2_id)) as (r1, r2,
- p1, p2):
- self._set_net_external(psub['subnet']['network_id'])
- s1id = p1['port']['fixed_ips'][0]['subnet_id']
- s2id = p2['port']['fixed_ips'][0]['subnet_id']
- s1 = {'subnet': {'id': s1id}}
- s2 = {'subnet': {'id': s2id}}
- self._add_external_gateway_to_router(
- r1['router']['id'],
- psub['subnet']['network_id'])
- self._add_external_gateway_to_router(
- r2['router']['id'],
- psub['subnet']['network_id'])
- self._router_interface_action(
- 'add', r1['router']['id'],
- s1['subnet']['id'], None)
- self._router_interface_action(
- 'add', r2['router']['id'],
- s2['subnet']['id'], None)
- fl1 = self._make_floatingip_for_tenant_port(
- net_id=psub['subnet']['network_id'],
- port_id=p1['port']['id'],
- tenant_id=tenant1_id)
- self.httpPatch.stop()
- multiFloatPatch = mock.patch(
- HTTPCON,
- new=fake_server.VerifyMultiTenantFloatingIP)
- multiFloatPatch.start()
- fl2 = self._make_floatingip_for_tenant_port(
- net_id=psub['subnet']['network_id'],
- port_id=p2['port']['id'],
- tenant_id=tenant2_id)
- multiFloatPatch.stop()
- self.httpPatch.start()
- self._delete('floatingips', fl1['floatingip']['id'])
- self._delete('floatingips', fl2['floatingip']['id'])
- self._router_interface_action(
- 'remove', r1['router']['id'],
- s1['subnet']['id'], None)
- self._router_interface_action(
- 'remove', r2['router']['id'],
- s2['subnet']['id'], None)
-
- def _make_floatingip_for_tenant_port(self, net_id, port_id, tenant_id):
- data = {'floatingip': {'floating_network_id': net_id,
- 'tenant_id': tenant_id,
- 'port_id': port_id}}
- floatingip_req = self.new_create_request('floatingips', data, self.fmt)
- res = floatingip_req.get_response(self.ext_api)
- return self.deserialize(self.fmt, res)
-
- def test_floatingip_with_invalid_create_port(self):
- self._test_floatingip_with_invalid_create_port(
- 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2')
-
- def test_create_floatingip_no_ext_gateway_return_404(self):
- with self.subnet(cidr='10.0.10.0/24') as public_sub:
- self._set_net_external(public_sub['subnet']['network_id'])
- with self.port() as private_port:
- with self.router():
- res = self._create_floatingip(
- 'json',
- public_sub['subnet']['network_id'],
- port_id=private_port['port']['id'])
- self.assertEqual(res.status_int, exc.HTTPNotFound.code)
-
- def test_router_update_gateway(self):
- with self.router() as r:
- with self.subnet() as s1:
- with self.subnet(cidr='10.0.10.0/24') as s2:
- self._set_net_external(s1['subnet']['network_id'])
- self._add_external_gateway_to_router(
- r['router']['id'],
- s1['subnet']['network_id'])
- body = self._show('routers', r['router']['id'])
- net_id = (body['router']
- ['external_gateway_info']['network_id'])
- self.assertEqual(net_id, s1['subnet']['network_id'])
- self._set_net_external(s2['subnet']['network_id'])
- self._add_external_gateway_to_router(
- r['router']['id'],
- s2['subnet']['network_id'])
- body = self._show('routers', r['router']['id'])
- net_id = (body['router']
- ['external_gateway_info']['network_id'])
- self.assertEqual(net_id, s2['subnet']['network_id'])
- self._remove_external_gateway_from_router(
- r['router']['id'],
- s2['subnet']['network_id'])
-
- def test_router_add_interface_overlapped_cidr(self):
- self.skipTest("Plugin does not support")
-
- def test_router_add_interface_overlapped_cidr_returns_400(self):
- self.skipTest("Plugin does not support")
-
- def test_list_nets_external(self):
- self.skipTest("Plugin does not support")
-
- def test_router_update_gateway_with_existed_floatingip(self):
- with self.subnet(cidr='10.0.10.0/24') as subnet:
- self._set_net_external(subnet['subnet']['network_id'])
- with self.floatingip_with_assoc() as fip:
- self._add_external_gateway_to_router(
- fip['floatingip']['router_id'],
- subnet['subnet']['network_id'],
- expected_code=exc.HTTPConflict.code)
-
- def test_router_remove_interface_wrong_subnet_returns_400(self):
- with self.router() as r:
- with self.subnet(cidr='10.0.10.0/24') as s:
- with self.port() as p:
- self._router_interface_action('add',
- r['router']['id'],
- None,
- p['port']['id'])
- self._router_interface_action('remove',
- r['router']['id'],
- s['subnet']['id'],
- p['port']['id'],
- exc.HTTPBadRequest.code)
- # remove properly to clean up
- self._router_interface_action('remove',
- r['router']['id'],
- None,
- p['port']['id'])
-
- def test_router_remove_interface_wrong_port_returns_404(self):
- with self.router() as r:
- with self.subnet(cidr='10.0.10.0/24'):
- with self.port() as p:
- self._router_interface_action('add',
- r['router']['id'],
- None,
- p['port']['id'])
- # create another port for testing failure case
- res = self._create_port('json', p['port']['network_id'])
- p2 = self.deserialize('json', res)
- self._router_interface_action('remove',
- r['router']['id'],
- None,
- p2['port']['id'],
- exc.HTTPNotFound.code)
- # remove correct interface to cleanup
- self._router_interface_action('remove',
- r['router']['id'],
- None,
- p['port']['id'])
- # remove extra port created
- self._delete('ports', p2['port']['id'])
-
- def test_send_data(self):
- fmt = 'json'
- plugin_obj = manager.NeutronManager.get_plugin()
-
- with self.router() as r:
- r_id = r['router']['id']
-
- with self.subnet(cidr='10.0.10.0/24') as s:
- s_id = s['subnet']['id']
-
- with self.router() as r1:
- r1_id = r1['router']['id']
- body = self._router_interface_action('add', r_id, s_id,
- None)
- self.assertIn('port_id', body)
- r_port_id = body['port_id']
- body = self._show('ports', r_port_id)
- self.assertEqual(body['port']['device_id'], r_id)
-
- with self.subnet(cidr='10.0.20.0/24') as s1:
- s1_id = s1['subnet']['id']
- body = self._router_interface_action('add', r1_id,
- s1_id, None)
- self.assertIn('port_id', body)
- r1_port_id = body['port_id']
- body = self._show('ports', r1_port_id)
- self.assertEqual(body['port']['device_id'], r1_id)
-
- with self.subnet(cidr='11.0.0.0/24') as public_sub:
- public_net_id = public_sub['subnet']['network_id']
- self._set_net_external(public_net_id)
-
- with self.port() as prv_port:
- prv_fixed_ip = prv_port['port']['fixed_ips'][0]
- priv_sub_id = prv_fixed_ip['subnet_id']
- self._add_external_gateway_to_router(
- r_id, public_net_id)
- self._router_interface_action('add', r_id,
- priv_sub_id,
- None)
-
- priv_port_id = prv_port['port']['id']
- res = self._create_floatingip(
- fmt, public_net_id,
- port_id=priv_port_id)
- self.assertEqual(res.status_int,
- exc.HTTPCreated.code)
- floatingip = self.deserialize(fmt, res)
-
- result = plugin_obj._send_all_data()
- self.assertEqual(result[0], 200)
-
- self._delete('floatingips',
- floatingip['floatingip']['id'])
- self._remove_external_gateway_from_router(
- r_id, public_net_id)
- self._router_interface_action('remove', r_id,
- priv_sub_id,
- None)
- self._router_interface_action('remove', r_id, s_id,
- None)
- self._show('ports', r_port_id,
- expected_code=exc.HTTPNotFound.code)
- self._router_interface_action('remove', r1_id, s1_id,
- None)
- self._show('ports', r1_port_id,
- expected_code=exc.HTTPNotFound.code)
-
- def test_router_rules_update(self):
- with self.router() as r:
- r_id = r['router']['id']
- router_rules = [{'destination': '1.2.3.4/32',
- 'source': '4.3.2.1/32',
- 'action': 'permit',
- 'nexthops': ['4.4.4.4', '4.4.4.5']}]
- body = self._update('routers', r_id,
- {'router': {'router_rules': router_rules}})
-
- body = self._show('routers', r['router']['id'])
- self.assertIn('router_rules', body['router'])
- rules = body['router']['router_rules']
- self.assertEqual(_strip_rule_ids(rules), router_rules)
- # Try after adding another rule
- router_rules.append({'source': 'external',
- 'destination': '8.8.8.8/32',
- 'action': 'permit', 'nexthops': []})
- body = self._update('routers', r['router']['id'],
- {'router': {'router_rules': router_rules}})
-
- body = self._show('routers', r['router']['id'])
- self.assertIn('router_rules', body['router'])
- rules = body['router']['router_rules']
- self.assertEqual(_strip_rule_ids(rules), router_rules)
-
- def test_router_rules_separation(self):
- with self.router() as r1:
- with self.router() as r2:
- r1_id = r1['router']['id']
- r2_id = r2['router']['id']
- router1_rules = [{'destination': '5.6.7.8/32',
- 'source': '8.7.6.5/32',
- 'action': 'permit',
- 'nexthops': ['8.8.8.8', '9.9.9.9']}]
- router2_rules = [{'destination': '1.2.3.4/32',
- 'source': '4.3.2.1/32',
- 'action': 'permit',
- 'nexthops': ['4.4.4.4', '4.4.4.5']}]
- body1 = self._update('routers', r1_id,
- {'router':
- {'router_rules': router1_rules}})
- body2 = self._update('routers', r2_id,
- {'router':
- {'router_rules': router2_rules}})
-
- body1 = self._show('routers', r1_id)
- body2 = self._show('routers', r2_id)
- rules1 = body1['router']['router_rules']
- rules2 = body2['router']['router_rules']
- self.assertEqual(_strip_rule_ids(rules1), router1_rules)
- self.assertEqual(_strip_rule_ids(rules2), router2_rules)
-
- def test_router_rules_validation(self):
- with self.router() as r:
- r_id = r['router']['id']
- good_rules = [{'destination': '1.2.3.4/32',
- 'source': '4.3.2.1/32',
- 'action': 'permit',
- 'nexthops': ['4.4.4.4', '4.4.4.5']}]
-
- body = self._update('routers', r_id,
- {'router': {'router_rules': good_rules}})
- body = self._show('routers', r_id)
- self.assertIn('router_rules', body['router'])
- self.assertEqual(good_rules,
- _strip_rule_ids(body['router']['router_rules']))
-
- # Missing nexthops should be populated with an empty list
- light_rules = copy.deepcopy(good_rules)
- del light_rules[0]['nexthops']
- body = self._update('routers', r_id,
- {'router': {'router_rules': light_rules}})
- body = self._show('routers', r_id)
- self.assertIn('router_rules', body['router'])
- light_rules[0]['nexthops'] = []
- self.assertEqual(light_rules,
- _strip_rule_ids(body['router']['router_rules']))
- # bad CIDR
- bad_rules = copy.deepcopy(good_rules)
- bad_rules[0]['destination'] = '1.1.1.1'
- body = self._update('routers', r_id,
- {'router': {'router_rules': bad_rules}},
- expected_code=exc.HTTPBadRequest.code)
- # bad next hop
- bad_rules = copy.deepcopy(good_rules)
- bad_rules[0]['nexthops'] = ['1.1.1.1', 'f2']
- body = self._update('routers', r_id,
- {'router': {'router_rules': bad_rules}},
- expected_code=exc.HTTPBadRequest.code)
- # bad action
- bad_rules = copy.deepcopy(good_rules)
- bad_rules[0]['action'] = 'dance'
- body = self._update('routers', r_id,
- {'router': {'router_rules': bad_rules}},
- expected_code=exc.HTTPBadRequest.code)
- # duplicate rule with opposite action
- bad_rules = copy.deepcopy(good_rules)
- bad_rules.append(copy.deepcopy(bad_rules[0]))
- bad_rules.append(copy.deepcopy(bad_rules[0]))
- bad_rules[1]['source'] = 'any'
- bad_rules[2]['action'] = 'deny'
- body = self._update('routers', r_id,
- {'router': {'router_rules': bad_rules}},
- expected_code=exc.HTTPBadRequest.code)
- # duplicate nexthop
- bad_rules = copy.deepcopy(good_rules)
- bad_rules[0]['nexthops'] = ['1.1.1.1', '1.1.1.1']
- body = self._update('routers', r_id,
- {'router': {'router_rules': bad_rules}},
- expected_code=exc.HTTPBadRequest.code)
- # make sure light rules persisted during bad updates
- body = self._show('routers', r_id)
- self.assertIn('router_rules', body['router'])
- self.assertEqual(light_rules,
- _strip_rule_ids(body['router']['router_rules']))
-
- def test_router_rules_config_change(self):
- cfg.CONF.set_override('tenant_default_router_rule',
- ['*:any:any:deny',
- '*:8.8.8.8/32:any:permit:1.2.3.4'],
- 'ROUTER')
- with self.router() as r:
- body = self._show('routers', r['router']['id'])
- expected_rules = [{'source': 'any', 'destination': 'any',
- 'nexthops': [], 'action': 'deny'},
- {'source': '8.8.8.8/32', 'destination': 'any',
- 'nexthops': ['1.2.3.4'], 'action': 'permit'}]
- self.assertEqual(expected_rules,
- _strip_rule_ids(body['router']['router_rules']))
-
- def test_rule_exhaustion(self):
- cfg.CONF.set_override('max_router_rules', 10, 'ROUTER')
- with self.router() as r:
- rules = []
- for i in moves.xrange(1, 12):
- rule = {'source': 'any', 'nexthops': [],
- 'destination': '1.1.1.' + str(i) + '/32',
- 'action': 'permit'}
- rules.append(rule)
- self._update('routers', r['router']['id'],
- {'router': {'router_rules': rules}},
- expected_code=exc.HTTPBadRequest.code)
-
- def test_rollback_on_router_create(self):
- tid = test_api_v2._uuid()
- self.httpPatch.stop()
- with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
- self._create_router('json', tid)
- self.assertFalse(self._get_routers(tid))
-
- def test_rollback_on_router_update(self):
- with self.router() as r:
- data = {'router': {'name': 'aNewName'}}
- self.httpPatch.stop()
- with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
- self.new_update_request(
- 'routers', data, r['router']['id']).get_response(self.api)
- self.httpPatch.start()
- updatedr = self._get_routers(r['router']['tenant_id'])[0]
- # name should have stayed the same due to failure
- self.assertEqual(r['router']['name'], updatedr['name'])
-
- def test_rollback_on_router_delete(self):
- with self.router() as r:
- self.httpPatch.stop()
- with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
- self._delete('routers', r['router']['id'],
- expected_code=exc.HTTPInternalServerError.code)
- self.httpPatch.start()
- self.assertEqual(r['router']['id'],
- self._get_routers(r['router']['tenant_id']
- )[0]['id'])
-
- def _get_routers(self, tenant_id):
- ctx = context.Context('', tenant_id)
- return self.plugin_obj.get_routers(ctx)
-
-
-def _strip_rule_ids(rules):
- cleaned = []
- for rule in rules:
- del rule['id']
- cleaned.append(rule)
- return cleaned
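-
- # NB: _strip_rule_ids mutates the rule dicts in place (del rule['id'])
- # before returning them; callers here only compare the stripped rules
- # against expected fixtures, so the mutation is harmless.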
+++ /dev/null
-# Copyright 2014, Big Switch Networks
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron import manager
-from neutron.tests.unit.bigswitch import test_base
-from neutron.tests.unit import test_extension_security_group as test_sg
-from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
-
-
-class RestProxySecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase,
- test_base.BigSwitchTestBase):
- plugin_str = ('%s.NeutronRestProxyV2' %
- test_base.RESTPROXY_PKG_PATH)
-
- def setUp(self, plugin=None):
- test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER)
- self.setup_config_files()
- self.setup_patches()
- self._attribute_map_bk_ = {}
- super(RestProxySecurityGroupsTestCase, self).setUp(self.plugin_str)
- self.setup_db()
- plugin = manager.NeutronManager.get_plugin()
- self.notifier = plugin.notifier
- self.rpc = plugin.endpoints[0]
- self.startHttpPatch()
-
-
-class TestSecServerRpcCallBack(test_sg_rpc.SGServerRpcCallBackTestCase,
- RestProxySecurityGroupsTestCase):
- pass
-
-
-class TestSecurityGroupsMixin(test_sg.TestSecurityGroups,
- test_sg_rpc.SGNotificationTestMixin,
- RestProxySecurityGroupsTestCase):
- pass
+++ /dev/null
-# Copyright 2014 Big Switch Networks, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import contextlib
-import httplib
-import socket
-import ssl
-
-import mock
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_serialization import jsonutils
-from oslo_utils import importutils
-
-from neutron import context
-from neutron import manager
-from neutron.plugins.bigswitch.db import consistency_db
-from neutron.plugins.bigswitch import servermanager
-from neutron.tests.unit.bigswitch import test_restproxy_plugin as test_rp
-
-SERVERMANAGER = 'neutron.plugins.bigswitch.servermanager'
-HTTPCON = SERVERMANAGER + '.httplib.HTTPConnection'
-HTTPSCON = SERVERMANAGER + '.HTTPSConnectionWithValidation'
-
-
-class ServerManagerTests(test_rp.BigSwitchProxyPluginV2TestCase):
-
- def setUp(self):
- self.socket_mock = mock.patch(
- SERVERMANAGER + '.socket.create_connection').start()
- self.wrap_mock = mock.patch(SERVERMANAGER + '.ssl.wrap_socket').start()
- super(ServerManagerTests, self).setUp()
- # http patch must not be running or it will mangle the servermanager
- # import where the https connection classes are defined
- self.httpPatch.stop()
- self.sm = importutils.import_module(SERVERMANAGER)
-
- def test_no_servers(self):
- cfg.CONF.set_override('servers', [], 'RESTPROXY')
- self.assertRaises(cfg.Error, servermanager.ServerPool)
-
- def test_malformed_servers(self):
- cfg.CONF.set_override('servers', ['1.2.3.4', '1.1.1.1:a'], 'RESTPROXY')
- self.assertRaises(cfg.Error, servermanager.ServerPool)
-
- def test_ipv6_server_address(self):
- cfg.CONF.set_override(
- 'servers', ['[ABCD:EF01:2345:6789:ABCD:EF01:2345:6789]:80'],
- 'RESTPROXY')
- s = servermanager.ServerPool()
- self.assertEqual(s.servers[0].server,
- '[ABCD:EF01:2345:6789:ABCD:EF01:2345:6789]')
-
- def test_sticky_cert_fetch_fail(self):
- pl = manager.NeutronManager.get_plugin()
- pl.servers.ssl = True
- with mock.patch(
- 'ssl.get_server_certificate',
- side_effect=Exception('There is no more entropy in the universe')
- ) as sslgetmock:
- self.assertRaises(
- cfg.Error,
- pl.servers._get_combined_cert_for_server,
- *('example.org', 443)
- )
- sslgetmock.assert_has_calls([mock.call(
- ('example.org', 443), ssl_version=ssl.PROTOCOL_TLSv1)])
-
- def test_consistency_watchdog_stops_with_0_polling_interval(self):
- pl = manager.NeutronManager.get_plugin()
- pl.servers.capabilities = ['consistency']
- self.watch_p.stop()
- with mock.patch('eventlet.sleep') as smock:
- # should return immediately with a polling interval of 0
- pl.servers._consistency_watchdog(0)
- self.assertFalse(smock.called)
-
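- # the watchdog polls /health in an endless loop, so the LOG.exception
- # mock below raises KeyError purely to break out of that loop once the
- # mocked rest_call has failed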
- def test_consistency_watchdog(self):
- pl = manager.NeutronManager.get_plugin()
- pl.servers.capabilities = []
- self.watch_p.stop()
- with contextlib.nested(
- mock.patch('eventlet.sleep'),
- mock.patch(
- SERVERMANAGER + '.ServerPool.rest_call',
- side_effect=servermanager.RemoteRestError(
- reason='Failure to trigger except clause.'
- )
- ),
- mock.patch(
- SERVERMANAGER + '.LOG.exception',
- side_effect=KeyError('Failure to break loop')
- )
- ) as (smock, rmock, lmock):
- # should return immediately without the consistency capability
- pl.servers._consistency_watchdog()
- self.assertFalse(smock.called)
- pl.servers.capabilities = ['consistency']
- self.assertRaises(KeyError,
- pl.servers._consistency_watchdog)
- rmock.assert_called_with('GET', '/health', '', {}, [], False)
- self.assertEqual(1, len(lmock.mock_calls))
-
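- # the consistency handshake: each request echoes the hash returned by the
- # previous successful response in X-BSN-BVS-HASH-MATCH (empty on the very
- # first call), so the backend can detect a missed update:
- #   request 1 sends ''           -> reply carries 'HASHHEADER'
- #   request 2 sends 'HASHHEADER' -> reply carries 'HASH2'
- #   request 3 sends 'HASH2'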
- def test_consistency_hash_header(self):
- # mock HTTP class instead of rest_call so we can see headers
- with mock.patch(HTTPCON) as conmock:
- rv = conmock.return_value
- rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
- rv.getresponse.return_value.status = 200
- rv.getresponse.return_value.read.return_value = ''
- with self.network() as network:
- callheaders = rv.request.mock_calls[0][1][3]
- self.assertIn('X-BSN-BVS-HASH-MATCH', callheaders)
- # first call will be empty to indicate no previous state hash
- self.assertEqual(callheaders['X-BSN-BVS-HASH-MATCH'], '')
- # change the header that will be received on delete call
- rv.getresponse.return_value.getheader.return_value = 'HASH2'
- self._delete('networks', network['network']['id'])
- # net delete should have used header received on create
- callheaders = rv.request.mock_calls[1][1][3]
- self.assertEqual(callheaders['X-BSN-BVS-HASH-MATCH'], 'HASHHEADER')
-
- # create again should now use header received from prev delete
- with self.network():
- callheaders = rv.request.mock_calls[2][1][3]
- self.assertIn('X-BSN-BVS-HASH-MATCH', callheaders)
- self.assertEqual(callheaders['X-BSN-BVS-HASH-MATCH'],
- 'HASH2')
-
- def test_consistency_hash_header_no_update_on_bad_response(self):
- # mock HTTP class instead of rest_call so we can see headers
- with mock.patch(HTTPCON) as conmock:
- rv = conmock.return_value
- rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
- rv.getresponse.return_value.status = 200
- rv.getresponse.return_value.read.return_value = ''
- with self.network() as net:
- # change the header that will be received on delete call
- rv.getresponse.return_value.getheader.return_value = 'EVIL'
- rv.getresponse.return_value.status = 'GARBAGE'
- self._delete('networks', net['network']['id'])
-
- # create again should not use header from delete call
- with self.network():
- callheaders = rv.request.mock_calls[2][1][3]
- self.assertIn('X-BSN-BVS-HASH-MATCH', callheaders)
- self.assertEqual(callheaders['X-BSN-BVS-HASH-MATCH'],
- 'HASHHEADER')
-
- def test_file_put_contents(self):
- pl = manager.NeutronManager.get_plugin()
- with mock.patch(SERVERMANAGER + '.open', create=True) as omock:
- pl.servers._file_put_contents('somepath', 'contents')
- omock.assert_has_calls([mock.call('somepath', 'w')])
- omock.return_value.__enter__.return_value.assert_has_calls([
- mock.call.write('contents')
- ])
-
- def test_combine_certs_to_file(self):
- pl = manager.NeutronManager.get_plugin()
- with mock.patch(SERVERMANAGER + '.open', create=True) as omock:
- omock.return_value.__enter__().read.return_value = 'certdata'
- pl.servers._combine_certs_to_file(['cert1.pem', 'cert2.pem'],
- 'combined.pem')
- # the mock is shared between the read and write file handles, so the
- # calls are interleaved
- omock.assert_has_calls([
- mock.call('combined.pem', 'w'),
- mock.call('cert1.pem', 'r'),
- mock.call('cert2.pem', 'r'),
- ], any_order=True)
- omock.return_value.__enter__.return_value.assert_has_calls([
- mock.call.read(),
- mock.call.write('certdata'),
- mock.call.read(),
- mock.call.write('certdata')
- ])
-
- def test_auth_header(self):
- cfg.CONF.set_override('server_auth', 'username:pass', 'RESTPROXY')
- sp = servermanager.ServerPool()
- with mock.patch(HTTPCON) as conmock:
- rv = conmock.return_value
- rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
- sp.rest_create_network('tenant', 'network')
- callheaders = rv.request.mock_calls[0][1][3]
- self.assertIn('Authorization', callheaders)
- self.assertEqual(callheaders['Authorization'],
- 'Basic dXNlcm5hbWU6cGFzcw==')
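- # (the Basic credential is just base64('username:pass'))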
-
- def test_header_add(self):
- sp = servermanager.ServerPool()
- with mock.patch(HTTPCON) as conmock:
- rv = conmock.return_value
- rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
- sp.servers[0].rest_call('GET', '/', headers={'EXTRA-HEADER': 'HI'})
- callheaders = rv.request.mock_calls[0][1][3]
- # verify normal headers weren't mangled
- self.assertIn('Content-type', callheaders)
- self.assertEqual(callheaders['Content-type'],
- 'application/json')
- # verify new header made it in
- self.assertIn('EXTRA-HEADER', callheaders)
- self.assertEqual(callheaders['EXTRA-HEADER'], 'HI')
-
- def test_req_context_header(self):
- sp = manager.NeutronManager.get_plugin().servers
- ncontext = context.Context('uid', 'tid')
- sp.set_context(ncontext)
- with mock.patch(HTTPCON) as conmock:
- rv = conmock.return_value
- rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
- sp.rest_action('GET', '/')
- callheaders = rv.request.mock_calls[0][1][3]
- self.assertIn(servermanager.REQ_CONTEXT_HEADER, callheaders)
- ctxdct = ncontext.to_dict()
- # auth token is not included
- ctxdct.pop('auth_token')
- self.assertEqual(
- ctxdct, jsonutils.loads(
- callheaders[servermanager.REQ_CONTEXT_HEADER]))
-
- def test_capabilities_retrieval(self):
- sp = servermanager.ServerPool()
- with mock.patch(HTTPCON) as conmock:
- rv = conmock.return_value.getresponse.return_value
- rv.getheader.return_value = 'HASHHEADER'
-
- # each server will get different capabilities
- rv.read.side_effect = ['["a","b","c"]', '["b","c","d"]']
- # pool capabilities are the intersection of the two servers' sets
- self.assertEqual(set(['b', 'c']), sp.get_capabilities())
- self.assertEqual(2, rv.read.call_count)
-
- # the pool should cache after the first call so no more
- # HTTP calls should be made
- rv.read.side_effect = ['["w","x","y"]', '["x","y","z"]']
- self.assertEqual(set(['b', 'c']), sp.get_capabilities())
- self.assertEqual(2, rv.read.call_count)
-
- def test_capabilities_retrieval_failure(self):
- sp = servermanager.ServerPool()
- with mock.patch(HTTPCON) as conmock:
- rv = conmock.return_value.getresponse.return_value
- rv.getheader.return_value = 'HASHHEADER'
- # a failure to parse should result in an empty capability set
- rv.read.return_value = 'XXXXX'
- self.assertEqual([], sp.servers[0].get_capabilities())
-
- # one broken server response should empty the whole pool's capability set
- rv.read.side_effect = ['{"a": "b"}', '["b","c","d"]']
- self.assertEqual(set(), sp.get_capabilities())
-
- def test_reconnect_on_timeout_change(self):
- sp = servermanager.ServerPool()
- with mock.patch(HTTPCON) as conmock:
- rv = conmock.return_value
- rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
- sp.servers[0].capabilities = ['keep-alive']
- sp.servers[0].rest_call('GET', '/', timeout=10)
- # even with keep-alive enabled, a change in timeout will trigger
- # a reconnect
- sp.servers[0].rest_call('GET', '/', timeout=75)
- conmock.assert_has_calls([
- mock.call('localhost', 9000, timeout=10),
- mock.call('localhost', 9000, timeout=75),
- ], any_order=True)
-
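- # (0, None, None, None) is the server proxy's sentinel tuple for a
- # connection that could not be established at all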
- def test_connect_failures(self):
- sp = servermanager.ServerPool()
- with mock.patch(HTTPCON, return_value=None):
- resp = sp.servers[0].rest_call('GET', '/')
- self.assertEqual(resp, (0, None, None, None))
- # verify same behavior on ssl class
- sp.servers[0].currentcon = False
- sp.servers[0].ssl = True
- with mock.patch(HTTPSCON, return_value=None):
- resp = sp.servers[0].rest_call('GET', '/')
- self.assertEqual(resp, (0, None, None, None))
-
- def test_reconnect_cached_connection(self):
- sp = servermanager.ServerPool()
- with mock.patch(HTTPCON) as conmock:
- rv = conmock.return_value
- rv.getresponse.return_value.getheader.return_value = 'HASH'
- sp.servers[0].capabilities = ['keep-alive']
- sp.servers[0].rest_call('GET', '/first')
- # raise an error on re-use to verify reconnect
- # return okay the second time so the reconnect works
- rv.request.side_effect = [httplib.ImproperConnectionState(),
- mock.MagicMock()]
- sp.servers[0].rest_call('GET', '/second')
- uris = [c[1][1] for c in rv.request.mock_calls]
- expected = [
- sp.base_uri + '/first',
- sp.base_uri + '/second',
- sp.base_uri + '/second',
- ]
- self.assertEqual(uris, expected)
-
- def test_no_reconnect_recurse_to_infinity(self):
- # retry uses recursion when a reconnect is necessary
- # this test makes sure it stops after 1 recursive call
- sp = servermanager.ServerPool()
- with mock.patch(HTTPCON) as conmock:
- rv = conmock.return_value
-            # the hash header must be a string rather than a mock object
- rv.getresponse.return_value.getheader.return_value = 'HASH'
- sp.servers[0].capabilities = ['keep-alive']
- sp.servers[0].rest_call('GET', '/first')
- # after retrying once, the rest call should raise the
- # exception up
- rv.request.side_effect = httplib.ImproperConnectionState()
- self.assertRaises(httplib.ImproperConnectionState,
- sp.servers[0].rest_call,
- *('GET', '/second'))
- # 1 for the first call, 2 for the second with retry
- self.assertEqual(rv.request.call_count, 3)
-
- def test_socket_error(self):
- sp = servermanager.ServerPool()
- with mock.patch(HTTPCON) as conmock:
- conmock.return_value.request.side_effect = socket.timeout()
- resp = sp.servers[0].rest_call('GET', '/')
- self.assertEqual(resp, (0, None, None, None))
-
- def test_cert_get_fail(self):
- pl = manager.NeutronManager.get_plugin()
- pl.servers.ssl = True
- with mock.patch('os.path.exists', return_value=False):
- self.assertRaises(cfg.Error,
- pl.servers._get_combined_cert_for_server,
- *('example.org', 443))
-
- def test_cert_make_dirs(self):
- pl = manager.NeutronManager.get_plugin()
- pl.servers.ssl = True
- cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
- # pretend base dir exists, 3 children don't, and host cert does
- with contextlib.nested(
- mock.patch('os.path.exists', side_effect=[True, False, False,
- False, True]),
- mock.patch('os.makedirs'),
- mock.patch(SERVERMANAGER + '.ServerPool._combine_certs_to_file')
- ) as (exmock, makemock, combmock):
-            # the host cert exists, so the combined cert path is returned
- self.assertIn(
- 'example.org',
- pl.servers._get_combined_cert_for_server('example.org', 443)
- )
- base = cfg.CONF.RESTPROXY.ssl_cert_directory
- hpath = base + '/host_certs/example.org.pem'
- combpath = base + '/combined/example.org.pem'
- combmock.assert_has_calls([mock.call([hpath], combpath)])
- self.assertEqual(exmock.call_count, 5)
- self.assertEqual(makemock.call_count, 3)
-
- def test_no_cert_error(self):
- pl = manager.NeutronManager.get_plugin()
- pl.servers.ssl = True
- cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
- # pretend base dir exists and 3 children do, but host cert doesn't
- with mock.patch(
- 'os.path.exists',
- side_effect=[True, True, True, True, False]
- ) as exmock:
- # will raise error because no certs found
- self.assertRaises(
- cfg.Error,
- pl.servers._get_combined_cert_for_server,
- *('example.org', 443)
- )
- self.assertEqual(exmock.call_count, 5)
-
- def test_action_success(self):
- pl = manager.NeutronManager.get_plugin()
- self.assertTrue(pl.servers.action_success((200,)))
-
- def test_server_failure(self):
- pl = manager.NeutronManager.get_plugin()
- self.assertTrue(pl.servers.server_failure((404,)))
- # server failure has an ignore codes option
- self.assertFalse(pl.servers.server_failure((404,),
- ignore_codes=[404]))
-
- def test_retry_on_unavailable(self):
- pl = manager.NeutronManager.get_plugin()
- with contextlib.nested(
- mock.patch(SERVERMANAGER + '.ServerProxy.rest_call',
- return_value=(httplib.SERVICE_UNAVAILABLE, 0, 0, 0)),
- mock.patch(SERVERMANAGER + '.time.sleep')
- ) as (srestmock, tmock):
- # making a call should trigger retries with sleeps in between
- pl.servers.rest_call('GET', '/', '', None, [])
- rest_call = [mock.call('GET', '/', '', None, False, reconnect=True,
- hash_handler=mock.ANY)]
- rest_call_count = (
- servermanager.HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT + 1)
- srestmock.assert_has_calls(rest_call * rest_call_count)
- sleep_call = [mock.call(
- servermanager.HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL)]
- # should sleep 1 less time than the number of calls
- sleep_call_count = rest_call_count - 1
- tmock.assert_has_calls(sleep_call * sleep_call_count)
-
- def test_delete_failure_sets_bad_hash(self):
- pl = manager.NeutronManager.get_plugin()
- hash_handler = consistency_db.HashHandler()
- with mock.patch(
- SERVERMANAGER + '.ServerProxy.rest_call',
- return_value=(httplib.INTERNAL_SERVER_ERROR, 0, 0, 0)
- ):
- # a failed delete call should put a bad hash in the DB
- pl.servers.rest_call('DELETE', '/', '', None, [])
- self.assertEqual('INCONSISTENT,INCONSISTENT',
- hash_handler.read_for_update())
-
- def test_conflict_triggers_sync(self):
- pl = manager.NeutronManager.get_plugin()
- with mock.patch(
- SERVERMANAGER + '.ServerProxy.rest_call',
- return_value=(httplib.CONFLICT, 0, 0, 0)
- ) as srestmock:
- # making a call should trigger a conflict sync
- pl.servers.rest_call('GET', '/', '', None, [])
- srestmock.assert_has_calls([
- mock.call('GET', '/', '', None, False, reconnect=True,
- hash_handler=mock.ANY),
- mock.call('PUT', '/topology',
- {'routers': [], 'networks': []},
- timeout=None)
- ])
-
- def test_conflict_sync_raises_error_without_topology(self):
- pl = manager.NeutronManager.get_plugin()
- pl.servers.get_topo_function = None
- with mock.patch(
- SERVERMANAGER + '.ServerProxy.rest_call',
- return_value=(httplib.CONFLICT, 0, 0, 0)
- ):
- # making a call should trigger a conflict sync that will
- # error without the topology function set
- self.assertRaises(
- cfg.Error,
- pl.servers.rest_call,
- *('GET', '/', '', None, [])
- )
-
- def test_floating_calls(self):
- pl = manager.NeutronManager.get_plugin()
- with mock.patch(SERVERMANAGER + '.ServerPool.rest_action') as ramock:
- body1 = {'id': 'somefloat'}
- body2 = {'name': 'myfl'}
- pl.servers.rest_create_floatingip('tenant', body1)
- pl.servers.rest_update_floatingip('tenant', body2, 'id')
- pl.servers.rest_delete_floatingip('tenant', 'oldid')
- ramock.assert_has_calls([
- mock.call('PUT', '/tenants/tenant/floatingips/somefloat',
- body1,
- errstr=u'Unable to create floating IP: %s'),
- mock.call('PUT', '/tenants/tenant/floatingips/id',
- body2,
- errstr=u'Unable to update floating IP: %s'),
- mock.call('DELETE', '/tenants/tenant/floatingips/oldid',
- errstr=u'Unable to delete floating IP: %s')
- ])
-
- def test_HTTPSConnectionWithValidation_without_cert(self):
- con = self.sm.HTTPSConnectionWithValidation(
- 'www.example.org', 443, timeout=90)
- con.source_address = '127.0.0.1'
- con.request("GET", "/")
- self.socket_mock.assert_has_calls([mock.call(
- ('www.example.org', 443), 90, '127.0.0.1'
- )])
- self.wrap_mock.assert_has_calls([mock.call(
- self.socket_mock(), None, None, cert_reqs=ssl.CERT_NONE,
- ssl_version=ssl.PROTOCOL_TLSv1
- )])
- self.assertEqual(con.sock, self.wrap_mock())
-
- def test_HTTPSConnectionWithValidation_with_cert(self):
- con = self.sm.HTTPSConnectionWithValidation(
- 'www.example.org', 443, timeout=90)
- con.combined_cert = 'SOMECERTS.pem'
- con.source_address = '127.0.0.1'
- con.request("GET", "/")
- self.socket_mock.assert_has_calls([mock.call(
- ('www.example.org', 443), 90, '127.0.0.1'
- )])
- self.wrap_mock.assert_has_calls([mock.call(
- self.socket_mock(), None, None, ca_certs='SOMECERTS.pem',
- cert_reqs=ssl.CERT_REQUIRED,
- ssl_version=ssl.PROTOCOL_TLSv1
- )])
- self.assertEqual(con.sock, self.wrap_mock())
-
- def test_HTTPSConnectionWithValidation_tunnel(self):
- tunnel_mock = mock.patch.object(
- self.sm.HTTPSConnectionWithValidation,
- '_tunnel').start()
- con = self.sm.HTTPSConnectionWithValidation(
- 'www.example.org', 443, timeout=90)
- con.source_address = '127.0.0.1'
- con.set_tunnel('myproxy.local', 3128)
- con.request("GET", "/")
- self.socket_mock.assert_has_calls([mock.call(
- ('www.example.org', 443), 90, '127.0.0.1'
- )])
- self.wrap_mock.assert_has_calls([mock.call(
- self.socket_mock(), None, None, cert_reqs=ssl.CERT_NONE,
- ssl_version=ssl.PROTOCOL_TLSv1
- )])
- # _tunnel() doesn't take any args
- tunnel_mock.assert_has_calls([mock.call()])
- self.assertEqual(con._tunnel_host, 'myproxy.local')
- self.assertEqual(con._tunnel_port, 3128)
- self.assertEqual(con.sock, self.wrap_mock())
-
-
-class TestSockets(test_rp.BigSwitchProxyPluginV2TestCase):
-
- def setUp(self):
- super(TestSockets, self).setUp()
-        # the http patch must not be running or it will mangle the
-        # servermanager import, where the https connection classes are defined
- self.httpPatch.stop()
- self.sm = importutils.import_module(SERVERMANAGER)
-
- def test_socket_create_attempt(self):
-        # exercise socket creation to make sure it works on both Python
-        # versions
- con = self.sm.HTTPSConnectionWithValidation('127.0.0.1', 0, timeout=1)
-        # if the connection was created, a connect attempt should raise
-        # a socket error
- self.assertRaises(socket.error, con.connect)
-
-
-class HashLockingTests(test_rp.BigSwitchProxyPluginV2TestCase):
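-    # These tests exercise consistency_db.HashHandler, where the consistency
-    # hash row doubles as a lock: read_for_update() prefixes the stored hash
-    # with a per-handler lock marker and put_hash() replaces it with the new
-    # digest, clearing the lock.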
-
- def _get_hash_from_handler_db(self, handler):
- with handler.session.begin(subtransactions=True):
- res = (handler.session.query(consistency_db.ConsistencyHash).
- filter_by(hash_id=handler.hash_id).first())
- return res.hash
-
- def test_hash_handle_lock_no_initial_record(self):
- handler = consistency_db.HashHandler()
- h1 = handler.read_for_update()
-        # the value returned to the caller should be empty even with the
-        # lock in the DB
- self.assertFalse(h1)
- # db should have a lock marker
- self.assertEqual(handler.lock_marker,
- self._get_hash_from_handler_db(handler))
- # an entry should clear the lock
- handler.put_hash('DIGEST')
- self.assertEqual('DIGEST', self._get_hash_from_handler_db(handler))
-
- def test_hash_handle_lock_existing_record(self):
- handler = consistency_db.HashHandler()
- handler.put_hash('DIGEST') # set initial hash
-
- h1 = handler.read_for_update()
- self.assertEqual('DIGEST', h1)
- self.assertEqual(handler.lock_marker + 'DIGEST',
- self._get_hash_from_handler_db(handler))
-
- # make sure update works
- handler.put_hash('DIGEST2')
- self.assertEqual('DIGEST2', self._get_hash_from_handler_db(handler))
-
- def test_db_duplicate_on_insert(self):
- handler = consistency_db.HashHandler()
- with mock.patch.object(
- handler.session, 'add', side_effect=[db_exc.DBDuplicateEntry, '']
- ) as add_mock:
- handler.read_for_update()
- # duplicate insert failure should result in retry
- self.assertEqual(2, add_mock.call_count)
-
- def test_update_hit_no_records(self):
- handler = consistency_db.HashHandler()
- # set initial hash so update will be required
- handler.put_hash('DIGEST')
- with mock.patch.object(handler._FACADE, 'get_engine') as ge:
- conn = ge.return_value.begin.return_value.__enter__.return_value
- firstresult = mock.Mock()
- # a rowcount of 0 simulates the effect of another db client
- # updating the same record the handler was trying to update
- firstresult.rowcount = 0
- secondresult = mock.Mock()
- secondresult.rowcount = 1
- conn.execute.side_effect = [firstresult, secondresult]
- handler.read_for_update()
- # update should have been called again after the failure
- self.assertEqual(2, conn.execute.call_count)
-
- def test_handler_already_holding_lock(self):
- handler = consistency_db.HashHandler()
- handler.read_for_update() # lock the table
- with mock.patch.object(handler._FACADE, 'get_engine') as ge:
- handler.read_for_update()
- # get engine should not have been called because no update
- # should have been made
- self.assertFalse(ge.called)
-
- def test_clear_lock(self):
- handler = consistency_db.HashHandler()
- handler.put_hash('SOMEHASH')
- handler.read_for_update() # lock the table
- self.assertEqual(handler.lock_marker + 'SOMEHASH',
- self._get_hash_from_handler_db(handler))
- handler.clear_lock()
- self.assertEqual('SOMEHASH',
- self._get_hash_from_handler_db(handler))
-
- def test_clear_lock_skip_after_steal(self):
- handler1 = consistency_db.HashHandler()
- handler1.read_for_update() # lock the table
- handler2 = consistency_db.HashHandler()
- with mock.patch.object(consistency_db, 'MAX_LOCK_WAIT_TIME', new=0):
- handler2.read_for_update()
- before = self._get_hash_from_handler_db(handler1)
- # handler1 should not clear handler2's lock
- handler1.clear_lock()
- self.assertEqual(before, self._get_hash_from_handler_db(handler1))
-
- def test_take_lock_from_other(self):
- handler1 = consistency_db.HashHandler()
- handler1.read_for_update() # lock the table
- handler2 = consistency_db.HashHandler()
- with mock.patch.object(consistency_db, 'MAX_LOCK_WAIT_TIME') as mlock:
- # make handler2 wait for only one iteration
- mlock.__lt__.side_effect = [False, True]
- handler2.read_for_update()
-        # once MAX_LOCK_WAIT_TIME is exceeded, comparisons stop because the
-        # lock is stolen
- self.assertEqual(2, mlock.__lt__.call_count)
- dbentry = self._get_hash_from_handler_db(handler1)
- # handler2 should have the lock
- self.assertIn(handler2.lock_marker, dbentry)
- self.assertNotIn(handler1.lock_marker, dbentry)
-        # the lock only guards read_for_update; any handler may still write
- handler1.put_hash('H1')
-
- def test_failure_to_steal_lock(self):
- handler1 = consistency_db.HashHandler()
- handler1.read_for_update() # lock the table
- handler2 = consistency_db.HashHandler()
- with contextlib.nested(
- mock.patch.object(consistency_db, 'MAX_LOCK_WAIT_TIME'),
- mock.patch.object(handler2, '_optimistic_update_hash_record',
- side_effect=[False, True])
- ) as (mlock, oplock):
- # handler2 will go through 2 iterations since the lock will fail on
- # the first attempt
- mlock.__lt__.side_effect = [False, True, False, True]
- handler2.read_for_update()
- self.assertEqual(4, mlock.__lt__.call_count)
- self.assertEqual(2, oplock.call_count)
+++ /dev/null
-# Copyright 2014 Big Switch Networks, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import contextlib
-import os
-import ssl
-
-import mock
-from oslo_config import cfg
-import webob.exc
-
-from neutron.openstack.common import log as logging
-from neutron.tests.unit.bigswitch import fake_server
-from neutron.tests.unit.bigswitch import test_base
-from neutron.tests.unit import test_api_v2
-from neutron.tests.unit import test_db_plugin as test_plugin
-
-LOG = logging.getLogger(__name__)
-
-SERVERMANAGER = 'neutron.plugins.bigswitch.servermanager'
-HTTPS = SERVERMANAGER + '.HTTPSConnectionWithValidation'
-CERTCOMBINER = SERVERMANAGER + '.ServerPool._combine_certs_to_file'
-FILEPUT = SERVERMANAGER + '.ServerPool._file_put_contents'
-GETCACERTS = SERVERMANAGER + '.ServerPool._get_ca_cert_paths'
-GETHOSTCERT = SERVERMANAGER + '.ServerPool._get_host_cert_path'
-SSLGETCERT = SERVERMANAGER + '.ssl.get_server_certificate'
-FAKECERTGET = 'neutron.tests.unit.bigswitch.fake_server.get_cert_contents'
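-# the dotted paths above are used as mock.patch targets in these tests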
-
-
-class test_ssl_certificate_base(test_plugin.NeutronDbPluginV2TestCase,
- test_base.BigSwitchTestBase):
-
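-    # certificates live under ssl_cert_directory in three subdirectories:
-    # host_certs/<server>.pem, ca_certs/ and combined/<server>.pem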
- plugin_str = ('%s.NeutronRestProxyV2' %
- test_base.RESTPROXY_PKG_PATH)
- servername = None
- cert_base = None
-
- def _setUp(self):
- self.servername = test_api_v2._uuid()
- self.cert_base = cfg.CONF.RESTPROXY.ssl_cert_directory
- self.host_cert_val = 'DUMMYCERTFORHOST%s' % self.servername
- self.host_cert_path = os.path.join(
- self.cert_base,
- 'host_certs',
- '%s.pem' % self.servername
- )
- self.comb_cert_path = os.path.join(
- self.cert_base,
- 'combined',
- '%s.pem' % self.servername
- )
- self.ca_certs_path = os.path.join(
- self.cert_base,
- 'ca_certs'
- )
- cfg.CONF.set_override('servers', ["%s:443" % self.servername],
- 'RESTPROXY')
- self.setup_patches()
-
- # Mock method SSL lib uses to grab cert from server
- self.sslgetcert_m = mock.patch(SSLGETCERT, create=True).start()
- self.sslgetcert_m.return_value = self.host_cert_val
-
- # Mock methods that write and read certs from the file-system
- self.fileput_m = mock.patch(FILEPUT, create=True).start()
- self.certcomb_m = mock.patch(CERTCOMBINER, create=True).start()
- self.getcacerts_m = mock.patch(GETCACERTS, create=True).start()
-
- # this is used to configure what certificate contents the fake HTTPS
- # lib should expect to receive
- self.fake_certget_m = mock.patch(FAKECERTGET, create=True).start()
-
- def setUp(self):
- super(test_ssl_certificate_base, self).setUp(self.plugin_str)
- self.setup_db()
-
-
-class TestSslSticky(test_ssl_certificate_base):
-
- def setUp(self):
- self.setup_config_files()
- cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
- cfg.CONF.set_override('ssl_sticky', True, 'RESTPROXY')
- self._setUp()
- # Set fake HTTPS connection's expectation
- self.fake_certget_m.return_value = self.host_cert_val
- # No CA certs for this test
- self.getcacerts_m.return_value = []
- super(TestSslSticky, self).setUp()
-
- def test_sticky_cert(self):
- # SSL connection should be successful and cert should be cached
- with contextlib.nested(
- mock.patch(HTTPS, new=fake_server.HTTPSHostValidation),
- self.network()
- ):
-            # the CA cert directory should have been checked
- self.getcacerts_m.assert_has_calls([mock.call(self.ca_certs_path)])
- # cert should have been fetched via SSL lib
- self.sslgetcert_m.assert_has_calls(
- [mock.call((self.servername, 443),
- ssl_version=ssl.PROTOCOL_TLSv1)]
- )
-
- # cert should have been recorded
- self.fileput_m.assert_has_calls([mock.call(self.host_cert_path,
- self.host_cert_val)])
-            # no CA certs, so the combined cert contains only the host cert
- self.certcomb_m.assert_has_calls([mock.call([self.host_cert_path],
- self.comb_cert_path)])
-
-
-class TestSslHostCert(test_ssl_certificate_base):
-
- def setUp(self):
- self.setup_config_files()
- cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
- cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
- self.httpsPatch = mock.patch(HTTPS, create=True,
- new=fake_server.HTTPSHostValidation)
- self.httpsPatch.start()
- self._setUp()
- # Set fake HTTPS connection's expectation
- self.fake_certget_m.return_value = self.host_cert_val
- # No CA certs for this test
- self.getcacerts_m.return_value = []
- # Pretend host cert exists
- self.hcertpath_p = mock.patch(GETHOSTCERT,
- return_value=(self.host_cert_path, True),
- create=True).start()
- super(TestSslHostCert, self).setUp()
-
- def test_host_cert(self):
- # SSL connection should be successful because of pre-configured cert
- with self.network():
- self.hcertpath_p.assert_has_calls([
- mock.call(os.path.join(self.cert_base, 'host_certs'),
- self.servername)
- ])
- # sticky is disabled, no fetching allowed
- self.assertFalse(self.sslgetcert_m.call_count)
-            # no CA certs, so the combined cert contains only the host cert
- self.certcomb_m.assert_has_calls([mock.call([self.host_cert_path],
- self.comb_cert_path)])
-
-
-class TestSslCaCert(test_ssl_certificate_base):
-
- def setUp(self):
- self.setup_config_files()
- cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
- cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
- self.httpsPatch = mock.patch(HTTPS, create=True,
- new=fake_server.HTTPSCAValidation)
- self.httpsPatch.start()
- self._setUp()
-
- # pretend to have a few ca certs
- self.getcacerts_m.return_value = ['ca1.pem', 'ca2.pem']
-
- # Set fake HTTPS connection's expectation
- self.fake_certget_m.return_value = 'DUMMYCERTIFICATEAUTHORITY'
-
- super(TestSslCaCert, self).setUp()
-
- def test_ca_cert(self):
- # SSL connection should be successful because CA cert was present
- # If not, attempting to create a network would raise an exception
- with self.network():
- # sticky is disabled, no fetching allowed
- self.assertFalse(self.sslgetcert_m.call_count)
-            # 2 CA certs and no host cert, so the combined cert should
-            # contain just the two CAs
- self.certcomb_m.assert_has_calls([mock.call(['ca1.pem', 'ca2.pem'],
- self.comb_cert_path)])
-
-
-class TestSslWrongHostCert(test_ssl_certificate_base):
-
- def setUp(self):
- self.setup_config_files()
- cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
- cfg.CONF.set_override('ssl_sticky', True, 'RESTPROXY')
- self._setUp()
-
- # Set fake HTTPS connection's expectation to something wrong
- self.fake_certget_m.return_value = 'OTHERCERT'
-
- # No CA certs for this test
- self.getcacerts_m.return_value = []
-
- # Pretend host cert exists
- self.hcertpath_p = mock.patch(GETHOSTCERT,
- return_value=(self.host_cert_path, True),
- create=True).start()
- super(TestSslWrongHostCert, self).setUp()
-
- def test_error_no_cert(self):
- # since there will already be a host cert, sticky should not take
- # effect and there will be an error because the host cert's contents
- # will be incorrect
- tid = test_api_v2._uuid()
- data = {}
- data['network'] = {'tenant_id': tid, 'name': 'name',
- 'admin_state_up': True}
- with mock.patch(HTTPS, new=fake_server.HTTPSHostValidation):
- req = self.new_create_request('networks', data, 'json')
- res = req.get_response(self.api)
- self.assertEqual(res.status_int,
- webob.exc.HTTPInternalServerError.code)
- self.hcertpath_p.assert_has_calls([
- mock.call(os.path.join(self.cert_base, 'host_certs'),
- self.servername)
- ])
-        # sticky is enabled, but a host cert already exists, so no fetch
-        # should occur
- self.assertFalse(self.sslgetcert_m.call_count)
-        # no CA certs, so the combined cert contains only the host cert
- self.certcomb_m.assert_has_calls([mock.call([self.host_cert_path],
- self.comb_cert_path)])
-
-
-class TestSslNoValidation(test_ssl_certificate_base):
-
- def setUp(self):
- self.setup_config_files()
- cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
- cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
- cfg.CONF.set_override('no_ssl_validation', True, 'RESTPROXY')
- self._setUp()
- super(TestSslNoValidation, self).setUp()
-
- def test_validation_disabled(self):
- # SSL connection should be successful without any certificates
- # If not, attempting to create a network will raise an exception
- with contextlib.nested(
- mock.patch(HTTPS, new=fake_server.HTTPSNoValidation),
- self.network()
- ):
-            # with validation disabled, no cert fetching or combining
-            # should occur
- self.assertFalse(self.sslgetcert_m.call_count)
- self.assertFalse(self.certcomb_m.call_count)
+++ /dev/null
-# Copyright 2014 Big Switch Networks, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import contextlib
-import functools
-
-import mock
-from oslo_serialization import jsonutils
-
-from neutron import context as neutron_context
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.plugins.bigswitch import servermanager
-from neutron.plugins.ml2 import config as ml2_config
-from neutron.plugins.ml2.drivers.mech_bigswitch import driver as bsn_driver
-from neutron.plugins.ml2.drivers import type_vlan as vlan_config
-import neutron.tests.unit.bigswitch.test_restproxy_plugin as trp
-from neutron.tests.unit.ml2 import test_ml2_plugin
-from neutron.tests.unit import test_db_plugin
-
-PHYS_NET = 'physnet1'
-VLAN_START = 1000
-VLAN_END = 1100
-SERVER_MANAGER = 'neutron.plugins.bigswitch.servermanager'
-SERVER_POOL = SERVER_MANAGER + '.ServerPool'
-DRIVER_MOD = 'neutron.plugins.ml2.drivers.mech_bigswitch.driver'
-DRIVER = DRIVER_MOD + '.BigSwitchMechanismDriver'
-
-
-class TestBigSwitchMechDriverBase(trp.BigSwitchProxyPluginV2TestCase):
-
- def setUp(self):
- # Configure the ML2 mechanism drivers and network types
- ml2_opts = {
- 'mechanism_drivers': ['bigswitch'],
- 'tenant_network_types': ['vlan'],
- }
- for opt, val in ml2_opts.items():
- ml2_config.cfg.CONF.set_override(opt, val, 'ml2')
-
- # Configure the ML2 VLAN parameters
- phys_vrange = ':'.join([PHYS_NET, str(VLAN_START), str(VLAN_END)])
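-        # yields a range spec of the form 'physnet1:1000:1100'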
- vlan_config.cfg.CONF.set_override('network_vlan_ranges',
- [phys_vrange],
- 'ml2_type_vlan')
- super(TestBigSwitchMechDriverBase,
- self).setUp(test_ml2_plugin.PLUGIN_NAME)
-
-
-class TestBigSwitchMechDriverNetworksV2(test_db_plugin.TestNetworksV2,
- TestBigSwitchMechDriverBase):
- pass
-
-
-class TestBigSwitchMechDriverPortsV2(test_db_plugin.TestPortsV2,
- TestBigSwitchMechDriverBase):
-
- VIF_TYPE = portbindings.VIF_TYPE_OVS
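-    # OVS is the default VIF type; ports bind as IVS when the controller
-    # reports the host as a known switch (see test_bind_ivs_port)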
-
- def setUp(self):
- super(TestBigSwitchMechDriverPortsV2, self).setUp()
- self.port_create_status = 'DOWN'
-
- def test_update_port_status_build(self):
- with self.port() as port:
- self.assertEqual(port['port']['status'], 'DOWN')
- self.assertEqual(self.port_create_status, 'DOWN')
-
- def test_bind_ivs_port(self):
- host_arg = {portbindings.HOST_ID: 'hostname'}
- with contextlib.nested(
- mock.patch(SERVER_POOL + '.rest_get_switch', return_value=True),
- self.port(arg_list=(portbindings.HOST_ID,), **host_arg)
- ) as (rmock, port):
- rmock.assert_called_once_with('hostname')
- p = port['port']
- self.assertEqual('ACTIVE', p['status'])
- self.assertEqual('hostname', p[portbindings.HOST_ID])
- self.assertEqual(portbindings.VIF_TYPE_IVS,
- p[portbindings.VIF_TYPE])
-
- def test_dont_bind_non_ivs_port(self):
- host_arg = {portbindings.HOST_ID: 'hostname'}
- with contextlib.nested(
- mock.patch(SERVER_POOL + '.rest_get_switch',
- side_effect=servermanager.RemoteRestError(
- reason='No such switch', status=404)),
- self.port(arg_list=(portbindings.HOST_ID,), **host_arg)
- ) as (rmock, port):
- rmock.assert_called_once_with('hostname')
- p = port['port']
- self.assertNotEqual(portbindings.VIF_TYPE_IVS,
- p[portbindings.VIF_TYPE])
-
- def test_bind_port_cache(self):
- with contextlib.nested(
- self.subnet(),
- mock.patch(SERVER_POOL + '.rest_get_switch', return_value=True)
- ) as (sub, rmock):
- makeport = functools.partial(self.port, **{
- 'subnet': sub, 'arg_list': (portbindings.HOST_ID,),
- portbindings.HOST_ID: 'hostname'})
-
- with contextlib.nested(makeport(), makeport(),
- makeport()) as ports:
- # response from first should be cached
- rmock.assert_called_once_with('hostname')
- for port in ports:
- self.assertEqual(portbindings.VIF_TYPE_IVS,
- port['port'][portbindings.VIF_TYPE])
- rmock.reset_mock()
- # expired cache should result in new calls
- mock.patch(DRIVER_MOD + '.CACHE_VSWITCH_TIME', new=0).start()
- with contextlib.nested(makeport(), makeport(),
- makeport()) as ports:
- self.assertEqual(3, rmock.call_count)
- for port in ports:
- self.assertEqual(portbindings.VIF_TYPE_IVS,
- port['port'][portbindings.VIF_TYPE])
-
- def test_create404_triggers_background_sync(self):
- # allow the async background thread to run for this test
- self.spawn_p.stop()
- with contextlib.nested(
- mock.patch(SERVER_POOL + '.rest_create_port',
- side_effect=servermanager.RemoteRestError(
- reason=servermanager.NXNETWORK, status=404)),
- mock.patch(DRIVER + '._send_all_data'),
- self.port(**{'device_id': 'devid', 'binding:host_id': 'host',
- 'arg_list': ('binding:host_id',)})
- ) as (mock_http, mock_send_all, p):
- # wait for thread to finish
- mm = manager.NeutronManager.get_plugin().mechanism_manager
- bigdriver = mm.mech_drivers['bigswitch'].obj
- bigdriver.evpool.waitall()
- mock_send_all.assert_has_calls([
- mock.call(
- send_routers=False, send_ports=True,
- send_floating_ips=False,
- triggered_by_tenant=p['port']['tenant_id']
- )
- ])
- self.spawn_p.start()
-
-    def test_update404_triggers_background_sync(self):
- with contextlib.nested(
- mock.patch(DRIVER + '.async_port_create',
- side_effect=servermanager.RemoteRestError(
- reason=servermanager.NXNETWORK, status=404)),
- mock.patch(DRIVER + '._send_all_data'),
- self.port()
- ) as (mock_update, mock_send_all, p):
- plugin = manager.NeutronManager.get_plugin()
- context = neutron_context.get_admin_context()
- plugin.update_port(context, p['port']['id'],
- {'port': {'device_id': 'devid',
- 'binding:host_id': 'host'}})
- mock_send_all.assert_has_calls([
- mock.call(
- send_routers=False, send_ports=True,
- send_floating_ips=False,
- triggered_by_tenant=p['port']['tenant_id']
- )
- ])
-
- def test_backend_request_contents(self):
- with contextlib.nested(
- mock.patch(SERVER_POOL + '.rest_create_port'),
- self.port(**{'device_id': 'devid', 'binding:host_id': 'host',
- 'arg_list': ('binding:host_id',)})
- ) as (mock_rest, p):
- # make sure basic expected keys are present in the port body
- pb = mock_rest.mock_calls[0][1][2]
- self.assertEqual('host', pb['binding:host_id'])
- self.assertIn('bound_segment', pb)
- self.assertIn('network', pb)
-
- def test_bind_external_port(self):
- ext_id = jsonutils.dumps({'type': 'vlan', 'chassis_id': 'FF',
- 'port_id': '1'})
- port_kwargs = {
- portbindings.HOST_ID: ext_id,
- 'device_owner': bsn_driver.EXTERNAL_PORT_OWNER
- }
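-        # external ports encode the switch attachment point as JSON in the
-        # host_id field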
- with contextlib.nested(
- mock.patch(SERVER_POOL + '.rest_create_port'),
- self.port(arg_list=(portbindings.HOST_ID,), **port_kwargs)
- ) as (rmock, port):
- create_body = rmock.mock_calls[-1][1][2]
- self.assertIsNotNone(create_body['bound_segment'])
- self.assertEqual(create_body[portbindings.HOST_ID], ext_id)
-
- def test_req_context_header_present(self):
- with contextlib.nested(
- mock.patch(SERVER_MANAGER + '.ServerProxy.rest_call'),
- self.port(**{'device_id': 'devid', 'binding:host_id': 'host'})
- ) as (mock_rest, p):
- headers = mock_rest.mock_calls[0][1][3]
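-            # the serialized request context rides in this header
-            # (cf. servermanager.REQ_CONTEXT_HEADER)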
- self.assertIn('X-REQ-CONTEXT', headers)