admin_password = %SERVICE_PASSWORD%
signing_dir = $state_path/keystone-signing
-[lbaas]
-# ==================================================================================================
-# driver_fqn is the fully qualified name of the lbaas driver that will be loaded by the lbass plugin
-# ==================================================================================================
-# driver_fqn = neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver
-
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
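+# Multiple providers may be enabled by repeating the option. For example, the
+# noop test driver shipped with neutron could be registered as a second,
+# non-default provider (the name "Noop" here is arbitrary):
+# service_provider=LOADBALANCER:Noop:neutron.services.loadbalancer.drivers.noop.noop_driver.NoopLbaaSDriver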
from neutron.db import db_base_plugin_v2 as base_db
from neutron.db import model_base
from neutron.db import models_v2
+from neutron.db import servicetype_db as st_db
from neutron.extensions import loadbalancer
from neutron.extensions.loadbalancer import LoadBalancerPluginBase
from neutron import manager
cascade="all, delete-orphan")
vip = orm.relationship(Vip, backref='pool')
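+    # join-load the provider association so _make_pool_dict can expose the
+    # provider name without issuing an extra query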
+ provider = orm.relationship(
+ st_db.ProviderResourceAssociation,
+ uselist=False,
+ lazy="joined",
+ primaryjoin="Pool.id==ProviderResourceAssociation.resource_id",
+ foreign_keys=[st_db.ProviderResourceAssociation.resource_id]
+ )
+
class HealthMonitor(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron loadbalancer healthmonitor."""
'lb_method': pool['lb_method'],
'admin_state_up': pool['admin_state_up'],
'status': pool['status'],
- 'status_description': pool['status_description']}
+ 'status_description': pool['status_description'],
+ 'provider': ''
+ }
+
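+        # the association row is written after the pool itself (see
+        # create_pool), so it may be absent while the pool is being created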
+ if pool.provider:
+ res['provider'] = pool.provider.provider_name
# Get the associated members
res['members'] = [member['id'] for member in pool['members']]
# Get the associated health_monitors
res['health_monitors'] = [
monitor['monitor_id'] for monitor in pool['monitors']]
-
return self._fields(res, fields)
def update_pool_stats(self, context, pool_id, data=None):
pool_db.stats = self._create_pool_stats(context, pool_db['id'])
context.session.add(pool_db)
- pool_db = self._get_resource(context, Pool, pool_db['id'])
return self._make_pool_dict(pool_db)
def update_pool(self, context, id, pool):
p = pool['pool']
-
with context.session.begin(subtransactions=True):
pool_db = self._get_resource(context, Pool, id)
self.assert_modification_allowed(pool_db)
def add_resource_association(self, context, service_type, provider_name,
resource_id):
r = self.conf.get_service_providers(
- filters={'service_type': service_type, 'name': provider_name})
+ filters={'service_type': [service_type], 'name': [provider_name]})
if not r:
- raise pconf.ServiceProviderNotFound(service_type=service_type)
+ raise pconf.ServiceProviderNotFound(provider=provider_name,
+ service_type=service_type)
with context.session.begin(subtransactions=True):
# we don't actually need service type for association.
assoc = ProviderResourceAssociation(provider_name=provider_name,
resource_id=resource_id)
context.session.add(assoc)
+
+ def del_resource_associations(self, context, resource_ids):
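+        """Removes provider associations for the given resource ids."""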
+ if not resource_ids:
+ return
+ with context.session.begin(subtransactions=True):
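+            # synchronize_session='fetch' selects the matching rows first,
+            # so objects already loaded in the session stay consistent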
+ (context.session.query(ProviderResourceAssociation).
+ filter(
+ ProviderResourceAssociation.resource_id.in_(resource_ids)).
+ delete(synchronize_session='fetch'))
'protocol': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']},
'is_visible': True},
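+    # provider is create-only (allow_put=False): a pool cannot be moved to a
+    # different provider after creation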
+ 'provider': {'allow_post': True, 'allow_put': False,
+ 'validate': {'type:string': None},
+ 'is_visible': True, 'default': attr.ATTR_NOT_SPECIFIED},
'lb_method': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
def delete_member(self, context, member):
pass
- @abc.abstractmethod
- def create_health_monitor(self, context, health_monitor):
- pass
-
@abc.abstractmethod
def update_health_monitor(self, context,
old_health_monitor,
pool_id):
pass
- @abc.abstractmethod
- def delete_health_monitor(self, context, health_monitor):
- """Driver may call the code below in order to delete the monitor.
- self.plugin._delete_db_health_monitor(context, health_monitor["id"])
- """
- pass
-
@abc.abstractmethod
def create_pool_health_monitor(self, context,
health_monitor,
agent = self.get_pool_agent(context, pool_id)
self.agent_rpc.modify_pool(context, pool_id, agent['host'])
- def create_health_monitor(self, context, health_monitor):
- pass
-
- def delete_health_monitor(self, context, health_monitor):
- self.plugin._delete_db_health_monitor(context, health_monitor["id"])
-
def stats(self, context, pool_id):
pass
def delete_member(self, context, member):
self.plugin._delete_db_member(context, member["id"])
- @log.log
- def create_health_monitor(self, context, health_monitor):
- pass
-
@log.log
def update_health_monitor(self, context, old_health_monitor,
health_monitor,
pool_association):
pass
- @log.log
- def delete_health_monitor(self, context, health_monitor):
- self.plugin._delete_db_health_monitor(context, health_monitor["id"])
-
@log.log
def create_pool_health_monitor(self, context,
health_monitor, pool_id):
#
# @author: Avishay Balderman, Radware
-from oslo.config import cfg
-
-from neutron.common import legacy
+from neutron.api.v2 import attributes as attrs
+from neutron.common import exceptions as n_exc
+from neutron import context
from neutron.db import api as qdbapi
-from neutron.db.loadbalancer import loadbalancer_db
-from neutron.openstack.common import importutils
+from neutron.db.loadbalancer import loadbalancer_db as ldb
+from neutron.db import servicetype_db as st_db
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer import agent_scheduler
+from neutron.services import provider_configuration as pconf
+from neutron.services import service_base
LOG = logging.getLogger(__name__)
-DEFAULT_DRIVER = ("neutron.services.loadbalancer.drivers.haproxy"
- ".plugin_driver.HaproxyOnHostPluginDriver")
-
-lbaas_plugin_opts = [
- cfg.StrOpt('driver_fqn',
- default=DEFAULT_DRIVER,
- help=_('LBaaS driver Fully Qualified Name'))
-]
-
-cfg.CONF.register_opts(lbaas_plugin_opts, "LBAAS")
-legacy.override_config(cfg.CONF, [('LBAAS', 'driver_fqn')])
-
-class LoadBalancerPlugin(loadbalancer_db.LoadBalancerPluginDb,
+class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
agent_scheduler.LbaasAgentSchedulerDbMixin):
-
"""Implementation of the Neutron Loadbalancer Service Plugin.
This class manages the workflow of LBaaS request/response.
Most DB related works are implemented in class
loadbalancer_db.LoadBalancerPluginDb.
"""
- supported_extension_aliases = ["lbaas", "lbaas_agent_scheduler"]
+ supported_extension_aliases = ["lbaas",
+ "lbaas_agent_scheduler",
+ "service-type"]
# lbaas agent notifiers to handle agent update operations;
# can be updated by plugin drivers while loading;
"""Initialization for the loadbalancer service plugin."""
qdbapi.register_models()
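+        # singleton that holds the parsed service_providers configuration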
+ self.service_type_manager = st_db.ServiceTypeManager.get_instance()
self._load_drivers()
def _load_drivers(self):
- """Loads plugin-driver from configuration.
-
- That method will later leverage service type framework
+ """Loads plugin-drivers specified in configuration."""
+ self.drivers, self.default_provider = service_base.load_drivers(
+ constants.LOADBALANCER, self)
+
+        # extensions are not loaded at this point, so prevent policy
+        # from being loaded
+ ctx = context.get_admin_context(load_admin_roles=False)
+ # stop service in case provider was removed, but resources were not
+ self._check_orphan_pool_associations(ctx, self.drivers.keys())
+
+ def _check_orphan_pool_associations(self, context, provider_names):
+ """Checks remaining associations between pools and providers.
+
+        If the admin has not undeployed resources whose provider was
+        removed from the configuration, the neutron service is stopped.
+        Such resources must be deleted before the provider is removed
+        from the configuration.
"""
+ pools = self.get_pools(context)
+ lost_providers = set([pool['provider'] for pool in pools
+ if pool['provider'] not in provider_names])
+ # resources are left without provider - stop the service
+ if lost_providers:
+ msg = _("Delete associated loadbalancer pools before "
+ "removing providers %s") % list(lost_providers)
+            LOG.error(msg)
+ raise SystemExit(msg)
+
+ def _get_driver_for_provider(self, provider):
+ if provider in self.drivers:
+ return self.drivers[provider]
+        # no driver is loaded for this provider (should never be reached)
+ raise n_exc.Invalid(_("Error retrieving driver for provider %s") %
+ provider)
+
+ def _get_driver_for_pool(self, context, pool_id):
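+        """Returns the driver for the provider the pool is associated with."""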
+ pool = self.get_pool(context, pool_id)
try:
- self.driver = importutils.import_object(
- cfg.CONF.LBAAS.driver_fqn, self
- )
- except ImportError:
- LOG.exception(_("Error loading LBaaS driver %s"),
- cfg.CONF.LBAAS.driver_fqn)
+ return self.drivers[pool['provider']]
+ except KeyError:
+ raise n_exc.Invalid(_("Error retrieving provider for pool %s") %
+ pool_id)
def get_plugin_type(self):
return constants.LOADBALANCER
def create_vip(self, context, vip):
v = super(LoadBalancerPlugin, self).create_vip(context, vip)
- self.driver.create_vip(context, v)
+ driver = self._get_driver_for_pool(context, v['pool_id'])
+ driver.create_vip(context, v)
return v
def update_vip(self, context, id, vip):
vip['vip']['status'] = constants.PENDING_UPDATE
old_vip = self.get_vip(context, id)
v = super(LoadBalancerPlugin, self).update_vip(context, id, vip)
- self.driver.update_vip(context, old_vip, v)
+ driver = self._get_driver_for_pool(context, v['pool_id'])
+ driver.update_vip(context, old_vip, v)
return v
def _delete_db_vip(self, context, id):
super(LoadBalancerPlugin, self).delete_vip(context, id)
def delete_vip(self, context, id):
- self.update_status(context, loadbalancer_db.Vip,
+ self.update_status(context, ldb.Vip,
id, constants.PENDING_DELETE)
v = self.get_vip(context, id)
- self.driver.delete_vip(context, v)
+ driver = self._get_driver_for_pool(context, v['pool_id'])
+ driver.delete_vip(context, v)
+
+ def _get_provider_name(self, context, pool):
+ if ('provider' in pool and
+ pool['provider'] != attrs.ATTR_NOT_SPECIFIED):
+ provider_name = pconf.normalize_provider_name(pool['provider'])
+ self.validate_provider(provider_name)
+ return provider_name
+ else:
+ if not self.default_provider:
+ raise pconf.DefaultServiceProviderNotFound(
+ service_type=constants.LOADBALANCER)
+ return self.default_provider
def create_pool(self, context, pool):
+ provider_name = self._get_provider_name(context, pool['pool'])
p = super(LoadBalancerPlugin, self).create_pool(context, pool)
- self.driver.create_pool(context, p)
+
+ self.service_type_manager.add_resource_association(
+ context,
+ constants.LOADBALANCER,
+ provider_name, p['id'])
+        # add the provider name to the pool dict, since the provider was
+        # not yet known to the db plugin when the pool row was created
+ p['provider'] = provider_name
+ driver = self.drivers[provider_name]
+ driver.create_pool(context, p)
return p
def update_pool(self, context, id, pool):
pool['pool']['status'] = constants.PENDING_UPDATE
old_pool = self.get_pool(context, id)
p = super(LoadBalancerPlugin, self).update_pool(context, id, pool)
- self.driver.update_pool(context, old_pool, p)
+ driver = self._get_driver_for_provider(p['provider'])
+ driver.update_pool(context, old_pool, p)
return p
def _delete_db_pool(self, context, id):
# proxy the call until plugin inherits from DBPlugin
- super(LoadBalancerPlugin, self).delete_pool(context, id)
+ # rely on uuid uniqueness:
+ with context.session.begin(subtransactions=True):
+ self.service_type_manager.del_resource_associations(context, [id])
+ super(LoadBalancerPlugin, self).delete_pool(context, id)
def delete_pool(self, context, id):
- self.update_status(context, loadbalancer_db.Pool,
+ self.update_status(context, ldb.Pool,
id, constants.PENDING_DELETE)
p = self.get_pool(context, id)
- self.driver.delete_pool(context, p)
+ driver = self._get_driver_for_provider(p['provider'])
+ driver.delete_pool(context, p)
def create_member(self, context, member):
m = super(LoadBalancerPlugin, self).create_member(context, member)
- self.driver.create_member(context, m)
+ driver = self._get_driver_for_pool(context, m['pool_id'])
+ driver.create_member(context, m)
return m
def update_member(self, context, id, member):
member['member']['status'] = constants.PENDING_UPDATE
old_member = self.get_member(context, id)
m = super(LoadBalancerPlugin, self).update_member(context, id, member)
- self.driver.update_member(context, old_member, m)
+ driver = self._get_driver_for_pool(context, m['pool_id'])
+ driver.update_member(context, old_member, m)
return m
def _delete_db_member(self, context, id):
super(LoadBalancerPlugin, self).delete_member(context, id)
def delete_member(self, context, id):
- self.update_status(context, loadbalancer_db.Member,
+ self.update_status(context, ldb.Member,
id, constants.PENDING_DELETE)
m = self.get_member(context, id)
- self.driver.delete_member(context, m)
+ driver = self._get_driver_for_pool(context, m['pool_id'])
+ driver.delete_member(context, m)
def create_health_monitor(self, context, health_monitor):
hm = super(LoadBalancerPlugin, self).create_health_monitor(
context,
health_monitor
)
- self.driver.create_health_monitor(context, hm)
return hm
def update_health_monitor(self, context, id, health_monitor):
with context.session.begin(subtransactions=True):
qry = context.session.query(
- loadbalancer_db.PoolMonitorAssociation
- ).filter_by(monitor_id=hm['id'])
+ ldb.PoolMonitorAssociation
+ ).filter_by(monitor_id=hm['id']).join(ldb.Pool)
for assoc in qry:
- self.driver.update_health_monitor(context, old_hm,
- hm, assoc['pool_id'])
+ driver = self._get_driver_for_pool(context, assoc['pool_id'])
+ driver.update_health_monitor(context, old_hm,
+ hm, assoc['pool_id'])
return hm
def _delete_db_pool_health_monitor(self, context, hm_id, pool_id):
with context.session.begin(subtransactions=True):
hm = self.get_health_monitor(context, id)
qry = context.session.query(
- loadbalancer_db.PoolMonitorAssociation
- ).filter_by(monitor_id=id)
+ ldb.PoolMonitorAssociation
+ ).filter_by(monitor_id=id).join(ldb.Pool)
for assoc in qry:
- self.driver.delete_pool_health_monitor(context,
- hm,
- assoc['pool_id'])
- self.driver.delete_health_monitor(context, hm)
+ driver = self._get_driver_for_pool(context, assoc['pool_id'])
+ driver.delete_pool_health_monitor(context,
+ hm,
+ assoc['pool_id'])
+ super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
def create_pool_health_monitor(self, context, health_monitor, pool_id):
retval = super(LoadBalancerPlugin, self).create_pool_health_monitor(
)
monitor_id = health_monitor['health_monitor']['id']
hm = self.get_health_monitor(context, monitor_id)
- self.driver.create_pool_health_monitor(
- context, hm, pool_id)
+ driver = self._get_driver_for_pool(context, pool_id)
+ driver.create_pool_health_monitor(context, hm, pool_id)
return retval
def delete_pool_health_monitor(self, context, id, pool_id):
self.update_pool_health_monitor(context, id, pool_id,
constants.PENDING_DELETE)
hm = self.get_health_monitor(context, id)
- self.driver.delete_pool_health_monitor(
- context, hm, pool_id)
+ driver = self._get_driver_for_pool(context, pool_id)
+ driver.delete_pool_health_monitor(context, hm, pool_id)
def stats(self, context, pool_id):
- stats_data = self.driver.stats(context, pool_id)
+ driver = self._get_driver_for_pool(context, pool_id)
+ stats_data = driver.stats(context, pool_id)
# if we get something from the driver -
# update the db and return the value from db
# else - return what we have in db
pool = self.get_pool(context, vip['pool_id'])
vip['pool'] = pool
- vip['members'] = [
- self.get_member(context, member_id)
- for member_id in pool['members']]
- vip['health_monitors'] = [
- self.get_health_monitor(context, hm_id)
- for hm_id in pool['health_monitors']]
+ vip['members'] = [self.get_member(context, member_id)
+ for member_id in pool['members']]
+ vip['health_monitors'] = [self.get_health_monitor(context, hm_id)
+ for hm_id in pool['health_monitors']]
return vip
+
+ def validate_provider(self, provider):
+ if provider not in self.drivers:
+ raise pconf.ServiceProviderNotFound(
+ provider=provider, service_type=constants.LOADBALANCER)
"""Parse service definition opts and returns result."""
def validate_name(name):
if len(name) > 255:
- raise n_exc.Invalid("Provider name is limited by 255 characters:"
- " %s" % name)
+        raise n_exc.Invalid(
+            _("Provider name is limited to 255 characters: %s") % name)
svc_providers_opt = cfg.CONF.service_providers.service_provider
res = []
return res
-class ServiceProviderNotFound(n_exc.NotFound):
- message = _("Service provider could not be found "
+class ServiceProviderNotFound(n_exc.InvalidInput):
+ message = _("Service provider '%(provider)s' could not be found "
"for service type %(service_type)s")
-class DefaultServiceProviderNotFound(ServiceProviderNotFound):
+class DefaultServiceProviderNotFound(n_exc.InvalidInput):
message = _("Service type %(service_type)s does not have a default "
"service provider")
+class ServiceProviderAlreadyAssociated(n_exc.Conflict):
+ message = _("Resource '%(resource_id)s' is already associated with "
+ "provider '%(provider)s' for service type '%(service_type)s'")
+
+
class ProviderConfiguration(object):
def __init__(self, prov_data):
self.providers = {}
import abc
from neutron.api import extensions
+from neutron.db import servicetype_db as sdb
+from neutron.openstack.common import importutils
+from neutron.openstack.common import log as logging
+from neutron.services import provider_configuration as pconf
+
+LOG = logging.getLogger(__name__)
class ServicePluginBase(extensions.PluginInterface):
def get_plugin_description(self):
"""Return string description of the plugin."""
pass
+
+
+def load_drivers(service_type, plugin):
+    """Loads the drivers enabled for a specific service.
+
+    Passes the plugin instance to each driver's constructor.
+ """
+ service_type_manager = sdb.ServiceTypeManager.get_instance()
+    providers = service_type_manager.get_service_providers(
+        None, filters={'service_type': [service_type]})
+ if not providers:
+ msg = (_("No providers specified for '%s' service, exiting") %
+ service_type)
+ LOG.error(msg)
+ raise SystemExit(msg)
+
+ drivers = {}
+ for provider in providers:
+ try:
+ drivers[provider['name']] = importutils.import_object(
+ provider['driver'], plugin
+ )
+ LOG.debug(_("Loaded '%(provider)s' provider for service "
+ "%(service_type)s"),
+ {'provider': provider['driver'],
+ 'service_type': service_type})
+ except ImportError:
+ LOG.exception(_("Error loading provider '%(provider)s' for "
+ "service %(service_type)s"),
+ {'provider': provider['driver'],
+ 'service_type': service_type})
+ raise
+
+ default_provider = None
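+    # a missing default provider is tolerated here; callers such as
+    # _get_provider_name decide whether a default is mandatory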
+ try:
+ provider = service_type_manager.get_default_service_provider(
+ None, service_type)
+ default_provider = provider['name']
+ except pconf.DefaultServiceProviderNotFound:
+ LOG.info(_("Default provider is not specified for service type %s"),
+ service_type)
+
+ return drivers, default_provider
from neutron import context
import neutron.db.l3_db # noqa
from neutron.db.loadbalancer import loadbalancer_db as ldb
+from neutron.db import servicetype_db as sdb
import neutron.extensions
from neutron.extensions import loadbalancer
from neutron.plugins.common import constants
from neutron.services.loadbalancer import (
plugin as loadbalancer_plugin
)
+from neutron.services import provider_configuration as pconf
from neutron.tests.unit import test_db_plugin
'protocol': protocol,
'admin_state_up': admin_state_up,
'tenant_id': self._tenant_id}}
- for arg in ('description'):
+ for arg in ('description', 'provider'):
if arg in kwargs and kwargs[arg] is not None:
data['pool'][arg] = kwargs[arg]
-
pool_req = self.new_create_request('pools', data, fmt)
pool_res = pool_req.get_response(self.ext_api)
if expected_res_status:
class LoadBalancerPluginDbTestCase(LoadBalancerTestMixin,
test_db_plugin.NeutronDbPluginV2TestCase):
- def setUp(self, core_plugin=None, lb_plugin=None):
+ def setUp(self, core_plugin=None, lb_plugin=None, lbaas_provider=None):
service_plugins = {'lb_plugin_name': DB_LB_PLUGIN_KLASS}
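+        # default to the noop driver so DB tests run without a real backend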
+ if not lbaas_provider:
+ lbaas_provider = (
+ constants.LOADBALANCER +
+ ':lbaas:neutron.services.loadbalancer.'
+ 'drivers.noop.noop_driver.NoopLbaaSDriver:default')
+ cfg.CONF.set_override('service_provider',
+ [lbaas_provider],
+ 'service_providers')
+        # force the service type manager to reload the configuration
+ sdb.ServiceTypeManager._instance = None
+
super(LoadBalancerPluginDbTestCase, self).setUp(
service_plugins=service_plugins
)
get_lbaas_agent_patcher.start().return_value = mock_lbaas_agent
mock_lbaas_agent.__getitem__.return_value = {'host': 'host'}
self.addCleanup(mock.patch.stopall)
+ self.addCleanup(cfg.CONF.reset)
ext_mgr = PluginAwareExtensionManager(
extensions_path,
class TestLoadBalancer(LoadBalancerPluginDbTestCase):
def setUp(self):
- cfg.CONF.set_override('driver_fqn',
- 'neutron.services.loadbalancer.drivers.noop'
- '.noop_driver.NoopLbaaSDriver',
- group='LBAAS')
self.addCleanup(cfg.CONF.reset)
super(TestLoadBalancer, self).setUp()
pool = self.pool(name=name, lb_method='UNSUPPORTED')
self.assertRaises(webob.exc.HTTPClientError, pool.__enter__)
+ def _create_pool_directly_via_plugin(self, provider_name):
+        # the default provider will be haproxy
+ prov1 = (constants.LOADBALANCER +
+ ':lbaas:neutron.services.loadbalancer.'
+ 'drivers.noop.noop_driver.NoopLbaaSDriver')
+ prov2 = (constants.LOADBALANCER +
+ ':haproxy:neutron.services.loadbalancer.'
+ 'drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver'
+ ':default')
+ cfg.CONF.set_override('service_provider',
+ [prov1, prov2],
+ 'service_providers')
+ sdb.ServiceTypeManager._instance = None
+ self.plugin = loadbalancer_plugin.LoadBalancerPlugin()
+ with self.subnet() as subnet:
+ ctx = context.get_admin_context()
+            # create the pool with the other provider, 'lbaas',
+            # which is the noop driver
+ pool = {'name': 'pool1',
+ 'subnet_id': subnet['subnet']['id'],
+ 'lb_method': 'ROUND_ROBIN',
+ 'protocol': 'HTTP',
+ 'admin_state_up': True,
+ 'tenant_id': self._tenant_id,
+ 'provider': provider_name,
+ 'description': ''}
+ self.plugin.create_pool(ctx, {'pool': pool})
+ assoc = ctx.session.query(sdb.ProviderResourceAssociation).one()
+ self.assertEqual(assoc.provider_name,
+ pconf.normalize_provider_name(provider_name))
+
+ def test_create_pool_another_provider(self):
+ self._create_pool_directly_via_plugin('lbaas')
+
+ def test_create_pool_unnormalized_provider_name(self):
+ self._create_pool_directly_via_plugin('LBAAS')
+
+    def test_create_pool_nonexistent_provider(self):
+        self.assertRaises(
+            pconf.ServiceProviderNotFound,
+            self._create_pool_directly_via_plugin, 'nonexistent')
+
def test_create_pool(self):
name = "pool1"
keys = [('name', name),
res)
def test_driver_call_create_pool_health_monitor(self):
- with mock.patch.object(self.plugin.driver,
+ with mock.patch.object(self.plugin.drivers['lbaas'],
'create_pool_health_monitor') as driver_call:
with contextlib.nested(
self.pool(),
self.assertEqual(assoc['status'], 'ACTIVE')
self.assertEqual(assoc['status_description'], 'ok')
+ def test_check_orphan_pool_associations(self):
+ with contextlib.nested(
+            # create pools with the default noop driver
+ self.pool(),
+ self.pool()
+ ) as (p1, p2):
+            # check that two provider associations exist
+ ctx = context.get_admin_context()
+ qry = ctx.session.query(sdb.ProviderResourceAssociation)
+ self.assertEqual(qry.count(), 2)
+            # remove the provider from the configuration
+ cfg.CONF.set_override('service_provider',
+ [constants.LOADBALANCER +
+ ':lbaas1:neutron.services.loadbalancer.'
+ 'drivers.noop.noop_driver.'
+ 'NoopLbaaSDriver:default'],
+ 'service_providers')
+ sdb.ServiceTypeManager._instance = None
+            # the plugin constructor calls _check_orphan_pool_associations
+ self.assertRaises(
+ SystemExit,
+ loadbalancer_plugin.LoadBalancerPlugin
+ )
+
class TestLoadBalancerXML(TestLoadBalancer):
fmt = 'xml'
from neutron.common import exceptions
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as ldb
+from neutron.db import servicetype_db as st_db
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
- super(TestLoadBalancerPluginBase, self).setUp()
-
+ # needed to reload provider configuration
+ st_db.ServiceTypeManager._instance = None
+ super(TestLoadBalancerPluginBase, self).setUp(
+ lbaas_provider=('LOADBALANCER:lbaas:neutron.services.'
+ 'loadbalancer.drivers.haproxy.plugin_driver.'
+ 'HaproxyOnHostPluginDriver:default'))
# create another API instance to make testing easier
# pass a mock to our API instance
'.plugin_driver.HaproxyOnHostPluginDriver'
'.create_pool').start()
+        # patch the pool->driver lookup so tests exercise the haproxy driver
+        self.mock_get_driver = mock.patch.object(
+            self.plugin_instance, '_get_driver_for_pool').start()
+        self.mock_get_driver.return_value = (
+            plugin_driver.HaproxyOnHostPluginDriver(self.plugin_instance))
+
self.addCleanup(mock.patch.stopall)
def test_create_vip(self):
with self.pool() as pool:
pool['pool']['status'] = 'INACTIVE'
ctx = context.get_admin_context()
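+            # provider is create-only, so strip it before calling update_pool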
+ del pool['pool']['provider']
self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool)
self.mock_api.destroy_pool.assert_called_once_with(
mock.ANY, pool['pool']['id'], 'host')
def test_update_pool_no_vip_id(self):
with self.pool() as pool:
ctx = context.get_admin_context()
+ del pool['pool']['provider']
self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool)
self.assertFalse(self.mock_api.destroy_pool.called)
self.assertFalse(self.mock_api.reload_pool.called)
with self.pool() as pool:
with self.vip(pool=pool):
ctx = context.get_admin_context()
+ del pool['pool']['provider']
self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool)
self.mock_api.reload_pool.assert_called_once_with(
mock.ANY, pool['pool']['id'], 'host')
# limitations under the License.
import mock
+from oslo.config import cfg
from webob import exc
from neutron.api import extensions
self.saved_attr_map[resource] = attrs.copy()
service_plugins = {
'lb_plugin_name': test_db_loadbalancer.DB_LB_PLUGIN_KLASS}
+
+        # the default provider should support agent scheduling
+ cfg.CONF.set_override(
+ 'service_provider',
+ [('LOADBALANCER:lbaas:neutron.services.'
+ 'loadbalancer.drivers.haproxy.plugin_driver.'
+ 'HaproxyOnHostPluginDriver:default')],
+ 'service_providers')
+
super(LBaaSAgentSchedulerTestCase, self).setUp(
self.plugin_str, service_plugins=service_plugins)
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
self.assertRaises(lbaas_agentscheduler.NoEligibleLbaasAgent,
lbaas_plugin.create_pool, self.adminContext, pool)
- def test_schedule_poll_with_down_agent(self):
+ def test_schedule_pool_with_down_agent(self):
lbaas_hosta = {
'binary': 'neutron-loadbalancer-agent',
'host': LBAAS_HOSTA,
'subnet_id': 'test',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
+ 'provider': 'lbaas',
'admin_state_up': True,
'tenant_id': 'test',
'description': 'test'}}
import webtest
from neutron.api import extensions
-from neutron.api.v2 import attributes
+from neutron.api.v2 import attributes as attr
from neutron.common import config
from neutron.extensions import loadbalancer
from neutron import manager
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
- attributes.RESOURCE_ATTRIBUTE_MAP.update(
+ attr.RESOURCE_ATTRIBUTE_MAP.update(
loadbalancer.RESOURCE_ATTRIBUTE_MAP)
return loadbalancer.Loadbalancer.get_resources()
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['pool'])
+ return_value['provider'] = 'lbaas'
return_value.update({'status': "ACTIVE", 'id': pool_id})
instance = self.plugin.return_value
res = self.api.post(_get_path('lb/pools', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
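+        # the API layer substitutes ATTR_NOT_SPECIFIED for the omitted
+        # 'provider' attribute before invoking the plugin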
+ data['pool']['provider'] = attr.ATTR_NOT_SPECIFIED
instance.create_pool.assert_called_with(mock.ANY,
pool=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)