From e28c49670b6e5cfb86e54175bcd8d9067cb4f80e Mon Sep 17 00:00:00 2001
From: Oleg Bondarev
Date: Thu, 12 Dec 2013 12:13:22 +0400
Subject: [PATCH] LBaaS: fix handling pending create/update members and health
 monitors

When the agent requests a load balancer's logical config from the
server, the server returns only active pool members and
health_monitors. The server needs to also return members and
monitors that are in pending states.

This change also includes a small refactoring that moves the
ACTIVE_PENDING set to a common place.

Change-Id: I8e10004f199f982b055da18ea7a0e5e4d11fa7fb
Closes-Bug: #1259965
---
 neutron/plugins/common/constants.py           |  6 +++
 .../loadbalancer/drivers/haproxy/cfg.py       |  6 ++-
 .../drivers/haproxy/namespace_driver.py       |  8 +---
 .../drivers/haproxy/plugin_driver.py          | 28 +++++------
 .../loadbalancer/drivers/haproxy/test_cfg.py  | 10 +++-
 .../drivers/haproxy/test_plugin_driver.py     | 48 +++++++++++++++++++
 6 files changed, 80 insertions(+), 26 deletions(-)

diff --git a/neutron/plugins/common/constants.py b/neutron/plugins/common/constants.py
index 4f49f67fd..d2af7e473 100644
--- a/neutron/plugins/common/constants.py
+++ b/neutron/plugins/common/constants.py
@@ -58,6 +58,12 @@ PENDING_DELETE = "PENDING_DELETE"
 INACTIVE = "INACTIVE"
 ERROR = "ERROR"
 
+ACTIVE_PENDING = (
+    ACTIVE,
+    PENDING_CREATE,
+    PENDING_UPDATE
+)
+
 # FWaaS firewall rule action
 FWAAS_ALLOW = "allow"
 FWAAS_DENY = "deny"
diff --git a/neutron/services/loadbalancer/drivers/haproxy/cfg.py b/neutron/services/loadbalancer/drivers/haproxy/cfg.py
index 776b4b150..74447c6b5 100644
--- a/neutron/services/loadbalancer/drivers/haproxy/cfg.py
+++ b/neutron/services/loadbalancer/drivers/haproxy/cfg.py
@@ -47,7 +47,7 @@ STATS_MAP = {
     constants.STATS_RESPONSE_ERRORS: 'eresp'
 }
 
-ACTIVE = qconstants.ACTIVE
+ACTIVE_PENDING = qconstants.ACTIVE_PENDING
 INACTIVE = qconstants.INACTIVE
 
 
@@ -138,7 +138,9 @@ def _build_backend(config):
 
     # add the members
     for member in config['members']:
-        if member['status'] in (ACTIVE, INACTIVE) and member['admin_state_up']:
+        if ((member['status'] in ACTIVE_PENDING or
+             member['status'] == INACTIVE)
+            and member['admin_state_up']):
             server = (('server %(id)s %(address)s:%(protocol_port)s '
                        'weight %(weight)s') % member) + server_addon
             if _has_http_cookie_persistence(config):
diff --git a/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py
index b4b56a3ae..c3aa7adfe 100644
--- a/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py
+++ b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py
@@ -38,12 +38,6 @@ LOG = logging.getLogger(__name__)
 NS_PREFIX = 'qlbaas-'
 DRIVER_NAME = 'haproxy_ns'
 
-ACTIVE_PENDING = (
-    constants.ACTIVE,
-    constants.PENDING_CREATE,
-    constants.PENDING_UPDATE
-)
-
 STATE_PATH_DEFAULT = '$state_path/lbaas'
 USER_GROUP_DEFAULT = 'nogroup'
 OPTS = [
@@ -272,7 +266,7 @@ class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver):
     def deploy_instance(self, logical_config):
         # do actual deploy only if vip is configured and active
         if ('vip' not in logical_config or
-            logical_config['vip']['status'] not in ACTIVE_PENDING or
+            logical_config['vip']['status'] not in constants.ACTIVE_PENDING or
             not logical_config['vip']['admin_state_up']):
             return
 
diff --git a/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py b/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py
index ad42b0c98..a269af306 100644
--- a/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py
+++ b/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py
@@ -36,12 +36,6 @@ from neutron.services.loadbalancer.drivers import abstract_driver
 
 LOG = logging.getLogger(__name__)
 
-ACTIVE_PENDING = (
-    constants.ACTIVE,
-    constants.PENDING_CREATE,
-    constants.PENDING_UPDATE
-)
-
 AGENT_SCHEDULER_OPTS = [
     cfg.StrOpt('loadbalancer_pool_scheduler_driver',
                default='neutron.services.loadbalancer.agent_scheduler'
@@ -92,7 +86,8 @@ class LoadBalancerCallbacks(object):
 
         qry = context.session.query(loadbalancer_db.Pool.id)
         qry = qry.filter(loadbalancer_db.Pool.id.in_(pool_ids))
-        qry = qry.filter(loadbalancer_db.Pool.status.in_(ACTIVE_PENDING))
+        qry = qry.filter(
+            loadbalancer_db.Pool.status.in_(constants.ACTIVE_PENDING))
         up = True  # makes pep8 and sqlalchemy happy
         qry = qry.filter(loadbalancer_db.Pool.admin_state_up == up)
         return [id for id, in qry]
@@ -123,13 +118,14 @@ class LoadBalancerCallbacks(object):
         )
         retval['members'] = [
             self.plugin._make_member_dict(m)
-            for m in pool.members if m.status in (constants.ACTIVE,
-                                                  constants.INACTIVE)
+            for m in pool.members if (
+                m.status in constants.ACTIVE_PENDING or
+                m.status == constants.INACTIVE)
         ]
         retval['healthmonitors'] = [
             self.plugin._make_health_monitor_dict(hm.healthmonitor)
             for hm in pool.monitors
-            if hm.status == constants.ACTIVE
+            if hm.status in constants.ACTIVE_PENDING
         ]
         retval['driver'] = (
             self.plugin.drivers[pool.provider.provider_name].device_driver)
@@ -143,18 +139,18 @@ class LoadBalancerCallbacks(object):
         pool = qry.one()
 
         # set all resources to active
-        if pool.status in ACTIVE_PENDING:
+        if pool.status in constants.ACTIVE_PENDING:
             pool.status = constants.ACTIVE
 
-        if pool.vip and pool.vip.status in ACTIVE_PENDING:
+        if pool.vip and pool.vip.status in constants.ACTIVE_PENDING:
             pool.vip.status = constants.ACTIVE
 
         for m in pool.members:
-            if m.status in ACTIVE_PENDING:
+            if m.status in constants.ACTIVE_PENDING:
                 m.status = constants.ACTIVE
 
         for hm in pool.monitors:
-            if hm.status in ACTIVE_PENDING:
+            if hm.status in constants.ACTIVE_PENDING:
                 hm.status = constants.ACTIVE
 
     def update_status(self, context, obj_type, obj_id, status):
@@ -365,7 +361,7 @@ class AgentBasedPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
 
     def update_vip(self, context, old_vip, vip):
         agent = self.get_pool_agent(context, vip['pool_id'])
-        if vip['status'] in ACTIVE_PENDING:
+        if vip['status'] in constants.ACTIVE_PENDING:
             self.agent_rpc.update_vip(context, old_vip, vip, agent['host'])
         else:
             self.agent_rpc.delete_vip(context, vip, agent['host'])
@@ -385,7 +381,7 @@ class AgentBasedPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
 
     def update_pool(self, context, old_pool, pool):
         agent = self.get_pool_agent(context, pool['id'])
-        if pool['status'] in ACTIVE_PENDING:
+        if pool['status'] in constants.ACTIVE_PENDING:
             self.agent_rpc.update_pool(context, old_pool, pool,
                                        agent['host'])
         else:
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py b/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py
index 6b40393fd..7c937c982 100644
--- a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py
+++ b/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py
@@ -110,6 +110,12 @@ class TestHaproxyCfg(base.BaseTestCase):
                          'id': 'member2_id',
                          'address': '10.0.0.4',
                          'protocol_port': 80,
+                         'weight': 1},
+                        {'status': 'PENDING_CREATE',
+                         'admin_state_up': True,
+                         'id': 'member3_id',
+                         'address': '10.0.0.5',
+                         'protocol_port': 80,
                          'weight': 1}],
             'healthmonitors': [{'admin_state_up': True,
                                 'delay': 3,
@@ -126,7 +132,9 @@ class TestHaproxyCfg(base.BaseTestCase):
             '\tserver member1_id 10.0.0.3:80 weight 1 '
             'check inter 3s fall 4 cookie 0',
             '\tserver member2_id 10.0.0.4:80 weight 1 '
-            'check inter 3s fall 4 cookie 1']
+            'check inter 3s fall 4 cookie 1',
+            '\tserver member3_id 10.0.0.5:80 weight 1 '
+            'check inter 3s fall 4 cookie 2']
 
         opts = cfg._build_backend(test_config)
         self.assertEqual(expected_opts, list(opts))
diff --git a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_plugin_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_plugin_driver.py
index 031888e09..821c8cc6d 100644
--- a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_plugin_driver.py
+++ b/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_plugin_driver.py
@@ -261,6 +261,54 @@ class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase):
 
                     self.assertEqual([member['member']],
                                      logical_config['members'])
 
+    def test_get_logical_device_pending_create_member(self):
+        with self.pool() as pool:
+            with self.vip(pool=pool) as vip:
+                with self.member(pool_id=vip['vip']['pool_id']) as member:
+                    ctx = context.get_admin_context()
+                    self.plugin_instance.update_status(ctx, ldb.Pool,
+                                                       pool['pool']['id'],
+                                                       'ACTIVE')
+                    self.plugin_instance.update_status(ctx, ldb.Vip,
+                                                       vip['vip']['id'],
+                                                       'ACTIVE')
+
+                    member = self.plugin_instance.get_member(
+                        ctx, member['member']['id'])
+                    self.assertEqual('PENDING_CREATE',
+                                     member['status'])
+                    logical_config = self.callbacks.get_logical_device(
+                        ctx, pool['pool']['id'])
+
+                    self.assertEqual([member], logical_config['members'])
+
+    def test_get_logical_device_pending_create_health_monitor(self):
+        with self.pool() as pool:
+            with self.vip(pool=pool) as vip:
+                with self.health_monitor() as monitor:
+                    ctx = context.get_admin_context()
+                    self.plugin_instance.update_status(ctx, ldb.Pool,
+                                                       pool['pool']['id'],
+                                                       'ACTIVE')
+                    self.plugin_instance.update_status(ctx, ldb.Vip,
+                                                       vip['vip']['id'],
+                                                       'ACTIVE')
+                    self.plugin_instance.create_pool_health_monitor(
+                        ctx, monitor, pool['pool']['id'])
+                    pool = self.plugin_instance.get_pool(
+                        ctx, pool['pool']['id'])
+                    monitor = self.plugin_instance.get_health_monitor(
+                        ctx, monitor['health_monitor']['id'])
+
+                    self.assertEqual(
+                        'PENDING_CREATE',
+                        pool['health_monitors_status'][0]['status'])
+                    logical_config = self.callbacks.get_logical_device(
+                        ctx, pool['id'])
+
+                    self.assertEqual([monitor],
+                                     logical_config['healthmonitors'])
+
     def _update_port_test_helper(self, expected, func, **kwargs):
         core = self.plugin_instance._core_plugin
-- 
2.45.2
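
Note: in short, the rule this patch applies when rendering backend members
is: include a member if its status is ACTIVE, PENDING_CREATE, PENDING_UPDATE,
or INACTIVE, and the member is administratively up. Below is a minimal,
self-contained sketch of that filter, as applied in cfg._build_backend()
(and, analogously, to members and monitors in
LoadBalancerCallbacks.get_logical_device()). The constant values mirror
neutron/plugins/common/constants.py; the eligible_members helper and the
sample data are illustrative only, not part of the patch.

    # Sketch of the member filter introduced by this patch; illustrative only.
    ACTIVE = "ACTIVE"
    PENDING_CREATE = "PENDING_CREATE"
    PENDING_UPDATE = "PENDING_UPDATE"
    INACTIVE = "INACTIVE"

    # Mirrors the ACTIVE_PENDING tuple added to neutron/plugins/common/constants.py.
    ACTIVE_PENDING = (ACTIVE, PENDING_CREATE, PENDING_UPDATE)


    def eligible_members(members):
        """Return the members that would be rendered into the haproxy config
        (hypothetical helper; the real check is inline in _build_backend)."""
        return [m for m in members
                if (m['status'] in ACTIVE_PENDING or
                    m['status'] == INACTIVE) and m['admin_state_up']]


    if __name__ == '__main__':
        sample = [
            {'id': 'member1', 'status': ACTIVE, 'admin_state_up': True},
            {'id': 'member2', 'status': PENDING_CREATE, 'admin_state_up': True},
            {'id': 'member3', 'status': 'ERROR', 'admin_state_up': True},
            {'id': 'member4', 'status': ACTIVE, 'admin_state_up': False},
        ]
        # Before this patch only member1 qualified; member2 (PENDING_CREATE)
        # was dropped from the generated config. Now both are rendered.
        assert [m['id'] for m in eligible_members(sample)] == ['member1',
                                                               'member2']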