From: Oleg Bondarev Date: Mon, 24 Mar 2014 12:58:55 +0000 (+0400) Subject: LBaaS: make device driver decide whether to deploy instance X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=2ecd8203f006408d2b761abd344d96fad3fd311e;p=openstack-build%2Fneutron-build.git LBaaS: make device driver decide whether to deploy instance Currently server throws an error in case agent (device driver) requests logical config for non-active pool, which is a bug as it's ok if pool is in pending create/update states. Also the pool may be already scheduled for delete (pending_delete) while agent requests it to perform some previous update, which is also ok and agent just doesn't deploy such config. This patch moves active pool check from server side to agent side Closes-Bug: #1295491 Change-Id: Ib088355a0b3efffdcd211a8cfe6942833bb9f895 --- diff --git a/neutron/services/loadbalancer/drivers/common/agent_driver_base.py b/neutron/services/loadbalancer/drivers/common/agent_driver_base.py index aec7be0ef..99b2351ca 100644 --- a/neutron/services/loadbalancer/drivers/common/agent_driver_base.py +++ b/neutron/services/loadbalancer/drivers/common/agent_driver_base.py @@ -95,12 +95,6 @@ class LoadBalancerCallbacks(object): qry = context.session.query(loadbalancer_db.Pool) qry = qry.filter_by(id=pool_id) pool = qry.one() - if pool.status != constants.ACTIVE: - raise n_exc.Invalid(_('Pool_id %(pool_id)s status ACTIVE ' - 'is expected but status is %(status)s') % - {'pool_id': pool_id, - 'status': pool.status}) - retval = {} retval['pool'] = self.plugin._make_pool_dict(pool) diff --git a/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py index 6bb971be3..7307bca1e 100644 --- a/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py +++ b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py @@ -265,11 +265,15 @@ class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver): 
@n_utils.synchronized('haproxy-driver') def deploy_instance(self, logical_config): - # do actual deploy only if vip is configured and active - if ('vip' not in logical_config or - (logical_config['vip']['status'] not in - constants.ACTIVE_PENDING_STATUSES) or - not logical_config['vip']['admin_state_up']): + # do actual deploy only if vip and pool are configured and active + if (not logical_config or + 'vip' not in logical_config or + (logical_config['vip']['status'] not in + constants.ACTIVE_PENDING_STATUSES) or + not logical_config['vip']['admin_state_up'] or + (logical_config['pool']['status'] not in + constants.ACTIVE_PENDING_STATUSES) or + not logical_config['pool']['admin_state_up']): return if self.exists(logical_config['pool']['id']): diff --git a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py index b98cc29f2..504c592e9 100644 --- a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py +++ b/neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py @@ -48,7 +48,8 @@ class TestHaproxyNSDriver(base.BaseTestCase): self.driver.vif_driver = self.vif_driver self.fake_config = { - 'pool': {'id': 'pool_id'}, + 'pool': {'id': 'pool_id', 'status': 'ACTIVE', + 'admin_state_up': True}, 'vip': {'id': 'vip_id', 'port': {'id': 'port_id'}, 'status': 'ACTIVE', 'admin_state_up': True} } @@ -357,6 +358,18 @@ class TestHaproxyNSDriver(base.BaseTestCase): self.driver.deploy_instance(self.fake_config) self.assertFalse(exists.called) + def test_deploy_instance_pool_status_non_active(self): + with mock.patch.object(self.driver, 'exists') as exists: + self.fake_config['pool']['status'] = 'NON_ACTIVE' + self.driver.deploy_instance(self.fake_config) + self.assertFalse(exists.called) + + def test_deploy_instance_pool_admin_state_down(self): + with mock.patch.object(self.driver, 'exists') as exists: + 
self.fake_config['pool']['admin_state_up'] = False + self.driver.deploy_instance(self.fake_config) + self.assertFalse(exists.called) + def test_refresh_device(self): with mock.patch.object(self.driver, 'deploy_instance') as deploy: pool_id = 'pool_id1' diff --git a/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py b/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py index c828bd684..8efec3a79 100644 --- a/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py +++ b/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py @@ -22,7 +22,6 @@ import mock from six.moves import xrange from webob import exc -from neutron.common import exceptions from neutron import context from neutron.db.loadbalancer import loadbalancer_db as ldb from neutron.db import servicetype_db as st_db @@ -180,15 +179,25 @@ class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase): ) self.assertFalse(ready) - def test_get_logical_device_inactive(self): + def test_get_logical_device_non_active(self): with self.pool() as pool: - with self.vip(pool=pool) as vip: - with self.member(pool_id=vip['vip']['pool_id']): - self.assertRaises( - exceptions.Invalid, - self.callbacks.get_logical_device, - context.get_admin_context(), - pool['pool']['id']) + ctx = context.get_admin_context() + for status in ('INACTIVE', 'PENDING_CREATE', 'PENDING_UPDATE'): + self.plugin_instance.update_status( + ctx, ldb.Pool, pool['pool']['id'], status) + pool['pool']['status'] = status + expected = { + 'pool': pool['pool'], + 'members': [], + 'healthmonitors': [], + 'driver': 'dummy' + } + + logical_config = self.callbacks.get_logical_device( + ctx, pool['pool']['id'] + ) + + self.assertEqual(expected, logical_config) def test_get_logical_device_active(self): with self.pool() as pool: