Currently the server throws an error when the agent (device driver)
requests the logical config for a non-active pool. This is a bug:
it is fine for the pool to be in the pending create/update states.
The pool may also already be scheduled for deletion (pending_delete)
while the agent requests its config to apply an earlier update; that
is fine as well, and the agent simply does not deploy such a config.
This patch moves the active-pool check from the server side to the
agent side.
Closes-Bug: #1295491
Change-Id: Ib088355a0b3efffdcd211a8cfe6942833bb9f895
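The resulting contract: the server returns the logical config regardless
of pool status, and the agent alone decides whether to deploy. Below is a
minimal standalone sketch of that agent-side gate; the status tuple mirrors
neutron's ACTIVE_PENDING_STATUSES, while the helper name should_deploy and
the literal config are illustrative only, not part of this patch.

# Sketch only: approximates the guard added to deploy_instance() below.
# PENDING_DELETE is deliberately absent from the tuple, so a pool that
# is scheduled for deletion is returned by the server but never deployed.
ACTIVE_PENDING_STATUSES = ('ACTIVE', 'PENDING_CREATE', 'PENDING_UPDATE')


def should_deploy(logical_config):
    """Return True only if both the vip and the pool are deployable."""
    if not logical_config or 'vip' not in logical_config:
        return False
    for key in ('vip', 'pool'):
        obj = logical_config[key]
        if (obj['status'] not in ACTIVE_PENDING_STATUSES or
                not obj['admin_state_up']):
            return False
    return True


print(should_deploy({'vip': {'status': 'ACTIVE', 'admin_state_up': True},
                     'pool': {'status': 'PENDING_DELETE',
                              'admin_state_up': True}}))  # False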
qry = context.session.query(loadbalancer_db.Pool)
qry = qry.filter_by(id=pool_id)
pool = qry.one()
- if pool.status != constants.ACTIVE:
- raise n_exc.Invalid(_('Pool_id %(pool_id)s status ACTIVE '
- 'is expected but status is %(status)s') %
- {'pool_id': pool_id,
- 'status': pool.status})
-
retval = {}
retval['pool'] = self.plugin._make_pool_dict(pool)
@n_utils.synchronized('haproxy-driver')
def deploy_instance(self, logical_config):
- # do actual deploy only if vip is configured and active
- if ('vip' not in logical_config or
- (logical_config['vip']['status'] not in
- constants.ACTIVE_PENDING_STATUSES) or
- not logical_config['vip']['admin_state_up']):
+ # do the actual deploy only if both vip and pool are configured,
+ # in an active or pending create/update state, and admin-up
+ if (not logical_config or
+ 'vip' not in logical_config or
+ (logical_config['vip']['status'] not in
+ constants.ACTIVE_PENDING_STATUSES) or
+ not logical_config['vip']['admin_state_up'] or
+ (logical_config['pool']['status'] not in
+ constants.ACTIVE_PENDING_STATUSES) or
+ not logical_config['pool']['admin_state_up']):
return
if self.exists(logical_config['pool']['id']):
self.driver.vif_driver = self.vif_driver
self.fake_config = {
- 'pool': {'id': 'pool_id'},
+ 'pool': {'id': 'pool_id', 'status': 'ACTIVE',
+ 'admin_state_up': True},
'vip': {'id': 'vip_id', 'port': {'id': 'port_id'},
'status': 'ACTIVE', 'admin_state_up': True}
}
self.driver.deploy_instance(self.fake_config)
self.assertFalse(exists.called)
+ def test_deploy_instance_pool_status_non_active(self):
+ with mock.patch.object(self.driver, 'exists') as exists:
+ self.fake_config['pool']['status'] = 'NON_ACTIVE'
+ self.driver.deploy_instance(self.fake_config)
+ self.assertFalse(exists.called)
+
+ def test_deploy_instance_pool_admin_state_down(self):
+ with mock.patch.object(self.driver, 'exists') as exists:
+ self.fake_config['pool']['admin_state_up'] = False
+ self.driver.deploy_instance(self.fake_config)
+ self.assertFalse(exists.called)
+
def test_refresh_device(self):
with mock.patch.object(self.driver, 'deploy_instance') as deploy:
pool_id = 'pool_id1'
from six.moves import xrange
from webob import exc
-from neutron.common import exceptions
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as st_db
)
self.assertFalse(ready)
- def test_get_logical_device_inactive(self):
+ def test_get_logical_device_non_active(self):
with self.pool() as pool:
- with self.vip(pool=pool) as vip:
- with self.member(pool_id=vip['vip']['pool_id']):
- self.assertRaises(
- exceptions.Invalid,
- self.callbacks.get_logical_device,
- context.get_admin_context(),
- pool['pool']['id'])
+ ctx = context.get_admin_context()
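+ # with this patch the server returns the config for each of
+ # these non-active states instead of raising an exception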
+ for status in ('INACTIVE', 'PENDING_CREATE', 'PENDING_UPDATE'):
+ self.plugin_instance.update_status(
+ ctx, ldb.Pool, pool['pool']['id'], status)
+ pool['pool']['status'] = status
+ expected = {
+ 'pool': pool['pool'],
+ 'members': [],
+ 'healthmonitors': [],
+ 'driver': 'dummy'
+ }
+
+ logical_config = self.callbacks.get_logical_device(
+ ctx, pool['pool']['id']
+ )
+
+ self.assertEqual(expected, logical_config)
def test_get_logical_device_active(self):
with self.pool() as pool: