review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
LBaaS: make device driver decide whether to deploy instance
author Oleg Bondarev <obondarev@mirantis.com>
Mon, 24 Mar 2014 12:58:55 +0000 (16:58 +0400)
committer Oleg Bondarev <obondarev@mirantis.com>
Mon, 24 Mar 2014 14:55:34 +0000 (18:55 +0400)
Currently the server throws an error when the agent (device driver)
requests the logical config for a non-active pool. This is a bug,
since it is fine for the pool to be in the pending create/update
states. The pool may also already be scheduled for deletion
(pending_delete) while the agent requests it in order to perform some
earlier update; this is also fine, and the agent simply doesn't deploy
such a config.

This patch moves the active-pool check from the server side to the agent side.

Closes-Bug: #1295491
Change-Id: Ib088355a0b3efffdcd211a8cfe6942833bb9f895
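
[Reviewer note] For clarity, a minimal sketch of the decision the device
driver now makes on its own. The helper name should_deploy is hypothetical;
the real check is inlined in deploy_instance in the haproxy diff below:

    # Hypothetical helper summarizing the new agent-side gate: deploy only
    # when both the vip and the pool are present, admin-enabled, and in an
    # active or pending create/update state.
    def should_deploy(logical_config, active_pending_statuses):
        if not logical_config or 'vip' not in logical_config:
            return False
        vip = logical_config['vip']
        pool = logical_config['pool']
        return (vip['status'] in active_pending_statuses and
                vip['admin_state_up'] and
                pool['status'] in active_pending_statuses and
                pool['admin_state_up'])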

neutron/services/loadbalancer/drivers/common/agent_driver_base.py
neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py
neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py
neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py

index aec7be0ef2c65dbf76532b4c7e9bcf3b2e47564c..99b2351caf90bf101e4511da8651b4dbe429aa83 100644 (file)
@@ -95,12 +95,6 @@ class LoadBalancerCallbacks(object):
             qry = context.session.query(loadbalancer_db.Pool)
             qry = qry.filter_by(id=pool_id)
             pool = qry.one()
-            if pool.status != constants.ACTIVE:
-                raise n_exc.Invalid(_('Pool_id %(pool_id)s status ACTIVE '
-                                      'is expected but status is %(status)s') %
-                                    {'pool_id': pool_id,
-                                     'status': pool.status})
-
             retval = {}
             retval['pool'] = self.plugin._make_pool_dict(pool)
 
index 6bb971be3e45478af51620285e083a9f0b538ea8..7307bca1e1dc78e4f074d70f735f3cae473ec05e 100644 (file)
@@ -265,11 +265,15 @@ class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver):
 
     @n_utils.synchronized('haproxy-driver')
     def deploy_instance(self, logical_config):
-        # do actual deploy only if vip is configured and active
-        if ('vip' not in logical_config or
-            (logical_config['vip']['status'] not in
-             constants.ACTIVE_PENDING_STATUSES) or
-            not logical_config['vip']['admin_state_up']):
+        # do actual deploy only if vip and pool are configured and active
+        if (not logical_config or
+                'vip' not in logical_config or
+                (logical_config['vip']['status'] not in
+                 constants.ACTIVE_PENDING_STATUSES) or
+                not logical_config['vip']['admin_state_up'] or
+                (logical_config['pool']['status'] not in
+                 constants.ACTIVE_PENDING_STATUSES) or
+                not logical_config['pool']['admin_state_up']):
             return
 
         if self.exists(logical_config['pool']['id']):
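
[Reviewer note] For context, the path that exercises this gate: the agent's
refresh_device fetches the logical config over RPC and hands it straight to
deploy_instance, which now silently returns for non-deployable configs
instead of the server raising. A simplified sketch (signature per the
namespace driver, body abridged):

    def refresh_device(self, pool_id):
        # The server no longer raises for non-active pools, so this RPC
        # succeeds and the gate at the top of deploy_instance decides
        # whether anything is actually deployed.
        logical_config = self.plugin_rpc.get_logical_device(pool_id)
        self.deploy_instance(logical_config)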
index b98cc29f208209481b1f0e27b9253104bc7789bf..504c592e997023ff8c6dc60483635d3579ce7ba2 100644 (file)
@@ -48,7 +48,8 @@ class TestHaproxyNSDriver(base.BaseTestCase):
         self.driver.vif_driver = self.vif_driver
 
         self.fake_config = {
-            'pool': {'id': 'pool_id'},
+            'pool': {'id': 'pool_id', 'status': 'ACTIVE',
+                     'admin_state_up': True},
             'vip': {'id': 'vip_id', 'port': {'id': 'port_id'},
                     'status': 'ACTIVE', 'admin_state_up': True}
         }
@@ -357,6 +358,18 @@ class TestHaproxyNSDriver(base.BaseTestCase):
             self.driver.deploy_instance(self.fake_config)
             self.assertFalse(exists.called)
 
+    def test_deploy_instance_pool_status_non_active(self):
+        with mock.patch.object(self.driver, 'exists') as exists:
+            self.fake_config['pool']['status'] = 'NON_ACTIVE'
+            self.driver.deploy_instance(self.fake_config)
+            self.assertFalse(exists.called)
+
+    def test_deploy_instance_pool_admin_state_down(self):
+        with mock.patch.object(self.driver, 'exists') as exists:
+            self.fake_config['pool']['admin_state_up'] = False
+            self.driver.deploy_instance(self.fake_config)
+            self.assertFalse(exists.called)
+
     def test_refresh_device(self):
         with mock.patch.object(self.driver, 'deploy_instance') as deploy:
             pool_id = 'pool_id1'
index c828bd6846c216c784f1f7e81b080ab7f2f38297..8efec3a79781bab77890c6ab05aa6b38ec404e42 100644 (file)
@@ -22,7 +22,6 @@ import mock
 from six.moves import xrange
 from webob import exc
 
-from neutron.common import exceptions
 from neutron import context
 from neutron.db.loadbalancer import loadbalancer_db as ldb
 from neutron.db import servicetype_db as st_db
@@ -180,15 +179,25 @@ class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase):
                 )
                 self.assertFalse(ready)
 
-    def test_get_logical_device_inactive(self):
+    def test_get_logical_device_non_active(self):
         with self.pool() as pool:
-            with self.vip(pool=pool) as vip:
-                with self.member(pool_id=vip['vip']['pool_id']):
-                    self.assertRaises(
-                        exceptions.Invalid,
-                        self.callbacks.get_logical_device,
-                        context.get_admin_context(),
-                        pool['pool']['id'])
+            ctx = context.get_admin_context()
+            for status in ('INACTIVE', 'PENDING_CREATE', 'PENDING_UPDATE'):
+                self.plugin_instance.update_status(
+                    ctx, ldb.Pool, pool['pool']['id'], status)
+                pool['pool']['status'] = status
+                expected = {
+                    'pool': pool['pool'],
+                    'members': [],
+                    'healthmonitors': [],
+                    'driver': 'dummy'
+                }
+
+                logical_config = self.callbacks.get_logical_device(
+                    ctx, pool['pool']['id']
+                )
+
+                self.assertEqual(expected, logical_config)
 
     def test_get_logical_device_active(self):
         with self.pool() as pool: