review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
Adding static routes data for members
author     Evgeny Fedoruk <evgenyf@radware.com>
           Sun, 25 May 2014 13:52:20 +0000 (06:52 -0700)
committer  Evgeny Fedoruk <evgenyf@radware.com>
           Sun, 15 Jun 2014 07:54:06 +0000 (00:54 -0700)
Adding extra data to member entities for routing.
Subnet, mask, and gateway data is attached to each member.
Default values are:
 subnet=255.255.255.255
 mask=255.255.255.255
 GW=255.255.255.255

Routing info is determined, rather than set to the default values, only
when the LB is connected to both the pool subnet and the VIP subnet and
the member is not on the pool's subnet (a condensed sketch of this logic
follows the two cases below):

1. If exactly one port is found for the member's IP address, the member's
   subnet and mask are set to those of the port's subnet, and the gateway
   is set to the pool subnet's gateway address

2. If no port, or more than one port, is found for the member's IP address,
   the member's subnet is set to the member's IP address, the gateway is set
   to the pool subnet's gateway, and the mask is left at the default
   255.255.255.255

New unit tests were added covering the following scenarios:
1. Adding three members on three different subnets: the VIP's, the pool's,
   and a third subnet
2. Adding a member on a subnet other than the pool's, with no port
3. Adding a member on a subnet other than the pool's, with multiple ports
   having the same IP address but on different subnets

An internal VIP graph building function (_populate_vip_graph) was added
for more convenient code reuse and better structure.

Change-Id: I6d592ffe8ee3f82f56a4bfbe1d420541871dad2d
Closes-Bug: 1291417

neutron/services/loadbalancer/drivers/radware/driver.py
neutron/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py

index 4490e919086d5c67de8e9fc09f11281116769083..7f198a5645bf0622598e03e04488bc48aaf21d2b 100644 (file)
@@ -19,6 +19,7 @@
 import base64
 import copy
 import httplib
+import netaddr
 import threading
 import time
 
@@ -189,54 +190,56 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
         self.completion_handler.setDaemon(True)
         self.completion_handler_started = False
 
+    def _populate_vip_graph(self, context, vip):
+        ext_vip = self.plugin.populate_vip_graph(context, vip)
+        vip_network_id = self._get_vip_network_id(context, ext_vip)
+        pool_network_id = self._get_pool_network_id(context, ext_vip)
+
+        # if VIP and PIP are different, we need an IP address for the PIP
+        # so create port on PIP's network and use its IP address
+        if vip_network_id != pool_network_id:
+            pip_address = self._create_port_for_pip(
+                context,
+                vip['tenant_id'],
+                _make_pip_name_from_vip(vip),
+                pool_network_id)
+            ext_vip['pip_address'] = pip_address
+        else:
+            ext_vip['pip_address'] = vip['address']
+
+        ext_vip['vip_network_id'] = vip_network_id
+        ext_vip['pool_network_id'] = pool_network_id
+        return ext_vip
+
     def create_vip(self, context, vip):
         log_info = {'vip': vip,
                     'extended_vip': 'NOT_ASSIGNED',
-                    'vip_network_id': 'NOT_ASSIGNED',
-                    'service_name': 'NOT_ASSIGNED',
-                    'pip_info': 'NOT_ASSIGNED'}
+                    'service_name': 'NOT_ASSIGNED'}
         try:
-            extended_vip = self.plugin.populate_vip_graph(context, vip)
-            vip_network_id = self._get_vip_network_id(context, extended_vip)
-            pool_network_id = self._get_pool_network_id(context, extended_vip)
-            service_name = self._get_service(vip_network_id, pool_network_id,
-                                             vip['tenant_id'])
-            log_info['extended_vip'] = extended_vip
-            log_info['vip_network_id'] = vip_network_id
+            ext_vip = self._populate_vip_graph(context, vip)
+
+            service_name = self._get_service(ext_vip)
+            log_info['extended_vip'] = ext_vip
             log_info['service_name'] = service_name
 
             self._create_workflow(
                 vip['pool_id'], self.l4_wf_name,
                 {"service": service_name})
-            # if VIP and PIP are different, we need an IP address for the PIP
-            # so create port on PIP's network and use its IP address
-            if vip_network_id != pool_network_id:
-                pip_address = self._create_port_for_pip(
-                    context,
-                    vip['tenant_id'],
-                    _make_pip_name_from_vip(vip),
-                    pool_network_id)
-                extended_vip['pip_address'] = pip_address
-                log_info['pip_info'] = 'pip_address: ' + pip_address
-            else:
-                extended_vip['pip_address'] = extended_vip['address']
-                log_info['pip_info'] = 'vip == pip: %(address)s' % extended_vip
             self._update_workflow(
                 vip['pool_id'],
-                self.l4_action_name, extended_vip, context)
+                self.l4_action_name, ext_vip, context)
 
         finally:
             LOG.debug(_('vip: %(vip)s, '
                         'extended_vip: %(extended_vip)s, '
-                        'network_id: %(vip_network_id)s, '
-                        'service_name: %(service_name)s, '
-                        'pip_info: %(pip_info)s'), log_info)
+                        'service_name: %(service_name)s, '),
+                      log_info)
 
     def update_vip(self, context, old_vip, vip):
-        extended_vip = self.plugin.populate_vip_graph(context, vip)
+        ext_vip = self._populate_vip_graph(context, vip)
         self._update_workflow(
             vip['pool_id'], self.l4_action_name,
-            extended_vip, context, False, lb_db.Vip, vip['id'])
+            ext_vip, context, False, lb_db.Vip, vip['id'])
 
     def delete_vip(self, context, vip):
         """Delete a Vip
@@ -247,8 +250,8 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
 
         """
 
-        extended_vip = self.plugin.populate_vip_graph(context, vip)
-        params = _translate_vip_object_graph(extended_vip,
+        ext_vip = self._populate_vip_graph(context, vip)
+        params = _translate_vip_object_graph(ext_vip,
                                              self.plugin, context)
         ids = params.pop('__ids__')
 
@@ -277,7 +280,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
             self._remove_workflow(ids, context, delete_pip_nport_function)
 
         except r_exc.RESTRequestFailure:
-            pool_id = extended_vip['pool_id']
+            pool_id = ext_vip['pool_id']
             LOG.exception(_('Failed to remove workflow %s. '
                             'Going to set vip to ERROR status'),
                           pool_id)
@@ -317,10 +320,10 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
                 raise loadbalancer.PoolInUse(pool_id=pool['id'])
             else:
                 vip = self.plugin.get_vip(context, vip_id)
-                extended_vip = self.plugin.populate_vip_graph(context, vip)
+                ext_vip = self._populate_vip_graph(context, vip)
                 self._update_workflow(
                     pool['id'], self.l4_action_name,
-                    extended_vip, context, delete, lb_db.Pool, pool['id'])
+                    ext_vip, context, delete, lb_db.Pool, pool['id'])
         else:
             if delete:
                 self.plugin._delete_db_pool(context, pool['id'])
@@ -345,10 +348,10 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
             context, member['pool_id']).get('vip_id')
         if vip_id:
             vip = self.plugin.get_vip(context, vip_id)
-            extended_vip = self.plugin.populate_vip_graph(context, vip)
+            ext_vip = self._populate_vip_graph(context, vip)
             self._update_workflow(
                 member['pool_id'], self.l4_action_name,
-                extended_vip, context,
+                ext_vip, context,
                 delete, lb_db.Member, member['id'])
         # We have to delete this member but it is not connected to a vip yet
         elif delete:
@@ -391,9 +394,9 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
 
         if vip_id:
             vip = self.plugin.get_vip(context, vip_id)
-            extended_vip = self.plugin.populate_vip_graph(context, vip)
+            ext_vip = self._populate_vip_graph(context, vip)
             self._update_workflow(pool_id, self.l4_action_name,
-                                  extended_vip, context,
+                                  ext_vip, context,
                                   delete, lb_db.PoolMonitorAssociation,
                                   health_monitor['id'])
         elif delete:
@@ -496,8 +499,7 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
                       resource, None, None),
                       [202])
 
-    def _get_service(self, vip_network_id, pool_network_id,
-                     tenant_id):
+    def _get_service(self, ext_vip):
         """Get a service name.
 
         if you can't find one,
@@ -506,20 +508,21 @@ class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
         """
         if not self.workflow_templates_exists:
             self._verify_workflow_templates()
-        if vip_network_id != pool_network_id:
-            networks_name = '%s_%s' % (vip_network_id, pool_network_id)
+        if ext_vip['vip_network_id'] != ext_vip['pool_network_id']:
+            networks_name = '%s_%s' % (ext_vip['vip_network_id'],
+                                       ext_vip['pool_network_id'])
             self.l2_l3_ctor_params["twoleg_enabled"] = True
         else:
-            networks_name = vip_network_id
+            networks_name = ext_vip['vip_network_id']
             self.l2_l3_ctor_params["twoleg_enabled"] = False
         incoming_service_name = 'srv_%s' % (networks_name,)
         service_name = self._get_available_service(incoming_service_name)
         if not service_name:
             LOG.debug(
                 'Could not find a service named ' + incoming_service_name)
-            service_name = self._create_service(vip_network_id,
-                                                pool_network_id,
-                                                tenant_id)
+            service_name = self._create_service(ext_vip['vip_network_id'],
+                                                ext_vip['pool_network_id'],
+                                                ext_vip['tenant_id'])
             self.l2_l3_ctor_params["service"] = incoming_service_name
             wf_name = 'l2_l3_' + networks_name
             self._create_workflow(
@@ -989,13 +992,17 @@ TRANSLATION_DEFAULTS = {'session_persistence_type': 'none',
                         'session_persistence_cookie_name': 'none',
                         'url_path': '/',
                         'http_method': 'GET',
-                        'expected_codes': '200'
+                        'expected_codes': '200',
+                        'subnet': '255.255.255.255',
+                        'mask': '255.255.255.255',
+                        'gw': '255.255.255.255',
                         }
 VIP_PROPERTIES = ['address', 'protocol_port', 'protocol', 'connection_limit',
                   'admin_state_up', 'session_persistence_type',
                   'session_persistence_cookie_name']
 POOL_PROPERTIES = ['protocol', 'lb_method', 'admin_state_up']
-MEMBER_PROPERTIES = ['address', 'protocol_port', 'weight', 'admin_state_up']
+MEMBER_PROPERTIES = ['address', 'protocol_port', 'weight', 'admin_state_up',
+                     'subnet', 'mask', 'gw']
 HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries',
                              'admin_state_up', 'url_path', 'http_method',
                              'expected_codes', 'id']
@@ -1037,12 +1044,37 @@ def _translate_vip_object_graph(extended_vip, plugin, context):
             'pool'][pool_property]
     for member_property in MEMBER_PROPERTIES:
         trans_vip[_create_key('member', member_property)] = []
+
+    two_leg = (extended_vip['pip_address'] != extended_vip['address'])
+    if two_leg:
+        pool_subnet = plugin._core_plugin.get_subnet(
+            context, extended_vip['pool']['subnet_id'])
+
     for member in extended_vip['members']:
         if member['status'] != constants.PENDING_DELETE:
+            if (two_leg and netaddr.IPAddress(member['address'])
+                not in netaddr.IPNetwork(pool_subnet['cidr'])):
+                member_ports = plugin._core_plugin.get_ports(
+                    context,
+                    filters={'fixed_ips': {'ip_address': [member['address']]},
+                             'tenant_id': [extended_vip['tenant_id']]})
+                if len(member_ports) == 1:
+                    member_subnet = plugin._core_plugin.get_subnet(
+                        context,
+                        member_ports[0]['fixed_ips'][0]['subnet_id'])
+                    member_network = netaddr.IPNetwork(member_subnet['cidr'])
+                    member['subnet'] = str(member_network.network)
+                    member['mask'] = str(member_network.netmask)
+                else:
+                    member['subnet'] = member['address']
+
+                member['gw'] = pool_subnet['gateway_ip']
+
             for member_property in MEMBER_PROPERTIES:
                 trans_vip[_create_key('member', member_property)].append(
                     member.get(member_property,
                                TRANSLATION_DEFAULTS.get(member_property)))
+
     for hm_property in HEALTH_MONITOR_PROPERTIES:
         trans_vip[
             _create_key('hm', _trans_prop_name(hm_property))] = []
index a073d30e0bf636da77e84dbaff39fa309eddd0f6..ca6080e1fe7718fbf3ac0b9e01674562e51a57c7 100644 (file)
@@ -20,6 +20,7 @@ import re
 
 import contextlib
 import mock
+from oslo.config import cfg
 from six.moves import queue as Queue
 
 from neutron import context
@@ -644,6 +645,138 @@ class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
                         self.driver_rest_call_mock.assert_has_calls(
                             calls, any_order=True)
 
+    def test_create_member_on_different_subnets(self):
+        with contextlib.nested(
+            self.subnet(),
+            self.subnet(cidr='20.0.0.0/24'),
+            self.subnet(cidr='30.0.0.0/24')
+        ) as (vip_sub, pool_sub, member_sub):
+            with self.pool(provider='radware',
+                           subnet_id=pool_sub['subnet']['id']) as pool:
+                with contextlib.nested(
+                    self.port(subnet=vip_sub,
+                              fixed_ips=[{'ip_address': '10.0.0.2'}]),
+                    self.port(subnet=pool_sub,
+                              fixed_ips=[{'ip_address': '20.0.0.2'}]),
+                    self.port(subnet=member_sub,
+                              fixed_ips=[{'ip_address': '30.0.0.2'}])
+                ):
+                    with contextlib.nested(
+                        self.member(pool_id=pool['pool']['id'],
+                                    address='10.0.0.2'),
+                        self.member(pool_id=pool['pool']['id'],
+                                    address='20.0.0.2'),
+                        self.member(pool_id=pool['pool']['id'],
+                                    address='30.0.0.2')
+                    ) as (member_vip, member_pool, member_out):
+                        with self.vip(pool=pool, subnet=vip_sub):
+                            calls = [
+                                mock.call(
+                                    'POST', '/api/workflow/' +
+                                    pool['pool']['id'] +
+                                    '/action/BaseCreate',
+                                    mock.ANY, driver.TEMPLATE_HEADER
+                                )
+                            ]
+                            self.driver_rest_call_mock.assert_has_calls(
+                                calls, any_order=True)
+
+                            mock_calls = self.driver_rest_call_mock.mock_calls
+                            params = mock_calls[-2][1][2]['parameters']
+                            member_subnet_array = params['member_subnet_array']
+                            member_mask_array = params['member_mask_array']
+                            member_gw_array = params['member_gw_array']
+                            self.assertEqual(member_subnet_array,
+                                             ['10.0.0.0',
+                                              '255.255.255.255',
+                                              '30.0.0.0'])
+                            self.assertEqual(member_mask_array,
+                                             ['255.255.255.0',
+                                              '255.255.255.255',
+                                              '255.255.255.0'])
+                            self.assertEqual(
+                                member_gw_array,
+                                [pool_sub['subnet']['gateway_ip'],
+                                 '255.255.255.255',
+                                 pool_sub['subnet']['gateway_ip']])
+
+    def test_create_member_on_different_subnet_no_port(self):
+        with contextlib.nested(
+            self.subnet(),
+            self.subnet(cidr='20.0.0.0/24'),
+            self.subnet(cidr='30.0.0.0/24')
+        ) as (vip_sub, pool_sub, member_sub):
+            with self.pool(provider='radware',
+                           subnet_id=pool_sub['subnet']['id']) as pool:
+                with self.member(pool_id=pool['pool']['id'],
+                                 address='30.0.0.2'):
+                    with self.vip(pool=pool, subnet=vip_sub):
+                        calls = [
+                            mock.call(
+                                'POST', '/api/workflow/' +
+                                pool['pool']['id'] +
+                                '/action/BaseCreate',
+                                mock.ANY, driver.TEMPLATE_HEADER
+                            )
+                        ]
+                        self.driver_rest_call_mock.assert_has_calls(
+                            calls, any_order=True)
+
+                        mock_calls = self.driver_rest_call_mock.mock_calls
+                        params = mock_calls[-2][1][2]['parameters']
+                        member_subnet_array = params['member_subnet_array']
+                        member_mask_array = params['member_mask_array']
+                        member_gw_array = params['member_gw_array']
+                        self.assertEqual(member_subnet_array,
+                                         ['30.0.0.2'])
+                        self.assertEqual(member_mask_array,
+                                         ['255.255.255.255'])
+                        self.assertEqual(member_gw_array,
+                                         [pool_sub['subnet']['gateway_ip']])
+
+    def test_create_member_on_different_subnet_multiple_ports(self):
+        cfg.CONF.set_override("allow_overlapping_ips", 'true')
+        with self.network() as other_net:
+            with contextlib.nested(
+                self.subnet(),
+                self.subnet(cidr='20.0.0.0/24'),
+                self.subnet(cidr='30.0.0.0/24'),
+                self.subnet(network=other_net, cidr='30.0.0.0/24')
+            ) as (vip_sub, pool_sub, member_sub1, member_sub2):
+                with self.pool(provider='radware',
+                               subnet_id=pool_sub['subnet']['id']) as pool:
+                    with contextlib.nested(
+                        self.port(subnet=member_sub1,
+                                  fixed_ips=[{'ip_address': '30.0.0.2'}]),
+                        self.port(subnet=member_sub2,
+                                  fixed_ips=[{'ip_address': '30.0.0.2'}])):
+                        with self.member(pool_id=pool['pool']['id'],
+                                         address='30.0.0.2'):
+                            with self.vip(pool=pool, subnet=vip_sub):
+                                calls = [
+                                    mock.call(
+                                        'POST', '/api/workflow/' +
+                                        pool['pool']['id'] +
+                                        '/action/BaseCreate',
+                                        mock.ANY, driver.TEMPLATE_HEADER
+                                    )
+                                ]
+                                self.driver_rest_call_mock.assert_has_calls(
+                                    calls, any_order=True)
+
+                                calls = self.driver_rest_call_mock.mock_calls
+                                params = calls[-2][1][2]['parameters']
+                                m_sub_array = params['member_subnet_array']
+                                m_mask_array = params['member_mask_array']
+                                m_gw_array = params['member_gw_array']
+                                self.assertEqual(m_sub_array,
+                                                 ['30.0.0.2'])
+                                self.assertEqual(m_mask_array,
+                                                 ['255.255.255.255'])
+                                self.assertEqual(
+                                    m_gw_array,
+                                    [pool_sub['subnet']['gateway_ip']])
+
     def test_update_member_with_vip(self):
         with self.subnet() as subnet:
             with self.pool(provider='radware',