import eventlet
from oslo.config import cfg
-from neutron.common import exceptions as q_exc
from neutron.common import log as call_log
-from neutron import context as qcontext
-import neutron.db.loadbalancer.loadbalancer_db as lb_db
+from neutron import context
+from neutron.db.loadbalancer import loadbalancer_db as lb_db
from neutron.extensions import loadbalancer
from neutron.openstack.common import jsonutils as json
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
+from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc
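+# Note (editorial, assumption): monkey_patch(thread=True) below swaps the
+# standard thread primitives for eventlet green threads, which is what lets
+# the completion handler run as a daemon "thread" inside the agent.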
eventlet.monkey_patch(thread=True)
user=rad.vdirect_user,
password=rad.vdirect_password)
self.queue = Queue.Queue()
- self.completion_handler = OperationCompletionHander(self.queue,
- self.rest_client,
- plugin)
+ self.completion_handler = OperationCompletionHandler(self.queue,
+ self.rest_client,
+ plugin)
self.workflow_templates_exists = False
self.completion_handler.setDaemon(True)
self.completion_handler.start()
First delete it from the device. If deletion ended OK
- remove data from DB as well.
- If the deletion failed - mark elements with error status in DB
+ If the deletion failed - mark the vip with error status in DB
"""
extended_vip = self.plugin.populate_vip_graph(context, vip)
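+        # Translate the vip graph up front so the object ids are available
+        # for error handling even if the device call below fails.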
+ params = _translate_vip_object_graph(extended_vip,
+ self.plugin, context)
+ ids = params.pop('__ids__')
+
try:
# removing the WF will cause deletion of the configuration from the
# device
- self._remove_workflow(extended_vip, context)
- except Exception:
+ self._remove_workflow(ids, context)
+ except r_exc.RESTRequestFailure:
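+            # Only the vip is marked with ERROR status; the rest of the
+            # vip graph (pool, members, health monitors) keeps its status.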
pool_id = extended_vip['pool_id']
- LOG.exception(_('Failed to remove workflow %s'), pool_id)
- _update_vip_graph_status(
- self.plugin, context, extended_vip, constants.ERROR
- )
+ LOG.exception(_('Failed to remove workflow %s. '
+ 'Going to set vip to ERROR status'),
+ pool_id)
+
+ self.plugin.update_status(context, lb_db.Vip, ids['vip'],
+ constants.ERROR)
def create_pool(self, context, pool):
# nothing to do
debug_params = {"hm_id": health_monitor['id'], "pool_id": pool_id,
"delete": delete, "vip_id": vip_id}
LOG.debug(_('_handle_pool_health_monitor. health_monitor = %(hm_id)s '
- 'pool_id = %(pool_id)s delete = %(delete)s '
- 'vip_id = %(vip_id)s'),
+ 'pool_id = %(pool_id)s delete = %(delete)s '
+ 'vip_id = %(vip_id)s'),
debug_params)
if vip_id:
if action not in self.actions_to_skip:
ids = params.pop('__ids__', None)
- if not ids:
- raise q_exc.NeutronException(
- _('params must contain __ids__ field!')
- )
-
oper = OperationAttributes(response['uri'],
ids,
lbaas_entity,
LOG.debug(_('Pushing operation %s to the queue'), oper)
self.queue.put_nowait(oper)
- def _remove_workflow(self, wf_params, context):
- params = _translate_vip_object_graph(wf_params, self.plugin, context)
- ids = params.pop('__ids__', None)
- if not ids:
- raise q_exc.NeutronException(
- _('params must contain __ids__ field!')
- )
+ def _remove_workflow(self, ids, context):
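+        # The workflow on the vDirect server is named after the pool id
+        # of the vip graph.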
wf_name = ids['pool']
LOG.debug(_('Remove the workflow %s') % wf_name)
break
for wf, found in workflows.items():
if not found:
- msg = _('The workflow %s does not exist on vDirect.') % wf
- raise q_exc.NeutronException(msg)
+ raise r_exc.WorkflowMissing(workflow=wf)
self.workflow_templates_exists = True
self.auth = base64.encodestring('%s:%s' % (user, password))
self.auth = self.auth.replace('\n', '')
else:
- msg = _('User and password must be specified')
- raise q_exc.NeutronException(msg)
+ raise r_exc.AuthenticationMissing()
+
debug_params = {'server': self.server,
'port': self.port,
'ssl': self.ssl}
return "<%s: {%s}>" % (self.__class__.__name__, ', '.join(items))
-class OperationCompletionHander(threading.Thread):
+class OperationCompletionHandler(threading.Thread):
"""Update DB with operation status or delete the entity from DB."""
threading.Thread.__init__(self)
self.queue = queue
self.rest_client = rest_client
- self.admin_ctx = qcontext.get_admin_context()
self.plugin = plugin
self.stoprequest = threading.Event()
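+        # Throttling counter for run(): how many queued operations to
+        # handle before taking a one-second rest.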
+ self.opers_to_handle_before_rest = 0
def _get_db_status(self, operation, success, messages=None):
"""Get the db_status based on the status of the vdirect operation."""
def join(self, timeout=None):
self.stoprequest.set()
- super(OperationCompletionHander, self).join(timeout)
+ super(OperationCompletionHandler, self).join(timeout)
def run(self):
oper = None
while not self.stoprequest.isSet():
try:
oper = self.queue.get(timeout=1)
+
+ # Get the current queue size (N) and set the counter to it.
+ # Handle N operations with no intermission.
+ # Once N operations are handled, get the size again and repeat.
+ if self.opers_to_handle_before_rest <= 0:
+ self.opers_to_handle_before_rest = self.queue.qsize() + 1
+
LOG.debug('Operation consumed from the queue: ' +
str(oper))
# check the status - if oper is done: update the db ,
db_status = self._get_db_status(oper, success)
if db_status:
_update_vip_graph_status(
- self.plugin, self.admin_ctx,
- oper, db_status)
+ self.plugin, oper, db_status)
else:
_remove_object_from_db(
- self.plugin, self.admin_ctx, oper)
+ self.plugin, oper)
else:
- # not completed - push to the queue again
LOG.debug(_('Operation %s is not completed yet..') % oper)
- # queue is empty - lets take a short rest
- if self.queue.empty():
- time.sleep(1)
+ # Not completed - push to the queue again
self.queue.put_nowait(oper)
- # send a signal to the queue that the job is done
+
self.queue.task_done()
+ self.opers_to_handle_before_rest -= 1
+
+ # Take a one-second rest before handling new operations
+ # or operations that were pushed back to the queue
+ if self.opers_to_handle_before_rest <= 0:
+ time.sleep(1)
+
except Queue.Empty:
continue
except Exception:
- m = _("Exception was thrown inside OperationCompletionHander")
+ m = _("Exception was thrown inside OperationCompletionHandler")
LOG.exception(m)
def _rest_wrapper(response, success_codes=[202]):
"""Wrap a REST call and make sure a valid status is returned."""
if response[RESP_STATUS] not in success_codes:
- raise q_exc.NeutronException(str(response[RESP_STATUS]) + ':' +
- response[RESP_REASON] +
- '. Error description: ' +
- response[RESP_STR])
+ raise r_exc.RESTRequestFailure(
+ status=response[RESP_STATUS],
+ reason=response[RESP_REASON],
+ description=response[RESP_STR],
+ success_codes=success_codes
+ )
else:
return response[RESP_DATA]
-def _update_vip_graph_status(plugin, context, oper, status):
+def _update_vip_graph_status(plugin, oper, status):
"""Update the status
Of all the Vip object graph
"""
+ ctx = context.get_admin_context(load_admin_roles=False)
+
LOG.debug(_('_update: %s '), oper)
if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
- plugin.update_pool_health_monitor(context,
+ plugin.update_pool_health_monitor(ctx,
oper.entity_id,
oper.object_graph['pool'],
status)
elif oper.entity_id:
- plugin.update_status(context,
+ plugin.update_status(ctx,
oper.lbaas_entity,
oper.entity_id,
status)
else:
- # update the whole vip graph status
- plugin.update_status(context,
- lb_db.Vip,
- oper.object_graph['vip'],
+ _update_vip_graph_status_cascade(plugin,
+ oper.object_graph,
+ ctx, status)
+
+
+def _update_vip_graph_status_cascade(plugin, ids, ctx, status):
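+    """Set the status on the vip, its pool, members and health monitors."""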
+ plugin.update_status(ctx,
+ lb_db.Vip,
+ ids['vip'],
+ status)
+ plugin.update_status(ctx,
+ lb_db.Pool,
+ ids['pool'],
+ status)
+ for member_id in ids['members']:
+ plugin.update_status(ctx,
+ lb_db.Member,
+ member_id,
status)
- plugin.update_status(context,
- lb_db.Pool,
- oper.object_graph['pool'],
- status)
- for member_id in oper.object_graph['members']:
- plugin.update_status(context,
- lb_db.Member,
- member_id,
- status)
- for hm_id in oper.object_graph['health_monitors']:
- plugin.update_pool_health_monitor(context,
- hm_id,
- oper.object_graph['pool'],
- status)
-
-
-def _remove_object_from_db(plugin, context, oper):
+ for hm_id in ids['health_monitors']:
+ plugin.update_pool_health_monitor(ctx,
+ hm_id,
+ ids['pool'],
+ status)
+
+
+def _remove_object_from_db(plugin, oper):
"""Remove a specific entity from db."""
LOG.debug(_('_remove_object_from_db %s'), str(oper))
+
+ ctx = context.get_admin_context(load_admin_roles=False)
+
if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
- plugin._delete_db_pool_health_monitor(context,
+ plugin._delete_db_pool_health_monitor(ctx,
oper.entity_id,
oper.object_graph['pool'])
elif oper.lbaas_entity == lb_db.Member:
- plugin._delete_db_member(context, oper.entity_id)
+ plugin._delete_db_member(ctx, oper.entity_id)
elif oper.lbaas_entity == lb_db.Vip:
- plugin._delete_db_vip(context, oper.entity_id)
+ plugin._delete_db_vip(ctx, oper.entity_id)
elif oper.lbaas_entity == lb_db.Pool:
- plugin._delete_db_pool(context, oper.entity_id)
+ plugin._delete_db_pool(ctx, oper.entity_id)
else:
- raise q_exc.NeutronException(
- _('Tried to remove unsupported lbaas entity %s!'),
- str(oper.lbaas_entity)
+ raise r_exc.UnsupportedEntityOperation(
+ operation='Remove from DB', entity=oper.lbaas_entity
)
TRANSLATION_DEFAULTS = {'session_persistence_type': 'SOURCE_IP',
import re
-from eventlet import greenthread
+import contextlib
+import eventlet
import mock
from neutron import context
from neutron.openstack.common import jsonutils as json
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers.radware import driver
+from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
GET_200 = ('/api/workflow/', '/api/service/', '/api/workflowTemplate')
def rest_call_function_mock(action, resource, data, headers, binary=False):
if rest_call_function_mock.RESPOND_WITH_ERROR:
- return 400, 'error_status', 'error_reason', None
+ return 400, 'error_status', 'error_description', None
if action == 'GET':
return _get_handler(resource)
def _get_handler(resource):
if resource == GET_200[2]:
if rest_call_function_mock.TEMPLATES_MISSING:
- data = []
+ data = json.loads('[]')
else:
- data = [{"name": "openstack_l2_l3"}, {"name": "openstack_l4"}]
+ data = json.loads(
+ '[{"name":"openstack_l2_l3"},{"name":"openstack_l4"}]'
+ )
return 200, '', '', data
if resource in GET_200:
return 200, '', '', ''
else:
- data = {"complete": "True", "success": "True"}
+ data = json.loads('{"complete":"True", "success": "True"}')
return 202, '', '', data
radware_driver = self.plugin_instance.drivers['radware']
radware_driver.rest_client.call = self.rest_call_mock
- self.ctx = context.get_admin_context()
-
self.addCleanup(radware_driver.completion_handler.join)
self.addCleanup(mock.patch.stopall)
- def test_create_vip_templates_missing(self):
+ def test_verify_workflow_templates(self):
"""Test the rest call failure handling by Exception raising."""
- self.rest_call_mock.reset_mock()
- with self.subnet() as subnet:
- with self.pool(provider='radware') as pool:
- vip_data = {
- 'name': 'vip1',
- 'subnet_id': subnet['subnet']['id'],
- 'pool_id': pool['pool']['id'],
- 'description': '',
- 'protocol_port': 80,
- 'protocol': 'HTTP',
- 'connection_limit': -1,
- 'admin_state_up': True,
- 'status': 'PENDING_CREATE',
- 'tenant_id': self._tenant_id,
- 'session_persistence': ''
- }
- rest_call_function_mock.__dict__.update(
- {'TEMPLATES_MISSING': True})
- #TODO(avishayb) Check that NeutronException is raised
- self.assertRaises(StandardError,
- self.plugin_instance.create_vip,
- (self.ctx, {'vip': vip_data}))
+ rest_call_function_mock.__dict__.update(
+ {'TEMPLATES_MISSING': True})
+ self.assertRaises(r_exc.WorkflowMissing,
+ self.plugin_instance.drivers['radware'].
+ _verify_workflow_templates)
def test_create_vip_failure(self):
"""Test the rest call failure handling by Exception raising."""
self.rest_call_mock.reset_mock()
- with self.subnet() as subnet:
- with self.pool(provider='radware') as pool:
- vip_data = {
- 'name': 'vip1',
- 'subnet_id': subnet['subnet']['id'],
- 'pool_id': pool['pool']['id'],
- 'description': '',
- 'protocol_port': 80,
- 'protocol': 'HTTP',
- 'connection_limit': -1,
- 'admin_state_up': True,
- 'status': 'PENDING_CREATE',
- 'tenant_id': self._tenant_id,
- 'session_persistence': ''
- }
-
- rest_call_function_mock.__dict__.update(
- {'RESPOND_WITH_ERROR': True})
- self.assertRaises(StandardError,
- self.plugin_instance.create_vip,
- (self.ctx, {'vip': vip_data}))
+ with self.network(do_delete=False) as network:
+ with self.subnet(network=network, do_delete=False) as subnet:
+ with self.pool(no_delete=True, provider='radware') as pool:
+ vip_data = {
+ 'name': 'vip1',
+ 'subnet_id': subnet['subnet']['id'],
+ 'pool_id': pool['pool']['id'],
+ 'description': '',
+ 'protocol_port': 80,
+ 'protocol': 'HTTP',
+ 'connection_limit': -1,
+ 'admin_state_up': True,
+ 'status': constants.PENDING_CREATE,
+ 'tenant_id': self._tenant_id,
+ 'session_persistence': ''
+ }
+
+ rest_call_function_mock.__dict__.update(
+ {'RESPOND_WITH_ERROR': True})
+
+ self.assertRaises(r_exc.RESTRequestFailure,
+ self.plugin_instance.create_vip,
+ context.get_admin_context(),
+ {'vip': vip_data})
def test_create_vip(self):
self.rest_call_mock.reset_mock()
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
- 'status': 'PENDING_CREATE',
+ 'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
- self.ctx, {'vip': vip_data})
+ context.get_admin_context(), {'vip': vip_data})
# Test creation REST calls
calls = [
self.rest_call_mock.assert_has_calls(calls, any_order=True)
# sleep to wait for the operation completion
- greenthread.sleep(1)
+ eventlet.greenthread.sleep(0)
#Test DB
- new_vip = self.plugin_instance.get_vip(self.ctx, vip['id'])
- self.assertEqual(new_vip['status'], 'ACTIVE')
+ new_vip = self.plugin_instance.get_vip(
+ context.get_admin_context(),
+ vip['id']
+ )
+ self.assertEqual(new_vip['status'], constants.ACTIVE)
# Delete VIP
- self.plugin_instance.delete_vip(self.ctx, vip['id'])
+ self.plugin_instance.delete_vip(
+ context.get_admin_context(), vip['id'])
# Test deletion REST calls
calls = [
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
- 'status': 'PENDING_CREATE',
+ 'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
- self.ctx, {'vip': vip_data})
+ context.get_admin_context(), {'vip': vip_data})
- vip_data['status'] = 'PENDING_UPDATE'
- self.plugin_instance.update_vip(self.ctx, vip['id'],
- {'vip': vip_data})
+ vip_data['status'] = constants.PENDING_UPDATE
+ self.plugin_instance.update_vip(
+ context.get_admin_context(),
+ vip['id'], {'vip': vip_data})
# Test REST calls
calls = [
]
self.rest_call_mock.assert_has_calls(calls, any_order=True)
- updated_vip = self.plugin_instance.get_vip(self.ctx, vip['id'])
- self.assertEqual(updated_vip['status'], 'PENDING_UPDATE')
+ updated_vip = self.plugin_instance.get_vip(
+ context.get_admin_context(), vip['id'])
+ self.assertEqual(updated_vip['status'],
+ constants.PENDING_UPDATE)
# sleep to wait for the operation completion
- greenthread.sleep(1)
- updated_vip = self.plugin_instance.get_vip(self.ctx, vip['id'])
- self.assertEqual(updated_vip['status'], 'ACTIVE')
+ eventlet.greenthread.sleep(1)
+ updated_vip = self.plugin_instance.get_vip(
+ context.get_admin_context(), vip['id'])
+ self.assertEqual(updated_vip['status'], constants.ACTIVE)
# delete VIP
- self.plugin_instance.delete_vip(self.ctx, vip['id'])
+ self.plugin_instance.delete_vip(
+ context.get_admin_context(), vip['id'])
+
+ def test_delete_vip_failure(self):
+ self.rest_call_mock.reset_mock()
+ plugin = self.plugin_instance
+
+ with self.network(do_delete=False) as network:
+ with self.subnet(network=network, do_delete=False) as subnet:
+ with self.pool(no_delete=True, provider='radware') as pool:
+ with contextlib.nested(
+ self.member(pool_id=pool['pool']['id'],
+ no_delete=True),
+ self.member(pool_id=pool['pool']['id'],
+ no_delete=True),
+ self.health_monitor(no_delete=True),
+ self.vip(pool=pool, subnet=subnet, no_delete=True)
+ ) as (mem1, mem2, hm, vip):
+
+ plugin.create_pool_health_monitor(
+ context.get_admin_context(), hm, pool['pool']['id']
+ )
+
+ eventlet.greenthread.sleep(1)
+
+ rest_call_function_mock.__dict__.update(
+ {'RESPOND_WITH_ERROR': True})
+
+ plugin.delete_vip(
+ context.get_admin_context(), vip['vip']['id'])
+
+ u_vip = plugin.get_vip(
+ context.get_admin_context(), vip['vip']['id'])
+ u_pool = plugin.get_pool(
+ context.get_admin_context(), pool['pool']['id'])
+ u_mem1 = plugin.get_member(
+ context.get_admin_context(), mem1['member']['id'])
+ u_mem2 = plugin.get_member(
+ context.get_admin_context(), mem2['member']['id'])
+ u_phm = plugin.get_pool_health_monitor(
+ context.get_admin_context(),
+ hm['health_monitor']['id'], pool['pool']['id'])
+
+ self.assertEqual(u_vip['status'], constants.ERROR)
+ self.assertEqual(u_pool['status'], constants.ACTIVE)
+ self.assertEqual(u_mem1['status'], constants.ACTIVE)
+ self.assertEqual(u_mem2['status'], constants.ACTIVE)
+ self.assertEqual(u_phm['status'], constants.ACTIVE)
def test_delete_vip(self):
self.rest_call_mock.reset_mock()
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
- 'status': 'PENDING_CREATE',
+ 'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
- self.ctx, {'vip': vip_data})
+ context.get_admin_context(), {'vip': vip_data})
- self.plugin_instance.delete_vip(self.ctx, vip['id'])
+ self.plugin_instance.delete_vip(
+ context.get_admin_context(), vip['id'])
calls = [
mock.call('DELETE', '/api/workflow/' + pool['pool']['id'],
self.assertRaises(loadbalancer.VipNotFound,
self.plugin_instance.get_vip,
- self.ctx, vip['id'])
- # add test checking all vip graph objects were removed from DB
+ context.get_admin_context(), vip['id'])
+
+ def test_update_pool(self):
+ self.rest_call_mock.reset_mock()
+ with self.subnet():
+ with self.pool() as pool:
+ del pool['pool']['provider']
+ del pool['pool']['status']
+ self.plugin_instance.update_pool(
+ context.get_admin_context(),
+ pool['pool']['id'], pool)
+ pool_db = self.plugin_instance.get_pool(
+ context.get_admin_context(), pool['pool']['id'])
+ self.assertEqual(pool_db['status'], constants.PENDING_UPDATE)
def test_delete_pool_with_vip(self):
self.rest_call_mock.reset_mock()
with self.vip(pool=pool, subnet=subnet):
self.assertRaises(loadbalancer.PoolInUse,
self.plugin_instance.delete_pool,
- self.ctx, pool['pool']['id'])
+ context.get_admin_context(),
+ pool['pool']['id'])
def test_create_member_with_vip(self):
self.rest_call_mock.reset_mock()
with self.member(pool_id=p['pool']['id']) as member:
with self.vip(pool=p, subnet=subnet):
self.plugin_instance.update_member(
- self.ctx, member['member']['id'], member
+ context.get_admin_context(),
+ member['member']['id'], member
)
calls = [
mock.call(
any_order=True)
updated_member = self.plugin_instance.get_member(
- self.ctx, member['member']['id']
+ context.get_admin_context(),
+ member['member']['id']
)
# sleep to wait for the operation completion
- greenthread.sleep(1)
+ eventlet.greenthread.sleep(0)
updated_member = self.plugin_instance.get_member(
- self.ctx, member['member']['id']
+ context.get_admin_context(),
+ member['member']['id']
)
- self.assertEqual(updated_member['status'], 'ACTIVE')
+ self.assertEqual(updated_member['status'],
+ constants.ACTIVE)
def test_update_member_without_vip(self):
self.rest_call_mock.reset_mock()
with self.subnet():
with self.pool(provider='radware') as pool:
with self.member(pool_id=pool['pool']['id']) as member:
- member['member']['status'] = 'PENDING_UPDATE'
+ member['member']['status'] = constants.PENDING_UPDATE
updated_member = self.plugin_instance.update_member(
- self.ctx, member['member']['id'], member
+ context.get_admin_context(),
+ member['member']['id'], member
)
self.assertEqual(updated_member['status'],
- 'PENDING_UPDATE')
+ constants.PENDING_UPDATE)
def test_delete_member_with_vip(self):
self.rest_call_mock.reset_mock()
no_delete=True) as m:
with self.vip(pool=p, subnet=subnet):
- self.plugin_instance.delete_member(self.ctx,
- m['member']['id'])
+ # Reset the mock and wait to be sure the member
+ # status changed from PENDING_CREATE to ACTIVE
+ self.rest_call_mock.reset_mock()
+ eventlet.greenthread.sleep(1)
+
+ self.plugin_instance.delete_member(
+ context.get_admin_context(),
+ m['member']['id']
+ )
+
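+                        # The last POST body should carry an empty member
+                        # list, showing the member was removed from the
+                        # vip graph sent to the device.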
+ args, kwargs = self.rest_call_mock.call_args
+ deletion_post_graph = str(args[2])
+ self.assertTrue(re.search(
+ r'.*\'member_address_array\': \[\].*',
+ deletion_post_graph
+ ))
calls = [
- mock.call(
- 'POST', '/api/workflow/' + p['pool']['id'] +
- '/action/BaseCreate',
- mock.ANY, driver.TEMPLATE_HEADER
- ),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
- self.rest_call_mock.assert_has_calls(calls,
- any_order=True)
+ self.rest_call_mock.assert_has_calls(
+ calls, any_order=True)
- greenthread.sleep(1)
+ eventlet.greenthread.sleep(1)
self.assertRaises(loadbalancer.MemberNotFound,
self.plugin_instance.get_member,
- self.ctx, m['member']['id'])
+ context.get_admin_context(),
+ m['member']['id'])
def test_delete_member_without_vip(self):
self.rest_call_mock.reset_mock()
with self.pool(provider='radware') as p:
with self.member(pool_id=p['pool']['id'], no_delete=True) as m:
self.plugin_instance.delete_member(
- self.ctx, m['member']['id']
+ context.get_admin_context(), m['member']['id']
)
self.assertRaises(loadbalancer.MemberNotFound,
self.plugin_instance.get_member,
- self.ctx, m['member']['id'])
+ context.get_admin_context(),
+ m['member']['id'])
+
+ def test_create_hm_with_vip(self):
+ self.rest_call_mock.reset_mock()
+ with self.subnet() as subnet:
+ with self.health_monitor() as hm:
+ with self.pool(provider='radware') as pool:
+ with self.vip(pool=pool, subnet=subnet):
+
+ self.plugin_instance.create_pool_health_monitor(
+ context.get_admin_context(),
+ hm, pool['pool']['id']
+ )
+
+ # Test REST calls
+ calls = [
+ mock.call(
+ 'POST', '/api/workflow/' + pool['pool']['id'] +
+ '/action/BaseCreate',
+ mock.ANY, driver.TEMPLATE_HEADER
+ ),
+ mock.call(
+ 'POST', '/api/workflow/' + pool['pool']['id'] +
+ '/action/BaseCreate',
+ mock.ANY, driver.TEMPLATE_HEADER
+ )
+ ]
+ self.rest_call_mock.assert_has_calls(
+ calls, any_order=True)
+
+ eventlet.greenthread.sleep(1)
+
+ phm = self.plugin_instance.get_pool_health_monitor(
+ context.get_admin_context(),
+ hm['health_monitor']['id'], pool['pool']['id']
+ )
+ self.assertEqual(phm['status'], constants.ACTIVE)
+
+ def test_delete_pool_hm_with_vip(self):
+ self.rest_call_mock.reset_mock()
+ with self.subnet() as subnet:
+ with self.health_monitor(no_delete=True) as hm:
+ with self.pool(provider='radware') as pool:
+ with self.vip(pool=pool, subnet=subnet):
+ self.plugin_instance.create_pool_health_monitor(
+ context.get_admin_context(),
+ hm, pool['pool']['id']
+ )
+
+ # Reset the mock and wait to be sure the status
+ # changed from PENDING_CREATE to ACTIVE
+ self.rest_call_mock.reset_mock()
+ eventlet.greenthread.sleep(1)
+
+ self.plugin_instance.delete_pool_health_monitor(
+ context.get_admin_context(),
+ hm['health_monitor']['id'],
+ pool['pool']['id']
+ )
+
+ eventlet.greenthread.sleep(1)
+ name, args, kwargs = self.rest_call_mock.mock_calls[-2]
+ deletion_post_graph = str(args[2])
+
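+                        # The deletion POST should carry an empty health
+                        # monitor list, showing the monitor was removed
+                        # from the vip graph.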
+ self.assertTrue(re.search(
+ r'.*\'hm_uuid_array\': \[\].*',
+ deletion_post_graph
+ ))
+
+ calls = [
+ mock.call(
+ 'POST', '/api/workflow/' + pool['pool']['id'] +
+ '/action/BaseCreate',
+ mock.ANY, driver.TEMPLATE_HEADER
+ )
+ ]
+ self.rest_call_mock.assert_has_calls(
+ calls, any_order=True)
+
+ self.assertRaises(
+ loadbalancer.PoolMonitorAssociationNotFound,
+ self.plugin_instance.get_pool_health_monitor,
+ context.get_admin_context(),
+ hm['health_monitor']['id'],
+ pool['pool']['id']
+ )