From ee50162b27a3e0bf310f8e4f273b69cd0660cf69 Mon Sep 17 00:00:00 2001
From: Roman Podolyaka
Date: Mon, 15 Apr 2013 18:10:07 +0300
Subject: [PATCH] Simplify delete_health_monitor() using cascades

Currently delete_health_monitor() emulates the behaviour of cascade
deletion: when a HealthMonitor instance is deleted, all corresponding
PoolMonitorAssociation rows are queried and deleted one by one.

This can be done automatically by SQLAlchemy if we set the proper
cascade flags on the relationship between the HealthMonitor and
PoolMonitorAssociation models.

Fixes bug 1169107.

Change-Id: I674c381192719a56433f67be53a32203c36ebe2b
---
 quantum/db/loadbalancer/loadbalancer_db.py       | 18 ++++------
 .../services/agent_loadbalancer/plugin.py        |  8 ++---
 .../db/loadbalancer/test_db_loadbalancer.py      | 36 +++++++++++++++++++
 3 files changed, 47 insertions(+), 15 deletions(-)

diff --git a/quantum/db/loadbalancer/loadbalancer_db.py b/quantum/db/loadbalancer/loadbalancer_db.py
index ee504059a..65a01282a 100644
--- a/quantum/db/loadbalancer/loadbalancer_db.py
+++ b/quantum/db/loadbalancer/loadbalancer_db.py
@@ -127,6 +127,12 @@ class HealthMonitor(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
     status = sa.Column(sa.String(16), nullable=False)
     admin_state_up = sa.Column(sa.Boolean(), nullable=False)
 
+    pools = orm.relationship(
+        "PoolMonitorAssociation",
+        backref="healthmonitor",
+        cascade="all"
+    )
+
 
 class PoolMonitorAssociation(model_base.BASEV2):
     """
@@ -139,8 +145,6 @@ class PoolMonitorAssociation(model_base.BASEV2):
     monitor_id = sa.Column(sa.String(36),
                            sa.ForeignKey("healthmonitors.id"),
                            primary_key=True)
-    monitor = orm.relationship("HealthMonitor",
-                               backref="pools_poolmonitorassociations")
 
 
 class LoadBalancerPluginDb(LoadBalancerPluginBase):
@@ -605,7 +609,7 @@ class LoadBalancerPluginDb(LoadBalancerPluginBase):
 
             assoc = PoolMonitorAssociation(pool_id=pool_id,
                                            monitor_id=monitor_id)
-            assoc.monitor = monitor
+            assoc.healthmonitor = monitor
             pool.monitors.append(assoc)
 
             monitors = []
@@ -746,14 +750,6 @@ class LoadBalancerPluginDb(LoadBalancerPluginBase):
 
     def delete_health_monitor(self, context, id):
         with context.session.begin(subtransactions=True):
-            assoc_qry = context.session.query(PoolMonitorAssociation)
-            pool_qry = context.session.query(Pool)
-            for assoc in assoc_qry.filter_by(monitor_id=id).all():
-                try:
-                    pool = pool_qry.filter_by(id=assoc['pool_id']).one()
-                except exc.NoResultFound:
-                    raise loadbalancer.PoolNotFound(pool_id=pool['id'])
-                pool.monitors.remove(assoc)
             monitor_db = self._get_resource(context, HealthMonitor, id)
             context.session.delete(monitor_db)
 
diff --git a/quantum/plugins/services/agent_loadbalancer/plugin.py b/quantum/plugins/services/agent_loadbalancer/plugin.py
index 7ffe22530..95aaf0e2e 100644
--- a/quantum/plugins/services/agent_loadbalancer/plugin.py
+++ b/quantum/plugins/services/agent_loadbalancer/plugin.py
@@ -79,8 +79,8 @@ class LoadBalancerCallbacks(object):
                         m.status = constants.ACTIVE
 
                 for hm in pool.monitors:
-                    if hm.monitor.status in ACTIVE_PENDING:
-                        hm.monitor.status = constants.ACTIVE
+                    if hm.healthmonitor.status in ACTIVE_PENDING:
+                        hm.healthmonitor.status = constants.ACTIVE
 
             if (pool.status != constants.ACTIVE
                 or pool.vip.status != constants.ACTIVE):
@@ -104,9 +104,9 @@ class LoadBalancerCallbacks(object):
                 for m in pool.members if m.status == constants.ACTIVE
             ]
             retval['healthmonitors'] = [
-                self.plugin._make_health_monitor_dict(hm.monitor)
+                self.plugin._make_health_monitor_dict(hm.healthmonitor)
                 for hm in pool.monitors
-                if hm.monitor.status == constants.ACTIVE
+                if hm.healthmonitor.status == constants.ACTIVE
             ]
 
             return retval
 
diff --git a/quantum/tests/unit/db/loadbalancer/test_db_loadbalancer.py b/quantum/tests/unit/db/loadbalancer/test_db_loadbalancer.py
index 590ac6907..5c2f1e8e8 100644
--- a/quantum/tests/unit/db/loadbalancer/test_db_loadbalancer.py
+++ b/quantum/tests/unit/db/loadbalancer/test_db_loadbalancer.py
@@ -836,6 +836,42 @@ class TestLoadBalancer(LoadBalancerPluginDbTestCase):
             res = req.get_response(self.ext_api)
             self.assertEqual(res.status_int, 204)
 
+    def test_delete_healthmonitor_cascade_deletion_of_associations(self):
+        with self.health_monitor(type='HTTP', no_delete=True) as monitor:
+            with self.pool() as pool:
+                data = {
+                    'health_monitor': {
+                        'id': monitor['health_monitor']['id'],
+                        'tenant_id': self._tenant_id
+                    }
+                }
+                req = self.new_create_request(
+                    'pools',
+                    data,
+                    fmt=self.fmt,
+                    id=pool['pool']['id'],
+                    subresource='health_monitors')
+                res = req.get_response(self.ext_api)
+                self.assertEqual(res.status_int, 201)
+
+                ctx = context.get_admin_context()
+
+                # check if we actually have corresponding Pool associations
+                qry = ctx.session.query(ldb.PoolMonitorAssociation)
+                qry = qry.filter_by(monitor_id=monitor['health_monitor']['id'])
+                self.assertTrue(qry.all())
+                # delete the HealthMonitor instance
+                req = self.new_delete_request(
+                    'health_monitors',
+                    monitor['health_monitor']['id']
+                )
+                res = req.get_response(self.ext_api)
+                self.assertEqual(res.status_int, 204)
+                # check if all corresponding Pool associations are deleted
+                qry = ctx.session.query(ldb.PoolMonitorAssociation)
+                qry = qry.filter_by(monitor_id=monitor['health_monitor']['id'])
+                self.assertFalse(qry.all())
+
     def test_show_healthmonitor(self):
         with self.health_monitor() as monitor:
             keys = [('type', "TCP"),
-- 
2.45.2
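
Note on the technique (not part of the patch itself): the sketch below is a
minimal, self-contained illustration of the SQLAlchemy cascade behaviour this
change relies on. The Monitor and Association models, table names, and sample
values are hypothetical stand-ins for HealthMonitor and PoolMonitorAssociation;
only the cascade="all" flag on the parent-side relationship mirrors the patch.

    # Standalone sketch (assumed model names, not Quantum code) showing how
    # cascade="all" on the parent relationship deletes association rows
    # automatically when the parent is deleted.
    import sqlalchemy as sa
    from sqlalchemy import orm
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()


    class Monitor(Base):
        __tablename__ = 'monitors'
        id = sa.Column(sa.Integer, primary_key=True)
        # cascade="all" includes "delete": removing a Monitor also removes
        # the Association rows reachable through this relationship.
        pools = orm.relationship("Association",
                                 backref="healthmonitor",
                                 cascade="all")


    class Association(Base):
        __tablename__ = 'associations'
        pool_id = sa.Column(sa.Integer, primary_key=True)
        monitor_id = sa.Column(sa.Integer,
                               sa.ForeignKey("monitors.id"),
                               primary_key=True)


    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = orm.sessionmaker(bind=engine)()

    monitor = Monitor(id=1)
    monitor.pools.append(Association(pool_id=10))
    session.add(monitor)
    session.commit()

    # No manual query-and-remove loop is needed: deleting the parent
    # cascades to its association rows.
    session.delete(monitor)
    session.commit()
    assert session.query(Association).count() == 0

With plain cascade="all", association rows are removed when the parent is
deleted; adding "delete-orphan" would also remove rows that are merely detached
from the pools collection, which this change does not require.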