Currently delete_health_monitor() emulates cascade deletion by hand:
when a HealthMonitor instance is deleted, all corresponding
PoolMonitorAssociation rows are queried and deleted one by one.
SQLAlchemy can do this automatically if we set the proper cascade
flag on the relationship between the HealthMonitor and
PoolMonitorAssociation models.
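
For illustration, here is a minimal, self-contained sketch of the
mechanism (the generic Monitor/Association names are illustrative,
not the actual Neutron models):

    import sqlalchemy as sa
    from sqlalchemy import orm
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Monitor(Base):
        __tablename__ = 'monitors'
        id = sa.Column(sa.Integer, primary_key=True)
        # cascade="all" propagates session.delete() to the children
        pools = orm.relationship("Association", backref="monitor",
                                 cascade="all")

    class Association(Base):
        __tablename__ = 'associations'
        monitor_id = sa.Column(sa.Integer,
                               sa.ForeignKey('monitors.id'),
                               primary_key=True)

    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = orm.Session(bind=engine)

    monitor = Monitor(id=1)
    monitor.pools.append(Association())
    session.add(monitor)
    session.commit()

    session.delete(monitor)  # Association rows are deleted too
    session.commit()
    assert session.query(Association).count() == 0
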
Fixes bug 1169107.
Change-Id: I674c381192719a56433f67be53a32203c36ebe2b
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
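+ # With cascade="all" set on the relationship below, deleting a
+ # HealthMonitor also deletes its PoolMonitorAssociation rows, so
+ # no manual cleanup is needed in delete_health_monitor()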
+ pools = orm.relationship(
+ "PoolMonitorAssociation",
+ backref="healthmonitor",
+ cascade="all"
+ )
+
class PoolMonitorAssociation(model_base.BASEV2):
"""
monitor_id = sa.Column(sa.String(36),
sa.ForeignKey("healthmonitors.id"),
primary_key=True)
- monitor = orm.relationship("HealthMonitor",
- backref="pools_poolmonitorassociations")
class LoadBalancerPluginDb(LoadBalancerPluginBase):
assoc = PoolMonitorAssociation(pool_id=pool_id,
monitor_id=monitor_id)
- assoc.monitor = monitor
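+ # "healthmonitor" is the backref created by HealthMonitor.pools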
+ assoc.healthmonitor = monitor
pool.monitors.append(assoc)
monitors = []
def delete_health_monitor(self, context, id):
with context.session.begin(subtransactions=True):
- assoc_qry = context.session.query(PoolMonitorAssociation)
- pool_qry = context.session.query(Pool)
- for assoc in assoc_qry.filter_by(monitor_id=id).all():
- try:
- pool = pool_qry.filter_by(id=assoc['pool_id']).one()
- except exc.NoResultFound:
- raise loadbalancer.PoolNotFound(pool_id=pool['id'])
- pool.monitors.remove(assoc)
monitor_db = self._get_resource(context, HealthMonitor, id)
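+ # session.delete() now cascades to PoolMonitorAssociation rows
+ # via the cascade="all" flag on HealthMonitor.pools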
context.session.delete(monitor_db)
m.status = constants.ACTIVE
for hm in pool.monitors:
- if hm.monitor.status in ACTIVE_PENDING:
- hm.monitor.status = constants.ACTIVE
+ if hm.healthmonitor.status in ACTIVE_PENDING:
+ hm.healthmonitor.status = constants.ACTIVE
if (pool.status != constants.ACTIVE
or pool.vip.status != constants.ACTIVE):
for m in pool.members if m.status == constants.ACTIVE
]
retval['healthmonitors'] = [
- self.plugin._make_health_monitor_dict(hm.monitor)
+ self.plugin._make_health_monitor_dict(hm.healthmonitor)
for hm in pool.monitors
- if hm.monitor.status == constants.ACTIVE
+ if hm.healthmonitor.status == constants.ACTIVE
]
return retval
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
+ def test_delete_healthmonitor_cascade_deletion_of_associations(self):
+ with self.health_monitor(type='HTTP', no_delete=True) as monitor:
+ with self.pool() as pool:
+ data = {
+ 'health_monitor': {
+ 'id': monitor['health_monitor']['id'],
+ 'tenant_id': self._tenant_id
+ }
+ }
+ req = self.new_create_request(
+ 'pools',
+ data,
+ fmt=self.fmt,
+ id=pool['pool']['id'],
+ subresource='health_monitors')
+ res = req.get_response(self.ext_api)
+ self.assertEqual(res.status_int, 201)
+
+ ctx = context.get_admin_context()
+
+ # check if we actually have corresponding Pool associations
+ qry = ctx.session.query(ldb.PoolMonitorAssociation)
+ qry = qry.filter_by(monitor_id=monitor['health_monitor']['id'])
+ self.assertTrue(qry.all())
+ # delete the HealthMonitor instance
+ req = self.new_delete_request(
+ 'health_monitors',
+ monitor['health_monitor']['id']
+ )
+ res = req.get_response(self.ext_api)
+ self.assertEqual(res.status_int, 204)
+ # check if all corresponding Pool associations are deleted
+ qry = ctx.session.query(ldb.PoolMonitorAssociation)
+ qry = qry.filter_by(monitor_id=monitor['health_monitor']['id'])
+ self.assertFalse(qry.all())
+
def test_show_healthmonitor(self):
with self.health_monitor() as monitor:
keys = [('type', "TCP"),