network_scheduler = None
router_scheduler = None
+ @staticmethod
+ def is_eligible_agent(active, agent):
+ if active is None:
+ # filtering by activeness is disabled, all agents are eligible
+ return True
+ else:
+ # note(rpodolyaka): original behaviour is preserved here: if active
+ # filter is set, only agents which are 'up'
+ # (i.e. have a recent heartbeat timestamp)
+ # are eligible, even if active is False
+ return not agents_db.AgentDbMixin.is_agent_down(
+ agent['heartbeat_timestamp'])
+
def get_dhcp_agents_hosting_networks(
self, context, network_ids, active=None):
if not network_ids:
NetworkDhcpAgentBinding.network_id in network_ids)
if active is not None:
query = (query.filter(agents_db.Agent.admin_state_up == active))
- dhcp_agents = [binding.dhcp_agent for binding in query.all()]
- if active is not None:
- dhcp_agents = [dhcp_agent for dhcp_agent in
- dhcp_agents if not
- agents_db.AgentDbMixin.is_agent_down(
- dhcp_agent['heartbeat_timestamp'])]
- return dhcp_agents
+
+ return [binding.dhcp_agent
+ for binding in query
+ if AgentSchedulerDbMixin.is_eligible_agent(active,
+ binding.dhcp_agent)]
def add_network_to_dhcp_agent(self, context, id, network_id):
self._get_network(context, network_id)
def list_networks_on_dhcp_agent(self, context, id):
query = context.session.query(NetworkDhcpAgentBinding.network_id)
- net_ids = query.filter(
- NetworkDhcpAgentBinding.dhcp_agent_id == id).all()
+ query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == id)
+
+ net_ids = [item[0] for item in query]
if net_ids:
- _ids = [item[0] for item in net_ids]
return {'networks':
- self.get_networks(context, filters={'id': _ids})}
+ self.get_networks(context, filters={'id': net_ids})}
else:
return {'networks': []}
if not agent.admin_state_up:
return []
query = context.session.query(NetworkDhcpAgentBinding.network_id)
- net_ids = query.filter(
- NetworkDhcpAgentBinding.dhcp_agent_id == agent.id).all()
+ query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == agent.id)
+
+ net_ids = [item[0] for item in query]
if net_ids:
- _ids = [item[0] for item in net_ids]
return self.get_networks(
- context, filters={'id': _ids, 'admin_state_up': [True]})
+ context,
+ filters={'id': net_ids, 'admin_state_up': [True]}
+ )
else:
return []
def list_routers_on_l3_agent(self, context, id):
query = context.session.query(RouterL3AgentBinding.router_id)
- router_ids = query.filter(
- RouterL3AgentBinding.l3_agent_id == id).all()
+ query = query.filter(RouterL3AgentBinding.l3_agent_id == id)
+
+ router_ids = [item[0] for item in query]
if router_ids:
- _ids = [item[0] for item in router_ids]
return {'routers':
- self.get_routers(context, filters={'id': _ids})}
+ self.get_routers(context, filters={'id': router_ids})}
else:
return {'routers': []}
RouterL3AgentBinding.l3_agent_id == agent.id)
if router_id:
query = query.filter(RouterL3AgentBinding.router_id == router_id)
- router_ids = query.all()
+
+ router_ids = [item[0] for item in query]
if router_ids:
- _ids = [item[0] for item in router_ids]
- routers = self.get_sync_data(context, router_ids=_ids,
- active=True)
- return routers
- return []
+ return self.get_sync_data(context, router_ids=router_ids,
+ active=True)
+ else:
+ return []
def get_l3_agents_hosting_routers(self, context, router_ids,
admin_state_up=None,
if admin_state_up is not None:
query = (query.filter(agents_db.Agent.admin_state_up ==
admin_state_up))
- l3_agents = [binding.l3_agent for binding in query.all()]
+ l3_agents = [binding.l3_agent for binding in query]
if active is not None:
l3_agents = [l3_agent for l3_agent in
l3_agents if not
column = getattr(agents_db.Agent, key, None)
if column:
query = query.filter(column.in_(value))
- l3_agents = query.all()
- if active is not None:
- l3_agents = [l3_agent for l3_agent in
- l3_agents if not
- agents_db.AgentDbMixin.is_agent_down(
- l3_agent['heartbeat_timestamp'])]
- return l3_agents
+
+ return [l3_agent
+ for l3_agent in query
+ if AgentSchedulerDbMixin.is_eligible_agent(active, l3_agent)]
def get_l3_agent_candidates(self, sync_router, l3_agents):
"""Get the valid l3 agents for the router from a list of l3_agents."""
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
- items = [dict_func(c, fields) for c in query.all()]
+ items = [dict_func(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
expired_qry = expired_qry.filter(
models_v2.IPAllocation.expiration <= timeutils.utcnow())
- for expired in expired_qry.all():
+ for expired in expired_qry:
QuantumDbPluginV2._recycle_ip(context,
network_id,
expired['subnet_id'],
# Grab all allocation pools for the subnet
pool_qry = context.session.query(
models_v2.IPAllocationPool).with_lockmode('update')
- allocation_pools = pool_qry.filter_by(subnet_id=subnet_id).all()
+ allocation_pools = pool_qry.filter_by(subnet_id=subnet_id)
# Find the allocation pool for the IP to recycle
pool_id = None
for allocation_pool in allocation_pools:
models_v2.IPAvailabilityRange,
models_v2.IPAllocationPool).join(
models_v2.IPAllocationPool).with_lockmode('update')
- results = range_qry.filter_by(subnet_id=subnet_id).all()
+ results = range_qry.filter_by(subnet_id=subnet_id)
for (range, pool) in results:
first = int(netaddr.IPAddress(range['first_ip']))
last = int(netaddr.IPAddress(range['last_ip']))
# Check if the requested IP is in a defined allocation pool
pool_qry = context.session.query(models_v2.IPAllocationPool)
- allocation_pools = pool_qry.filter_by(subnet_id=subnet_id).all()
+ allocation_pools = pool_qry.filter_by(subnet_id=subnet_id)
ip = netaddr.IPAddress(ip_address)
for allocation_pool in allocation_pools:
allocation_pool_range = netaddr.IPRange(
return
ports = self._model_query(
context, models_v2.Port).filter(
- models_v2.Port.network_id == id).all()
+ models_v2.Port.network_id == id)
subnets = self._model_query(
context, models_v2.Subnet).filter(
- models_v2.Subnet.network_id == id).all()
+ models_v2.Subnet.network_id == id)
tenant_ids = set([port['tenant_id'] for port in ports] +
[subnet['tenant_id'] for subnet in subnets])
# raise if multiple tenants found or if the only tenant found
# Check if any tenant owned ports are using this subnet
allocated_qry = context.session.query(models_v2.IPAllocation)
allocated_qry = allocated_qry.options(orm.joinedload('ports'))
- allocated = allocated_qry.filter_by(subnet_id=id).all()
+ allocated = allocated_qry.filter_by(subnet_id=id)
only_auto_del = all(not a.port_id or
a.ports.device_owner in AUTO_DELETE_PORT_OWNERS
raise q_exc.SubnetInUse(subnet_id=id)
# remove network owned ports
- for allocation in allocated:
- context.session.delete(allocation)
+ allocated.delete()
context.session.delete(subnet)
allocated_qry = context.session.query(
models_v2.IPAllocation).with_lockmode('update')
# recycle all of the IP's
- allocated = allocated_qry.filter_by(port_id=id).all()
- if allocated:
- for a in allocated:
- subnet = self._get_subnet(context, a['subnet_id'])
- # Check if IP was allocated from allocation pool
- if QuantumDbPluginV2._check_ip_in_allocation_pool(
- context, a['subnet_id'], subnet['gateway_ip'],
- a['ip_address']):
- QuantumDbPluginV2._hold_ip(context,
- a['network_id'],
- a['subnet_id'],
- id,
- a['ip_address'])
- else:
- # IPs out of allocation pool will not be recycled, but
- # we do need to delete the allocation from the DB
- QuantumDbPluginV2._delete_ip_allocation(
- context, a['network_id'],
- a['subnet_id'], a['ip_address'])
- msg_dict = {'address': a['ip_address'],
- 'subnet_id': a['subnet_id']}
- msg = _("%(address)s (%(subnet_id)s) is not "
- "recycled") % msg_dict
- LOG.debug(msg)
+ allocated = allocated_qry.filter_by(port_id=id)
+ for a in allocated:
+ subnet = self._get_subnet(context, a['subnet_id'])
+ # Check if IP was allocated from allocation pool
+ if QuantumDbPluginV2._check_ip_in_allocation_pool(
+ context, a['subnet_id'], subnet['gateway_ip'],
+ a['ip_address']):
+ QuantumDbPluginV2._hold_ip(context,
+ a['network_id'],
+ a['subnet_id'],
+ id,
+ a['ip_address'])
+ else:
+ # IPs out of allocation pool will not be recycled, but
+ # we do need to delete the allocation from the DB
+ QuantumDbPluginV2._delete_ip_allocation(
+ context, a['network_id'],
+ a['subnet_id'], a['ip_address'])
+ msg_dict = {'address': a['ip_address'],
+ 'subnet_id': a['subnet_id']}
+ msg = _("%(address)s (%(subnet_id)s) is not "
+ "recycled") % msg_dict
+ LOG.debug(msg)
context.session.delete(port)
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
- items = [self._make_port_dict(c, fields) for c in query.all()]
+ items = [self._make_port_dict(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
def _get_extra_routes_by_router_id(self, context, id):
query = context.session.query(RouterRoute)
query.filter(RouterRoute.router_id == id)
- extra_routes = query.all()
- return self._make_extra_route_list(extra_routes)
+ return self._make_extra_route_list(query)
def get_router(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
network_id, subnet_id, subnet_cidr):
try:
rport_qry = context.session.query(models_v2.Port)
- rports = rport_qry.filter_by(
- device_id=router_id).all()
+ rports = rport_qry.filter_by(device_id=router_id)
# its possible these ports on on the same network, but
# different subnet
new_ipnet = netaddr.IPNetwork(subnet_cidr)
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=DEVICE_OWNER_ROUTER_INTF,
- network_id=subnet['network_id']).all()
+ network_id=subnet['network_id'])
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
if not vals:
return nets
- ext_nets = set([en['network_id'] for en in
- context.session.query(ExternalNetwork).all()])
+ ext_nets = set(en['network_id']
+ for en in context.session.query(ExternalNetwork))
if vals[0]:
return [n for n in nets if n['id'] in ext_nets]
else:
fields=None, sorts=None, limit=None, marker_obj=None,
page_reverse=False):
query = self._get_collection_query(context, model, filters)
- return [dict_func(c, fields) for c in query.all()]
+ return [dict_func(c, fields) for c in query]
def _get_collection_count(self, context, model, filters=None):
return self._get_collection_query(context, model, filters).count()
with context.session.begin(subtransactions=True):
vip = self._get_resource(context, Vip, id)
qry = context.session.query(Pool)
- for pool in qry.filter_by(vip_id=id).all():
+ for pool in qry.filter_by(vip_id=id):
pool.update({"vip_id": None})
context.session.delete(vip)
collection = self._model_query(context, Pool)
collection = self._apply_filters_to_query(collection, Pool, filters)
return [self._make_pool_dict(c, fields)
- for c in collection.all()]
+ for c in collection]
def stats(self, context, pool_id):
with context.session.begin(subtransactions=True):
# update with tenant specific limits
q_qry = context.session.query(Quota).filter_by(tenant_id=tenant_id)
- tenant_quota.update((q['resource'], q['limit']) for q in q_qry.all())
+ tenant_quota.update((q['resource'], q['limit']) for q in q_qry)
return tenant_quota
After deletion, this tenant will use default quota values in conf.
"""
with context.session.begin():
- tenant_quotas = context.session.query(Quota).filter_by(
- tenant_id=tenant_id).all()
- for quota in tenant_quotas:
- context.session.delete(quota)
+ tenant_quotas = context.session.query(Quota)
+ tenant_quotas = tenant_quotas.filter_by(tenant_id=tenant_id)
+ tenant_quotas.delete()
@staticmethod
def get_all_quotas(context, resources):
all_tenant_quotas = {}
- for quota in context.session.query(Quota).all():
+ for quota in context.session.query(Quota):
tenant_id = quota['tenant_id']
# avoid setdefault() because only want to copy when actually req'd
query = query.join(models_v2.IPAllocation,
ip_port == sg_binding_port)
query = query.filter(sg_binding_sgid.in_(remote_group_ids))
- ip_in_db = query.all()
- for security_group_id, ip_address in ip_in_db:
+ for security_group_id, ip_address in query:
ips_by_group[security_group_id].append(ip_address)
return ips_by_group
for network_id in network_ids:
ips[network_id] = []
- for port, ip in query.all():
+ for port, ip in query:
ips[port['network_id']].append(ip)
return ips
if column:
query = query.filter(column.in_(value))
return [self._make_svc_type_dict(context, svc_type, fields)
- for svc_type in query.all()]
+ for svc_type in query]
def create_service_type(self, context, service_type):
"""Create a new service type."""
# Check if ports are using this subnet
allocated_qry = context.session.query(models_v2.IPAllocation)
allocated_qry = allocated_qry.options(orm.joinedload('ports'))
- allocated = allocated_qry.filter_by(subnet_id=id).all()
+ allocated = allocated_qry.filter_by(subnet_id=id)
prefix = db_base_plugin_v2.AGENT_OWNER_PREFIX
if not all(not a.port_id or a.ports.device_owner.startswith(prefix)
# get existing allocations for all physical networks
allocations = dict()
allocs_q = session.query(hyperv_model.VlanAllocation)
- for alloc in allocs_q.all():
+ for alloc in allocs_q:
allocations.setdefault(alloc.physical_network,
set()).add(alloc)
column = getattr(models_v2.Network, key, None)
if column:
collection = collection.filter(column.in_(value))
- return [self._make_network_dict(c, fields) for c in collection.all()]
+ return [self._make_network_dict(c, fields) for c in collection]
def get_networks(self, context, filters=None, fields=None):
nets = self.get_networks_with_flavor(context, filters, None)
column = getattr(l3_db.Router, key, None)
if column:
collection = collection.filter(column.in_(value))
- return [self._make_router_dict(c, fields) for c in collection.all()]
+ return [self._make_router_dict(c, fields) for c in collection]
def get_routers(self, context, filters=None, fields=None):
routers = self.get_routers_with_flavor(context, filters,
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
- network_id=network_id).all()
+ network_id=network_id)
network_port = None
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
- network_id=subnet['network_id']).all()
+ network_id=subnet['network_id'])
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
port_id = p['id']
filters = {'device_id': [port.get('device_id')],
'network_id': [network['network_id'] for
network in networks_with_same_queue]}
- query = self._model_query(context, models_v2.Port)
- ports = self._apply_filters_to_query(query, models_v2.Port,
- filters).all()
-
- if ports:
+ query = self._model_query(context, models_v2.Port.id)
+ query = self._apply_filters_to_query(query, models_v2.Port,
+ filters)
+ ports_ids = [p[0] for p in query]
+ if ports_ids:
# shared queue already exists find the queue id
- filters = {'port_id': [p['id'] for p in ports]}
- queues = self._get_port_queue_bindings(context, filters,
+ queues = self._get_port_queue_bindings(context,
+ {'port_id': ports_ids},
['queue_id'])
if queues:
return queues[0]['queue_id']
def get_tunnel_endpoints():
session = db.get_session()
try:
+ # TODO(rpodolyaka): Query.all() can't raise the NoResultFound exception
+ # Fix this later along with other identical cases.
tunnels = session.query(ovs_models_v2.TunnelEndpoint).all()
except exc.NoResultFound:
return []
def _generate_tunnel_id(session):
try:
+ # TODO(rpodolyaka): Query.all() can't raise the NoResultFound exception
+ # Fix this later along with other identical cases.
tunnels = session.query(ovs_models_v2.TunnelEndpoint).all()
except exc.NoResultFound:
return 0
for tun in self.tunnel_key.all_list():
self.tun_client.update_tunnel_key(tun.network_id, tun.tunnel_key)
session = db.get_session()
- for port in session.query(models_v2.Port).all():
+ for port in session.query(models_v2.Port):
self.iface_client.update_network_id(port.id, port.network_id)
def _client_create_network(self, net_id, tunnel_key):
up = True # makes pep8 and sqlalchemy happy
qry = qry.filter(loadbalancer_db.Vip.admin_state_up == up)
qry = qry.filter(loadbalancer_db.Pool.admin_state_up == up)
- return [id for id, in qry.all()]
+ return [id for id, in qry]
def get_logical_device(self, context, pool_id=None, activate=True,
**kwargs):
)
qry = qry.filter_by(monitor_id=hm['id'])
- for assoc in qry.all():
+ for assoc in qry:
self.agent_rpc.modify_pool(context, assoc['pool_id'])
return hm
)
qry = qry.filter_by(monitor_id=id)
- pool_ids = [a['pool_id'] for a in qry.all()]
+ pool_ids = [a['pool_id'] for a in qry]
super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
for pid in pool_ids:
self.agent_rpc.modify_pool(context, pid)
session = db_api.get_session()
gw_query = session.query(nicira_networkgw_db.NetworkGateway)
dev_query = session.query(nicira_networkgw_db.NetworkGatewayDevice)
- self.assertEqual(exp_gw_count, len(gw_query.all()))
- self.assertEqual(0, len(dev_query.all()))
+ self.assertEqual(exp_gw_count, gw_query.count())
+ self.assertEqual(0, dev_query.count())
def test_delete_network_gateway(self):
self._test_delete_network_gateway()
q = update_context.session.query(models_v2.IPAllocation)
q = q.filter_by(port_id=None, ip_address=ip_address)
- self.assertEqual(len(q.all()), 1)
+ self.assertEqual(q.count(), 1)
def test_recycle_held_ip_address(self):
plugin = QuantumManager.get_plugin()