"neutron/scheduler",
"neutron/server",
"neutron/services/firewall",
- "neutron/services/l3_router"]
+ "neutron/services/l3_router",
+ "neutron/services/loadbalancer"]
return any([dir in filename for dir in dirs])
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context
+from neutron.openstack.common.gettextutils import _LE, _LI
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
- LOG.exception(_("Failed reporting state!"))
+ LOG.exception(_LE("Failed reporting state!"))
def initialize_service_hook(self, started_by):
self.sync_state()
if stats:
self.plugin_rpc.update_pool_stats(pool_id, stats)
except Exception:
- LOG.exception(_('Error updating statistics on pool %s'),
+ LOG.exception(_LE('Error updating statistics on pool %s'),
pool_id)
self.needs_resync = True
self._reload_pool(pool_id)
except Exception:
- LOG.exception(_('Unable to retrieve ready devices'))
+ LOG.exception(_LE('Unable to retrieve ready devices'))
self.needs_resync = True
self.remove_orphans()
logical_config = self.plugin_rpc.get_logical_device(pool_id)
driver_name = logical_config['driver']
if driver_name not in self.device_drivers:
- LOG.error(_('No device driver '
- 'on agent: %s.'), driver_name)
+ LOG.error(_LE('No device driver on agent: %s.'), driver_name)
self.plugin_rpc.update_status(
'pool', pool_id, constants.ERROR)
return
self.instance_mapping[pool_id] = driver_name
self.plugin_rpc.pool_deployed(pool_id)
except Exception:
- LOG.exception(_('Unable to deploy instance for pool: %s'), pool_id)
+ LOG.exception(_LE('Unable to deploy instance for pool: %s'),
+ pool_id)
self.needs_resync = True
def _destroy_pool(self, pool_id):
del self.instance_mapping[pool_id]
self.plugin_rpc.pool_destroyed(pool_id)
except Exception:
- LOG.exception(_('Unable to destroy device for pool: %s'), pool_id)
+ LOG.exception(_LE('Unable to destroy device for pool: %s'),
+ pool_id)
self.needs_resync = True
def remove_orphans(self):
pass # Not all drivers will support this
def _handle_failed_driver_call(self, operation, obj_type, obj_id, driver):
- LOG.exception(_('%(operation)s %(obj)s %(id)s failed on device driver '
- '%(driver)s'),
+ LOG.exception(_LE('%(operation)s %(obj)s %(id)s failed on device '
+ 'driver %(driver)s'),
{'operation': operation.capitalize(), 'obj': obj_type,
'id': obj_id, 'driver': driver})
self.plugin_rpc.update_status(obj_type, obj_id, constants.ERROR)
def create_pool(self, context, pool, driver_name):
if driver_name not in self.device_drivers:
- LOG.error(_('No device driver on agent: %s.'), driver_name)
+ LOG.error(_LE('No device driver on agent: %s.'), driver_name)
self.plugin_rpc.update_status('pool', pool['id'], constants.ERROR)
return
self.needs_resync = True
else:
for pool_id in self.instance_mapping.keys():
- LOG.info(_("Destroying pool %s due to agent disabling"),
+ LOG.info(_LI("Destroying pool %s due to agent disabling"),
pool_id)
self._destroy_pool(pool_id)
- LOG.info(_("Agent_updated by server side %s!"), payload)
+ LOG.info(_LI("Agent_updated by server side %s!"), payload)
from neutron.db import agentschedulers_db
from neutron.db import model_base
from neutron.extensions import lbaas_agentscheduler
+from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
lbaas_agent = plugin.get_lbaas_agent_hosting_pool(
context, pool['id'])
if lbaas_agent:
- LOG.debug(_('Pool %(pool_id)s has already been hosted'
- ' by lbaas agent %(agent_id)s'),
+ LOG.debug('Pool %(pool_id)s has already been hosted'
+ ' by lbaas agent %(agent_id)s',
{'pool_id': pool['id'],
'agent_id': lbaas_agent['id']})
return
active_agents = plugin.get_lbaas_agents(context, active=True)
if not active_agents:
- LOG.warn(_('No active lbaas agents for pool %s'), pool['id'])
+ LOG.warn(_LW('No active lbaas agents for pool %s'), pool['id'])
return
candidates = plugin.get_lbaas_agent_candidates(device_driver,
active_agents)
if not candidates:
- LOG.warn(_('No lbaas agent supporting device driver %s'),
+ LOG.warn(_LW('No lbaas agent supporting device driver %s'),
device_driver)
return
binding.agent = chosen_agent
binding.pool_id = pool['id']
context.session.add(binding)
- LOG.debug(_('Pool %(pool_id)s is scheduled to '
- 'lbaas agent %(agent_id)s'),
+ LOG.debug('Pool %(pool_id)s is scheduled to lbaas agent '
+ '%(agent_id)s',
{'pool_id': pool['id'],
'agent_id': chosen_agent['id']})
return chosen_agent
from neutron.db.loadbalancer import loadbalancer_db
from neutron.extensions import lbaas_agentscheduler
from neutron.extensions import portbindings
+from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
if not agents:
return []
elif len(agents) > 1:
- LOG.warning(_('Multiple lbaas agents found on host %s'), host)
+ LOG.warning(_LW('Multiple lbaas agents found on host %s'),
+ host)
pools = self.plugin.list_pools_on_lbaas_agent(context,
agents[0].id)
pool_ids = [pool['id'] for pool in pools['pools']]
except n_exc.NotFound:
# update_status may come from agent on an object which was
# already deleted from db with other request
- LOG.warning(_('Cannot update status: %(obj_type)s %(obj_id)s '
- 'not found in the DB, it was probably deleted '
- 'concurrently'),
+ LOG.warning(_LW('Cannot update status: %(obj_type)s %(obj_id)s '
+ 'not found in the DB, it was probably deleted '
+ 'concurrently'),
{'obj_type': obj_type, 'obj_id': obj_id})
def pool_destroyed(self, context, pool_id=None):
port_id
)
except n_exc.PortNotFound:
- msg = _('Unable to find port %s to plug.')
- LOG.debug(msg, port_id)
+ LOG.debug('Unable to find port %s to plug.', port_id)
return
port['admin_state_up'] = True
port_id
)
except n_exc.PortNotFound:
- msg = _('Unable to find port %s to unplug. This can occur when '
- 'the Vip has been deleted first.')
- LOG.debug(msg, port_id)
+ LOG.debug('Unable to find port %s to unplug. This can occur when '
+ 'the Vip has been deleted first.',
+ port_id)
return
port['admin_state_up'] = False
)
except n_exc.PortNotFound:
- msg = _('Unable to find port %s to unplug. This can occur when '
- 'the Vip has been deleted first.')
- LOG.debug(msg, port_id)
+ LOG.debug('Unable to find port %s to unplug. This can occur when '
+ 'the Vip has been deleted first.',
+ port_id)
def update_pool_stats(self, context, pool_id=None, stats=None, host=None):
self.plugin.update_pool_stats(context, pool_id, data=stats)
from eventlet import queue
from heleosapi import exceptions as h_exc
+from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.embrane.common import contexts as ctx
from neutron.services.loadbalancer.drivers.embrane.agent import lb_operations
operation_context.n_context,
operation_context.item)
except Exception:
- LOG.exception(_('Unhandled exception occurred'))
+ LOG.exception(_LE('Unhandled exception occurred'))
from heleosapi import exceptions as h_exc
+from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import log as logging
from neutron.services.loadbalancer import constants as lcon
from neutron.services.loadbalancer.drivers.embrane import constants as econ
try:
driver._heleos_api.delete_dva(context.tenant_id, vip['id'])
except h_exc.DvaNotFound:
- LOG.warning(_('The load balancer %s had no physical representation, '
- 'likely already deleted'), vip['id'])
+ LOG.warning(_LW('The load balancer %s had no physical representation, '
+ 'likely already deleted'), vip['id'])
return econ.DELETED
from neutron.common import exceptions as n_exc
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.extensions import loadbalancer as lb_ext
+from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as pcon
from neutron.plugins.embrane.common import contexts as embrane_ctx
subnet = self.plugin._core_plugin.get_subnet(context,
db_pool["subnet_id"])
except n_exc.SubnetNotFound:
- LOG.warning(_("Subnet assigned to pool %s doesn't exist, "
- "backend port can't be created"), db_pool['id'])
+ LOG.warning(_LW("Subnet assigned to pool %s doesn't exist, "
+ "backend port can't be created"), db_pool['id'])
return
fixed_ip = {'subnet_id': subnet['id'],
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as sdb
+from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.common import constants as ccon
try:
self.synchronize_vips(ctx)
except h_exc.PollingException as e:
- LOG.exception(_('Unhandled exception occurred'), e)
+        LOG.exception(_LE('Unhandled exception occurred: %s'), e)
def synchronize_vips(self, ctx):
session = ctx.session
from neutron.common import exceptions
from neutron.common import utils as n_utils
from neutron.openstack.common import excutils
+from neutron.openstack.common.gettextutils import _LE, _LW
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
pool_stats['members'] = self._get_servers_stats(parsed_stats)
return pool_stats
else:
- LOG.warn(_('Stats socket not found for pool %s'), pool_id)
+ LOG.warn(_LW('Stats socket not found for pool %s'), pool_id)
return {}
def _get_backend_stats(self, parsed_stats):
return self._parse_stats(raw_stats)
except socket.error as e:
- LOG.warn(_('Error while connecting to stats socket: %s'), e)
+ LOG.warn(_LW('Error while connecting to stats socket: %s'), e)
return {}
def _parse_stats(self, raw_stats):
utils.execute(['kill', '-9', pid], root_helper)
except RuntimeError:
LOG.exception(
- _('Unable to kill haproxy process: %s'),
+ _LE('Unable to kill haproxy process: %s'),
pid
)
import requests
from neutron.common import exceptions as n_exc
+from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def __init__(self, service_uri, username, password):
if not service_uri:
- msg = _("No NetScaler Control Center URI specified. "
- "Cannot connect.")
- LOG.exception(msg)
+ LOG.exception(_LE("No NetScaler Control Center URI specified. "
+ "Cannot connect."))
raise NCCException(NCCException.CONNECTION_ERROR)
self.service_uri = service_uri.strip('/')
self.auth = None
response = requests.request(method, url=resource_uri,
headers=headers, data=body)
except requests.exceptions.ConnectionError:
- msg = (_("Connection error occurred while connecting to %s") %
- self.service_uri)
- LOG.exception(msg)
+ LOG.exception(_LE("Connection error occurred while connecting "
+ "to %s"),
+ self.service_uri)
raise NCCException(NCCException.CONNECTION_ERROR)
except requests.exceptions.SSLError:
- msg = (_("SSL error occurred while connecting to %s") %
- self.service_uri)
- LOG.exception(msg)
+ LOG.exception(_LE("SSL error occurred while connecting to %s"),
+ self.service_uri)
raise NCCException(NCCException.CONNECTION_ERROR)
except requests.exceptions.Timeout:
- msg = _("Request to %s timed out") % self.service_uri
- LOG.exception(msg)
+ LOG.exception(_LE("Request to %s timed out"), self.service_uri)
raise NCCException(NCCException.CONNECTION_ERROR)
except (requests.exceptions.URLRequired,
requests.exceptions.InvalidURL,
requests.exceptions.MissingSchema,
requests.exceptions.InvalidSchema):
- msg = _("Request did not specify a valid URL")
- LOG.exception(msg)
+ LOG.exception(_LE("Request did not specify a valid URL"))
raise NCCException(NCCException.REQUEST_ERROR)
except requests.exceptions.TooManyRedirects:
- msg = _("Too many redirects occurred for request to %s")
- LOG.exception(msg)
+            LOG.exception(_LE("Too many redirects occurred for request "
+                              "to %s"),
+                          self.service_uri)
raise NCCException(NCCException.REQUEST_ERROR)
except requests.exceptions.RequestException:
- msg = (_("A request error while connecting to %s") %
- self.service_uri)
- LOG.exception(msg)
+            LOG.exception(_LE("A request error occurred while connecting "
+                              "to %s"),
+                          self.service_uri)
raise NCCException(NCCException.REQUEST_ERROR)
except Exception:
- msg = (_("A unknown error occurred during request to %s") %
- self.service_uri)
- LOG.exception(msg)
+            LOG.exception(_LE("An unknown error occurred during request "
+                              "to %s"),
+                          self.service_uri)
raise NCCException(NCCException.UNKNOWN_ERROR)
resp_dict = self._get_response_dict(response)
- LOG.debug(_("Response: %s"), resp_dict['body'])
+ LOG.debug("Response: %s", resp_dict['body'])
response_status = resp_dict['status']
if response_status == requests.codes.unauthorized:
- LOG.exception(_("Unable to login. Invalid credentials passed."
- "for: %s"), self.service_uri)
+            LOG.exception(_LE("Unable to log in. Invalid credentials "
+                              "passed for: %s"),
+                          self.service_uri)
raise NCCException(NCCException.RESPONSE_ERROR)
if not self._is_valid_response(response_status):
- msg = (_("Failed %(method)s operation on %(url)s "
- "status code: %(response_status)s") %
- {"method": method,
- "url": resource_uri,
- "response_status": response_status})
- LOG.exception(msg)
+            LOG.exception(_LE("Failed %(method)s operation on %(url)s, "
+                              "status code: %(response_status)s"),
+ {"method": method,
+ "url": resource_uri,
+ "response_status": response_status})
raise NCCException(NCCException.RESPONSE_ERROR)
return response_status, resp_dict
from neutron.api.v2 import attributes
from neutron.db.loadbalancer import loadbalancer_db
+from neutron.openstack.common.gettextutils import _LI
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
network_info = self._get_vip_network_info(context, vip)
ncc_vip = self._prepare_vip_for_creation(vip)
ncc_vip = dict(ncc_vip.items() + network_info.items())
- msg = _("NetScaler driver vip creation: %s") % repr(ncc_vip)
- LOG.debug(msg)
+ LOG.debug("NetScaler driver vip creation: %r", ncc_vip)
status = constants.ACTIVE
try:
self.client.create_resource(context.tenant_id, VIPS_RESOURCE,
"""Update a vip on a NetScaler device."""
update_vip = self._prepare_vip_for_update(vip)
resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
- msg = (_("NetScaler driver vip %(vip_id)s update: %(vip_obj)s") %
- {"vip_id": vip["id"], "vip_obj": repr(vip)})
- LOG.debug(msg)
+ LOG.debug("NetScaler driver vip %(vip_id)s update: %(vip_obj)r",
+ {"vip_id": vip["id"], "vip_obj": vip})
status = constants.ACTIVE
try:
self.client.update_resource(context.tenant_id, resource_path,
def delete_vip(self, context, vip):
"""Delete a vip on a NetScaler device."""
resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
- msg = _("NetScaler driver vip removal: %s") % vip["id"]
- LOG.debug(msg)
+ LOG.debug("NetScaler driver vip removal: %s", vip["id"])
try:
self.client.remove_resource(context.tenant_id, resource_path)
except ncc_client.NCCException:
network_info)
ncc_pool = self._prepare_pool_for_creation(pool)
ncc_pool = dict(ncc_pool.items() + network_info.items())
- msg = _("NetScaler driver pool creation: %s") % repr(ncc_pool)
- LOG.debug(msg)
+ LOG.debug("NetScaler driver pool creation: %r", ncc_pool)
status = constants.ACTIVE
try:
self.client.create_resource(context.tenant_id, POOLS_RESOURCE,
"""Update a pool on a NetScaler device."""
ncc_pool = self._prepare_pool_for_update(pool)
resource_path = "%s/%s" % (POOLS_RESOURCE, old_pool["id"])
- msg = (_("NetScaler driver pool %(pool_id)s update: %(pool_obj)s") %
- {"pool_id": old_pool["id"], "pool_obj": repr(ncc_pool)})
- LOG.debug(msg)
+ LOG.debug("NetScaler driver pool %(pool_id)s update: %(pool_obj)r",
+ {"pool_id": old_pool["id"], "pool_obj": ncc_pool})
status = constants.ACTIVE
try:
self.client.update_resource(context.tenant_id, resource_path,
def delete_pool(self, context, pool):
"""Delete a pool on a NetScaler device."""
resource_path = "%s/%s" % (POOLS_RESOURCE, pool['id'])
- msg = _("NetScaler driver pool removal: %s") % pool["id"]
- LOG.debug(msg)
+ LOG.debug("NetScaler driver pool removal: %s", pool["id"])
try:
self.client.remove_resource(context.tenant_id, resource_path)
except ncc_client.NCCException:
def create_member(self, context, member):
"""Create a pool member on a NetScaler device."""
ncc_member = self._prepare_member_for_creation(member)
- msg = (_("NetScaler driver poolmember creation: %s") %
- repr(ncc_member))
- LOG.info(msg)
+ LOG.info(_LI("NetScaler driver poolmember creation: %r"),
+ ncc_member)
status = constants.ACTIVE
try:
self.client.create_resource(context.tenant_id,
"""Update a pool member on a NetScaler device."""
ncc_member = self._prepare_member_for_update(member)
resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, old_member["id"])
- msg = (_("NetScaler driver poolmember %(member_id)s update:"
- " %(member_obj)s") %
- {"member_id": old_member["id"],
- "member_obj": repr(ncc_member)})
- LOG.debug(msg)
+ LOG.debug("NetScaler driver poolmember %(member_id)s update: "
+ "%(member_obj)r",
+ {"member_id": old_member["id"],
+ "member_obj": ncc_member})
status = constants.ACTIVE
try:
self.client.update_resource(context.tenant_id, resource_path,
def delete_member(self, context, member):
"""Delete a pool member on a NetScaler device."""
resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, member['id'])
- msg = (_("NetScaler driver poolmember removal: %s") %
- member["id"])
- LOG.debug(msg)
+ LOG.debug("NetScaler driver poolmember removal: %s", member["id"])
try:
self.client.remove_resource(context.tenant_id, resource_path)
except ncc_client.NCCException:
pool_id)
resource_path = "%s/%s/%s" % (POOLS_RESOURCE, pool_id,
MONITORS_RESOURCE)
- msg = (_("NetScaler driver healthmonitor creation for pool %(pool_id)s"
- ": %(monitor_obj)s") %
- {"pool_id": pool_id,
- "monitor_obj": repr(ncc_hm)})
- LOG.debug(msg)
+ LOG.debug("NetScaler driver healthmonitor creation for pool "
+ "%(pool_id)s: %(monitor_obj)r",
+ {"pool_id": pool_id, "monitor_obj": ncc_hm})
status = constants.ACTIVE
try:
self.client.create_resource(context.tenant_id, resource_path,
ncc_hm = self._prepare_healthmonitor_for_update(health_monitor)
resource_path = "%s/%s" % (MONITORS_RESOURCE,
old_health_monitor["id"])
- msg = (_("NetScaler driver healthmonitor %(monitor_id)s update: "
- "%(monitor_obj)s") %
- {"monitor_id": old_health_monitor["id"],
- "monitor_obj": repr(ncc_hm)})
- LOG.debug(msg)
+ LOG.debug("NetScaler driver healthmonitor %(monitor_id)s update: "
+ "%(monitor_obj)r",
+ {"monitor_id": old_health_monitor["id"],
+ "monitor_obj": ncc_hm})
status = constants.ACTIVE
try:
self.client.update_resource(context.tenant_id, resource_path,
resource_path = "%s/%s/%s/%s" % (POOLS_RESOURCE, pool_id,
MONITORS_RESOURCE,
health_monitor["id"])
- msg = (_("NetScaler driver healthmonitor %(monitor_id)s"
- "removal for pool %(pool_id)s") %
- {"monitor_id": health_monitor["id"],
- "pool_id": pool_id})
- LOG.debug(msg)
+        LOG.debug("NetScaler driver healthmonitor %(monitor_id)s "
+                  "removal for pool %(pool_id)s",
+ {"monitor_id": health_monitor["id"],
+ "pool_id": pool_id})
try:
self.client.remove_resource(context.tenant_id, resource_path)
except ncc_client.NCCException:
def stats(self, context, pool_id):
"""Retrieve pool statistics from the NetScaler device."""
resource_path = "%s/%s" % (POOLSTATS_RESOURCE, pool_id)
- msg = _("NetScaler driver pool stats retrieval: %s") % pool_id
- LOG.debug(msg)
+ LOG.debug("NetScaler driver pool stats retrieval: %s", pool_id)
try:
stats = self.client.retrieve_resource(context.tenant_id,
resource_path)[1]
device_id = '_lb-snatport-' + subnet_id
subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
network_id = subnet['network_id']
- msg = (_("Filtering ports based on network_id=%(network_id)s, "
- "tenant_id=%(tenant_id)s, device_id=%(device_id)s") %
- {'network_id': network_id,
- 'tenant_id': tenant_id,
- 'device_id': device_id})
- LOG.debug(msg)
+ LOG.debug("Filtering ports based on network_id=%(network_id)s, "
+ "tenant_id=%(tenant_id)s, device_id=%(device_id)s",
+ {'network_id': network_id,
+ 'tenant_id': tenant_id,
+ 'device_id': device_id})
filter_dict = {
'network_id': [network_id],
'tenant_id': [tenant_id],
ports = self.plugin._core_plugin.get_ports(context,
filters=filter_dict)
if ports:
- msg = _("Found an existing SNAT port for subnet %s") % subnet_id
- LOG.info(msg)
+ LOG.info(_LI("Found an existing SNAT port for subnet %s"),
+ subnet_id)
return ports[0]
- msg = _("Found no SNAT ports for subnet %s") % subnet_id
- LOG.info(msg)
+ LOG.info(_LI("Found no SNAT ports for subnet %s"), subnet_id)
def _create_snatport_for_subnet(self, context, tenant_id, subnet_id,
ip_address):
}
port = self.plugin._core_plugin.create_port(context,
{'port': port_data})
- msg = _("Created SNAT port: %s") % repr(port)
- LOG.info(msg)
+ LOG.info(_LI("Created SNAT port: %r"), port)
return port
def _remove_snatport_for_subnet(self, context, tenant_id, subnet_id):
port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
if port:
self.plugin._core_plugin.delete_port(context, port['id'])
- msg = _("Removed SNAT port: %s") % repr(port)
- LOG.info(msg)
+ LOG.info(_LI("Removed SNAT port: %r"), port)
def _create_snatport_for_subnet_if_not_exists(self, context, tenant_id,
subnet_id, network_info):
port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
if not port:
- msg = _("No SNAT port found for subnet %s."
- " Creating one...") % subnet_id
- LOG.info(msg)
+ LOG.info(_LI("No SNAT port found for subnet %s. Creating one..."),
+ subnet_id)
port = self._create_snatport_for_subnet(context, tenant_id,
subnet_id,
ip_address=None)
network_info['port_id'] = port['id']
network_info['snat_ip'] = port['fixed_ips'][0]['ip_address']
- msg = _("SNAT port: %s") % repr(port)
- LOG.info(msg)
+ LOG.info(_LI("SNAT port: %r"), port)
def _remove_snatport_for_subnet_if_not_used(self, context, tenant_id,
subnet_id):
#No pools left on the old subnet.
#We can remove the SNAT port/ipaddress
self._remove_snatport_for_subnet(context, tenant_id, subnet_id)
- msg = _("Removing SNAT port for subnet %s "
- "as this is the last pool using it...") % subnet_id
- LOG.info(msg)
+ LOG.info(_LI("Removing SNAT port for subnet %s "
+ "as this is the last pool using it..."),
+ subnet_id)
from neutron.db.loadbalancer import loadbalancer_db as lb_db
from neutron.extensions import loadbalancer
from neutron.openstack.common import excutils
+from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
self.l4_action_name, ext_vip, context)
finally:
- LOG.debug(_('vip: %(vip)s, '
- 'extended_vip: %(extended_vip)s, '
- 'service_name: %(service_name)s, '),
+ LOG.debug('vip: %(vip)s, extended_vip: %(extended_vip)s, '
+ 'service_name: %(service_name)s, ',
log_info)
def update_vip(self, context, old_vip, vip):
ports = self.plugin._core_plugin.get_ports(context,
filters=port_filter)
if ports:
- LOG.debug(_('Retrieved pip nport: %(port)r for '
- 'vip: %(vip)s'), {'port': ports[0],
- 'vip': vip['id']})
+ LOG.debug('Retrieved pip nport: %(port)r for vip: %(vip)s',
+ {'port': ports[0], 'vip': vip['id']})
delete_pip_nport_function = self._get_delete_pip_nports(
context, ports)
else:
delete_pip_nport_function = None
- LOG.debug(_('Found no pip nports associated with '
- 'vip: %s'), vip['id'])
+ LOG.debug('Found no pip nports associated with vip: %s',
+ vip['id'])
# removing the WF will cause deletion of the configuration from the
# device
except r_exc.RESTRequestFailure:
pool_id = ext_vip['pool_id']
- LOG.exception(_('Failed to remove workflow %s. '
- 'Going to set vip to ERROR status'),
+ LOG.exception(_LE('Failed to remove workflow %s. '
+ 'Going to set vip to ERROR status'),
pool_id)
self.plugin.update_status(context, lb_db.Vip, ids['vip'],
try:
self.plugin._core_plugin.delete_port(
context, port['id'])
- LOG.debug(_('pip nport id: %s'), port['id'])
+ LOG.debug('pip nport id: %s', port['id'])
except Exception as exception:
# stop exception propagation, nport may have
# been deleted by other means
- LOG.warning(_('pip nport delete failed: %r'),
+ LOG.warning(_LW('pip nport delete failed: %r'),
exception)
return _delete_pip_nports
debug_params = {"hm_id": health_monitor['id'], "pool_id": pool_id,
"delete": delete, "vip_id": vip_id}
- LOG.debug(_('_handle_pool_health_monitor. health_monitor = %(hm_id)s '
- 'pool_id = %(pool_id)s delete = %(delete)s '
- 'vip_id = %(vip_id)s'),
+ LOG.debug('_handle_pool_health_monitor. health_monitor = %(hm_id)s '
+ 'pool_id = %(pool_id)s delete = %(delete)s '
+ 'vip_id = %(vip_id)s',
debug_params)
if vip_id:
def _start_completion_handling_thread(self):
if not self.completion_handler_started:
- LOG.info(_('Starting operation completion handling thread'))
+ LOG.info(_LI('Starting operation completion handling thread'))
self.completion_handler.start()
self.completion_handler_started = True
response = _rest_wrapper(self.rest_client.call('POST', resource,
{'parameters': params},
TEMPLATE_HEADER))
- LOG.debug(_('_update_workflow response: %s '), response)
+ LOG.debug('_update_workflow response: %s ', response)
if action not in self.actions_to_skip:
ids = params.pop('__ids__', None)
lbaas_entity,
entity_id,
delete=delete)
- LOG.debug(_('Pushing operation %s to the queue'), oper)
+ LOG.debug('Pushing operation %s to the queue', oper)
self._start_completion_handling_thread()
self.queue.put_nowait(oper)
def _remove_workflow(self, ids, context, post_remove_function):
wf_name = ids['pool']
- LOG.debug(_('Remove the workflow %s') % wf_name)
+        LOG.debug('Remove the workflow %s', wf_name)
resource = '/api/workflow/%s' % (wf_name)
rest_return = self.rest_client.call('DELETE', resource, None, None)
response = _rest_wrapper(rest_return, [204, 202, 404])
if post_remove_function:
try:
post_remove_function(True)
- LOG.debug(_('Post-remove workflow function '
- '%r completed'), post_remove_function)
+ LOG.debug('Post-remove workflow function %r completed',
+ post_remove_function)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_('Post-remove workflow function '
- '%r failed'), post_remove_function)
+ LOG.exception(_LE('Post-remove workflow function '
+ '%r failed'), post_remove_function)
self.plugin._delete_db_vip(context, ids['vip'])
else:
oper = OperationAttributes(
ids['vip'],
delete=True,
post_op_function=post_remove_function)
- LOG.debug(_('Pushing operation %s to the queue'), oper)
+ LOG.debug('Pushing operation %s to the queue', oper)
self._start_completion_handling_thread()
self.queue.put_nowait(oper)
resource,
params,
TEMPLATE_HEADER))
- LOG.debug(_('create_workflow response: %s'), str(response))
+ LOG.debug('create_workflow response: %s', response)
def _verify_workflow_templates(self):
"""Verify the existence of workflows on vDirect server."""
'sec_server': self.secondary_server,
'port': self.port,
'ssl': self.ssl}
- LOG.debug(_('vDirectRESTClient:init server=%(server)s, '
- 'secondary server=%(sec_server)s, '
- 'port=%(port)d, '
- 'ssl=%(ssl)r'), debug_params)
+ LOG.debug('vDirectRESTClient:init server=%(server)s, '
+ 'secondary server=%(sec_server)s, '
+ 'port=%(port)d, ssl=%(ssl)r',
+ debug_params)
def _flip_servers(self):
- LOG.warning(_('Fliping servers. Current is: %(server)s, '
- 'switching to %(secondary)s'),
+        LOG.warning(_LW('Flipping servers. Current is: %(server)s, '
+ 'switching to %(secondary)s'),
{'server': self.server,
'secondary': self.secondary_server})
self.server, self.secondary_server = self.secondary_server, self.server
headers, binary)
return resp
else:
- LOG.exception(_('REST client is not able to recover '
- 'since only one vDirect server is '
+ LOG.exception(_LE('REST client is not able to recover '
+ 'since only one vDirect server is '
'configured.'))
return -1, None, None, None
def call(self, action, resource, data, headers, binary=False):
resp = self._call(action, resource, data, headers, binary)
if resp[RESP_STATUS] == -1:
- LOG.warning(_('vDirect server is not responding (%s).'),
+ LOG.warning(_LW('vDirect server is not responding (%s).'),
self.server)
return self._recover(action, resource, data, headers, binary)
elif resp[RESP_STATUS] in (301, 307):
- LOG.warning(_('vDirect server is not active (%s).'),
+ LOG.warning(_LW('vDirect server is not active (%s).'),
self.server)
return self._recover(action, resource, data, headers, binary)
else:
conn = httplib.HTTPSConnection(
self.server, self.port, timeout=self.timeout)
if conn is None:
- LOG.error(_('vdirectRESTClient: Could not establish HTTPS '
+ LOG.error(_LE('vdirectRESTClient: Could not establish HTTPS '
'connection'))
return 0, None, None, None
else:
conn = httplib.HTTPConnection(
self.server, self.port, timeout=self.timeout)
if conn is None:
- LOG.error(_('vdirectRESTClient: Could not establish HTTP '
+ LOG.error(_LE('vdirectRESTClient: Could not establish HTTP '
'connection'))
return 0, None, None, None
ret = (response.status, response.reason, respstr, respdata)
except Exception as e:
log_dict = {'action': action, 'e': e}
- LOG.error(_('vdirectRESTClient: %(action)s failure, %(e)r'),
+ LOG.error(_LE('vdirectRESTClient: %(action)s failure, %(e)r'),
log_dict)
ret = -1, None, None, None
conn.close()
debug_data = {'oper': oper,
'sec_to_completion': sec_to_completion,
'success': success}
- LOG.debug(_('Operation %(oper)s is completed after '
+ LOG.debug('Operation %(oper)s is completed after '
'%(sec_to_completion)d sec '
- 'with success status: %(success)s :'),
+ 'with success status: %(success)s :',
debug_data)
db_status = None
if not success:
else:
msg = "unknown"
error_params = {"operation": oper, "msg": msg}
- LOG.error(_('Operation %(operation)s failed. Reason: %(msg)s'),
+ LOG.error(_LE('Operation %(operation)s failed. Reason: '
+ '%(msg)s'),
error_params)
db_status = constants.ERROR
else:
if self.opers_to_handle_before_rest <= 0:
self.opers_to_handle_before_rest = self.queue.qsize() + 1
- LOG.debug('Operation consumed from the queue: ' +
- str(oper))
+ LOG.debug('Operation consumed from the queue: %s', oper)
# check the status - if oper is done: update the db ,
# else push the oper again to the queue
if not self.handle_operation_completion(oper):
- LOG.debug(_('Operation %s is not completed yet..') % oper)
+ LOG.debug('Operation %s is not completed yet..', oper)
# Not completed - push to the queue again
self.queue.put_nowait(oper)
log_data = {'func': oper.post_op_function, 'oper': oper}
try:
oper.post_op_function(success)
- LOG.debug(_('Post-operation function '
- '%(func)r completed '
- 'after operation %(oper)r'),
+ LOG.debug('Post-operation function %(func)r completed '
+ 'after operation %(oper)r',
log_data)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_('Post-operation function '
- '%(func)r failed '
- 'after operation %(oper)r'),
+ LOG.exception(_LE('Post-operation function %(func)r '
+ 'failed after operation %(oper)r'),
log_data)
ctx = context.get_admin_context(load_admin_roles=False)
- LOG.debug(_('_update: %s '), oper)
+ LOG.debug('_update: %s ', oper)
if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
plugin.update_pool_health_monitor(ctx,
oper.entity_id,
def _remove_object_from_db(plugin, oper):
"""Remove a specific entity from db."""
- LOG.debug(_('_remove_object_from_db %s'), str(oper))
+ LOG.debug('_remove_object_from_db %s', oper)
ctx = context.get_admin_context(load_admin_roles=False)
return ids
trans_vip = {}
- LOG.debug('Vip graph to be translated: ' + str(extended_vip))
+ LOG.debug('Vip graph to be translated: %s', extended_vip)
for vip_property in VIP_PROPERTIES:
trans_vip['vip_' + vip_property] = extended_vip.get(
vip_property, TRANSLATION_DEFAULTS.get(vip_property))
trans_vip['__ids__'] = ids
if 'pip_address' in extended_vip:
trans_vip['pip_address'] = extended_vip['pip_address']
- LOG.debug('Translated Vip graph: ' + str(trans_vip))
+ LOG.debug('Translated Vip graph: %s', trans_vip)
return trans_vip
from neutron.db import servicetype_db as st_db
from neutron.extensions import loadbalancer
from neutron.openstack.common import excutils
+from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer import agent_scheduler
if pool['provider'] not in provider_names])
# resources are left without provider - stop the service
if lost_providers:
- msg = _("Delete associated loadbalancer pools before "
- "removing providers %s") % list(lost_providers)
- LOG.exception(msg)
+ LOG.exception(_LE("Delete associated loadbalancer pools before "
+ "removing providers %s"),
+ list(lost_providers))
raise SystemExit(1)
def _get_driver_for_provider(self, provider):
# that should not happen
# if it's still a case - something goes wrong
# log the error and mark the pool as ERROR
- LOG.error(_('Failed to delete pool %s, putting it in ERROR state'),
+ LOG.error(_LE('Failed to delete pool %s, putting it in ERROR '
+ 'state'),
id)
with excutils.save_and_reraise_exception():
self.update_status(context, ldb.Pool,