"neutron/scheduler",
"neutron/server",
"neutron/services",
+ "neutron/plugins/cisco",
"neutron/plugins/ml2",
"neutron/plugins/openvswitch",
"neutron/plugins/linuxbridge",
- "neutron/plugins/cisco"]
+ "neutron/plugins/vmware"]
return any([dir in filename for dir in dirs])
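# Aside: a minimal hedged sketch of how a directory list like the one above
# gates neutron's hacking checks (the function and message below are
# illustrative assumptions, not part of this change). Files outside the
# listed directories are skipped, so the i18n log-marker rules apply only
# to directories that have already been migrated:
def _example_directory_filter(filename):
    example_dirs = ["neutron/plugins/vmware"]  # abbreviated example list
    return any(d in filename for d in example_dirs)

def _example_check_untranslated_debug(logical_line, filename):
    if _example_directory_filter(filename) and "LOG.debug(_(" in logical_line:
        yield (0, "example: debug log messages should not be translated")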
from oslo.config import cfg
+from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.vmware import api_client
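# The convention this whole change applies, shown once as a self-contained
# sketch (the example logger below is illustrative; each real module uses
# its own LOG): debug messages become plain untranslated strings, higher
# severities use the level-specific markers imported above, and arguments
# are passed to the logger rather than pre-formatted, so interpolation
# stays lazy.
_example_log = logging.getLogger(__name__)
_example_log.debug("Not translated: %s", "value")
_example_log.info(_LI("Translated for info: %s"), "value")
_example_log.warn(_LW("Translated for warnings: %s"), "value")
_example_log.error(_LE("Translated for errors: %s"), "value")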
api_providers are configured.
'''
if not self._api_providers:
- LOG.warn(_("[%d] no API providers currently available."), rid)
+ LOG.warn(_LW("[%d] no API providers currently available."), rid)
return None
if self._conn_pool.empty():
- LOG.debug(_("[%d] Waiting to acquire API client connection."), rid)
+ LOG.debug("[%d] Waiting to acquire API client connection.", rid)
priority, conn = self._conn_pool.get()
now = time.time()
if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout:
- LOG.info(_("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
- "seconds; reconnecting."),
+ LOG.info(_LI("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
+ "seconds; reconnecting."),
{'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
'sec': now - conn.last_used})
conn = self._create_connection(*self._conn_params(conn))
conn.last_used = now
conn.priority = priority # stash current priority for release
qsize = self._conn_pool.qsize()
- LOG.debug(_("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
- "connection(s) available."),
+ LOG.debug("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
+ "connection(s) available.",
{'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
'qsize': qsize})
if auto_login and self.auth_cookie(conn) is None:
'''
conn_params = self._conn_params(http_conn)
if self._conn_params(http_conn) not in self._api_providers:
- LOG.debug(_("[%(rid)d] Released connection %(conn)s is not an "
- "API provider for the cluster"),
+ LOG.debug("[%(rid)d] Released connection %(conn)s is not an "
+ "API provider for the cluster",
{'rid': rid,
'conn': api_client.ctrl_conn_to_str(http_conn)})
return
priority = http_conn.priority
if bad_state:
# Reconnect to provider.
- LOG.warn(_("[%(rid)d] Connection returned in bad state, "
- "reconnecting to %(conn)s"),
+ LOG.warn(_LW("[%(rid)d] Connection returned in bad state, "
+ "reconnecting to %(conn)s"),
{'rid': rid,
'conn': api_client.ctrl_conn_to_str(http_conn)})
http_conn = self._create_connection(*self._conn_params(http_conn))
self._next_conn_priority += 1
self._conn_pool.put((priority, http_conn))
- LOG.debug(_("[%(rid)d] Released connection %(conn)s. %(qsize)d "
- "connection(s) available."),
+ LOG.debug("[%(rid)d] Released connection %(conn)s. %(qsize)d "
+ "connection(s) available.",
{'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn),
'qsize': self._conn_pool.qsize()})
data = self._get_provider_data(conn)
if data is None:
- LOG.error(_("Login request for an invalid connection: '%s'"),
+ LOG.error(_LE("Login request for an invalid connection: '%s'"),
api_client.ctrl_conn_to_str(conn))
return
provider_sem = data[0]
finally:
provider_sem.release()
else:
- LOG.debug(_("Waiting for auth to complete"))
+ LOG.debug("Waiting for auth to complete")
# Wait until we can acquire then release
provider_sem.acquire(blocking=True)
provider_sem.release()
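# The acquire/release pair above is a completion gate: the thread doing
# the login holds the provider semaphore for the duration, and late
# arrivals block on acquire() then immediately release(), resuming only
# once auth has finished. A hedged standalone sketch of that pattern
# (the semaphore argument stands in for the real provider semaphore):
def _example_wait_for_login(sem, do_login, is_first):
    if is_first:
        sem.acquire(blocking=True)
        try:
            do_login()
        finally:
            sem.release()
    else:
        # Parked until the in-flight login releases the semaphore.
        sem.acquire(blocking=True)
        sem.release()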
"""
if (not isinstance(conn_or_conn_params, tuple) and
not isinstance(conn_or_conn_params, httplib.HTTPConnection)):
- LOG.debug(_("Invalid conn_params value: '%s'"),
+ LOG.debug("Invalid conn_params value: '%s'",
str(conn_or_conn_params))
return conn_or_conn_params
if isinstance(conn_or_conn_params, httplib.HTTPConnection):
import httplib
+from neutron.i18n import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import base
from neutron.plugins.vmware.api_client import eventlet_client
retries=self._retries, redirects=self._redirects)
g.start()
response = g.join()
- LOG.debug(_('Request returns "%s"'), response)
+ LOG.debug('Request returns "%s"', response)
# response is a modified HTTPResponse object or None.
# response.read() will not work on response as the underlying library
if response is None:
# Timeout.
- LOG.error(_('Request timed out: %(method)s to %(url)s'),
+ LOG.error(_LE('Request timed out: %(method)s to %(url)s'),
{'method': method, 'url': url})
raise exception.RequestTimeout()
# Fail-fast: Check for exception conditions and raise the
# appropriate exceptions for known error codes.
if status in exception.ERROR_MAPPINGS:
- LOG.error(_("Received error code: %s"), status)
- LOG.error(_("Server Error Message: %s"), response.body)
+ LOG.error(_LE("Received error code: %s"), status)
+ LOG.error(_LE("Server Error Message: %s"), response.body)
exception.ERROR_MAPPINGS[status](response)
# Continue processing for non-error condition.
if (status != httplib.OK and status != httplib.CREATED
and status != httplib.NO_CONTENT):
- LOG.error(_("%(method)s to %(url)s, unexpected response code: "
- "%(status)d (content = '%(body)s')"),
+ LOG.error(_LE("%(method)s to %(url)s, unexpected response code: "
+ "%(status)d (content = '%(body)s')"),
{'method': method, 'url': url,
'status': response.status, 'body': response.body})
return None
# one of the server that responds.
self.request('GET', '/ws.v1/control-cluster/node')
if not self._version:
- LOG.error(_('Unable to determine NSX version. '
- 'Plugin might not work as expected.'))
+ LOG.error(_LE('Unable to determine NSX version. '
+ 'Plugin might not work as expected.'))
return self._version
import eventlet
eventlet.monkey_patch()
+from neutron.i18n import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import base
from neutron.plugins.vmware.api_client import eventlet_request
ret = g.join()
if ret:
if isinstance(ret, Exception):
- LOG.error(_('Login error "%s"'), ret)
+ LOG.error(_LE('Login error "%s"'), ret)
raise ret
cookie = ret.getheader("Set-Cookie")
if cookie:
- LOG.debug(_("Saving new authentication cookie '%s'"), cookie)
+ LOG.debug("Saving new authentication cookie '%s'", cookie)
return cookie
import eventlet
from oslo.serialization import jsonutils
+from neutron.i18n import _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import request
with eventlet.timeout.Timeout(self._request_timeout, False):
return self._handle_request()
- LOG.info(_('[%d] Request timeout.'), self._rid())
+ LOG.info(_LI('[%d] Request timeout.'), self._rid())
self._request_error = Exception(_('Request timeout'))
return None
else:
continue
# else fall through to return the error code
- LOG.debug(_("[%(rid)d] Completed request '%(method)s %(url)s'"
- ": %(status)s"),
+ LOG.debug("[%(rid)d] Completed request '%(method)s %(url)s'"
+ ": %(status)s",
{'rid': self._rid(), 'method': self._method,
'url': self._url, 'status': req.status})
self._request_error = None
response = req
else:
- LOG.info(_('[%(rid)d] Error while handling request: %(req)s'),
+ LOG.info(_LI('[%(rid)d] Error while handling request: '
+ '%(req)s'),
{'rid': self._rid(), 'req': req})
self._request_error = req
response = None
ret.append(_provider_from_listen_addr(addr))
return ret
except Exception as e:
- LOG.warn(_("[%(rid)d] Failed to parse API provider: %(e)s"),
+ LOG.warn(_LW("[%(rid)d] Failed to parse API provider: %(e)s"),
{'rid': self._rid(), 'e': e})
# intentionally fall through
return None
import six
import six.moves.urllib.parse as urlparse
+from neutron.i18n import _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.vmware import api_client
return error
url = self._url
- LOG.debug(_("[%(rid)d] Issuing - request url: %(conn)s "
- "body: %(body)s"),
+ LOG.debug("[%(rid)d] Issuing - request url: %(conn)s "
+ "body: %(body)s",
{'rid': self._rid(), 'conn': self._request_str(conn, url),
'body': self._body})
issued_time = time.time()
gen = self._api_client.config_gen
if gen:
headers["X-Nvp-Wait-For-Config-Generation"] = gen
- LOG.debug(_("Setting X-Nvp-Wait-For-Config-Generation "
- "request header: '%s'"), gen)
+ LOG.debug("Setting X-Nvp-Wait-For-Config-Generation "
+ "request header: '%s'", gen)
try:
conn.request(self._method, url, self._body, headers)
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.warn(_("[%(rid)d] Exception issuing request: "
- "%(e)s"),
+ LOG.warn(_LW("[%(rid)d] Exception issuing request: "
+ "%(e)s"),
{'rid': self._rid(), 'e': e})
response = conn.getresponse()
response.body = response.read()
response.headers = response.getheaders()
elapsed_time = time.time() - issued_time
- LOG.debug(_("[%(rid)d] Completed request '%(conn)s': "
- "%(status)s (%(elapsed)s seconds)"),
+ LOG.debug("[%(rid)d] Completed request '%(conn)s': "
+ "%(status)s (%(elapsed)s seconds)",
{'rid': self._rid(),
'conn': self._request_str(conn, url),
'status': response.status,
new_gen = response.getheader('X-Nvp-Config-Generation', None)
if new_gen:
- LOG.debug(_("Reading X-Nvp-config-Generation response "
- "header: '%s'"), new_gen)
+ LOG.debug("Reading X-Nvp-config-Generation response "
+ "header: '%s'", new_gen)
if (self._api_client.config_gen is None or
self._api_client.config_gen < int(new_gen)):
self._api_client.config_gen = int(new_gen)
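# For reference, the config-generation handshake above isolated into a
# hedged sketch (class and method names are illustrative): the client
# replays the highest generation it has observed so the backend can delay
# reads until the client's earlier writes are visible, and the stored
# generation only ever moves forward.
class _ExampleConfigGenTracker(object):
    def __init__(self):
        self.config_gen = None

    def outgoing_headers(self):
        headers = {}
        if self.config_gen:
            headers["X-Nvp-Wait-For-Config-Generation"] = self.config_gen
        return headers

    def record_response(self, new_gen):
        if new_gen and (self.config_gen is None or
                        self.config_gen < int(new_gen)):
            self.config_gen = int(new_gen)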
httplib.TEMPORARY_REDIRECT]:
break
elif redirects >= self._redirects:
- LOG.info(_("[%d] Maximum redirects exceeded, aborting "
- "request"), self._rid())
+ LOG.info(_LI("[%d] Maximum redirects exceeded, aborting "
+ "request"), self._rid())
break
redirects += 1
if url is None:
response.status = httplib.INTERNAL_SERVER_ERROR
break
- LOG.info(_("[%(rid)d] Redirecting request to: %(conn)s"),
+ LOG.info(_LI("[%(rid)d] Redirecting request to: %(conn)s"),
{'rid': self._rid(),
'conn': self._request_str(conn, url)})
# yield here, just in case we are not out of the loop yet
# queue.
# note: 'or' here; the original 'and' made this check unsatisfiable (500 > 501)
if (response.status == httplib.INTERNAL_SERVER_ERROR or
response.status > httplib.NOT_IMPLEMENTED):
- LOG.warn(_("[%(rid)d] Request '%(method)s %(url)s' "
- "received: %(status)s"),
+ LOG.warn(_LW("[%(rid)d] Request '%(method)s %(url)s' "
+ "received: %(status)s"),
{'rid': self._rid(), 'method': self._method,
'url': self._url, 'status': response.status})
raise Exception(_('Server error return: %s') % response.status)
msg = unicode(e)
if response is None:
elapsed_time = time.time() - issued_time
- LOG.warn(_("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
- "(%(elapsed)s seconds)"),
+ LOG.warn(_LW("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
+ "(%(elapsed)s seconds)"),
{'rid': self._rid(), 'conn': self._request_str(conn, url),
'msg': msg, 'elapsed': elapsed_time})
self._request_error = e
url = value
break
if not url:
- LOG.warn(_("[%d] Received redirect status without location header"
- " field"), self._rid())
+ LOG.warn(_LW("[%d] Received redirect status without location "
+ "header field"), self._rid())
return (conn, None)
# Accept location with the following format:
# 1. /path, redirect to same node
url = result.path
return (conn, url) # case 1
else:
- LOG.warn(_("[%(rid)d] Received invalid redirect location: "
- "'%(url)s'"), {'rid': self._rid(), 'url': url})
+ LOG.warn(_LW("[%(rid)d] Received invalid redirect location: "
+ "'%(url)s'"), {'rid': self._rid(), 'url': url})
return (conn, None) # case 3
elif result.scheme not in ["http", "https"] or not result.hostname:
- LOG.warn(_("[%(rid)d] Received malformed redirect "
- "location: %(url)s"), {'rid': self._rid(), 'url': url})
+ LOG.warn(_LW("[%(rid)d] Received malformed redirect "
+ "location: %(url)s"),
+ {'rid': self._rid(), 'url': url})
return (conn, None) # case 3
# case 2, redirect location includes a scheme
# so setup a new connection and authenticate
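# The three redirect cases handled above, condensed into a hedged sketch
# (the function name is an assumption; urlparse is six.moves.urllib.parse
# as imported in this module): case 1 is a bare path reused on the same
# node, case 2 a full http(s) URL that needs a new connection and
# re-authentication, case 3 anything malformed, which aborts the redirect.
def _example_classify_redirect(url):
    result = urlparse.urlparse(url)
    if not result.scheme and url.startswith('/'):
        return 1  # case 1: same-node path redirect
    elif result.scheme in ("http", "https") and result.hostname:
        return 2  # case 2: new connection plus re-auth
    return 3      # case 3: invalid or malformed location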
# under the License.
#
+from neutron.i18n import _LW
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
if header_name == 'server':
return Version(header_value.split('/')[1])
except IndexError:
- LOG.warning(_("Unable to fetch NSX version from response "
- "headers :%s"), headers)
+ LOG.warning(_LW("Unable to fetch NSX version from response "
+ "headers :%s"), headers)
class Version(object):
from neutron.common import exceptions as n_exc
from neutron.extensions import multiprovidernet as mpnet
from neutron.extensions import providernet as pnet
+from neutron.i18n import _LW
from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import client
from neutron.plugins.vmware.api_client import exception as api_exc
# more than once for each network in Neutron's lifetime
nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
if not nsx_switches:
- LOG.warn(_("Unable to find NSX switches for Neutron network %s"),
+ LOG.warn(_LW("Unable to find NSX switches for Neutron network %s"),
neutron_network_id)
return
nsx_switch_ids = []
# NOTE(salv-orlando): Not handling the case where more than one
# port is found with the same neutron port tag
if not nsx_ports:
- LOG.warn(_("Unable to find NSX port for Neutron port %s"),
+ LOG.warn(_LW("Unable to find NSX port for Neutron port %s"),
neutron_port_id)
# This method is supposed to return a tuple
return None, None
# NOTE(salv-orlando): Not handling the case where more than one
# security profile is found with the same neutron port tag
if not nsx_sec_profiles:
- LOG.warn(_("Unable to find NSX security profile for Neutron "
- "security group %s"), neutron_id)
+ LOG.warn(_LW("Unable to find NSX security profile for Neutron "
+ "security group %s"), neutron_id)
return
elif len(nsx_sec_profiles) > 1:
- LOG.warn(_("Multiple NSX security profiles found for Neutron "
- "security group %s"), neutron_id)
+ LOG.warn(_LW("Multiple NSX security profiles found for Neutron "
+ "security group %s"), neutron_id)
nsx_sec_profile = nsx_sec_profiles[0]
nsx_id = nsx_sec_profile['uuid']
with session.begin(subtransactions=True):
# NOTE(salv-orlando): Not handling the case where more than one
# port is found with the same neutron port tag
if not nsx_routers:
- LOG.warn(_("Unable to find NSX router for Neutron router %s"),
+ LOG.warn(_LW("Unable to find NSX router for Neutron router %s"),
neutron_router_id)
return
nsx_router = nsx_routers[0]
except api_exc.NsxApiException:
# Do not make a NSX API exception fatal
if tenant_id:
- LOG.warn(_("Unable to retrieve operational status for gateway "
- "devices belonging to tenant: %s"), tenant_id)
+ LOG.warn(_LW("Unable to retrieve operational status for gateway "
+ "devices belonging to tenant: %s"), tenant_id)
else:
- LOG.warn(_("Unable to retrieve operational status for "
- "gateway devices"))
+ LOG.warn(_LW("Unable to retrieve operational status for "
+ "gateway devices"))
def _convert_bindings_to_nsx_transport_zones(bindings):
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.extensions import l3
+from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log
from neutron.openstack.common import loopingcall
from neutron.plugins.vmware.api_client import exception as api_exc
# TODO(salv-orlando): We should be catching
# api_exc.ResourceNotFound here
# The logical switch was not found
- LOG.warning(_("Logical switch for neutron network %s not "
- "found on NSX."), neutron_network_data['id'])
+ LOG.warning(_LW("Logical switch for neutron network %s not "
+ "found on NSX."), neutron_network_data['id'])
lswitches = []
else:
for lswitch in lswitches:
pass
else:
network.status = status
- LOG.debug(_("Updating status for neutron resource %(q_id)s to:"
- " %(status)s"),
+ LOG.debug("Updating status for neutron resource %(q_id)s to:"
+ " %(status)s",
{'q_id': neutron_network_data['id'],
'status': status})
# NOTE(salv-orlando): We should be catching
# api_exc.ResourceNotFound here
# The logical router was not found
- LOG.warning(_("Logical router for neutron router %s not "
- "found on NSX."), neutron_router_data['id'])
+ LOG.warning(_LW("Logical router for neutron router %s not "
+ "found on NSX."), neutron_router_data['id'])
if lrouter:
# Update the cache
self._nsx_cache.update_lrouter(lrouter)
pass
else:
router.status = status
- LOG.debug(_("Updating status for neutron resource %(q_id)s to:"
- " %(status)s"),
+ LOG.debug("Updating status for neutron resource %(q_id)s to:"
+ " %(status)s",
{'q_id': neutron_router_data['id'],
'status': status})
neutron_router_mappings[neutron_router_id] = (
self._nsx_cache[lr_uuid])
else:
- LOG.warn(_("Unable to find Neutron router id for "
- "NSX logical router: %s"), lr_uuid)
+ LOG.warn(_LW("Unable to find Neutron router id for "
+ "NSX logical router: %s"), lr_uuid)
# Fetch neutron routers from database
filters = ({} if scan_missing else
{'id': neutron_router_mappings.keys()})
# api_exc.ResourceNotFound here instead
# of PortNotFoundOnNetwork when the id exists but
# the logical switch port was not found
- LOG.warning(_("Logical switch port for neutron port %s "
- "not found on NSX."), neutron_port_data['id'])
+ LOG.warning(_LW("Logical switch port for neutron port %s "
+ "not found on NSX."), neutron_port_data['id'])
lswitchport = None
else:
# If lswitchport is not None, update the cache.
pass
else:
port.status = status
- LOG.debug(_("Updating status for neutron resource %(q_id)s to:"
- " %(status)s"),
+ LOG.debug("Updating status for neutron resource %(q_id)s to:"
+ " %(status)s",
{'q_id': neutron_port_data['id'],
'status': status})
# be emitted.
num_requests = page_size / (MAX_PAGE_SIZE + 1) + 1
if num_requests > 1:
- LOG.warn(_("Requested page size is %(cur_chunk_size)d."
- "It might be necessary to do %(num_requests)d "
- "round-trips to NSX for fetching data. Please "
- "tune sync parameters to ensure chunk size "
- "is less than %(max_page_size)d"),
+ LOG.warn(_LW("Requested page size is %(cur_chunk_size)d. "
+ "It might be necessary to do %(num_requests)d "
+ "round-trips to NSX for fetching data. Please "
+ "tune sync parameters to ensure chunk size "
+ "is less than %(max_page_size)d"),
{'cur_chunk_size': page_size,
'num_requests': num_requests,
'max_page_size': MAX_PAGE_SIZE})
def _fetch_nsx_data_chunk(self, sp):
base_chunk_size = sp.chunk_size
chunk_size = base_chunk_size + sp.extra_chunk_size
- LOG.info(_("Fetching up to %s resources "
- "from NSX backend"), chunk_size)
+ LOG.info(_LI("Fetching up to %s resources "
+ "from NSX backend"), chunk_size)
fetched = ls_count = lr_count = lp_count = 0
lswitches = lrouters = lswitchports = []
if sp.ls_cursor or sp.ls_cursor == 'start':
# No cursors were provided. Then it must be possible to
# calculate the total amount of data to fetch
sp.total_size = ls_count + lr_count + lp_count
- LOG.debug(_("Total data size: %d"), sp.total_size)
+ LOG.debug("Total data size: %d", sp.total_size)
sp.chunk_size = self._get_chunk_size(sp)
# Calculate chunk size adjustment
sp.extra_chunk_size = sp.chunk_size - base_chunk_size
- LOG.debug(_("Fetched %(num_lswitches)d logical switches, "
- "%(num_lswitchports)d logical switch ports,"
- "%(num_lrouters)d logical routers"),
+ LOG.debug("Fetched %(num_lswitches)d logical switches, "
+ "%(num_lswitchports)d logical switch ports,"
+ "%(num_lrouters)d logical routers",
{'num_lswitches': len(lswitches),
'num_lswitchports': len(lswitchports),
'num_lrouters': len(lrouters)})
# Reset page cursor variables if necessary
if sp.current_chunk == 0:
sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start'
- LOG.info(_("Running state synchronization task. Chunk: %s"),
+ LOG.info(_LI("Running state synchronization task. Chunk: %s"),
sp.current_chunk)
# Fetch chunk_size data from NSX
try:
sleep_interval = self._sync_backoff
# Cap max back off to 64 seconds
self._sync_backoff = min(self._sync_backoff * 2, 64)
- LOG.exception(_("An error occurred while communicating with "
- "NSX backend. Will retry synchronization "
- "in %d seconds"), sleep_interval)
+ LOG.exception(_LE("An error occurred while communicating with "
+ "NSX backend. Will retry synchronization "
+ "in %d seconds"), sleep_interval)
return sleep_interval
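# The retry delay above is plain exponential back-off with a ceiling, per
# the "Cap max back off to 64 seconds" comment; a self-contained sketch of
# the same schedule (the 1-second starting value is an assumption):
def _example_backoff_delays(start=1, cap=64):
    delay = start
    while True:
        yield delay
        delay = min(delay * 2, cap)
# Successive failures then sleep 1, 2, 4, 8, 16, 32, 64, 64, ... seconds.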
- LOG.debug(_("Time elapsed querying NSX: %s"),
+ LOG.debug("Time elapsed querying NSX: %s",
timeutils.utcnow() - start)
if sp.total_size:
num_chunks = ((sp.total_size / sp.chunk_size) +
(sp.total_size % sp.chunk_size != 0))
else:
num_chunks = 1
- LOG.debug(_("Number of chunks: %d"), num_chunks)
+ LOG.debug("Number of chunks: %d", num_chunks)
# Find objects which have changed on NSX side and need
# to be synchronized
LOG.debug("Processing NSX cache for updated objects")
changed_only=not scan_missing)
lp_uuids = self._nsx_cache.get_lswitchports(
changed_only=not scan_missing)
- LOG.debug(_("Time elapsed hashing data: %s"),
+ LOG.debug("Time elapsed hashing data: %s",
timeutils.utcnow() - start)
# Get an admin context
ctx = context.get_admin_context()
self._synchronize_lswitchports(ctx, lp_uuids,
scan_missing=scan_missing)
# Increase chunk counter
- LOG.info(_("Synchronization for chunk %(chunk_num)d of "
- "%(total_chunks)d performed"),
+ LOG.info(_LI("Synchronization for chunk %(chunk_num)d of "
+ "%(total_chunks)d performed"),
{'chunk_num': sp.current_chunk + 1,
'total_chunks': num_chunks})
sp.current_chunk = (sp.current_chunk + 1) % num_chunks
sp.init_sync_performed = True
# Add additional random delay
added_delay = random.randint(0, self._max_rand_delay)
- LOG.debug(_("Time elapsed at end of sync: %s"),
+ LOG.debug("Time elapsed at end of sync: %s",
timeutils.utcnow() - start)
return self._sync_interval / num_chunks + added_delay
def check_and_truncate(display_name):
if (attributes.is_attr_set(display_name) and
len(display_name) > MAX_DISPLAY_NAME_LEN):
- LOG.debug(_("Specified name:'%s' exceeds maximum length. "
- "It will be truncated on NSX"), display_name)
+ LOG.debug("Specified name:'%s' exceeds maximum length. "
+ "It will be truncated on NSX", display_name)
return display_name[:MAX_DISPLAY_NAME_LEN]
return display_name or ''
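# Usage sketch for check_and_truncate (MAX_DISPLAY_NAME_LEN is the plugin's
# limit; its exact value is not shown in this diff): over-long names are
# logged and truncated rather than rejected, and unset names come back as
# the empty string.
#   check_and_truncate('x' * 1000)  ->  'x' * MAX_DISPLAY_NAME_LEN
#   check_and_truncate(None)        ->  ''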
# this should not occur whilst a mapping already exists
current = get_nsx_switch_and_port_id(session, neutron_id)
if current[1] == nsx_port_id:
- LOG.debug(_("Port mapping for %s already available"),
+ LOG.debug("Port mapping for %s already available",
neutron_id)
ctxt.reraise = False
except db_exc.DBError:
one())
return mapping['nsx_switch_id'], mapping['nsx_port_id']
except exc.NoResultFound:
- LOG.debug(_("NSX identifiers for neutron port %s not yet "
- "stored in Neutron DB"), neutron_id)
+ LOG.debug("NSX identifiers for neutron port %s not yet "
+ "stored in Neutron DB", neutron_id)
return None, None
filter_by(neutron_id=neutron_id).one())
return mapping['nsx_id']
except exc.NoResultFound:
- LOG.debug(_("NSX identifiers for neutron router %s not yet "
- "stored in Neutron DB"), neutron_id)
+ LOG.debug("NSX identifiers for neutron router %s not yet "
+ "stored in Neutron DB", neutron_id)
def get_nsx_security_group_id(session, neutron_id):
one())
return mapping['nsx_id']
except exc.NoResultFound:
- LOG.debug(_("NSX identifiers for neutron security group %s not yet "
- "stored in Neutron DB"), neutron_id)
+ LOG.debug("NSX identifiers for neutron security group %s not yet "
+ "stored in Neutron DB", neutron_id)
return None
gw_db.devices.extend([NetworkGatewayDeviceReference(**device)
for device in gw_data['devices']])
context.session.add(gw_db)
- LOG.debug(_("Created network gateway with id:%s"), gw_db['id'])
+ LOG.debug("Created network gateway with id:%s", gw_db['id'])
return self._make_network_gateway_dict(gw_db)
def update_network_gateway(self, context, id, network_gateway):
# Ensure there is something to update before doing it
if any([gw_db[k] != gw_data[k] for k in gw_data]):
gw_db.update(gw_data)
- LOG.debug(_("Updated network gateway with id:%s"), id)
+ LOG.debug("Updated network gateway with id:%s", id)
return self._make_network_gateway_dict(gw_db)
def get_network_gateway(self, context, id, fields=None):
if gw_db.default:
raise NetworkGatewayUnchangeable(gateway_id=id)
context.session.delete(gw_db)
- LOG.debug(_("Network gateway '%s' was destroyed."), id)
+ LOG.debug("Network gateway '%s' was destroyed.", id)
def get_network_gateways(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
def connect_network(self, context, network_gateway_id,
network_mapping_info):
network_id = self._validate_network_mapping_info(network_mapping_info)
- LOG.debug(_("Connecting network '%(network_id)s' to gateway "
- "'%(network_gateway_id)s'"),
+ LOG.debug("Connecting network '%(network_id)s' to gateway "
+ "'%(network_gateway_id)s'",
{'network_id': network_id,
'network_gateway_id': network_gateway_id})
with context.session.begin(subtransactions=True):
LOG.error(err_msg)
raise exceptions.InvalidInput(error_message=err_msg)
port_id = port['id']
- LOG.debug(_("Gateway port for '%(network_gateway_id)s' "
- "created on network '%(network_id)s':%(port_id)s"),
+ LOG.debug("Gateway port for '%(network_gateway_id)s' "
+ "created on network '%(network_id)s':%(port_id)s",
{'network_gateway_id': network_gateway_id,
'network_id': network_id,
'port_id': port_id})
self._delete_ip_allocation(context, network_id,
fixed_ip['subnet_id'],
fixed_ip['ip_address'])
- LOG.debug(_("Ensured no Ip addresses are configured on port %s"),
+ LOG.debug("Ensured no Ip addresses are configured on port %s",
port_id)
return {'connection_info':
{'network_gateway_id': network_gateway_id,
def disconnect_network(self, context, network_gateway_id,
network_mapping_info):
network_id = self._validate_network_mapping_info(network_mapping_info)
- LOG.debug(_("Disconnecting network '%(network_id)s' from gateway "
- "'%(network_gateway_id)s'"),
+ LOG.debug("Disconnecting network '%(network_id)s' from gateway "
+ "'%(network_gateway_id)s'",
{'network_id': network_id,
'network_gateway_id': network_gateway_id})
with context.session.begin(subtransactions=True):
connector_ip=device_data['connector_ip'],
status=initial_status)
context.session.add(device_db)
- LOG.debug(_("Created network gateway device: %s"), device_db['id'])
+ LOG.debug("Created network gateway device: %s", device_db['id'])
return self._make_gateway_device_dict(device_db)
def update_gateway_device(self, context, gateway_device_id,
# Ensure there is something to update before doing it
if any([device_db[k] != device_data[k] for k in device_data]):
device_db.update(device_data)
- LOG.debug(_("Updated network gateway device: %s"),
+ LOG.debug("Updated network gateway device: %s",
gateway_device_id)
return self._make_gateway_device_dict(
device_db, include_nsx_id=include_nsx_id)
raise GatewayDeviceInUse(device_id=device_id)
device_db = self._get_gateway_device(context, device_id)
context.session.delete(device_db)
- LOG.debug(_("Deleted network gateway device: %s."), device_id)
+ LOG.debug("Deleted network gateway device: %s.", device_id)
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
+from neutron.i18n import _LI
from neutron.openstack.common import log
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.extensions import qos
if dscp:
# must raise because a non-zero dscp was provided
raise qos.QueueInvalidMarking()
- LOG.info(_("DSCP value (%s) will be ignored with 'trusted' "
- "marking"), dscp)
+ LOG.info(_LI("DSCP value (%s) will be ignored with 'trusted' "
+ "marking"), dscp)
max = qos_queue.get('max')
min = qos_queue.get('min')
# Max can be None
from oslo.utils import excutils
from neutron.common import exceptions as n_exc
+from neutron.i18n import _LE, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as p_exc
try:
return lsn_api.lsn_for_network_get(self.cluster, network_id)
except (n_exc.NotFound, api_exc.NsxApiException):
- msg = _('Unable to find Logical Service Node for network %s')
if raise_on_err:
- LOG.error(msg, network_id)
+ LOG.error(_LE('Unable to find Logical Service Node for '
+ 'network %s.'),
+ network_id)
raise p_exc.LsnNotFound(entity='network',
entity_id=network_id)
else:
- LOG.warn(msg, network_id)
+ LOG.warn(_LW('Unable to find Logical Service Node for '
+ 'the requested network %s.'),
+ network_id)
def lsn_create(self, context, network_id):
"""Create a LSN associated to the network."""
try:
lsn_api.lsn_delete(self.cluster, lsn_id)
except (n_exc.NotFound, api_exc.NsxApiException):
- LOG.warn(_('Unable to delete Logical Service Node %s'), lsn_id)
+ LOG.warn(_LW('Unable to delete Logical Service Node %s'), lsn_id)
def lsn_delete_by_network(self, context, network_id):
"""Delete a LSN associated to the network."""
lsn_port_id = lsn_api.lsn_port_by_subnet_get(
self.cluster, lsn_id, subnet_id)
except (n_exc.NotFound, api_exc.NsxApiException):
- msg = _('Unable to find Logical Service Node Port for '
- 'LSN %(lsn_id)s and subnet %(subnet_id)s')
if raise_on_err:
- LOG.error(msg, {'lsn_id': lsn_id, 'subnet_id': subnet_id})
+ LOG.error(_LE('Unable to find Logical Service Node Port '
+ 'for LSN %(lsn_id)s and subnet '
+ '%(subnet_id)s'),
+ {'lsn_id': lsn_id, 'subnet_id': subnet_id})
raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
entity='subnet',
entity_id=subnet_id)
else:
- LOG.warn(msg, {'lsn_id': lsn_id, 'subnet_id': subnet_id})
+ LOG.warn(_LW('Unable to find Logical Service Node Port '
+ 'for LSN %(lsn_id)s and subnet '
+ '%(subnet_id)s'),
+ {'lsn_id': lsn_id, 'subnet_id': subnet_id})
return (lsn_id, None)
else:
return (lsn_id, lsn_port_id)
lsn_port_id = lsn_api.lsn_port_by_mac_get(
self.cluster, lsn_id, mac)
except (n_exc.NotFound, api_exc.NsxApiException):
- msg = _('Unable to find Logical Service Node Port for '
- 'LSN %(lsn_id)s and mac address %(mac)s')
if raise_on_err:
- LOG.error(msg, {'lsn_id': lsn_id, 'mac': mac})
+ LOG.error(_LE('Unable to find Logical Service Node Port '
+ 'for LSN %(lsn_id)s and mac address '
+ '%(mac)s'),
+ {'lsn_id': lsn_id, 'mac': mac})
raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
entity='MAC',
entity_id=mac)
else:
- LOG.warn(msg, {'lsn_id': lsn_id, 'mac': mac})
+ LOG.warn(_LW('Unable to find Logical Service Node '
+ 'Port for LSN %(lsn_id)s and mac address '
+ '%(mac)s'),
+ {'lsn_id': lsn_id, 'mac': mac})
return (lsn_id, None)
else:
return (lsn_id, lsn_port_id)
try:
lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
except (n_exc.NotFound, api_exc.NsxApiException):
- LOG.warn(_('Unable to delete LSN Port %s'), lsn_port_id)
+ LOG.warn(_LW('Unable to delete LSN Port %s'), lsn_port_id)
def lsn_port_dispose(self, context, network_id, mac_address):
"""Delete a LSN port given the network and the mac address."""
self.cluster, network_id, lswitch_port_id)
except (n_exc.PortNotFoundOnNetwork,
api_exc.NsxApiException):
- LOG.warn(_("Metadata port not found while attempting "
- "to delete it from network %s"), network_id)
+ LOG.warn(_LW("Metadata port not found while attempting "
+ "to delete it from network %s"), network_id)
else:
- LOG.warn(_("Unable to find Logical Services Node "
- "Port with MAC %s"), mac_address)
+ LOG.warn(_LW("Unable to find Logical Services Node "
+ "Port with MAC %s"), mac_address)
def lsn_port_dhcp_setup(
self, context, network_id, port_id, port_data, subnet_config=None):
if lsn_id and lsn_port_id:
hdlr(self.cluster, lsn_id, lsn_port_id, data)
except (n_exc.NotFound, api_exc.NsxApiException):
- LOG.error(_('Error while configuring LSN '
- 'port %s'), lsn_port_id)
+ LOG.error(_LE('Error while configuring LSN '
+ 'port %s'), lsn_port_id)
raise p_exc.PortConfigurationError(
net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id)
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
from neutron.extensions import external_net
+from neutron.i18n import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import exceptions as p_exc
from neutron.plugins.vmware.dhcp_meta import nsx
try:
self.plugin.delete_port(context, port['id'])
except n_exc.PortNotFound:
- LOG.error(_('Port %s is already gone'), port['id'])
+ LOG.error(_LE('Port %s is already gone'), port['id'])
def dhcp_allocate(self, context, network_id, subnet):
"""Allocate dhcp resources for the subnet."""
from neutron.db import db_base_plugin_v2
from neutron.db import l3_db
from neutron.extensions import external_net
+from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import exceptions as p_exc
from neutron.plugins.vmware.dhcp_meta import constants as d_const
# down below as well as handle_port_metadata_access
self.plugin.create_port(context, {'port': dhcp_port})
except p_exc.PortConfigurationError as e:
- err_msg = (_("Error while creating subnet %(cidr)s for "
- "network %(network)s. Please, contact "
- "administrator") %
- {"cidr": subnet["cidr"],
- "network": network_id})
- LOG.error(err_msg)
+ LOG.error(_LE("Error while creating subnet %(cidr)s for "
+ "network %(network)s. Please, contact "
+ "administrator"),
+ {"cidr": subnet["cidr"],
+ "network": network_id})
db_base_plugin_v2.NeutronDbPluginV2.delete_port(
self.plugin, context, e.port_id)
if clean_on_err:
def handle_network_dhcp_access(plugin, context, network, action):
- LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s")
- % {"action": action, "resource": network})
+ LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
+ {"action": action, "resource": network})
if action == 'create_network':
network_id = network['id']
if network.get(external_net.EXTERNAL):
- LOG.info(_("Network %s is external: no LSN to create"), network_id)
+ LOG.info(_LI("Network %s is external: no LSN to create"),
+ network_id)
return
plugin.lsn_manager.lsn_create(context, network_id)
elif action == 'delete_network':
# is just the network id
network_id = network
plugin.lsn_manager.lsn_delete_by_network(context, network_id)
- LOG.info(_("Logical Services Node for network "
- "%s configured successfully"), network_id)
+ LOG.info(_LI("Logical Services Node for network "
+ "%s configured successfully"), network_id)
def handle_port_dhcp_access(plugin, context, port, action):
- LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s")
- % {"action": action, "resource": port})
+ LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
+ {"action": action, "resource": port})
if port["device_owner"] == const.DEVICE_OWNER_DHCP:
network_id = port["network_id"]
if action == "create_port":
plugin.lsn_manager.lsn_port_dhcp_setup(
context, network_id, port['id'], subnet_data, subnet)
except p_exc.PortConfigurationError:
- err_msg = (_("Error while configuring DHCP for "
- "port %s"), port['id'])
- LOG.error(err_msg)
+ LOG.error(_LE("Error while configuring DHCP for "
+ "port %s"), port['id'])
raise n_exc.NeutronException()
elif action == "delete_port":
plugin.lsn_manager.lsn_port_dispose(context, network_id,
# do something only if there are IP's and dhcp is enabled
subnet_id = port["fixed_ips"][0]['subnet_id']
if not plugin.get_subnet(context, subnet_id)['enable_dhcp']:
- LOG.info(_("DHCP is disabled for subnet %s: nothing "
- "to do"), subnet_id)
+ LOG.info(_LI("DHCP is disabled for subnet %s: nothing "
+ "to do"), subnet_id)
return
host_data = {
"mac_address": port["mac_address"],
if action == 'create_port':
db_base_plugin_v2.NeutronDbPluginV2.delete_port(
plugin, context, port['id'])
- LOG.info(_("DHCP for port %s configured successfully"), port['id'])
+ LOG.info(_LI("DHCP for port %s configured successfully"), port['id'])
def handle_port_metadata_access(plugin, context, port, is_delete=False):
network_id = port["network_id"]
network = plugin.get_network(context, network_id)
if network[external_net.EXTERNAL]:
- LOG.info(_("Network %s is external: nothing to do"), network_id)
+ LOG.info(_LI("Network %s is external: nothing to do"),
+ network_id)
return
subnet_id = port["fixed_ips"][0]['subnet_id']
host_data = {
"tenant_id": port["tenant_id"],
"ip_address": port["fixed_ips"][0]['ip_address']
}
- LOG.info(_("Configuring metadata entry for port %s"), port)
+ LOG.info(_LI("Configuring metadata entry for port %s"), port)
if not is_delete:
handler = plugin.lsn_manager.lsn_port_meta_host_add
else:
if not is_delete:
db_base_plugin_v2.NeutronDbPluginV2.delete_port(
plugin, context, port['id'])
- LOG.info(_("Metadata for port %s configured successfully"), port['id'])
+ LOG.info(_LI("Metadata for port %s configured successfully"),
+ port['id'])
def handle_router_metadata_access(plugin, context, router_id, interface=None):
- LOG.info(_("Handle metadata access via router: %(r)s and "
- "interface %(i)s") % {'r': router_id, 'i': interface})
+ LOG.info(_LI("Handle metadata access via router: %(r)s and "
+ "interface %(i)s"), {'r': router_id, 'i': interface})
if interface:
try:
plugin.get_port(context, interface['port_id'])
if is_enabled:
l3_db.L3_NAT_db_mixin.remove_router_interface(
plugin, context, router_id, interface)
- LOG.info(_("Metadata for router %s handled successfully"), router_id)
+ LOG.info(_LI("Metadata for router %s handled successfully"), router_id)
from neutron.db import db_base_plugin_v2
from neutron.db import l3_db
from neutron.db import models_v2
+from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import config
# route. This is done via the enable_isolated_metadata
# option if desired.
if not subnet.get('gateway_ip'):
- LOG.info(_('Subnet %s does not have a gateway, the metadata '
- 'route will not be created'), subnet['id'])
+ LOG.info(_LI('Subnet %s does not have a gateway, the '
+ 'metadata route will not be created'),
+ subnet['id'])
return
metadata_routes = [r for r in subnet.routes
if r['destination'] == METADATA_DHCP_ROUTE]
def handle_router_metadata_access(plugin, context, router_id, interface=None):
if cfg.CONF.NSX.metadata_mode != config.MetadataModes.DIRECT:
- LOG.debug(_("Metadata access network is disabled"))
+ LOG.debug("Metadata access network is disabled")
return
if not cfg.CONF.allow_overlapping_ips:
- LOG.warn(_("Overlapping IPs must be enabled in order to setup "
- "the metadata access network"))
+ LOG.warn(_LW("Overlapping IPs must be enabled in order to setup "
+ "the metadata access network"))
return
ctx_elevated = context.elevated()
device_filter = {'device_id': [router_id],
_destroy_metadata_access_network(
plugin, ctx_elevated, router_id, ports)
else:
- LOG.debug(_("No router interface found for router '%s'. "
- "No metadata access network should be "
- "created or destroyed"), router_id)
+ LOG.debug("No router interface found for router '%s'. "
+ "No metadata access network should be "
+ "created or destroyed", router_id)
# TODO(salvatore-orlando): A better exception handling in the
# NSX plugin would allow us to improve error handling here
except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,
api_exc.NsxApiException):
# Any exception here should be regarded as non-fatal
- LOG.exception(_("An error occurred while operating on the "
- "metadata access network for router:'%s'"),
+ LOG.exception(_LE("An error occurred while operating on the "
+ "metadata access network for router:'%s'"),
router_id)
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import agents_db
+from neutron.i18n import _LW
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import config
from neutron.plugins.vmware.common import exceptions as nsx_exc
# This becomes ineffective, as all new networks creations
# are handled by Logical Services Nodes in NSX
cfg.CONF.set_override('network_auto_schedule', False)
- LOG.warn(_('network_auto_schedule has been disabled'))
+ LOG.warn(_LW('network_auto_schedule has been disabled'))
notifier = combined.DhcpAgentNotifyAPI(self.safe_reference,
lsn_manager)
self.supported_extension_aliases.append(lsn.EXT_ALIAS)
from oslo.config import cfg
+from neutron.i18n import _LI
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import exceptions
raise exceptions.InvalidClusterConfiguration(
invalid_attrs=self._required_attributes)
if self._important_attributes:
- LOG.info(_("The following cluster attributes were "
- "not specified: %s'"), self._important_attributes)
+ LOG.info(_LI("The following cluster attributes were "
+ "not specified: %s'"), self._important_attributes)
# The API client will be explicitly created by users of this class
self.api_client = None
from oslo.utils import excutils
from neutron.common import exceptions as exception
+from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc
router_id, route)
added_routes.append(uuid)
except api_exc.NsxApiException:
- LOG.exception(_('Cannot update NSX routes %(routes)s for '
- 'router %(router_id)s'),
+ LOG.exception(_LE('Cannot update NSX routes %(routes)s for '
+ 'router %(router_id)s'),
{'routes': routes, 'router_id': router_id})
# Roll back to keep NSX in consistent state
with excutils.save_and_reraise_exception():
result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
cluster=cluster)
- LOG.debug(_("Created logical port %(lport_uuid)s on "
- "logical router %(lrouter_uuid)s"),
+ LOG.debug("Created logical port %(lport_uuid)s on "
+ "logical router %(lrouter_uuid)s",
{'lport_uuid': result['uuid'],
'lrouter_uuid': lrouter_uuid})
return result
result = nsxlib.do_request(HTTP_PUT, path,
jsonutils.dumps(lport_obj),
cluster=cluster)
- LOG.debug(_("Updated logical port %(lport_uuid)s on "
- "logical router %(lrouter_uuid)s"),
+ LOG.debug("Updated logical port %(lport_uuid)s on "
+ "logical router %(lrouter_uuid)s",
{'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid})
return result
path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_uuid,
lrouter_uuid)
nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
- LOG.debug(_("Delete logical router port %(lport_uuid)s on "
- "logical router %(lrouter_uuid)s"),
+ LOG.debug("Delete logical router port %(lport_uuid)s on "
+ "logical router %(lrouter_uuid)s",
{'lport_uuid': lport_uuid,
'lrouter_uuid': lrouter_uuid})
def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj):
- LOG.debug(_("Creating NAT rule: %s"), nat_rule_obj)
+ LOG.debug("Creating NAT rule: %s", nat_rule_obj)
uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
parent_resource_id=router_id)
return nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(nat_rule_obj),
def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None):
- LOG.info(_("No SNAT rules cannot be applied as they are not available in "
- "this version of the NSX platform"))
+ LOG.info(_LI("No SNAT rules cannot be applied as they are not available "
+ "in this version of the NSX platform"))
def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None):
- LOG.info(_("No DNAT rules cannot be applied as they are not available in "
- "this version of the NSX platform"))
+ LOG.info(_LI("No DNAT rules cannot be applied as they are not available "
+ "in this version of the NSX platform"))
def create_lrouter_snat_rule_v2(cluster, router_id,
min_rules=min_num_expected,
max_rules=max_num_expected)
else:
- LOG.warn(_("Found %(actual_rule_num)d matching NAT rules, which "
- "is not in the expected range (%(min_exp_rule_num)d,"
- "%(max_exp_rule_num)d)"),
+ LOG.warn(_LW("Found %(actual_rule_num)d matching NAT rules, which "
+ "is not in the expected range (%(min_exp_rule_num)d,"
+ "%(max_exp_rule_num)d)"),
{'actual_rule_num': num_rules_to_delete,
'min_exp_rule_num': min_num_expected,
'max_exp_rule_num': max_num_expected})
from neutron.common import constants
from neutron.common import exceptions
+from neutron.i18n import _LW
from neutron.openstack.common import log
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware import nsxlib
{'ethertype': 'IPv6'}]}
update_security_group_rules(cluster, rsp['uuid'], rules)
- LOG.debug(_("Created Security Profile: %s"), rsp)
+ LOG.debug("Created Security Profile: %s", rsp)
return rsp
LOG.error(nsxlib.format_exception("Unknown", e, locals()))
#FIXME(salvatore-orlando): This should not raise NeutronException
raise exceptions.NeutronException()
- LOG.debug(_("Updated Security Profile: %s"), rsp)
+ LOG.debug("Updated Security Profile: %s", rsp)
return rsp
except exceptions.NotFound:
with excutils.save_and_reraise_exception():
# This is not necessarily an error condition
- LOG.warn(_("Unable to find security profile %s on NSX backend"),
+ LOG.warn(_LW("Unable to find security profile %s on NSX backend"),
spid)
from neutron.common import constants
from neutron.common import exceptions as exception
+from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc
uri = nsxlib._build_uri_path(LSWITCH_RESOURCE)
lswitch = nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(lswitch_obj),
cluster=cluster)
- LOG.debug(_("Created logical switch: %s"), lswitch['uuid'])
+ LOG.debug("Created logical switch: %s", lswitch['uuid'])
return lswitch
return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(lswitch_obj),
cluster=cluster)
except exception.NotFound as e:
- LOG.error(_("Network not found, Error: %s"), str(e))
+ LOG.error(_LE("Network not found, Error: %s"), str(e))
raise exception.NetworkNotFound(net_id=lswitch_id)
try:
nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
except exception.NotFound as e:
- LOG.error(_("Network not found, Error: %s"), str(e))
+ LOG.error(_LE("Network not found, Error: %s"), str(e))
raise exception.NetworkNotFound(net_id=ls_id)
try:
nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
except exception.NotFound:
- LOG.exception(_("Port or Network not found"))
+ LOG.exception(_LE("Port or Network not found"))
raise exception.PortNotFoundOnNetwork(
net_id=switch, port_id=port)
except api_exc.NsxApiException:
if not ports:
ports = nsxlib.get_all_query_pages(lport_query_path, cluster)
except exception.NotFound:
- LOG.warn(_("Lswitch %s not found in NSX"), lswitch)
+ LOG.warn(_LW("Lswitch %s not found in NSX"), lswitch)
ports = None
if ports:
fields='uuid',
filters={'tag': neutron_port_id,
'tag_scope': 'q_port_id'})
- LOG.debug(_("Looking for port with q_port_id tag '%(neutron_port_id)s' "
- "on: '%(lswitch_uuid)s'"),
+ LOG.debug("Looking for port with q_port_id tag '%(neutron_port_id)s' "
+ "on: '%(lswitch_uuid)s'",
{'neutron_port_id': neutron_port_id,
'lswitch_uuid': lswitch_uuid})
res = nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
num_results = len(res["results"])
if num_results >= 1:
if num_results > 1:
- LOG.warn(_("Found '%(num_ports)d' ports with "
- "q_port_id tag: '%(neutron_port_id)s'. "
- "Only 1 was expected."),
+ LOG.warn(_LW("Found '%(num_ports)d' ports with "
+ "q_port_id tag: '%(neutron_port_id)s'. "
+ "Only 1 was expected."),
{'num_ports': num_results,
'neutron_port_id': neutron_port_id})
return res["results"][0]
def get_port(cluster, network, port, relations=None):
- LOG.info(_("get_port() %(network)s %(port)s"),
+ LOG.info(_LI("get_port() %(network)s %(port)s"),
{'network': network, 'port': port})
uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
if relations:
try:
return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
except exception.NotFound as e:
- LOG.error(_("Port or Network not found, Error: %s"), str(e))
+ LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork(
port_id=port, net_id=network)
try:
result = nsxlib.do_request(HTTP_PUT, path, jsonutils.dumps(lport_obj),
cluster=cluster)
- LOG.debug(_("Updated logical port %(result)s "
- "on logical switch %(uuid)s"),
+ LOG.debug("Updated logical port %(result)s "
+ "on logical switch %(uuid)s",
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
except exception.NotFound as e:
- LOG.error(_("Port or Network not found, Error: %s"), str(e))
+ LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork(
port_id=lport_uuid, net_id=lswitch_uuid)
result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
cluster=cluster)
- LOG.debug(_("Created logical port %(result)s on logical switch %(uuid)s"),
+ LOG.debug("Created logical port %(result)s on logical switch %(uuid)s",
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
"/ws.v1/lswitch/%s/lport/%s/status" %
(lswitch_id, port_id), cluster=cluster)
except exception.NotFound as e:
- LOG.error(_("Port not found, Error: %s"), str(e))
+ LOG.error(_LE("Port not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork(
port_id=port_id, net_id=lswitch_id)
if r['link_status_up'] is True:
from neutron.extensions import portsecurity as psec
from neutron.extensions import providernet as pnet
from neutron.extensions import securitygroup as ext_sg
-from neutron.i18n import _LE
+from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as plugin_const
self._is_default_net_gw_in_sync = True
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Unable to process default l2 gw service:%s"),
+ LOG.exception(_LE("Unable to process default l2 gw service: "
+ "%s"),
def_l2_gw_uuid)
def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None):
port_data.get('mac_address'))
LOG.debug("Created NSX router port:%s", lrouter_port['uuid'])
except api_exc.NsxApiException:
- LOG.exception(_("Unable to create port on NSX logical router %s"),
+ LOG.exception(_LE("Unable to create port on NSX logical router "
+ "%s"),
nsx_router_id)
raise nsx_exc.NsxPluginException(
err_msg=_("Unable to create logical router port for neutron "
# Must remove NSX logical port
routerlib.delete_router_lport(cluster, nsx_router_id,
nsx_router_port_id)
- LOG.exception(_("Unable to plug attachment in NSX logical "
- "router port %(r_port_id)s, associated with "
- "Neutron %(q_port_id)s"),
+ LOG.exception(_LE("Unable to plug attachment in NSX logical "
+ "router port %(r_port_id)s, associated with "
+ "Neutron %(q_port_id)s"),
{'r_port_id': nsx_router_port_id,
'q_port_id': port_data.get('id')})
raise nsx_exc.NsxPluginException(
# rollback the neutron-nsx port mapping
nsx_db.delete_neutron_nsx_port_mapping(context.session,
port_id)
- msg = (_("An exception occurred while creating the "
- "neutron port %s on the NSX plaform") % port_id)
- LOG.exception(msg)
+ LOG.exception(_LE("An exception occurred while creating the "
+ "neutron port %s on the NSX plaform"), port_id)
def _nsx_create_port(self, context, port_data):
"""Driver for creating a logical switch port on NSX platform."""
# However, in order to not break unit tests, we need to still create
# the DB object and return success
if self._network_is_external(context, port_data['network_id']):
- LOG.info(_("NSX plugin does not support regular VIF ports on "
- "external networks. Port %s will be down."),
+ LOG.info(_LI("NSX plugin does not support regular VIF ports on "
+ "external networks. Port %s will be down."),
port_data['network_id'])
# No need to actually update the DB state - the default is down
return port_data
except db_exc.DBError as e:
if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and
isinstance(e.inner_exception, sql_exc.IntegrityError)):
- msg = (_("Concurrent network deletion detected; Back-end Port "
- "%(nsx_id)s creation to be rolled back for Neutron "
- "port: %(neutron_id)s")
- % {'nsx_id': lport['uuid'],
- 'neutron_id': port_data['id']})
- LOG.warning(msg)
+ LOG.warning(
+ _LW("Concurrent network deletion detected; Back-end "
+ "Port %(nsx_id)s creation to be rolled back for "
+ "Neutron port: %(neutron_id)s"),
+ {'nsx_id': lport['uuid'],
+ 'neutron_id': port_data['id']})
if selected_lswitch and lport:
try:
switchlib.delete_port(self.cluster,
# does not make sense. However we cannot raise as this would break
# unit tests.
if self._network_is_external(context, port_data['network_id']):
- LOG.info(_("NSX plugin does not support regular VIF ports on "
- "external networks. Port %s will be down."),
+ LOG.info(_LI("NSX plugin does not support regular VIF ports on "
+ "external networks. Port %s will be down."),
port_data['network_id'])
return
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
{'port_id': port_data['id'],
'net_id': port_data['network_id']})
except n_exc.NotFound:
- LOG.warning(_("Port %s not found in NSX"), port_data['id'])
+ LOG.warning(_LW("Port %s not found in NSX"), port_data['id'])
def _nsx_delete_router_port(self, context, port_data):
# Delete logical router port
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_data['id'])
if not nsx_port_id:
- LOG.warn(_("Neutron port %(port_id)s not found on NSX backend. "
- "Terminating delete operation. A dangling router port "
- "might have been left on router %(router_id)s"),
- {'port_id': port_data['id'],
- 'router_id': nsx_router_id})
+ LOG.warn(
+ _LW("Neutron port %(port_id)s not found on NSX backend. "
+ "Terminating delete operation. A dangling router port "
+ "might have been left on router %(router_id)s"),
+ {'port_id': port_data['id'],
+ 'router_id': nsx_router_id})
return
try:
routerlib.delete_peer_router_lport(self.cluster,
# Do not raise because the issue might as well be that the
# router has already been deleted, so there would be nothing
# to do here
- LOG.exception(_("Ignoring exception as this means the peer "
- "for port '%s' has already been deleted."),
+ LOG.exception(_LE("Ignoring exception as this means the peer "
+ "for port '%s' has already been deleted."),
nsx_port_id)
# Delete logical switch port
# However, in order to not break unit tests, we need to still create
# the DB object and return success
if self._network_is_external(context, port_data['network_id']):
- LOG.info(_("NSX plugin does not support regular VIF ports on "
- "external networks. Port %s will be down."),
+ LOG.info(_LI("NSX plugin does not support regular VIF ports on "
+ "external networks. Port %s will be down."),
port_data['network_id'])
# No need to actually update the DB state - the default is down
return port_data
context.session, network.id, selected_lswitch['uuid'])
return selected_lswitch
else:
- LOG.error(_("Maximum number of logical ports reached for "
- "logical network %s"), network.id)
+ LOG.error(_LE("Maximum number of logical ports reached for "
+ "logical network %s"), network.id)
raise nsx_exc.NoMorePortsException(network=network.id)
def _convert_to_nsx_transport_zones(self, cluster, network=None,
net_data[key] = None
# FIXME(arosen) implement admin_state_up = False in NSX
if net_data['admin_state_up'] is False:
- LOG.warning(_("Network with admin_state_up=False are not yet "
- "supported by this plugin. Ignoring setting for "
- "network %s"), net_data.get('name', '<unknown>'))
+ LOG.warning(_LW("Network with admin_state_up=False are not yet "
+ "supported by this plugin. Ignoring setting for "
+ "network %s"), net_data.get('name', '<unknown>'))
transport_zone_config = self._convert_to_nsx_transport_zones(
self.cluster, net_data)
external = net_data.get(ext_net_extn.EXTERNAL)
try:
switchlib.delete_networks(self.cluster, id, lswitch_ids)
except n_exc.NotFound:
- LOG.warning(_("The following logical switches were not found "
- "on the NSX backend:%s"), lswitch_ids)
+ LOG.warning(_LW("The following logical switches were not "
+ "found on the NSX backend:%s"), lswitch_ids)
self.handle_network_dhcp_access(context, id, action='delete_network')
LOG.debug("Delete network complete for network: %s", id)
nsx_switch_ids = nsx_utils.get_nsx_switch_ids(
context.session, self.cluster, id)
if not nsx_switch_ids or len(nsx_switch_ids) < 1:
- LOG.warn(_("Unable to find NSX mappings for neutron "
- "network:%s"), id)
+ LOG.warn(_LW("Unable to find NSX mappings for neutron "
+ "network:%s"), id)
try:
switchlib.update_lswitch(self.cluster,
nsx_switch_ids[0],
network['network']['name'])
except api_exc.NsxApiException as e:
- LOG.warn(_("Logical switch update on NSX backend failed. "
- "Neutron network id:%(net_id)s; "
- "NSX lswitch id:%(lswitch_id)s;"
- "Error:%(error)s"),
+ LOG.warn(_LW("Logical switch update on NSX backend failed. "
+ "Neutron network id:%(net_id)s; "
+ "NSX lswitch id:%(lswitch_id)s;"
+ "Error:%(error)s"),
{'net_id': id, 'lswitch_id': nsx_switch_ids[0],
'error': e})
LOG.debug("port created on NSX backend for tenant "
"%(tenant_id)s: (%(id)s)", port_data)
except n_exc.NotFound:
- LOG.warning(_("Logical switch for network %s was not "
- "found in NSX."), port_data['network_id'])
+ LOG.warning(_LW("Logical switch for network %s was not "
+ "found in NSX."), port_data['network_id'])
# Put port in error on neutron DB
with context.session.begin(subtransactions=True):
port = self._get_port(context, neutron_port_id)
except Exception:
# Port must be removed from neutron DB
with excutils.save_and_reraise_exception():
- LOG.error(_("Unable to create port or set port "
- "attachment in NSX."))
+ LOG.error(_LE("Unable to create port or set port "
+ "attachment in NSX."))
with context.session.begin(subtransactions=True):
self._delete_port(context, neutron_port_id)
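
Many hunks in this plugin log inside excutils.save_and_reraise_exception(), as here: the context manager re-raises the original exception once the block finishes, so the compensating DB delete cannot silently swallow the failure. A rough sketch of that cleanup pattern, with invented resource names:

    import logging

    from oslo.utils import excutils  # spelled oslo_utils in later releases

    LOG = logging.getLogger(__name__)

    def _LE(msg):  # stand-in for the neutron.i18n marker
        return msg

    def create_port(db, backend, port):
        db[port['id']] = port  # hypothetical DB insert
        try:
            backend.create(port)  # hypothetical backend call
        except Exception:
            # Log, roll back the DB insert, then let the original
            # exception propagate when the context manager exits.
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Unable to create port %s in NSX"),
                          port['id'])
                del db[port['id']]
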
# FIXME(arosen) improve exception handling.
except Exception:
ret_port['status'] = constants.PORT_STATUS_ERROR
- LOG.exception(_("Unable to update port id: %s."),
+ LOG.exception(_LE("Unable to update port id: %s."),
nsx_port_id)
# If nsx_port_id is not in database or in nsx put in error state.
"L3GatewayAttachment",
self.cluster.default_l3_gw_service_uuid)
except nsx_exc.NsxPluginException:
- LOG.exception(_("Unable to create L3GW port on logical router "
- "%(router_uuid)s. Verify Default Layer-3 Gateway "
- "service %(def_l3_gw_svc)s id is correct"),
+ LOG.exception(_LE("Unable to create L3GW port on logical router "
+ "%(router_uuid)s. Verify Default Layer-3 "
+ "Gateway service %(def_l3_gw_svc)s id is "
+ "correct"),
{'router_uuid': lrouter['uuid'],
'def_l3_gw_svc':
self.cluster.default_l3_gw_service_uuid})
# As setting gateway failed, the router must be deleted
# in order to ensure atomicity
router_id = router_db['id']
- LOG.warn(_("Failed to set gateway info for router being "
- "created:%s - removing router"), router_id)
+ LOG.warn(_LW("Failed to set gateway info for router being "
+ "created:%s - removing router"), router_id)
self.delete_router(context, router_id)
- LOG.info(_("Create router failed while setting external "
- "gateway. Router:%s has been removed from "
- "DB and backend"),
+ LOG.info(_LI("Create router failed while setting external "
+ "gateway. Router:%s has been removed from "
+ "DB and backend"),
router_id)
return self._make_router_dict(router_db)
self._delete_lrouter(context, router_id, nsx_router_id)
except n_exc.NotFound:
# This is not a fatal error, but needs to be logged
- LOG.warning(_("Logical router '%s' not found "
- "on NSX Platform"), router_id)
+ LOG.warning(_LW("Logical router '%s' not found "
+ "on NSX Platform"), router_id)
except api_exc.NsxApiException:
raise nsx_exc.NsxPluginException(
err_msg=(_("Unable to delete logical router '%s' "
context.session, router_id)
except db_exc.DBError as d_exc:
# Do not make this error fatal
- LOG.warn(_("Unable to remove NSX mapping for Neutron router "
- "%(router_id)s because of the following exception:"
- "%(d_exc)s"), {'router_id': router_id,
- 'd_exc': str(d_exc)})
+ LOG.warn(_LW("Unable to remove NSX mapping for Neutron router "
+ "%(router_id)s because of the following exception:"
+ "%(d_exc)s"), {'router_id': router_id,
+ 'd_exc': str(d_exc)})
# Perform the actual delete on the Neutron DB
super(NsxPluginV2, self).delete_router(context, router_id)
raise_on_len_mismatch=False,
destination_ip_addresses=subnet['cidr'])
except n_exc.NotFound:
- LOG.error(_("Logical router resource %s not found "
- "on NSX platform") % router_id)
+ LOG.error(_LE("Logical router resource %s not found "
+ "on NSX platform"), router_id)
except api_exc.NsxApiException:
raise nsx_exc.NsxPluginException(
err_msg=(_("Unable to update logical router"
except api_exc.NsxApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("An error occurred while removing NAT rules "
- "on the NSX platform for floating ip:%s"),
+ LOG.exception(_LE("An error occurred while removing NAT rules "
+ "on the NSX platform for floating ip:%s"),
floating_ip_address)
except nsx_exc.NatRuleMismatch:
# Do not surface to the user
- LOG.warning(_("An incorrect number of matching NAT rules "
- "was found on the NSX platform"))
+ LOG.warning(_LW("An incorrect number of matching NAT rules "
+ "was found on the NSX platform"))
def _remove_floatingip_address(self, context, fip_db):
# Remove floating IP address from logical router port
self.cluster, nsx_router_id, nsx_gw_port_id,
ips_to_add=nsx_floating_ips, ips_to_remove=[])
except api_exc.NsxApiException:
- LOG.exception(_("An error occurred while creating NAT "
- "rules on the NSX platform for floating "
- "ip:%(floating_ip)s mapped to "
- "internal ip:%(internal_ip)s"),
+ LOG.exception(_LE("An error occurred while creating NAT "
+ "rules on the NSX platform for floating "
+ "ip:%(floating_ip)s mapped to "
+ "internal ip:%(internal_ip)s"),
{'floating_ip': floating_ip,
'internal_ip': internal_ip})
msg = _("Failed to update NAT rules for floatingip update")
LOG.debug("The port '%s' is not associated with floating IPs",
port_id)
except n_exc.NotFound:
- LOG.warning(_("Nat rules not found in nsx for port: %s"), id)
+ LOG.warning(_LW("Nat rules not found in nsx for port: %s"), id)
# NOTE(ihrachys): L3 agent notifications don't make sense for
# NSX VMWare plugin since there is no L3 agent in such setup, so
except api_exc.ResourceNotFound:
# Do not cause a 500 to be returned to the user if
# the corresponding NSX resource does not exist
- LOG.exception(_("Unable to remove gateway service from "
- "NSX plaform - the resource was not found"))
+ LOG.exception(_LE("Unable to remove gateway service from "
+ "NSX plaform - the resource was not found"))
def get_network_gateway(self, context, id, fields=None):
# Ensure the default gateway in the config file is in sync with the db
except api_exc.NsxApiException:
# Consider backend failures as non-fatal, but still warn
# because this might indicate something dodgy is going on
- LOG.warn(_("Unable to update name on NSX backend "
- "for network gateway: %s"), id)
+ LOG.warn(_LW("Unable to update name on NSX backend "
+ "for network gateway: %s"), id)
return super(NsxPluginV2, self).update_network_gateway(
context, id, network_gateway)
try:
l2gwlib.delete_gateway_device(self.cluster, nsx_device_id)
except n_exc.NotFound:
- LOG.warn(_("Removal of gateway device: %(neutron_id)s failed on "
- "NSX backend (NSX id:%(nsx_id)s) because the NSX "
- "resource was not found"),
+ LOG.warn(_LW("Removal of gateway device: %(neutron_id)s failed on "
+ "NSX backend (NSX id:%(nsx_id)s) because the NSX "
+ "resource was not found"),
{'neutron_id': device_id, 'nsx_id': nsx_device_id})
except api_exc.NsxApiException:
with excutils.save_and_reraise_exception():
# In this case a 500 should be returned
- LOG.exception(_("Removal of gateway device: %(neutron_id)s "
- "failed on NSX backend (NSX id:%(nsx_id)s). "
- "Neutron and NSX states have diverged."),
+ LOG.exception(_LE("Removal of gateway device: %(neutron_id)s "
+ "failed on NSX backend (NSX id:%(nsx_id)s). "
+ "Neutron and NSX states have diverged."),
{'neutron_id': device_id,
'nsx_id': nsx_device_id})
# Reverting the DB change is not really worthwhile
# for a mismatch between names. It's the rules that
# we care about.
- LOG.error(_('Error while updating security profile '
- '%(uuid)s with name %(name)s: %(error)s.')
- % {'uuid': secgroup_id, 'name': name, 'error': e})
+ LOG.error(_LE('Error while updating security profile '
+ '%(uuid)s with name %(name)s: %(error)s.'),
+ {'uuid': secgroup_id, 'name': name, 'error': e})
return secgroup
def delete_security_group(self, context, security_group_id):
except n_exc.NotFound:
# The security profile was not found on the backend
# do not fail in this case.
- LOG.warning(_("The NSX security profile %(sec_profile_id)s, "
- "associated with the Neutron security group "
- "%(sec_group_id)s was not found on the backend"),
+ LOG.warning(_LW("The NSX security profile %(sec_profile_id)s, "
+ "associated with the Neutron security group "
+ "%(sec_group_id)s was not found on the "
+ "backend"),
{'sec_profile_id': nsx_sec_profile_id,
'sec_group_id': security_group_id})
except api_exc.NsxApiException:
# Raise and fail the operation, as there is a problem which
# prevented the sec group from being removed from the backend
- LOG.exception(_("An exception occurred while removing the "
- "NSX security profile %(sec_profile_id)s, "
- "associated with Netron security group "
- "%(sec_group_id)s"),
+ LOG.exception(_LE("An exception occurred while removing the "
+ "NSX security profile %(sec_profile_id)s, "
+ "associated with Netron security group "
+ "%(sec_group_id)s"),
{'sec_profile_id': nsx_sec_profile_id,
'sec_group_id': security_group_id})
raise nsx_exc.NsxPluginException(
from neutron.extensions import l3
from neutron.extensions import routedserviceinsertion as rsi
from neutron.extensions import vpnaas as vpn_ext
+from neutron.i18n import _LE, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants
from neutron.plugins.vmware.api_client import exception as api_exc
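
Each converted module gains an import like the one above. The neutron.i18n module itself is not part of this excerpt; a plausible minimal version, assuming it is built on oslo.i18n's TranslatorFactory, would be:

    # Plausible sketch of neutron/i18n.py; the real module may differ.
    import oslo_i18n  # imported under the oslo.i18n namespace in this era

    _translators = oslo_i18n.TranslatorFactory(domain='neutron')

    # Primary translation function, for user-facing strings.
    _ = _translators.primary

    # Level-specific markers for translatable log messages.
    _LI = _translators.log_info
    _LW = _translators.log_warning
    _LE = _translators.log_error
    _LC = _translators.log_critical
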
try:
self.vcns_driver.delete_lswitch(lswitch_id)
except exceptions.ResourceNotFound:
- LOG.warning(_("Did not found lswitch %s in NSX"), lswitch_id)
+ LOG.warning(_LW("Did not found lswitch %s in NSX"), lswitch_id)
# delete edge
jobdata = {
except exceptions.VcnsApiException as e:
self._firewall_set_status(
context, fw['id'], service_constants.ERROR)
- msg = (_("Failed to create firewall on vShield Edge "
- "bound on router %s") % router_id)
- LOG.exception(msg)
+ LOG.exception(_LE("Failed to create firewall on vShield Edge "
+ "bound on router %s"), router_id)
raise e
except exceptions.VcnsBadRequest as e:
self._firewall_set_status(
context, fw['id'], service_constants.ERROR)
- LOG.exception(_("Bad Firewall request Input"))
+ LOG.exception(_LE("Bad Firewall request Input"))
raise e
def _vcns_delete_firewall(self, context, router_id=None, **kwargs):
context, loadbalancer_db.Vip, resource_id=vip_id)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to find the edge with "
- "vip_id: %s"), vip_id)
+ LOG.exception(_LE("Failed to find the edge with "
+ "vip_id: %s"), vip_id)
return self._get_edge_id_by_vcns_edge_binding(
context, service_router_binding.router_id)
context, edge_id, hm)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to create healthmonitor "
- "associated with pool id: %s!") % pool_id)
+ LOG.exception(_LE("Failed to create healthmonitor "
+ "associated with pool id: %s!"), pool_id)
for monitor_ide in pool.get('health_monitors'):
if monitor_ide == monitor_id:
break
self.vcns_driver.create_pool(context, edge_id, pool, members)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to create pool on vshield edge"))
+ LOG.exception(_LE("Failed to create pool on vshield edge"))
self.vcns_driver.delete_pool(
context, pool_id, edge_id)
for monitor_id in pool.get('health_monitors'):
self.vcns_driver.create_vip(context, edge_id, v)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to create vip!"))
+ LOG.exception(_LE("Failed to create vip!"))
self._delete_resource_router_id_binding(
context, v['id'], loadbalancer_db.Vip)
super(NsxAdvancedPlugin, self).delete_vip(context, v['id'])
self.vcns_driver.update_vip(context, v, session_persistence_update)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update vip with id: %s!"), id)
+ LOG.exception(_LE("Failed to update vip with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Vip,
id, service_constants.ERROR, v)
self.vcns_driver.delete_vip(context, id)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to delete vip with id: %s!"), id)
+ LOG.exception(_LE("Failed to delete vip with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Vip,
id, service_constants.ERROR)
edge_id = self._get_edge_id_by_vip_id(context, id)
self._vcns_update_pool(context, p)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update pool with id: %s!"), id)
+ LOG.exception(_LE("Failed to update pool with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Pool,
p['id'], service_constants.ERROR, p)
self._resource_set_status(context, loadbalancer_db.Pool,
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update pool with the member"))
+ LOG.exception(_LE("Failed to update pool with the member"))
super(NsxAdvancedPlugin, self).delete_member(context, m['id'])
self._resource_set_status(context, loadbalancer_db.Pool,
self._vcns_update_pool(context, old_pool)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update old pool "
- "with the member"))
+ LOG.exception(_LE("Failed to update old pool "
+ "with the member"))
super(NsxAdvancedPlugin, self).delete_member(
context, m['id'])
self._resource_set_status(
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update pool with the member"))
+ LOG.exception(_LE("Failed to update pool with the member"))
super(NsxAdvancedPlugin, self).delete_member(
context, m['id'])
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update pool with the member"))
+ LOG.exception(_LE("Failed to update pool with the member"))
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
context, edge_id, old_hm, hm)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update monitor "
- "with id: %s!"), id)
+ LOG.exception(_LE("Failed to update monitor "
+ "with id: %s!"), id)
return hm
def create_pool_health_monitor(self, context,
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to associate monitor with pool!"))
+ LOG.exception(_LE("Failed to associate monitor with pool!"))
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ERROR)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(
- _("Failed to update pool with pool_monitor!"))
+ _LE("Failed to update pool with pool_monitor!"))
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ERROR)
edge_id, sites, enabled=vpn_service.admin_state_up)
except exceptions.VcnsBadRequest:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Bad or unsupported Input request!"))
+ LOG.exception(_LE("Bad or unsupported Input request!"))
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
- msg = (_("Failed to update ipsec VPN configuration "
- "with vpnservice: %(vpnservice_id)s on vShield Edge: "
- "%(edge_id)s") % {'vpnservice_id': vpnservice_id,
- 'edge_id': edge_id})
- LOG.exception(msg)
+ LOG.exception(_LE("Failed to update ipsec VPN configuration "
+ "with vpnservice: %(vpnservice_id)s on "
+ "vShield Edge: %(edge_id)s"),
+ {'vpnservice_id': vpnservice_id,
+ 'edge_id': edge_id})
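
Messages with several placeholders, as in the hunk above, use named %(...)s substitutions with a dict rather than positional %s arguments, since a translated catalog may need to reorder the values. A tiny illustration:

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)

    # Named placeholders let a translation reorder edge and service
    # without changing the calling code.
    LOG.info("updated ipsec VPN %(vpnservice_id)s on edge %(edge_id)s",
             {'vpnservice_id': 'vpn-1', 'edge_id': 'edge-7'})
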
def create_vpnservice(self, context, vpnservice):
LOG.debug("create_vpnservice() called")
context, neutron_router_id)
except l3.RouterNotFound:
# Router might have been deleted before deploy finished
- LOG.exception(_("Router %s not found"), lrouter['uuid'])
+ LOG.exception(_LE("Router %s not found"), lrouter['uuid'])
if task.status == tasks_const.TaskStatus.COMPLETED:
LOG.debug("Successfully deployed %(edge_id)s for "
from oslo.serialization import jsonutils
from oslo.utils import excutils
+from neutron.i18n import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.vshield.common import constants as vcns_const
status_level = self._edge_status_to_level(
response['edgeStatus'])
except exceptions.VcnsApiException as e:
- LOG.exception(_("VCNS: Failed to get edge status:\n%s"),
+ LOG.exception(_LE("VCNS: Failed to get edge status:\n%s"),
e.response)
status_level = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
try:
self.vcns.update_interface(edge_id, config)
except exceptions.VcnsApiException as e:
with excutils.save_and_reraise_exception():
- LOG.exception(_("VCNS: Failed to update vnic %(config)s:\n"
- "%(response)s"), {
+ LOG.exception(_LE("VCNS: Failed to update vnic %(config)s:\n"
+ "%(response)s"), {
'config': config,
'response': e.response})
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("VCNS: Failed to update vnic %d"),
+ LOG.exception(_LE("VCNS: Failed to update vnic %d"),
config['index'])
return constants.TaskStatus.COMPLETED
status = constants.TaskStatus.PENDING
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("VCNS: deploy edge failed for router %s."),
+ LOG.exception(_LE("VCNS: deploy edge failed for router %s."),
name)
return status
status = constants.TaskStatus.ERROR
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("VCNS: Edge %s status query failed."), edge_id)
+ LOG.exception(_LE("VCNS: Edge %s status query failed."),
+ edge_id)
except Exception:
retries = task.userdata.get('retries', 0) + 1
if retries < 3:
task.userdata['retries'] = retries
- msg = _("VCNS: Unable to retrieve edge %(edge_id)s status. "
- "Retry %(retries)d.") % {
- 'edge_id': edge_id,
- 'retries': retries}
- LOG.exception(msg)
+ LOG.exception(_LE("VCNS: Unable to retrieve edge %(edge_id)s "
+ "status. Retry %(retries)d."),
+ {'edge_id': edge_id,
+ 'retries': retries})
status = constants.TaskStatus.PENDING
else:
- msg = _("VCNS: Unable to retrieve edge %s status. "
- "Abort.") % edge_id
- LOG.exception(msg)
+ LOG.exception(_LE("VCNS: Unable to retrieve edge %s status. "
+ "Abort."), edge_id)
status = constants.TaskStatus.ERROR
LOG.debug("VCNS: Edge %s status", edge_id)
return status
router_name = task.userdata['router_name']
edge_id = task.userdata.get('edge_id')
if task.status != constants.TaskStatus.COMPLETED:
- LOG.error(_("VCNS: Failed to deploy edge %(edge_id)s "
- "for %(name)s, status %(status)d"), {
+ LOG.error(_LE("VCNS: Failed to deploy edge %(edge_id)s "
+ "for %(name)s, status %(status)d"), {
'edge_id': edge_id,
'name': router_name,
'status': task.status
except exceptions.ResourceNotFound:
pass
except exceptions.VcnsApiException as e:
- msg = _("VCNS: Failed to delete %(edge_id)s:\n"
- "%(response)s") % {
- 'edge_id': edge_id, 'response': e.response}
- LOG.exception(msg)
+ LOG.exception(_LE("VCNS: Failed to delete %(edge_id)s:\n"
+ "%(response)s"),
+ {'edge_id': edge_id, 'response': e.response})
status = constants.TaskStatus.ERROR
except Exception:
- LOG.exception(_("VCNS: Failed to delete %s"), edge_id)
+ LOG.exception(_LE("VCNS: Failed to delete %s"), edge_id)
status = constants.TaskStatus.ERROR
return status
return self.vcns.get_edges()[1]
except exceptions.VcnsApiException as e:
with excutils.save_and_reraise_exception():
- LOG.exception(_("VCNS: Failed to get edges:\n%s"), e.response)
+ LOG.exception(_LE("VCNS: Failed to get edges:\n%s"),
+ e.response)
def deploy_edge(self, router_id, name, internal_network, jobdata=None,
wait_for_exec=False, loadbalancer_enable=True):
return self.vcns.get_nat_config(edge_id)[1]
except exceptions.VcnsApiException as e:
with excutils.save_and_reraise_exception():
- LOG.exception(_("VCNS: Failed to get nat config:\n%s"),
+ LOG.exception(_LE("VCNS: Failed to get nat config:\n%s"),
e.response)
def _create_nat_rule(self, task):
self.vcns.update_nat_config(edge_id, nat)
status = constants.TaskStatus.COMPLETED
except exceptions.VcnsApiException as e:
- LOG.exception(_("VCNS: Failed to create snat rule:\n%s"),
+ LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
e.response)
status = constants.TaskStatus.ERROR
try:
self.vcns.delete_nat_rule(edge_id, rule_id)
except exceptions.VcnsApiException as e:
- LOG.exception(_("VCNS: Failed to delete snat rule:\n"
- "%s"), e.response)
+ LOG.exception(_LE("VCNS: Failed to delete snat rule:\n"
+ "%s"), e.response)
status = constants.TaskStatus.ERROR
return status
self.vcns.update_nat_config(edge_id, nat)
status = constants.TaskStatus.COMPLETED
except exceptions.VcnsApiException as e:
- LOG.exception(_("VCNS: Failed to create snat rule:\n%s"),
+ LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
e.response)
status = constants.TaskStatus.ERROR
self.vcns.update_routes(edge_id, request)
status = constants.TaskStatus.COMPLETED
except exceptions.VcnsApiException as e:
- LOG.exception(_("VCNS: Failed to update routes:\n%s"),
+ LOG.exception(_LE("VCNS: Failed to update routes:\n%s"),
e.response)
status = constants.TaskStatus.ERROR
edge_id)
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to get service config"))
+ LOG.exception(_LE("Failed to get service config"))
return response
def enable_service_loadbalancer(self, edge_id):
self.vcns.enable_service_loadbalancer(edge_id, config)
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to enable loadbalancer "
- "service config"))
+ LOG.exception(_LE("Failed to enable loadbalancer "
+ "service config"))
from oslo.utils import excutils
from neutron.db import db_base_plugin_v2
+from neutron.i18n import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.plugins.vmware.dbexts import vcns_db
return self.vcns.get_firewall(edge_id)[1]
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to get firewall with edge "
- "id: %s"), edge_id)
+ LOG.exception(_LE("Failed to get firewall with edge "
+ "id: %s"), edge_id)
def _get_firewall_rule_next(self, context, edge_id, rule_vseid):
# Return the firewall rule below 'rule_vseid'
edge_id, vcns_rule_id)[1]
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to get firewall rule: %(rule_id)s "
- "with edge_id: %(edge_id)s"), {
+ LOG.exception(_LE("Failed to get firewall rule: %(rule_id)s "
+ "with edge_id: %(edge_id)s"), {
'rule_id': id,
'edge_id': edge_id})
return self._restore_firewall_rule(context, edge_id, response)
self.vcns.update_firewall(edge_id, fw_req)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update firewall "
- "with edge_id: %s"), edge_id)
+ LOG.exception(_LE("Failed to update firewall "
+ "with edge_id: %s"), edge_id)
fw_res = self._get_firewall(context, edge_id)
vcns_db.cleanup_vcns_edge_firewallrule_binding(
context.session, edge_id)
self.vcns.delete_firewall(edge_id)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to delete firewall "
- "with edge_id:%s"), edge_id)
+ LOG.exception(_LE("Failed to delete firewall "
+ "with edge_id:%s"), edge_id)
vcns_db.cleanup_vcns_edge_firewallrule_binding(
context.session, edge_id)
self.vcns.update_firewall_rule(edge_id, vcns_rule_id, fwr_req)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update firewall rule: %(rule_id)s "
- "with edge_id: %(edge_id)s"),
+ LOG.exception(_LE("Failed to update firewall rule: "
+ "%(rule_id)s with edge_id: %(edge_id)s"),
{'rule_id': id,
'edge_id': edge_id})
self.vcns.delete_firewall_rule(edge_id, vcns_rule_id)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to delete firewall rule: %(rule_id)s "
- "with edge_id: %(edge_id)s"),
+ LOG.exception(_LE("Failed to delete firewall rule: "
+ "%(rule_id)s with edge_id: %(edge_id)s"),
{'rule_id': id,
'edge_id': edge_id})
vcns_db.delete_vcns_edge_firewallrule_binding(
edge_id, ref_vcns_rule_id, fwr_req)[0]
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to add firewall rule above: "
- "%(rule_id)s with edge_id: %(edge_id)s"),
+ LOG.exception(_LE("Failed to add firewall rule above: "
+ "%(rule_id)s with edge_id: %(edge_id)s"),
{'rule_id': ref_vcns_rule_id,
'edge_id': edge_id})
edge_id, int(ref_vcns_rule_id), fwr_req)[0]
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to add firewall rule above: "
- "%(rule_id)s with edge_id: %(edge_id)s"),
+ LOG.exception(_LE("Failed to add firewall rule above: "
+ "%(rule_id)s with edge_id: %(edge_id)s"),
{'rule_id': ref_vcns_rule_id,
'edge_id': edge_id})
else:
edge_id, fwr_req)[0]
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to append a firewall rule"
- "with edge_id: %s"), edge_id)
+ LOG.exception(_LE("Failed to append a firewall rule"
+ "with edge_id: %s"), edge_id)
objuri = header['location']
fwr_vseid = objuri[objuri.rfind("/") + 1:]
from oslo.utils import excutils
+from neutron.i18n import _LE, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.vshield.common import (
exceptions as vcns_exc)
ikepolicy['encryption_algorithm'] != ipsecpolicy[
'encryption_algorithm'] or
ikepolicy['pfs'] != ipsecpolicy['pfs']):
- msg = _("IKEPolicy and IPsecPolicy should have consistent "
- "auth_algorithm, encryption_algorithm and pfs for VSE!")
- LOG.warning(msg)
+ LOG.warning(_LW(
+ "IKEPolicy and IPsecPolicy should have consistent "
+ "auth_algorithm, encryption_algorithm and pfs for VSE!"))
# Check whether encryption_algorithm is allowed.
encryption_algorithm = ENCRYPTION_ALGORITHM_MAP.get(
self.vcns.update_ipsec_config(edge_id, ipsec_config)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update ipsec vpn configuration "
- "with edge_id: %s"), edge_id)
+ LOG.exception(_LE("Failed to update ipsec vpn "
+ "configuration with edge_id: %s"),
+ edge_id)
def delete_ipsec_config(self, edge_id):
try:
self.vcns.delete_ipsec_config(edge_id)
except vcns_exc.ResourceNotFound:
- LOG.warning(_("IPsec config not found on edge: %s"), edge_id)
+ LOG.warning(_LW("IPsec config not found on edge: %s"), edge_id)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to delete ipsec vpn configuration "
- "with edge_id: %s"), edge_id)
+ LOG.exception(_LE("Failed to delete ipsec vpn configuration "
+ "with edge_id: %s"), edge_id)
def get_ipsec_config(self, edge_id):
return self.vcns.get_ipsec_config(edge_id)
from oslo.utils import excutils
+from neutron.i18n import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.dbexts import vcns_db
from neutron.plugins.vmware.vshield.common import (
edge_id, app_profile)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to create app profile on edge: %s"),
+ LOG.exception(_LE("Failed to create app profile on edge: %s"),
edge_id)
objuri = header['location']
app_profileid = objuri[objuri.rfind("/") + 1:]
edge_id, vip_new)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to create vip on vshield edge: %s"),
+ LOG.exception(_LE("Failed to create vip on vshield edge: %s"),
edge_id)
self.vcns.delete_app_profile(edge_id, app_profileid)
objuri = header['location']
response = self.vcns.get_vip(edge_id, vip_vseid)[1]
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to get vip on edge"))
+ LOG.exception(_LE("Failed to get vip on edge"))
return self._restore_lb_vip(context, edge_id, response)
def update_vip(self, context, vip, session_persistence_update=True):
edge_id, app_profileid, app_profile)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update app profile on "
- "edge: %s") % edge_id)
+ LOG.exception(_LE("Failed to update app profile on "
+ "edge: %s"), edge_id)
vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid)
try:
self.vcns.update_vip(edge_id, vip_vseid, vip_new)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update vip on edge: %s") % edge_id)
+ LOG.exception(_LE("Failed to update vip on edge: %s"), edge_id)
def delete_vip(self, context, id):
vip_binding = self._get_vip_binding(context.session, id)
try:
self.vcns.delete_vip(edge_id, vip_vseid)
except vcns_exc.ResourceNotFound:
- LOG.exception(_("vip not found on edge: %s") % edge_id)
+ LOG.exception(_LE("vip not found on edge: %s"), edge_id)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to delete vip on edge: %s") % edge_id)
+ LOG.exception(_LE("Failed to delete vip on edge: %s"), edge_id)
try:
self.vcns.delete_app_profile(edge_id, app_profileid)
except vcns_exc.ResourceNotFound:
- LOG.exception(_("app profile not found on edge: %s") % edge_id)
+ LOG.exception(_LE("app profile not found on edge: %s"), edge_id)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to delete app profile on edge: %s") %
+ LOG.exception(_LE("Failed to delete app profile on edge: %s"),
edge_id)
vcns_db.delete_vcns_edge_vip_binding(context.session, id)
header = self.vcns.create_pool(edge_id, pool_new)[0]
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to create pool"))
+ LOG.exception(_LE("Failed to create pool"))
objuri = header['location']
pool_vseid = objuri[objuri.rfind("/") + 1:]
response = self.vcns.get_pool(edge_id, pool_vseid)[1]
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to get pool on edge"))
+ LOG.exception(_LE("Failed to get pool on edge"))
return self._restore_lb_pool(context, edge_id, response)
def update_pool(self, context, edge_id, pool, members):
self.vcns.update_pool(edge_id, pool_vseid, pool_new)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update pool"))
+ LOG.exception(_LE("Failed to update pool"))
def delete_pool(self, context, id, edge_id):
pool_binding = vcns_db.get_vcns_edge_pool_binding(
self.vcns.delete_pool(edge_id, pool_vseid)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to delete pool"))
+ LOG.exception(_LE("Failed to delete pool"))
vcns_db.delete_vcns_edge_pool_binding(
context.session, id, edge_id)
header = self.vcns.create_health_monitor(edge_id, monitor_new)[0]
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to create monitor on edge: %s"),
+ LOG.exception(_LE("Failed to create monitor on edge: %s"),
edge_id)
objuri = header['location']
response = self.vcns.get_health_monitor(edge_id, monitor_vseid)[1]
except vcns_exc.VcnsApiException as e:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to get monitor on edge: %s"),
+ LOG.exception(_LE("Failed to get monitor on edge: %s"),
e.response)
return self._restore_lb_monitor(context, edge_id, response)
edge_id, monitor_vseid, monitor_new)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to update monitor on edge: %s"),
+ LOG.exception(_LE("Failed to update monitor on edge: %s"),
edge_id)
def delete_health_monitor(self, context, id, edge_id):
self.vcns.delete_health_monitor(edge_id, monitor_vseid)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to delete monitor"))
+ LOG.exception(_LE("Failed to delete monitor"))
vcns_db.delete_vcns_edge_monitor_binding(
context.session, id, edge_id)
from eventlet import greenthread
from neutron.common import exceptions
+from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.vmware.vshield.tasks import constants
try:
func(self)
except Exception:
- msg = _("Task %(task)s encountered exception in %(func)s "
- "at state %(state)s") % {
- 'task': str(self),
- 'func': str(func),
- 'state': state}
- LOG.exception(msg)
+ LOG.exception(_LE("Task %(task)s encountered exception in "
+ "%(func)s at state %(state)s"),
+ {'task': str(self),
+ 'func': str(func),
+ 'state': state})
self._move_state(state)
def _execute(self, task):
"""Execute task."""
- msg = _("Start task %s") % str(task)
- LOG.debug(msg)
+ LOG.debug("Start task %s", str(task))
task._start()
try:
status = task._execute_callback(task)
except Exception:
- msg = _("Task %(task)s encountered exception in %(cb)s") % {
- 'task': str(task),
- 'cb': str(task._execute_callback)}
- LOG.exception(msg)
+ LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
+ {'task': str(task),
+ 'cb': str(task._execute_callback)})
status = constants.TaskStatus.ERROR
LOG.debug("Task %(task)s return %(status)s", {
try:
task._result_callback(task)
except Exception:
- msg = _("Task %(task)s encountered exception in %(cb)s") % {
- 'task': str(task),
- 'cb': str(task._result_callback)}
- LOG.exception(msg)
+ LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
+ {'task': str(task),
+ 'cb': str(task._result_callback)})
LOG.debug("Task %(task)s return %(status)s",
{'task': str(task), 'status': task.status})
try:
status = task._status_callback(task)
except Exception:
- msg = _("Task %(task)s encountered exception in %(cb)s") % {
- 'task': str(task),
- 'cb': str(task._status_callback)}
- LOG.exception(msg)
+ LOG.exception(_LE("Task %(task)s encountered exception in "
+ "%(cb)s"),
+ {'task': str(task),
+ 'cb': str(task._status_callback)})
status = constants.TaskStatus.ERROR
task._update_status(status)
if status != constants.TaskStatus.PENDING:
if self._stopped:
# Gracefully terminate this thread if the _stopped
# attribute was set to true
- LOG.info(_("Stopping TaskManager"))
+ LOG.info(_LI("Stopping TaskManager"))
break
# get a task from queue, or timeout for periodic status check
else:
self._enqueue(task)
except Exception:
- LOG.exception(_("TaskManager terminating because "
- "of an exception"))
+ LOG.exception(_LE("TaskManager terminating because "
+ "of an exception"))
break
def add(self, task):
if self._monitor_busy:
self._monitor.wait()
self._abort()
- LOG.info(_("TaskManager terminated"))
+ LOG.info(_LI("TaskManager terminated"))
def has_pending_task(self):
if self._tasks_queue or self._tasks or self._main_thread_exec_task:
try:
self._check_pending_tasks()
except Exception:
- LOG.exception(_("Exception in _check_pending_tasks"))
+ LOG.exception(_LE("Exception in _check_pending_tasks"))
self._monitor_busy = False
if self._thread is not None: