review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
Update i18n translation for VMware NSX plugin log messages
author: Gary Kotton <gkotton@vmware.com>
Thu, 20 Nov 2014 19:53:27 +0000 (11:53 -0800)
committer: Gary Kotton <gkotton@vmware.com>
Mon, 1 Dec 2014 08:49:46 +0000 (00:49 -0800)
All the existing LOG.info, LOG.warning, LOG.error and LOG.critical
messages should use the _LI, _LW, _LE and _LC translation markers
respectively. Also, debug-level log messages shouldn't be translated.
This patch set covers the vmware directory under neutron/plugins.

Change-Id: Iba83af988cb2de919b05108f145efb19e9192ae4
Partial-Bug: #1320867

29 files changed:
neutron/hacking/checks.py
neutron/plugins/vmware/api_client/base.py
neutron/plugins/vmware/api_client/client.py
neutron/plugins/vmware/api_client/eventlet_client.py
neutron/plugins/vmware/api_client/eventlet_request.py
neutron/plugins/vmware/api_client/request.py
neutron/plugins/vmware/api_client/version.py
neutron/plugins/vmware/common/nsx_utils.py
neutron/plugins/vmware/common/sync.py
neutron/plugins/vmware/common/utils.py
neutron/plugins/vmware/dbexts/db.py
neutron/plugins/vmware/dbexts/networkgw_db.py
neutron/plugins/vmware/dbexts/qos_db.py
neutron/plugins/vmware/dhcp_meta/lsnmanager.py
neutron/plugins/vmware/dhcp_meta/migration.py
neutron/plugins/vmware/dhcp_meta/nsx.py
neutron/plugins/vmware/dhcp_meta/rpc.py
neutron/plugins/vmware/dhcpmeta_modes.py
neutron/plugins/vmware/nsx_cluster.py
neutron/plugins/vmware/nsxlib/router.py
neutron/plugins/vmware/nsxlib/secgroup.py
neutron/plugins/vmware/nsxlib/switch.py
neutron/plugins/vmware/plugins/base.py
neutron/plugins/vmware/plugins/service.py
neutron/plugins/vmware/vshield/edge_appliance_driver.py
neutron/plugins/vmware/vshield/edge_firewall_driver.py
neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py
neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py
neutron/plugins/vmware/vshield/tasks/tasks.py

index 9a7599dbce33c19e5cf8c0c988b47408ab2f8bca..d45acc5be11eca46a2f227c7d69f98ad74132e72 100644 (file)
@@ -73,10 +73,11 @@ def _directory_to_check_translation(filename):
             "neutron/scheduler",
             "neutron/server",
             "neutron/services",
+            "neutron/plugins/cisco",
             "neutron/plugins/ml2",
             "neutron/plugins/openvswitch",
             "neutron/plugins/linuxbridge",
-            "neutron/plugins/cisco"]
+            "neutron/plugins/vmware"]
     return any([dir in filename for dir in dirs])
 
 
index 2042807239d93b9aab1b79d7be0561c8c3ac6445..d7e2f768d07bcbd139c8328e9096e3c05585b2c5 100644 (file)
@@ -21,6 +21,7 @@ import time
 
 from oslo.config import cfg
 
+from neutron.i18n import _LE, _LI, _LW
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware import api_client
 
@@ -100,15 +101,15 @@ class ApiClientBase(object):
                  api_providers are configured.
         '''
         if not self._api_providers:
-            LOG.warn(_("[%d] no API providers currently available."), rid)
+            LOG.warn(_LW("[%d] no API providers currently available."), rid)
             return None
         if self._conn_pool.empty():
-            LOG.debug(_("[%d] Waiting to acquire API client connection."), rid)
+            LOG.debug("[%d] Waiting to acquire API client connection.", rid)
         priority, conn = self._conn_pool.get()
         now = time.time()
         if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout:
-            LOG.info(_("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
-                       "seconds; reconnecting."),
+            LOG.info(_LI("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
+                         "seconds; reconnecting."),
                      {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
                       'sec': now - conn.last_used})
             conn = self._create_connection(*self._conn_params(conn))
@@ -116,8 +117,8 @@ class ApiClientBase(object):
         conn.last_used = now
         conn.priority = priority  # stash current priority for release
         qsize = self._conn_pool.qsize()
-        LOG.debug(_("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
-                    "connection(s) available."),
+        LOG.debug("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
+                  "connection(s) available.",
                   {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
                    'qsize': qsize})
         if auto_login and self.auth_cookie(conn) is None:
@@ -137,8 +138,8 @@ class ApiClientBase(object):
         '''
         conn_params = self._conn_params(http_conn)
         if self._conn_params(http_conn) not in self._api_providers:
-            LOG.debug(_("[%(rid)d] Released connection %(conn)s is not an "
-                        "API provider for the cluster"),
+            LOG.debug("[%(rid)d] Released connection %(conn)s is not an "
+                      "API provider for the cluster",
                       {'rid': rid,
                        'conn': api_client.ctrl_conn_to_str(http_conn)})
             return
@@ -148,8 +149,8 @@ class ApiClientBase(object):
         priority = http_conn.priority
         if bad_state:
             # Reconnect to provider.
-            LOG.warn(_("[%(rid)d] Connection returned in bad state, "
-                       "reconnecting to %(conn)s"),
+            LOG.warn(_LW("[%(rid)d] Connection returned in bad state, "
+                         "reconnecting to %(conn)s"),
                      {'rid': rid,
                       'conn': api_client.ctrl_conn_to_str(http_conn)})
             http_conn = self._create_connection(*self._conn_params(http_conn))
@@ -170,8 +171,8 @@ class ApiClientBase(object):
             self._next_conn_priority += 1
 
         self._conn_pool.put((priority, http_conn))
-        LOG.debug(_("[%(rid)d] Released connection %(conn)s. %(qsize)d "
-                    "connection(s) available."),
+        LOG.debug("[%(rid)d] Released connection %(conn)s. %(qsize)d "
+                  "connection(s) available.",
                   {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn),
                    'qsize': self._conn_pool.qsize()})
 
@@ -180,7 +181,7 @@ class ApiClientBase(object):
 
         data = self._get_provider_data(conn)
         if data is None:
-            LOG.error(_("Login request for an invalid connection: '%s'"),
+            LOG.error(_LE("Login request for an invalid connection: '%s'"),
                       api_client.ctrl_conn_to_str(conn))
             return
         provider_sem = data[0]
@@ -191,7 +192,7 @@ class ApiClientBase(object):
             finally:
                 provider_sem.release()
         else:
-            LOG.debug(_("Waiting for auth to complete"))
+            LOG.debug("Waiting for auth to complete")
             # Wait until we can acquire then release
             provider_sem.acquire(blocking=True)
             provider_sem.release()
@@ -233,7 +234,7 @@ class ApiClientBase(object):
         """
         if (not isinstance(conn_or_conn_params, tuple) and
             not isinstance(conn_or_conn_params, httplib.HTTPConnection)):
-            LOG.debug(_("Invalid conn_params value: '%s'"),
+            LOG.debug("Invalid conn_params value: '%s'",
                       str(conn_or_conn_params))
             return conn_or_conn_params
         if isinstance(conn_or_conn_params, httplib.HTTPConnection):
index 103943ada616b844ee3f7c5e981b1d8820fb217e..67ea8d2fd00b75214e4d883977576e05ce400c9a 100644 (file)
@@ -17,6 +17,7 @@
 
 import httplib
 
+from neutron.i18n import _LE
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.api_client import base
 from neutron.plugins.vmware.api_client import eventlet_client
@@ -86,7 +87,7 @@ class NsxApiClient(eventlet_client.EventletApiClient):
             retries=self._retries, redirects=self._redirects)
         g.start()
         response = g.join()
-        LOG.debug(_('Request returns "%s"'), response)
+        LOG.debug('Request returns "%s"', response)
 
         # response is a modified HTTPResponse object or None.
         # response.read() will not work on response as the underlying library
@@ -99,7 +100,7 @@ class NsxApiClient(eventlet_client.EventletApiClient):
 
         if response is None:
             # Timeout.
-            LOG.error(_('Request timed out: %(method)s to %(url)s'),
+            LOG.error(_LE('Request timed out: %(method)s to %(url)s'),
                       {'method': method, 'url': url})
             raise exception.RequestTimeout()
 
@@ -110,15 +111,15 @@ class NsxApiClient(eventlet_client.EventletApiClient):
         # Fail-fast: Check for exception conditions and raise the
         # appropriate exceptions for known error codes.
         if status in exception.ERROR_MAPPINGS:
-            LOG.error(_("Received error code: %s"), status)
-            LOG.error(_("Server Error Message: %s"), response.body)
+            LOG.error(_LE("Received error code: %s"), status)
+            LOG.error(_LE("Server Error Message: %s"), response.body)
             exception.ERROR_MAPPINGS[status](response)
 
         # Continue processing for non-error condition.
         if (status != httplib.OK and status != httplib.CREATED
                 and status != httplib.NO_CONTENT):
-            LOG.error(_("%(method)s to %(url)s, unexpected response code: "
-                        "%(status)d (content = '%(body)s')"),
+            LOG.error(_LE("%(method)s to %(url)s, unexpected response code: "
+                          "%(status)d (content = '%(body)s')"),
                       {'method': method, 'url': url,
                        'status': response.status, 'body': response.body})
             return None
@@ -134,6 +135,6 @@ class NsxApiClient(eventlet_client.EventletApiClient):
             # one of the server that responds.
             self.request('GET', '/ws.v1/control-cluster/node')
             if not self._version:
-                LOG.error(_('Unable to determine NSX version. '
-                          'Plugin might not work as expected.'))
+                LOG.error(_LE('Unable to determine NSX version. '
+                              'Plugin might not work as expected.'))
         return self._version
index fa0cd1f3eb0a6c3769729598257e3f376c72612a..0eba2fdb56bd4cb5dc0a5ae0f578eaf774c25d51 100644 (file)
@@ -20,6 +20,7 @@ import time
 import eventlet
 eventlet.monkey_patch()
 
+from neutron.i18n import _LE
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.api_client import base
 from neutron.plugins.vmware.api_client import eventlet_request
@@ -142,12 +143,12 @@ class EventletApiClient(base.ApiClientBase):
         ret = g.join()
         if ret:
             if isinstance(ret, Exception):
-                LOG.error(_('Login error "%s"'), ret)
+                LOG.error(_LE('Login error "%s"'), ret)
                 raise ret
 
             cookie = ret.getheader("Set-Cookie")
             if cookie:
-                LOG.debug(_("Saving new authentication cookie '%s'"), cookie)
+                LOG.debug("Saving new authentication cookie '%s'", cookie)
 
         return cookie
 
index 43c79c810f01fc2738aef169df42a81120bb8aa8..3402e4f9080b89657fe64f57626076ebd09df9be 100644 (file)
@@ -20,6 +20,7 @@ import urllib
 import eventlet
 from oslo.serialization import jsonutils
 
+from neutron.i18n import _LI, _LW
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.api_client import request
 
@@ -119,7 +120,7 @@ class EventletApiRequest(request.ApiRequest):
             with eventlet.timeout.Timeout(self._request_timeout, False):
                 return self._handle_request()
 
-            LOG.info(_('[%d] Request timeout.'), self._rid())
+            LOG.info(_LI('[%d] Request timeout.'), self._rid())
             self._request_error = Exception(_('Request timeout'))
             return None
         else:
@@ -146,14 +147,15 @@ class EventletApiRequest(request.ApiRequest):
                         continue
                     # else fall through to return the error code
 
-                LOG.debug(_("[%(rid)d] Completed request '%(method)s %(url)s'"
-                            ": %(status)s"),
+                LOG.debug("[%(rid)d] Completed request '%(method)s %(url)s'"
+                          ": %(status)s",
                           {'rid': self._rid(), 'method': self._method,
                            'url': self._url, 'status': req.status})
                 self._request_error = None
                 response = req
             else:
-                LOG.info(_('[%(rid)d] Error while handling request: %(req)s'),
+                LOG.info(_LI('[%(rid)d] Error while handling request: '
+                             '%(req)s'),
                          {'rid': self._rid(), 'req': req})
                 self._request_error = req
                 response = None
@@ -209,7 +211,7 @@ class GetApiProvidersRequestEventlet(EventletApiRequest):
                                 ret.append(_provider_from_listen_addr(addr))
                 return ret
         except Exception as e:
-            LOG.warn(_("[%(rid)d] Failed to parse API provider: %(e)s"),
+            LOG.warn(_LW("[%(rid)d] Failed to parse API provider: %(e)s"),
                      {'rid': self._rid(), 'e': e})
             # intentionally fall through
         return None
index 78cf81f5625e30fdda19f2471a424375ab86071c..a488d7637fc30186d0156faff2f7bbc923fb61d5 100644 (file)
@@ -25,6 +25,7 @@ from oslo.utils import excutils
 import six
 import six.moves.urllib.parse as urlparse
 
+from neutron.i18n import _LI, _LW
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware import api_client
 
@@ -86,8 +87,8 @@ class ApiRequest(object):
             return error
 
         url = self._url
-        LOG.debug(_("[%(rid)d] Issuing - request url: %(conn)s "
-                    "body: %(body)s"),
+        LOG.debug("[%(rid)d] Issuing - request url: %(conn)s "
+                  "body: %(body)s",
                   {'rid': self._rid(), 'conn': self._request_str(conn, url),
                    'body': self._body})
         issued_time = time.time()
@@ -114,22 +115,22 @@ class ApiRequest(object):
                 gen = self._api_client.config_gen
                 if gen:
                     headers["X-Nvp-Wait-For-Config-Generation"] = gen
-                    LOG.debug(_("Setting X-Nvp-Wait-For-Config-Generation "
-                                "request header: '%s'"), gen)
+                    LOG.debug("Setting X-Nvp-Wait-For-Config-Generation "
+                              "request header: '%s'", gen)
                 try:
                     conn.request(self._method, url, self._body, headers)
                 except Exception as e:
                     with excutils.save_and_reraise_exception():
-                        LOG.warn(_("[%(rid)d] Exception issuing request: "
-                                   "%(e)s"),
+                        LOG.warn(_LW("[%(rid)d] Exception issuing request: "
+                                     "%(e)s"),
                                  {'rid': self._rid(), 'e': e})
 
                 response = conn.getresponse()
                 response.body = response.read()
                 response.headers = response.getheaders()
                 elapsed_time = time.time() - issued_time
-                LOG.debug(_("[%(rid)d] Completed request '%(conn)s': "
-                            "%(status)s (%(elapsed)s seconds)"),
+                LOG.debug("[%(rid)d] Completed request '%(conn)s': "
+                          "%(status)s (%(elapsed)s seconds)",
                           {'rid': self._rid(),
                            'conn': self._request_str(conn, url),
                            'status': response.status,
@@ -137,8 +138,8 @@ class ApiRequest(object):
 
                 new_gen = response.getheader('X-Nvp-Config-Generation', None)
                 if new_gen:
-                    LOG.debug(_("Reading X-Nvp-config-Generation response "
-                                "header: '%s'"), new_gen)
+                    LOG.debug("Reading X-Nvp-config-Generation response "
+                              "header: '%s'", new_gen)
                     if (self._api_client.config_gen is None or
                         self._api_client.config_gen < int(new_gen)):
                         self._api_client.config_gen = int(new_gen)
@@ -164,8 +165,8 @@ class ApiRequest(object):
                                            httplib.TEMPORARY_REDIRECT]:
                     break
                 elif redirects >= self._redirects:
-                    LOG.info(_("[%d] Maximum redirects exceeded, aborting "
-                               "request"), self._rid())
+                    LOG.info(_LI("[%d] Maximum redirects exceeded, aborting "
+                                 "request"), self._rid())
                     break
                 redirects += 1
 
@@ -174,7 +175,7 @@ class ApiRequest(object):
                 if url is None:
                     response.status = httplib.INTERNAL_SERVER_ERROR
                     break
-                LOG.info(_("[%(rid)d] Redirecting request to: %(conn)s"),
+                LOG.info(_LI("[%(rid)d] Redirecting request to: %(conn)s"),
                          {'rid': self._rid(),
                           'conn': self._request_str(conn, url)})
                 # yield here, just in case we are not out of the loop yet
@@ -187,8 +188,8 @@ class ApiRequest(object):
             # queue.
             if (response.status == httplib.INTERNAL_SERVER_ERROR and
                 response.status > httplib.NOT_IMPLEMENTED):
-                LOG.warn(_("[%(rid)d] Request '%(method)s %(url)s' "
-                           "received: %(status)s"),
+                LOG.warn(_LW("[%(rid)d] Request '%(method)s %(url)s' "
+                             "received: %(status)s"),
                          {'rid': self._rid(), 'method': self._method,
                           'url': self._url, 'status': response.status})
                 raise Exception(_('Server error return: %s'), response.status)
@@ -200,8 +201,8 @@ class ApiRequest(object):
                 msg = unicode(e)
             if response is None:
                 elapsed_time = time.time() - issued_time
-            LOG.warn(_("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
-                       "(%(elapsed)s seconds)"),
+            LOG.warn(_LW("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
+                         "(%(elapsed)s seconds)"),
                      {'rid': self._rid(), 'conn': self._request_str(conn, url),
                       'msg': msg, 'elapsed': elapsed_time})
             self._request_error = e
@@ -234,8 +235,8 @@ class ApiRequest(object):
                 url = value
                 break
         if not url:
-            LOG.warn(_("[%d] Received redirect status without location header"
-                       " field"), self._rid())
+            LOG.warn(_LW("[%d] Received redirect status without location "
+                         "header field"), self._rid())
             return (conn, None)
         # Accept location with the following format:
         # 1. /path, redirect to same node
@@ -251,12 +252,13 @@ class ApiRequest(object):
                     url = result.path
                 return (conn, url)      # case 1
             else:
-                LOG.warn(_("[%(rid)d] Received invalid redirect location: "
-                           "'%(url)s'"), {'rid': self._rid(), 'url': url})
+                LOG.warn(_LW("[%(rid)d] Received invalid redirect location: "
+                             "'%(url)s'"), {'rid': self._rid(), 'url': url})
                 return (conn, None)     # case 3
         elif result.scheme not in ["http", "https"] or not result.hostname:
-            LOG.warn(_("[%(rid)d] Received malformed redirect "
-                       "location: %(url)s"), {'rid': self._rid(), 'url': url})
+            LOG.warn(_LW("[%(rid)d] Received malformed redirect "
+                         "location: %(url)s"),
+                     {'rid': self._rid(), 'url': url})
             return (conn, None)         # case 3
         # case 2, redirect location includes a scheme
         # so setup a new connection and authenticate
index 52fcd74b48dcc3e57a7421716a34c1aa64fd6676..189f31897d4c6c875c31de0be7ab12ec5bc4d6e0 100644 (file)
@@ -15,6 +15,7 @@
 #    under the License.
 #
 
+from neutron.i18n import _LW
 from neutron.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
@@ -27,8 +28,8 @@ def find_version(headers):
             if header_name == 'server':
                 return Version(header_value.split('/')[1])
         except IndexError:
-            LOG.warning(_("Unable to fetch NSX version from response "
-                          "headers :%s"), headers)
+            LOG.warning(_LW("Unable to fetch NSX version from response "
+                            "headers :%s"), headers)
 
 
 class Version(object):
index 267467edef39fde30f7c6eb2e7aad11d5b58414e..754e75d0fea044cafe40b7bd873a45d9f3e6a179 100644 (file)
@@ -17,6 +17,7 @@ from neutron.api.v2 import attributes as attr
 from neutron.common import exceptions as n_exc
 from neutron.extensions import multiprovidernet as mpnet
 from neutron.extensions import providernet as pnet
+from neutron.i18n import _LW
 from neutron.openstack.common import log
 from neutron.plugins.vmware.api_client import client
 from neutron.plugins.vmware.api_client import exception as api_exc
@@ -64,7 +65,7 @@ def get_nsx_switch_ids(session, cluster, neutron_network_id):
         # more than once for each network in Neutron's lifetime
         nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
         if not nsx_switches:
-            LOG.warn(_("Unable to find NSX switches for Neutron network %s"),
+            LOG.warn(_LW("Unable to find NSX switches for Neutron network %s"),
                      neutron_network_id)
             return
         nsx_switch_ids = []
@@ -111,7 +112,7 @@ def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
         # NOTE(salv-orlando): Not handling the case where more than one
         # port is found with the same neutron port tag
         if not nsx_ports:
-            LOG.warn(_("Unable to find NSX port for Neutron port %s"),
+            LOG.warn(_LW("Unable to find NSX port for Neutron port %s"),
                      neutron_port_id)
             # This method is supposed to return a tuple
             return None, None
@@ -151,12 +152,12 @@ def get_nsx_security_group_id(session, cluster, neutron_id):
         # NOTE(salv-orlando): Not handling the case where more than one
         # security profile is found with the same neutron port tag
         if not nsx_sec_profiles:
-            LOG.warn(_("Unable to find NSX security profile for Neutron "
-                       "security group %s"), neutron_id)
+            LOG.warn(_LW("Unable to find NSX security profile for Neutron "
+                         "security group %s"), neutron_id)
             return
         elif len(nsx_sec_profiles) > 1:
-            LOG.warn(_("Multiple NSX security profiles found for Neutron "
-                       "security group %s"), neutron_id)
+            LOG.warn(_LW("Multiple NSX security profiles found for Neutron "
+                         "security group %s"), neutron_id)
         nsx_sec_profile = nsx_sec_profiles[0]
         nsx_id = nsx_sec_profile['uuid']
         with session.begin(subtransactions=True):
@@ -186,7 +187,7 @@ def get_nsx_router_id(session, cluster, neutron_router_id):
         # NOTE(salv-orlando): Not handling the case where more than one
         # port is found with the same neutron port tag
         if not nsx_routers:
-            LOG.warn(_("Unable to find NSX router for Neutron router %s"),
+            LOG.warn(_LW("Unable to find NSX router for Neutron router %s"),
                      neutron_router_id)
             return
         nsx_router = nsx_routers[0]
@@ -243,11 +244,11 @@ def get_nsx_device_statuses(cluster, tenant_id):
     except api_exc.NsxApiException:
         # Do not make a NSX API exception fatal
         if tenant_id:
-            LOG.warn(_("Unable to retrieve operational status for gateway "
-                       "devices belonging to tenant: %s"), tenant_id)
+            LOG.warn(_LW("Unable to retrieve operational status for gateway "
+                         "devices belonging to tenant: %s"), tenant_id)
         else:
-            LOG.warn(_("Unable to retrieve operational status for "
-                       "gateway devices"))
+            LOG.warn(_LW("Unable to retrieve operational status for "
+                         "gateway devices"))
 
 
 def _convert_bindings_to_nsx_transport_zones(bindings):
index c7621a79b63a9a904b849338ea50ce354684da56..d5aaaf96b4445df5568338591b5673f67d2a6f62 100644 (file)
@@ -25,6 +25,7 @@ from neutron.db import external_net_db
 from neutron.db import l3_db
 from neutron.db import models_v2
 from neutron.extensions import l3
+from neutron.i18n import _LE, _LI, _LW
 from neutron.openstack.common import log
 from neutron.openstack.common import loopingcall
 from neutron.plugins.vmware.api_client import exception as api_exc
@@ -262,8 +263,8 @@ class NsxSynchronizer():
                 # TODO(salv-orlando): We should be catching
                 # api_exc.ResourceNotFound here
                 # The logical switch was not found
-                LOG.warning(_("Logical switch for neutron network %s not "
-                              "found on NSX."), neutron_network_data['id'])
+                LOG.warning(_LW("Logical switch for neutron network %s not "
+                                "found on NSX."), neutron_network_data['id'])
                 lswitches = []
             else:
                 for lswitch in lswitches:
@@ -297,8 +298,8 @@ class NsxSynchronizer():
                 pass
             else:
                 network.status = status
-                LOG.debug(_("Updating status for neutron resource %(q_id)s to:"
-                            " %(status)s"),
+                LOG.debug("Updating status for neutron resource %(q_id)s to:"
+                          " %(status)s",
                           {'q_id': neutron_network_data['id'],
                            'status': status})
 
@@ -349,8 +350,8 @@ class NsxSynchronizer():
                 # NOTE(salv-orlando): We should be catching
                 # api_exc.ResourceNotFound here
                 # The logical router was not found
-                LOG.warning(_("Logical router for neutron router %s not "
-                              "found on NSX."), neutron_router_data['id'])
+                LOG.warning(_LW("Logical router for neutron router %s not "
+                                "found on NSX."), neutron_router_data['id'])
             if lrouter:
                 # Update the cache
                 self._nsx_cache.update_lrouter(lrouter)
@@ -379,8 +380,8 @@ class NsxSynchronizer():
                 pass
             else:
                 router.status = status
-                LOG.debug(_("Updating status for neutron resource %(q_id)s to:"
-                            " %(status)s"),
+                LOG.debug("Updating status for neutron resource %(q_id)s to:"
+                          " %(status)s",
                           {'q_id': neutron_router_data['id'],
                            'status': status})
 
@@ -399,8 +400,8 @@ class NsxSynchronizer():
                 neutron_router_mappings[neutron_router_id] = (
                     self._nsx_cache[lr_uuid])
             else:
-                LOG.warn(_("Unable to find Neutron router id for "
-                           "NSX logical router: %s"), lr_uuid)
+                LOG.warn(_LW("Unable to find Neutron router id for "
+                             "NSX logical router: %s"), lr_uuid)
         # Fetch neutron routers from database
         filters = ({} if scan_missing else
                    {'id': neutron_router_mappings.keys()})
@@ -441,8 +442,8 @@ class NsxSynchronizer():
                 # api_exc.ResourceNotFound here instead
                 # of PortNotFoundOnNetwork when the id exists but
                 # the logical switch port was not found
-                LOG.warning(_("Logical switch port for neutron port %s "
-                              "not found on NSX."), neutron_port_data['id'])
+                LOG.warning(_LW("Logical switch port for neutron port %s "
+                                "not found on NSX."), neutron_port_data['id'])
                 lswitchport = None
             else:
                 # If lswitchport is not None, update the cache.
@@ -474,8 +475,8 @@ class NsxSynchronizer():
                 pass
             else:
                 port.status = status
-                LOG.debug(_("Updating status for neutron resource %(q_id)s to:"
-                            " %(status)s"),
+                LOG.debug("Updating status for neutron resource %(q_id)s to:"
+                          " %(status)s",
                           {'q_id': neutron_port_data['id'],
                            'status': status})
 
@@ -534,11 +535,11 @@ class NsxSynchronizer():
             # be emitted.
             num_requests = page_size / (MAX_PAGE_SIZE + 1) + 1
             if num_requests > 1:
-                LOG.warn(_("Requested page size is %(cur_chunk_size)d."
-                           "It might be necessary to do %(num_requests)d "
-                           "round-trips to NSX for fetching data. Please "
-                           "tune sync parameters to ensure chunk size "
-                           "is less than %(max_page_size)d"),
+                LOG.warn(_LW("Requested page size is %(cur_chunk_size)d. "
+                             "It might be necessary to do %(num_requests)d "
+                             "round-trips to NSX for fetching data. Please "
+                             "tune sync parameters to ensure chunk size "
+                             "is less than %(max_page_size)d"),
                          {'cur_chunk_size': page_size,
                           'num_requests': num_requests,
                           'max_page_size': MAX_PAGE_SIZE})
@@ -567,8 +568,8 @@ class NsxSynchronizer():
     def _fetch_nsx_data_chunk(self, sp):
         base_chunk_size = sp.chunk_size
         chunk_size = base_chunk_size + sp.extra_chunk_size
-        LOG.info(_("Fetching up to %s resources "
-                   "from NSX backend"), chunk_size)
+        LOG.info(_LI("Fetching up to %s resources "
+                     "from NSX backend"), chunk_size)
         fetched = ls_count = lr_count = lp_count = 0
         lswitches = lrouters = lswitchports = []
         if sp.ls_cursor or sp.ls_cursor == 'start':
@@ -587,13 +588,13 @@ class NsxSynchronizer():
             # No cursors were provided. Then it must be possible to
             # calculate the total amount of data to fetch
             sp.total_size = ls_count + lr_count + lp_count
-        LOG.debug(_("Total data size: %d"), sp.total_size)
+        LOG.debug("Total data size: %d", sp.total_size)
         sp.chunk_size = self._get_chunk_size(sp)
         # Calculate chunk size adjustment
         sp.extra_chunk_size = sp.chunk_size - base_chunk_size
-        LOG.debug(_("Fetched %(num_lswitches)d logical switches, "
-                    "%(num_lswitchports)d logical switch ports,"
-                    "%(num_lrouters)d logical routers"),
+        LOG.debug("Fetched %(num_lswitches)d logical switches, "
+                  "%(num_lswitchports)d logical switch ports,"
+                  "%(num_lrouters)d logical routers",
                   {'num_lswitches': len(lswitches),
                    'num_lswitchports': len(lswitchports),
                    'num_lrouters': len(lrouters)})
@@ -607,7 +608,7 @@ class NsxSynchronizer():
         # Reset page cursor variables if necessary
         if sp.current_chunk == 0:
             sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start'
-        LOG.info(_("Running state synchronization task. Chunk: %s"),
+        LOG.info(_LI("Running state synchronization task. Chunk: %s"),
                  sp.current_chunk)
         # Fetch chunk_size data from NSX
         try:
@@ -617,18 +618,18 @@ class NsxSynchronizer():
             sleep_interval = self._sync_backoff
             # Cap max back off to 64 seconds
             self._sync_backoff = min(self._sync_backoff * 2, 64)
-            LOG.exception(_("An error occurred while communicating with "
-                            "NSX backend. Will retry synchronization "
-                            "in %d seconds"), sleep_interval)
+            LOG.exception(_LE("An error occurred while communicating with "
+                              "NSX backend. Will retry synchronization "
+                              "in %d seconds"), sleep_interval)
             return sleep_interval
-        LOG.debug(_("Time elapsed querying NSX: %s"),
+        LOG.debug("Time elapsed querying NSX: %s",
                   timeutils.utcnow() - start)
         if sp.total_size:
             num_chunks = ((sp.total_size / sp.chunk_size) +
                           (sp.total_size % sp.chunk_size != 0))
         else:
             num_chunks = 1
-        LOG.debug(_("Number of chunks: %d"), num_chunks)
+        LOG.debug("Number of chunks: %d", num_chunks)
         # Find objects which have changed on NSX side and need
         # to be synchronized
         LOG.debug("Processing NSX cache for updated objects")
@@ -646,7 +647,7 @@ class NsxSynchronizer():
                 changed_only=not scan_missing)
             lp_uuids = self._nsx_cache.get_lswitchports(
                 changed_only=not scan_missing)
-        LOG.debug(_("Time elapsed hashing data: %s"),
+        LOG.debug("Time elapsed hashing data: %s",
                   timeutils.utcnow() - start)
         # Get an admin context
         ctx = context.get_admin_context()
@@ -658,8 +659,8 @@ class NsxSynchronizer():
         self._synchronize_lswitchports(ctx, lp_uuids,
                                        scan_missing=scan_missing)
         # Increase chunk counter
-        LOG.info(_("Synchronization for chunk %(chunk_num)d of "
-                   "%(total_chunks)d performed"),
+        LOG.info(_LI("Synchronization for chunk %(chunk_num)d of "
+                     "%(total_chunks)d performed"),
                  {'chunk_num': sp.current_chunk + 1,
                   'total_chunks': num_chunks})
         sp.current_chunk = (sp.current_chunk + 1) % num_chunks
@@ -670,6 +671,6 @@ class NsxSynchronizer():
                 sp.init_sync_performed = True
             # Add additional random delay
             added_delay = random.randint(0, self._max_rand_delay)
-        LOG.debug(_("Time elapsed at end of sync: %s"),
+        LOG.debug("Time elapsed at end of sync: %s",
                   timeutils.utcnow() - start)
         return self._sync_interval / num_chunks + added_delay
index fd5f2fc29566131b56a5cbccd93ca9d791feae94..49f5cf01d91176cfb3b5f353c7db161834292d21 100644 (file)
@@ -61,7 +61,7 @@ def device_id_to_vm_id(device_id, obfuscate=False):
 def check_and_truncate(display_name):
     if (attributes.is_attr_set(display_name) and
             len(display_name) > MAX_DISPLAY_NAME_LEN):
-        LOG.debug(_("Specified name:'%s' exceeds maximum length. "
-                    "It will be truncated on NSX"), display_name)
+        LOG.debug("Specified name:'%s' exceeds maximum length. "
+                  "It will be truncated on NSX", display_name)
         return display_name[:MAX_DISPLAY_NAME_LEN]
     return display_name or ''
index 55f2bb0588021a40bc5615ee22b51cebf076ff3e..4b4d774354c08f23c7eba08ee43dcd5043491d39 100644 (file)
@@ -74,7 +74,7 @@ def add_neutron_nsx_port_mapping(session, neutron_id,
             # this should not occur whilst a mapping already exists
             current = get_nsx_switch_and_port_id(session, neutron_id)
             if current[1] == nsx_port_id:
-                LOG.debug(_("Port mapping for %s already available"),
+                LOG.debug("Port mapping for %s already available",
                           neutron_id)
                 ctxt.reraise = False
     except db_exc.DBError:
@@ -121,8 +121,8 @@ def get_nsx_switch_and_port_id(session, neutron_id):
                    one())
         return mapping['nsx_switch_id'], mapping['nsx_port_id']
     except exc.NoResultFound:
-        LOG.debug(_("NSX identifiers for neutron port %s not yet "
-                    "stored in Neutron DB"), neutron_id)
+        LOG.debug("NSX identifiers for neutron port %s not yet "
+                  "stored in Neutron DB", neutron_id)
         return None, None
 
 
@@ -132,8 +132,8 @@ def get_nsx_router_id(session, neutron_id):
                    filter_by(neutron_id=neutron_id).one())
         return mapping['nsx_id']
     except exc.NoResultFound:
-        LOG.debug(_("NSX identifiers for neutron router %s not yet "
-                    "stored in Neutron DB"), neutron_id)
+        LOG.debug("NSX identifiers for neutron router %s not yet "
+                  "stored in Neutron DB", neutron_id)
 
 
 def get_nsx_security_group_id(session, neutron_id):
@@ -147,8 +147,8 @@ def get_nsx_security_group_id(session, neutron_id):
                    one())
         return mapping['nsx_id']
     except exc.NoResultFound:
-        LOG.debug(_("NSX identifiers for neutron security group %s not yet "
-                    "stored in Neutron DB"), neutron_id)
+        LOG.debug("NSX identifiers for neutron security group %s not yet "
+                  "stored in Neutron DB", neutron_id)
         return None
 
 
index d499e322330b74a6d69a02f402565c298ea55d92..a790893cccb365fec700cf0dc41d1429ee696bf8 100644 (file)
@@ -281,7 +281,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
             gw_db.devices.extend([NetworkGatewayDeviceReference(**device)
                                   for device in gw_data['devices']])
             context.session.add(gw_db)
-        LOG.debug(_("Created network gateway with id:%s"), gw_db['id'])
+        LOG.debug("Created network gateway with id:%s", gw_db['id'])
         return self._make_network_gateway_dict(gw_db)
 
     def update_network_gateway(self, context, id, network_gateway):
@@ -293,7 +293,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
             # Ensure there is something to update before doing it
             if any([gw_db[k] != gw_data[k] for k in gw_data]):
                 gw_db.update(gw_data)
-        LOG.debug(_("Updated network gateway with id:%s"), id)
+        LOG.debug("Updated network gateway with id:%s", id)
         return self._make_network_gateway_dict(gw_db)
 
     def get_network_gateway(self, context, id, fields=None):
@@ -308,7 +308,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
             if gw_db.default:
                 raise NetworkGatewayUnchangeable(gateway_id=id)
             context.session.delete(gw_db)
-        LOG.debug(_("Network gateway '%s' was destroyed."), id)
+        LOG.debug("Network gateway '%s' was destroyed.", id)
 
     def get_network_gateways(self, context, filters=None, fields=None,
                              sorts=None, limit=None, marker=None,
@@ -325,8 +325,8 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
     def connect_network(self, context, network_gateway_id,
                         network_mapping_info):
         network_id = self._validate_network_mapping_info(network_mapping_info)
-        LOG.debug(_("Connecting network '%(network_id)s' to gateway "
-                    "'%(network_gateway_id)s'"),
+        LOG.debug("Connecting network '%(network_id)s' to gateway "
+                  "'%(network_gateway_id)s'",
                   {'network_id': network_id,
                    'network_gateway_id': network_gateway_id})
         with context.session.begin(subtransactions=True):
@@ -374,8 +374,8 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
                 LOG.error(err_msg)
                 raise exceptions.InvalidInput(error_message=err_msg)
             port_id = port['id']
-            LOG.debug(_("Gateway port for '%(network_gateway_id)s' "
-                        "created on network '%(network_id)s':%(port_id)s"),
+            LOG.debug("Gateway port for '%(network_gateway_id)s' "
+                      "created on network '%(network_id)s':%(port_id)s",
                       {'network_gateway_id': network_gateway_id,
                        'network_id': network_id,
                        'port_id': port_id})
@@ -390,7 +390,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
                 self._delete_ip_allocation(context, network_id,
                                            fixed_ip['subnet_id'],
                                            fixed_ip['ip_address'])
-            LOG.debug(_("Ensured no Ip addresses are configured on port %s"),
+            LOG.debug("Ensured no Ip addresses are configured on port %s",
                       port_id)
             return {'connection_info':
                     {'network_gateway_id': network_gateway_id,
@@ -400,8 +400,8 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
     def disconnect_network(self, context, network_gateway_id,
                            network_mapping_info):
         network_id = self._validate_network_mapping_info(network_mapping_info)
-        LOG.debug(_("Disconnecting network '%(network_id)s' from gateway "
-                    "'%(network_gateway_id)s'"),
+        LOG.debug("Disconnecting network '%(network_id)s' from gateway "
+                  "'%(network_gateway_id)s'",
                   {'network_id': network_id,
                    'network_gateway_id': network_gateway_id})
         with context.session.begin(subtransactions=True):
@@ -494,7 +494,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
                 connector_ip=device_data['connector_ip'],
                 status=initial_status)
             context.session.add(device_db)
-        LOG.debug(_("Created network gateway device: %s"), device_db['id'])
+        LOG.debug("Created network gateway device: %s", device_db['id'])
         return self._make_gateway_device_dict(device_db)
 
     def update_gateway_device(self, context, gateway_device_id,
@@ -505,7 +505,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
             # Ensure there is something to update before doing it
             if any([device_db[k] != device_data[k] for k in device_data]):
                 device_db.update(device_data)
-        LOG.debug(_("Updated network gateway device: %s"),
+        LOG.debug("Updated network gateway device: %s",
                   gateway_device_id)
         return self._make_gateway_device_dict(
             device_db, include_nsx_id=include_nsx_id)
@@ -518,4 +518,4 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
                 raise GatewayDeviceInUse(device_id=device_id)
             device_db = self._get_gateway_device(context, device_id)
             context.session.delete(device_db)
-        LOG.debug(_("Deleted network gateway device: %s."), device_id)
+        LOG.debug("Deleted network gateway device: %s.", device_id)
index 80196faadb813426d7d3a74845d0e7019a58dcdb..f5f56413f317632010908d29aa194d9541176be1 100644 (file)
@@ -22,6 +22,7 @@ from neutron.api.v2 import attributes as attr
 from neutron.db import db_base_plugin_v2
 from neutron.db import model_base
 from neutron.db import models_v2
+from neutron.i18n import _LI
 from neutron.openstack.common import log
 from neutron.openstack.common import uuidutils
 from neutron.plugins.vmware.extensions import qos
@@ -292,8 +293,8 @@ class QoSDbMixin(qos.QueuePluginBase):
             if dscp:
                 # must raise because a non-zero dscp was provided
                 raise qos.QueueInvalidMarking()
-            LOG.info(_("DSCP value (%s) will be ignored with 'trusted' "
-                       "marking"), dscp)
+            LOG.info(_LI("DSCP value (%s) will be ignored with 'trusted' "
+                         "marking"), dscp)
         max = qos_queue.get('max')
         min = qos_queue.get('min')
         # Max can be None
index 9d861329da34616be5d48648c7224ec39fa30a13..df6ffc3aac32b6f7dfb3ef43e0ba54e12c5b31e4 100644 (file)
@@ -20,6 +20,7 @@ from oslo.db import exception as db_exc
 from oslo.utils import excutils
 
 from neutron.common import exceptions as n_exc
+from neutron.i18n import _LE, _LW
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import exceptions as p_exc
@@ -67,13 +68,16 @@ class LsnManager(object):
         try:
             return lsn_api.lsn_for_network_get(self.cluster, network_id)
         except (n_exc.NotFound, api_exc.NsxApiException):
-            msg = _('Unable to find Logical Service Node for network %s')
             if raise_on_err:
-                LOG.error(msg, network_id)
+                LOG.error(_LE('Unable to find Logical Service Node for '
+                              'network %s.'),
+                          network_id)
                 raise p_exc.LsnNotFound(entity='network',
                                         entity_id=network_id)
             else:
-                LOG.warn(msg, network_id)
+                LOG.warn(_LW('Unable to find Logical Service Node for '
+                             'the requested network %s.'),
+                         network_id)
 
     def lsn_create(self, context, network_id):
         """Create a LSN associated to the network."""
@@ -88,7 +92,7 @@ class LsnManager(object):
         try:
             lsn_api.lsn_delete(self.cluster, lsn_id)
         except (n_exc.NotFound, api_exc.NsxApiException):
-            LOG.warn(_('Unable to delete Logical Service Node %s'), lsn_id)
+            LOG.warn(_LW('Unable to delete Logical Service Node %s'), lsn_id)
 
     def lsn_delete_by_network(self, context, network_id):
         """Delete a LSN associated to the network."""
@@ -104,15 +108,19 @@ class LsnManager(object):
                 lsn_port_id = lsn_api.lsn_port_by_subnet_get(
                     self.cluster, lsn_id, subnet_id)
             except (n_exc.NotFound, api_exc.NsxApiException):
-                msg = _('Unable to find Logical Service Node Port for '
-                        'LSN %(lsn_id)s and subnet %(subnet_id)s')
                 if raise_on_err:
-                    LOG.error(msg, {'lsn_id': lsn_id, 'subnet_id': subnet_id})
+                    LOG.error(_LE('Unable to find Logical Service Node Port '
+                                  'for LSN %(lsn_id)s and subnet '
+                                  '%(subnet_id)s'),
+                              {'lsn_id': lsn_id, 'subnet_id': subnet_id})
                     raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
                                                 entity='subnet',
                                                 entity_id=subnet_id)
                 else:
-                    LOG.warn(msg, {'lsn_id': lsn_id, 'subnet_id': subnet_id})
+                    LOG.warn(_LW('Unable to find Logical Service Node Port '
+                                 'for LSN %(lsn_id)s and subnet '
+                                 '%(subnet_id)s'),
+                             {'lsn_id': lsn_id, 'subnet_id': subnet_id})
                 return (lsn_id, None)
             else:
                 return (lsn_id, lsn_port_id)
@@ -127,15 +135,19 @@ class LsnManager(object):
                 lsn_port_id = lsn_api.lsn_port_by_mac_get(
                     self.cluster, lsn_id, mac)
             except (n_exc.NotFound, api_exc.NsxApiException):
-                msg = _('Unable to find Logical Service Node Port for '
-                        'LSN %(lsn_id)s and mac address %(mac)s')
                 if raise_on_err:
-                    LOG.error(msg, {'lsn_id': lsn_id, 'mac': mac})
+                    LOG.error(_LE('Unable to find Logical Service Node Port '
+                                  'for LSN %(lsn_id)s and mac address '
+                                  '%(mac)s'),
+                              {'lsn_id': lsn_id, 'mac': mac})
                     raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
                                                 entity='MAC',
                                                 entity_id=mac)
                 else:
-                    LOG.warn(msg, {'lsn_id': lsn_id, 'mac': mac})
+                    LOG.warn(_LW('Unable to find Logical Service Node '
+                                 'Port for LSN %(lsn_id)s and mac address '
+                                 '%(mac)s'),
+                             {'lsn_id': lsn_id, 'mac': mac})
                 return (lsn_id, None)
             else:
                 return (lsn_id, lsn_port_id)
@@ -157,7 +169,7 @@ class LsnManager(object):
         try:
             lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
         except (n_exc.NotFound, api_exc.NsxApiException):
-            LOG.warn(_('Unable to delete LSN Port %s'), lsn_port_id)
+            LOG.warn(_LW('Unable to delete LSN Port %s'), lsn_port_id)
 
     def lsn_port_dispose(self, context, network_id, mac_address):
         """Delete a LSN port given the network and the mac address."""
@@ -174,11 +186,11 @@ class LsnManager(object):
                         self.cluster, network_id, lswitch_port_id)
                 except (n_exc.PortNotFoundOnNetwork,
                         api_exc.NsxApiException):
-                    LOG.warn(_("Metadata port not found while attempting "
-                               "to delete it from network %s"), network_id)
+                    LOG.warn(_LW("Metadata port not found while attempting "
+                                 "to delete it from network %s"), network_id)
         else:
-            LOG.warn(_("Unable to find Logical Services Node "
-                       "Port with MAC %s"), mac_address)
+            LOG.warn(_LW("Unable to find Logical Services Node "
+                         "Port with MAC %s"), mac_address)
 
     def lsn_port_dhcp_setup(
         self, context, network_id, port_id, port_data, subnet_config=None):
@@ -305,8 +317,8 @@ class LsnManager(object):
             if lsn_id and lsn_port_id:
                 hdlr(self.cluster, lsn_id, lsn_port_id, data)
         except (n_exc.NotFound, api_exc.NsxApiException):
-            LOG.error(_('Error while configuring LSN '
-                        'port %s'), lsn_port_id)
+            LOG.error(_LE('Error while configuring LSN '
+                          'port %s'), lsn_port_id)
             raise p_exc.PortConfigurationError(
                 net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id)
 
index 0f1b32b771c7268bed20db3e40667fe07c5da7ee..ea6fa7b15cd0b4dd3a11cf79f1284b656fdb279e 100644 (file)
@@ -18,6 +18,7 @@
 from neutron.common import constants as const
 from neutron.common import exceptions as n_exc
 from neutron.extensions import external_net
+from neutron.i18n import _LE
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.common import exceptions as p_exc
 from neutron.plugins.vmware.dhcp_meta import nsx
@@ -78,7 +79,7 @@ class DhcpMetadataBuilder(object):
             try:
                 self.plugin.delete_port(context, port['id'])
             except n_exc.PortNotFound:
-                LOG.error(_('Port %s is already gone'), port['id'])
+                LOG.error(_LE('Port %s is already gone'), port['id'])
 
     def dhcp_allocate(self, context, network_id, subnet):
         """Allocate dhcp resources for the subnet."""
index b26960b470dad390875d4d9347c3bf2439f35af8..817a1e163de96acb0ad81e8748de428254c6293a 100644 (file)
@@ -24,6 +24,7 @@ from neutron.common import exceptions as n_exc
 from neutron.db import db_base_plugin_v2
 from neutron.db import l3_db
 from neutron.extensions import external_net
+from neutron.i18n import _LE, _LI
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.common import exceptions as p_exc
 from neutron.plugins.vmware.dhcp_meta import constants as d_const
@@ -133,12 +134,11 @@ class DhcpAgentNotifyAPI(object):
                 # down below as well as handle_port_metadata_access
                 self.plugin.create_port(context, {'port': dhcp_port})
             except p_exc.PortConfigurationError as e:
-                err_msg = (_("Error while creating subnet %(cidr)s for "
-                             "network %(network)s. Please, contact "
-                             "administrator") %
-                           {"cidr": subnet["cidr"],
-                            "network": network_id})
-                LOG.error(err_msg)
+                LOG.error(_LE("Error while creating subnet %(cidr)s for "
+                              "network %(network)s. Please, contact "
+                              "administrator"),
+                          {"cidr": subnet["cidr"],
+                           "network": network_id})
                 db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                     self.plugin, context, e.port_id)
                 if clean_on_err:
@@ -203,12 +203,13 @@ def check_services_requirements(cluster):
 
 
 def handle_network_dhcp_access(plugin, context, network, action):
-    LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s")
-             {"action": action, "resource": network})
+    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
+             {"action": action, "resource": network})
     if action == 'create_network':
         network_id = network['id']
         if network.get(external_net.EXTERNAL):
-            LOG.info(_("Network %s is external: no LSN to create"), network_id)
+            LOG.info(_LI("Network %s is external: no LSN to create"),
+                     network_id)
             return
         plugin.lsn_manager.lsn_create(context, network_id)
     elif action == 'delete_network':
@@ -216,13 +217,13 @@ def handle_network_dhcp_access(plugin, context, network, action):
         # is just the network id
         network_id = network
         plugin.lsn_manager.lsn_delete_by_network(context, network_id)
-    LOG.info(_("Logical Services Node for network "
-               "%s configured successfully"), network_id)
+    LOG.info(_LI("Logical Services Node for network "
+                 "%s configured successfully"), network_id)
 
 
 def handle_port_dhcp_access(plugin, context, port, action):
-    LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s")
-             {"action": action, "resource": port})
+    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
+             {"action": action, "resource": port})
     if port["device_owner"] == const.DEVICE_OWNER_DHCP:
         network_id = port["network_id"]
         if action == "create_port":
@@ -238,9 +239,8 @@ def handle_port_dhcp_access(plugin, context, port, action):
                 plugin.lsn_manager.lsn_port_dhcp_setup(
                     context, network_id, port['id'], subnet_data, subnet)
             except p_exc.PortConfigurationError:
-                err_msg = (_("Error while configuring DHCP for "
-                             "port %s"), port['id'])
-                LOG.error(err_msg)
+                LOG.error(_LE("Error while configuring DHCP for "
+                              "port %s"), port['id'])
                 raise n_exc.NeutronException()
         elif action == "delete_port":
             plugin.lsn_manager.lsn_port_dispose(context, network_id,
@@ -250,8 +250,8 @@ def handle_port_dhcp_access(plugin, context, port, action):
             # do something only if there are IP's and dhcp is enabled
             subnet_id = port["fixed_ips"][0]['subnet_id']
             if not plugin.get_subnet(context, subnet_id)['enable_dhcp']:
-                LOG.info(_("DHCP is disabled for subnet %s: nothing "
-                           "to do"), subnet_id)
+                LOG.info(_LI("DHCP is disabled for subnet %s: nothing "
+                             "to do"), subnet_id)
                 return
             host_data = {
                 "mac_address": port["mac_address"],
@@ -269,7 +269,7 @@ def handle_port_dhcp_access(plugin, context, port, action):
                     if action == 'create_port':
                         db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                             plugin, context, port['id'])
-    LOG.info(_("DHCP for port %s configured successfully"), port['id'])
+    LOG.info(_LI("DHCP for port %s configured successfully"), port['id'])
 
 
 def handle_port_metadata_access(plugin, context, port, is_delete=False):
@@ -277,7 +277,8 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
         network_id = port["network_id"]
         network = plugin.get_network(context, network_id)
         if network[external_net.EXTERNAL]:
-            LOG.info(_("Network %s is external: nothing to do"), network_id)
+            LOG.info(_LI("Network %s is external: nothing to do"),
+                     network_id)
             return
         subnet_id = port["fixed_ips"][0]['subnet_id']
         host_data = {
@@ -285,7 +286,7 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
             "tenant_id": port["tenant_id"],
             "ip_address": port["fixed_ips"][0]['ip_address']
         }
-        LOG.info(_("Configuring metadata entry for port %s"), port)
+        LOG.info(_LI("Configuring metadata entry for port %s"), port)
         if not is_delete:
             handler = plugin.lsn_manager.lsn_port_meta_host_add
         else:
@@ -297,12 +298,13 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
                 if not is_delete:
                     db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                         plugin, context, port['id'])
-        LOG.info(_("Metadata for port %s configured successfully"), port['id'])
+        LOG.info(_LI("Metadata for port %s configured successfully"),
+                 port['id'])
 
 
 def handle_router_metadata_access(plugin, context, router_id, interface=None):
-    LOG.info(_("Handle metadata access via router: %(r)s and "
-               "interface %(i)s") % {'r': router_id, 'i': interface})
+    LOG.info(_LI("Handle metadata access via router: %(r)s and "
+                 "interface %(i)s"), {'r': router_id, 'i': interface})
     if interface:
         try:
             plugin.get_port(context, interface['port_id'])
@@ -318,4 +320,4 @@ def handle_router_metadata_access(plugin, context, router_id, interface=None):
                 if is_enabled:
                     l3_db.L3_NAT_db_mixin.remove_router_interface(
                         plugin, context, router_id, interface)
-    LOG.info(_("Metadata for router %s handled successfully"), router_id)
+    LOG.info(_LI("Metadata for router %s handled successfully"), router_id)
index ba68cef000115fc1f7756cfa9265ce18a3925478..dec1a17c7d0adaf9d961ca6b1948d4dc9f69f5da 100644 (file)
@@ -25,6 +25,7 @@ from neutron.common import exceptions as ntn_exc
 from neutron.db import db_base_plugin_v2
 from neutron.db import l3_db
 from neutron.db import models_v2
+from neutron.i18n import _LE, _LI, _LW
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import config
@@ -64,8 +65,9 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
             # route. This is done via the enable_isolated_metadata
             # option if desired.
             if not subnet.get('gateway_ip'):
-                LOG.info(_('Subnet %s does not have a gateway, the metadata '
-                           'route will not be created'), subnet['id'])
+                LOG.info(_LI('Subnet %s does not have a gateway, the '
+                             'metadata route will not be created'),
+                         subnet['id'])
                 return
             metadata_routes = [r for r in subnet.routes
                                if r['destination'] == METADATA_DHCP_ROUTE]
@@ -88,11 +90,11 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
 
 def handle_router_metadata_access(plugin, context, router_id, interface=None):
     if cfg.CONF.NSX.metadata_mode != config.MetadataModes.DIRECT:
-        LOG.debug(_("Metadata access network is disabled"))
+        LOG.debug("Metadata access network is disabled")
         return
     if not cfg.CONF.allow_overlapping_ips:
-        LOG.warn(_("Overlapping IPs must be enabled in order to setup "
-                   "the metadata access network"))
+        LOG.warn(_LW("Overlapping IPs must be enabled in order to setup "
+                     "the metadata access network"))
         return
     ctx_elevated = context.elevated()
     device_filter = {'device_id': [router_id],
@@ -111,16 +113,16 @@ def handle_router_metadata_access(plugin, context, router_id, interface=None):
                 _destroy_metadata_access_network(
                     plugin, ctx_elevated, router_id, ports)
         else:
-            LOG.debug(_("No router interface found for router '%s'. "
-                        "No metadata access network should be "
-                        "created or destroyed"), router_id)
+            LOG.debug("No router interface found for router '%s'. "
+                      "No metadata access network should be "
+                      "created or destroyed", router_id)
     # TODO(salvatore-orlando): A better exception handling in the
     # NSX plugin would allow us to improve error handling here
     except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,
             api_exc.NsxApiException):
         # Any exception here should be regarded as non-fatal
-        LOG.exception(_("An error occurred while operating on the "
-                        "metadata access network for router:'%s'"),
+        LOG.exception(_LE("An error occurred while operating on the "
+                          "metadata access network for router:'%s'"),
                       router_id)
 
 
index addc7187850bfd931f07bfab876bf6870ec35e98..78820235b829ebb2ab354a253c6d0b373dfc3d6d 100644 (file)
@@ -25,6 +25,7 @@ from neutron.common import constants as const
 from neutron.common import rpc as n_rpc
 from neutron.common import topics
 from neutron.db import agents_db
+from neutron.i18n import _LW
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.common import config
 from neutron.plugins.vmware.common import exceptions as nsx_exc
@@ -106,7 +107,7 @@ class DhcpMetadataAccess(object):
             # This becomes ineffective, as all new networks creations
             # are handled by Logical Services Nodes in NSX
             cfg.CONF.set_override('network_auto_schedule', False)
-            LOG.warn(_('network_auto_schedule has been disabled'))
+            LOG.warn(_LW('network_auto_schedule has been disabled'))
             notifier = combined.DhcpAgentNotifyAPI(self.safe_reference,
                                                    lsn_manager)
             self.supported_extension_aliases.append(lsn.EXT_ALIAS)
index 1c564385de09880a92ec2f1179d720194531db7e..a4ce61cddb6c96ab363e6556c1d5be07d304c5a8 100644 (file)
@@ -15,6 +15,7 @@
 
 from oslo.config import cfg
 
+from neutron.i18n import _LI
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.common import exceptions
 
@@ -58,8 +59,8 @@ class NSXCluster(object):
             raise exceptions.InvalidClusterConfiguration(
                 invalid_attrs=self._required_attributes)
         if self._important_attributes:
-            LOG.info(_("The following cluster attributes were "
-                       "not specified: %s'"), self._important_attributes)
+            LOG.info(_LI("The following cluster attributes were "
+                         "not specified: %s"), self._important_attributes)
         # The API client will be explicitly created by users of this class
         self.api_client = None
 
index 95a29ae5205e80702ac8172ddfc016d826eaba1a..1b26dfc3679dcf4fc87a3339752424b882b2d2de 100644 (file)
@@ -18,6 +18,7 @@ from oslo.serialization import jsonutils
 from oslo.utils import excutils
 
 from neutron.common import exceptions as exception
+from neutron.i18n import _LE, _LI, _LW
 from neutron.openstack.common import log
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import exceptions as nsx_exc
@@ -253,8 +254,8 @@ def update_explicit_routes_lrouter(cluster, router_id, routes):
                                                      router_id, route)
                 added_routes.append(uuid)
     except api_exc.NsxApiException:
-        LOG.exception(_('Cannot update NSX routes %(routes)s for '
-                        'router %(router_id)s'),
+        LOG.exception(_LE('Cannot update NSX routes %(routes)s for '
+                          'router %(router_id)s'),
                       {'routes': routes, 'router_id': router_id})
         # Roll back to keep NSX in consistent state
         with excutils.save_and_reraise_exception():
@@ -347,8 +348,8 @@ def create_router_lport(cluster, lrouter_uuid, tenant_id, neutron_port_id,
     result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
                                cluster=cluster)
 
-    LOG.debug(_("Created logical port %(lport_uuid)s on "
-                "logical router %(lrouter_uuid)s"),
+    LOG.debug("Created logical port %(lport_uuid)s on "
+              "logical router %(lrouter_uuid)s",
               {'lport_uuid': result['uuid'],
                'lrouter_uuid': lrouter_uuid})
     return result
@@ -375,8 +376,8 @@ def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid,
     result = nsxlib.do_request(HTTP_PUT, path,
                                jsonutils.dumps(lport_obj),
                                cluster=cluster)
-    LOG.debug(_("Updated logical port %(lport_uuid)s on "
-                "logical router %(lrouter_uuid)s"),
+    LOG.debug("Updated logical port %(lport_uuid)s on "
+              "logical router %(lrouter_uuid)s",
               {'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid})
     return result
 
@@ -386,8 +387,8 @@ def delete_router_lport(cluster, lrouter_uuid, lport_uuid):
     path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_uuid,
                                   lrouter_uuid)
     nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
-    LOG.debug(_("Delete logical router port %(lport_uuid)s on "
-                "logical router %(lrouter_uuid)s"),
+    LOG.debug("Delete logical router port %(lport_uuid)s on "
+              "logical router %(lrouter_uuid)s",
               {'lport_uuid': lport_uuid,
                'lrouter_uuid': lrouter_uuid})
 
@@ -456,7 +457,7 @@ def _create_nat_match_obj(**kwargs):
 
 
 def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj):
-    LOG.debug(_("Creating NAT rule: %s"), nat_rule_obj)
+    LOG.debug("Creating NAT rule: %s", nat_rule_obj)
     uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
                                  parent_resource_id=router_id)
     return nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(nat_rule_obj),
@@ -471,13 +472,13 @@ def _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj):
 
 
 def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None):
-    LOG.info(_("No SNAT rules cannot be applied as they are not available in "
-               "this version of the NSX platform"))
+    LOG.info(_LI("No SNAT rules cannot be applied as they are not available "
+                 "in this version of the NSX platform"))
 
 
 def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None):
-    LOG.info(_("No DNAT rules cannot be applied as they are not available in "
-               "this version of the NSX platform"))
+    LOG.info(_LI("No DNAT rules cannot be applied as they are not available "
+                 "in this version of the NSX platform"))
 
 
 def create_lrouter_snat_rule_v2(cluster, router_id,
@@ -577,9 +578,9 @@ def delete_nat_rules_by_match(cluster, router_id, rule_type,
                                           min_rules=min_num_expected,
                                           max_rules=max_num_expected)
         else:
-            LOG.warn(_("Found %(actual_rule_num)d matching NAT rules, which "
-                       "is not in the expected range (%(min_exp_rule_num)d,"
-                       "%(max_exp_rule_num)d)"),
+            LOG.warn(_LW("Found %(actual_rule_num)d matching NAT rules, which "
+                         "is not in the expected range (%(min_exp_rule_num)d,"
+                         "%(max_exp_rule_num)d)"),
                      {'actual_rule_num': num_rules_to_delete,
                       'min_exp_rule_num': min_num_expected,
                       'max_exp_rule_num': max_num_expected})
index 91e4f5631c81dc32c34f6c6c8a66758ca8a908cf..5c0dd5c92db441715fbe6a47111f5ae973d7603f 100644 (file)
@@ -18,6 +18,7 @@ from oslo.utils import excutils
 
 from neutron.common import constants
 from neutron.common import exceptions
+from neutron.i18n import _LW
 from neutron.openstack.common import log
 from neutron.plugins.vmware.common import utils
 from neutron.plugins.vmware import nsxlib
@@ -92,7 +93,7 @@ def create_security_profile(cluster, tenant_id, neutron_id, security_profile):
                                                 {'ethertype': 'IPv6'}]}
 
         update_security_group_rules(cluster, rsp['uuid'], rules)
-    LOG.debug(_("Created Security Profile: %s"), rsp)
+    LOG.debug("Created Security Profile: %s", rsp)
     return rsp
 
 
@@ -118,7 +119,7 @@ def update_security_group_rules(cluster, spid, rules):
         LOG.error(nsxlib.format_exception("Unknown", e, locals()))
         #FIXME(salvatore-orlando): This should not raise NeutronException
         raise exceptions.NeutronException()
-    LOG.debug(_("Updated Security Profile: %s"), rsp)
+    LOG.debug("Updated Security Profile: %s", rsp)
     return rsp
 
 
@@ -138,5 +139,5 @@ def delete_security_profile(cluster, spid):
     except exceptions.NotFound:
         with excutils.save_and_reraise_exception():
             # This is not necessarily an error condition
-            LOG.warn(_("Unable to find security profile %s on NSX backend"),
+            LOG.warn(_LW("Unable to find security profile %s on NSX backend"),
                      spid)
index adcb6f802cf631d3950412672034b556d93b42e5..2a0e6924567ec82c5fa4b4c621cd98b315e333bd 100644 (file)
@@ -19,6 +19,7 @@ from oslo.serialization import jsonutils
 
 from neutron.common import constants
 from neutron.common import exceptions as exception
+from neutron.i18n import _LE, _LI, _LW
 from neutron.openstack.common import log
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import exceptions as nsx_exc
@@ -127,7 +128,7 @@ def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
     uri = nsxlib._build_uri_path(LSWITCH_RESOURCE)
     lswitch = nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(lswitch_obj),
                                 cluster=cluster)
-    LOG.debug(_("Created logical switch: %s"), lswitch['uuid'])
+    LOG.debug("Created logical switch: %s", lswitch['uuid'])
     return lswitch
 
 
@@ -147,7 +148,7 @@ def update_lswitch(cluster, lswitch_id, display_name,
         return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(lswitch_obj),
                                  cluster=cluster)
     except exception.NotFound as e:
-        LOG.error(_("Network not found, Error: %s"), str(e))
+        LOG.error(_LE("Network not found, Error: %s"), str(e))
         raise exception.NetworkNotFound(net_id=lswitch_id)
 
 
@@ -162,7 +163,7 @@ def delete_networks(cluster, net_id, lswitch_ids):
         try:
             nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
         except exception.NotFound as e:
-            LOG.error(_("Network not found, Error: %s"), str(e))
+            LOG.error(_LE("Network not found, Error: %s"), str(e))
             raise exception.NetworkNotFound(net_id=ls_id)
 
 
@@ -185,7 +186,7 @@ def delete_port(cluster, switch, port):
     try:
         nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
     except exception.NotFound:
-        LOG.exception(_("Port or Network not found"))
+        LOG.exception(_LE("Port or Network not found"))
         raise exception.PortNotFoundOnNetwork(
             net_id=switch, port_id=port)
     except api_exc.NsxApiException:
@@ -244,7 +245,7 @@ def get_ports(cluster, networks=None, devices=None, tenants=None):
             if not ports:
                 ports = nsxlib.get_all_query_pages(lport_query_path, cluster)
         except exception.NotFound:
-            LOG.warn(_("Lswitch %s not found in NSX"), lswitch)
+            LOG.warn(_LW("Lswitch %s not found in NSX"), lswitch)
             ports = None
 
         if ports:
@@ -270,24 +271,24 @@ def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
                                  fields='uuid',
                                  filters={'tag': neutron_port_id,
                                           'tag_scope': 'q_port_id'})
-    LOG.debug(_("Looking for port with q_port_id tag '%(neutron_port_id)s' "
-                "on: '%(lswitch_uuid)s'"),
+    LOG.debug("Looking for port with q_port_id tag '%(neutron_port_id)s' "
+              "on: '%(lswitch_uuid)s'",
               {'neutron_port_id': neutron_port_id,
                'lswitch_uuid': lswitch_uuid})
     res = nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
     num_results = len(res["results"])
     if num_results >= 1:
         if num_results > 1:
-            LOG.warn(_("Found '%(num_ports)d' ports with "
-                       "q_port_id tag: '%(neutron_port_id)s'. "
-                       "Only 1 was expected."),
+            LOG.warn(_LW("Found '%(num_ports)d' ports with "
+                         "q_port_id tag: '%(neutron_port_id)s'. "
+                         "Only 1 was expected."),
                      {'num_ports': num_results,
                       'neutron_port_id': neutron_port_id})
         return res["results"][0]
 
 
 def get_port(cluster, network, port, relations=None):
-    LOG.info(_("get_port() %(network)s %(port)s"),
+    LOG.info(_LI("get_port() %(network)s %(port)s"),
              {'network': network, 'port': port})
     uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
     if relations:
@@ -295,7 +296,7 @@ def get_port(cluster, network, port, relations=None):
     try:
         return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
     except exception.NotFound as e:
-        LOG.error(_("Port or Network not found, Error: %s"), str(e))
+        LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
         raise exception.PortNotFoundOnNetwork(
             port_id=port, net_id=network)
 
@@ -321,12 +322,12 @@ def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
     try:
         result = nsxlib.do_request(HTTP_PUT, path, jsonutils.dumps(lport_obj),
                                    cluster=cluster)
-        LOG.debug(_("Updated logical port %(result)s "
-                    "on logical switch %(uuid)s"),
+        LOG.debug("Updated logical port %(result)s "
+                  "on logical switch %(uuid)s",
                   {'result': result['uuid'], 'uuid': lswitch_uuid})
         return result
     except exception.NotFound as e:
-        LOG.error(_("Port or Network not found, Error: %s"), str(e))
+        LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
         raise exception.PortNotFoundOnNetwork(
             port_id=lport_uuid, net_id=lswitch_uuid)
 
@@ -356,7 +357,7 @@ def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
     result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
                                cluster=cluster)
 
-    LOG.debug(_("Created logical port %(result)s on logical switch %(uuid)s"),
+    LOG.debug("Created logical port %(result)s on logical switch %(uuid)s",
               {'result': result['uuid'], 'uuid': lswitch_uuid})
     return result
 
@@ -368,7 +369,7 @@ def get_port_status(cluster, lswitch_id, port_id):
                               "/ws.v1/lswitch/%s/lport/%s/status" %
                               (lswitch_id, port_id), cluster=cluster)
     except exception.NotFound as e:
-        LOG.error(_("Port not found, Error: %s"), str(e))
+        LOG.error(_LE("Port not found, Error: %s"), str(e))
         raise exception.PortNotFoundOnNetwork(
             port_id=port_id, net_id=lswitch_id)
     if r['link_status_up'] is True:
index c4528309ca5b35d34fc5348cc54bb023ff149024..2fe3f6a00a75ff2afcf1678062935da4b44f3535 100644 (file)
@@ -51,7 +51,7 @@ from neutron.extensions import portbindings as pbin
 from neutron.extensions import portsecurity as psec
 from neutron.extensions import providernet as pnet
 from neutron.extensions import securitygroup as ext_sg
-from neutron.i18n import _LE
+from neutron.i18n import _LE, _LI, _LW
 from neutron.openstack.common import lockutils
 from neutron.openstack.common import log as logging
 from neutron.plugins.common import constants as plugin_const
@@ -209,7 +209,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             self._is_default_net_gw_in_sync = True
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Unable to process default l2 gw service:%s"),
+                LOG.exception(_LE("Unable to process default l2 gw service: "
+                                  "%s"),
                               def_l2_gw_uuid)
 
     def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None):
@@ -246,7 +247,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 port_data.get('mac_address'))
             LOG.debug("Created NSX router port:%s", lrouter_port['uuid'])
         except api_exc.NsxApiException:
-            LOG.exception(_("Unable to create port on NSX logical router %s"),
+            LOG.exception(_LE("Unable to create port on NSX logical router "
+                              "%s"),
                           nsx_router_id)
             raise nsx_exc.NsxPluginException(
                 err_msg=_("Unable to create logical router port for neutron "
@@ -334,9 +336,9 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             # Must remove NSX logical port
             routerlib.delete_router_lport(cluster, nsx_router_id,
                                           nsx_router_port_id)
-            LOG.exception(_("Unable to plug attachment in NSX logical "
-                            "router port %(r_port_id)s, associated with "
-                            "Neutron %(q_port_id)s"),
+            LOG.exception(_LE("Unable to plug attachment in NSX logical "
+                              "router port %(r_port_id)s, associated with "
+                              "Neutron %(q_port_id)s"),
                           {'r_port_id': nsx_router_port_id,
                            'q_port_id': port_data.get('id')})
             raise nsx_exc.NsxPluginException(
@@ -426,9 +428,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             # rollback the neutron-nsx port mapping
             nsx_db.delete_neutron_nsx_port_mapping(context.session,
                                                    port_id)
-            msg = (_("An exception occurred while creating the "
-                     "neutron port %s on the NSX plaform") % port_id)
-            LOG.exception(msg)
+            LOG.exception(_LE("An exception occurred while creating the "
+                              "neutron port %s on the NSX platform"), port_id)
 
     def _nsx_create_port(self, context, port_data):
         """Driver for creating a logical switch port on NSX platform."""
@@ -438,8 +439,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         # However, in order to not break unit tests, we need to still create
         # the DB object and return success
         if self._network_is_external(context, port_data['network_id']):
-            LOG.info(_("NSX plugin does not support regular VIF ports on "
-                       "external networks. Port %s will be down."),
+            LOG.info(_LI("NSX plugin does not support regular VIF ports on "
+                         "external networks. Port %s will be down."),
                      port_data['network_id'])
             # No need to actually update the DB state - the default is down
             return port_data
@@ -470,12 +471,12 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         except db_exc.DBError as e:
             if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and
                 isinstance(e.inner_exception, sql_exc.IntegrityError)):
-                msg = (_("Concurrent network deletion detected; Back-end Port "
-                         "%(nsx_id)s creation to be rolled back for Neutron "
-                         "port: %(neutron_id)s")
-                       % {'nsx_id': lport['uuid'],
-                          'neutron_id': port_data['id']})
-                LOG.warning(msg)
+                LOG.warning(
+                    _LW("Concurrent network deletion detected; Back-end "
+                        "Port %(nsx_id)s creation to be rolled back for "
+                        "Neutron port: %(neutron_id)s"),
+                    {'nsx_id': lport['uuid'],
+                     'neutron_id': port_data['id']})
                 if selected_lswitch and lport:
                     try:
                         switchlib.delete_port(self.cluster,
@@ -490,8 +491,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         # does not make sense. However we cannot raise as this would break
         # unit tests.
         if self._network_is_external(context, port_data['network_id']):
-            LOG.info(_("NSX plugin does not support regular VIF ports on "
-                       "external networks. Port %s will be down."),
+            LOG.info(_LI("NSX plugin does not support regular VIF ports on "
+                         "external networks. Port %s will be down."),
                      port_data['network_id'])
             return
         nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
@@ -509,7 +510,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                       {'port_id': port_data['id'],
                        'net_id': port_data['network_id']})
         except n_exc.NotFound:
-            LOG.warning(_("Port %s not found in NSX"), port_data['id'])
+            LOG.warning(_LW("Port %s not found in NSX"), port_data['id'])
 
     def _nsx_delete_router_port(self, context, port_data):
         # Delete logical router port
@@ -518,11 +519,12 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
             context.session, self.cluster, port_data['id'])
         if not nsx_port_id:
-            LOG.warn(_("Neutron port %(port_id)s not found on NSX backend. "
-                       "Terminating delete operation. A dangling router port "
-                       "might have been left on router %(router_id)s"),
-                     {'port_id': port_data['id'],
-                      'router_id': nsx_router_id})
+            LOG.warn(
+                _LW("Neutron port %(port_id)s not found on NSX backend. "
+                    "Terminating delete operation. A dangling router port "
+                    "might have been left on router %(router_id)s"),
+                {'port_id': port_data['id'],
+                 'router_id': nsx_router_id})
             return
         try:
             routerlib.delete_peer_router_lport(self.cluster,
@@ -533,8 +535,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             # Do not raise because the issue might as well be that the
             # router has already been deleted, so there would be nothing
             # to do here
-            LOG.exception(_("Ignoring exception as this means the peer "
-                            "for port '%s' has already been deleted."),
+            LOG.exception(_LE("Ignoring exception as this means the peer "
+                              "for port '%s' has already been deleted."),
                           nsx_port_id)
 
         # Delete logical switch port
@@ -688,8 +690,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         # However, in order to not break unit tests, we need to still create
         # the DB object and return success
         if self._network_is_external(context, port_data['network_id']):
-            LOG.info(_("NSX plugin does not support regular VIF ports on "
-                       "external networks. Port %s will be down."),
+            LOG.info(_LI("NSX plugin does not support regular VIF ports on "
+                         "external networks. Port %s will be down."),
                      port_data['network_id'])
             # No need to actually update the DB state - the default is down
             return port_data
@@ -887,8 +889,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 context.session, network.id, selected_lswitch['uuid'])
             return selected_lswitch
         else:
-            LOG.error(_("Maximum number of logical ports reached for "
-                        "logical network %s"), network.id)
+            LOG.error(_LE("Maximum number of logical ports reached for "
+                          "logical network %s"), network.id)
             raise nsx_exc.NoMorePortsException(network=network.id)
 
     def _convert_to_nsx_transport_zones(self, cluster, network=None,
@@ -933,9 +935,9 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 net_data[key] = None
         # FIXME(arosen) implement admin_state_up = False in NSX
         if net_data['admin_state_up'] is False:
-            LOG.warning(_("Network with admin_state_up=False are not yet "
-                          "supported by this plugin. Ignoring setting for "
-                          "network %s"), net_data.get('name', '<unknown>'))
+            LOG.warning(_LW("Networks with admin_state_up=False are not yet "
+                            "supported by this plugin. Ignoring setting for "
+                            "network %s"), net_data.get('name', '<unknown>'))
         transport_zone_config = self._convert_to_nsx_transport_zones(
             self.cluster, net_data)
         external = net_data.get(ext_net_extn.EXTERNAL)
@@ -1011,8 +1013,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             try:
                 switchlib.delete_networks(self.cluster, id, lswitch_ids)
             except n_exc.NotFound:
-                LOG.warning(_("The following logical switches were not found "
-                              "on the NSX backend:%s"), lswitch_ids)
+                LOG.warning(_LW("The following logical switches were not "
+                                "found on the NSX backend:%s"), lswitch_ids)
         self.handle_network_dhcp_access(context, id, action='delete_network')
         LOG.debug("Delete network complete for network: %s", id)
 
@@ -1068,17 +1070,17 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             nsx_switch_ids = nsx_utils.get_nsx_switch_ids(
                 context.session, self.cluster, id)
             if not nsx_switch_ids or len(nsx_switch_ids) < 1:
-                LOG.warn(_("Unable to find NSX mappings for neutron "
-                           "network:%s"), id)
+                LOG.warn(_LW("Unable to find NSX mappings for neutron "
+                             "network:%s"), id)
             try:
                 switchlib.update_lswitch(self.cluster,
                                          nsx_switch_ids[0],
                                          network['network']['name'])
             except api_exc.NsxApiException as e:
-                LOG.warn(_("Logical switch update on NSX backend failed. "
-                           "Neutron network id:%(net_id)s; "
-                           "NSX lswitch id:%(lswitch_id)s;"
-                           "Error:%(error)s"),
+                LOG.warn(_LW("Logical switch update on NSX backend failed. "
+                             "Neutron network id:%(net_id)s; "
+                             "NSX lswitch id:%(lswitch_id)s;"
+                             "Error:%(error)s"),
                          {'net_id': id, 'lswitch_id': nsx_switch_ids[0],
                           'error': e})
 
@@ -1155,8 +1157,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             LOG.debug("port created on NSX backend for tenant "
                       "%(tenant_id)s: (%(id)s)", port_data)
         except n_exc.NotFound:
-            LOG.warning(_("Logical switch for network %s was not "
-                          "found in NSX."), port_data['network_id'])
+            LOG.warning(_LW("Logical switch for network %s was not "
+                            "found in NSX."), port_data['network_id'])
             # Put port in error on neutron DB
             with context.session.begin(subtransactions=True):
                 port = self._get_port(context, neutron_port_id)
@@ -1166,8 +1168,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         except Exception:
             # Port must be removed from neutron DB
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Unable to create port or set port "
-                            "attachment in NSX."))
+                LOG.error(_LE("Unable to create port or set port "
+                              "attachment in NSX."))
                 with context.session.begin(subtransactions=True):
                     self._delete_port(context, neutron_port_id)
 
@@ -1289,7 +1291,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 # FIXME(arosen) improve exception handling.
                 except Exception:
                     ret_port['status'] = constants.PORT_STATUS_ERROR
-                    LOG.exception(_("Unable to update port id: %s."),
+                    LOG.exception(_LE("Unable to update port id: %s."),
                                   nsx_port_id)
 
             # If nsx_port_id is not in database or in nsx put in error state.
@@ -1389,9 +1391,10 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 "L3GatewayAttachment",
                 self.cluster.default_l3_gw_service_uuid)
         except nsx_exc.NsxPluginException:
-            LOG.exception(_("Unable to create L3GW port on logical router "
-                            "%(router_uuid)s. Verify Default Layer-3 Gateway "
-                            "service %(def_l3_gw_svc)s id is correct"),
+            LOG.exception(_LE("Unable to create L3GW port on logical router "
+                              "%(router_uuid)s. Verify Default Layer-3 "
+                              "Gateway service %(def_l3_gw_svc)s id is "
+                              "correct"),
                           {'router_uuid': lrouter['uuid'],
                            'def_l3_gw_svc':
                            self.cluster.default_l3_gw_service_uuid})
@@ -1477,12 +1480,12 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                     # As setting gateway failed, the router must be deleted
                     # in order to ensure atomicity
                     router_id = router_db['id']
-                    LOG.warn(_("Failed to set gateway info for router being "
-                               "created:%s - removing router"), router_id)
+                    LOG.warn(_LW("Failed to set gateway info for router being "
+                                 "created:%s - removing router"), router_id)
                     self.delete_router(context, router_id)
-                    LOG.info(_("Create router failed while setting external "
-                               "gateway. Router:%s has been removed from "
-                               "DB and backend"),
+                    LOG.info(_LI("Create router failed while setting external "
+                                 "gateway. Router:%s has been removed from "
+                                 "DB and backend"),
                              router_id)
         return self._make_router_dict(router_db)
 
@@ -1601,8 +1604,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             self._delete_lrouter(context, router_id, nsx_router_id)
         except n_exc.NotFound:
             # This is not a fatal error, but needs to be logged
-            LOG.warning(_("Logical router '%s' not found "
-                        "on NSX Platform"), router_id)
+            LOG.warning(_LW("Logical router '%s' not found "
+                            "on NSX Platform"), router_id)
         except api_exc.NsxApiException:
             raise nsx_exc.NsxPluginException(
                 err_msg=(_("Unable to delete logical router '%s' "
@@ -1615,10 +1618,10 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 context.session, router_id)
         except db_exc.DBError as d_exc:
             # Do not make this error fatal
-            LOG.warn(_("Unable to remove NSX mapping for Neutron router "
-                       "%(router_id)s because of the following exception:"
-                       "%(d_exc)s"), {'router_id': router_id,
-                                      'd_exc': str(d_exc)})
+            LOG.warn(_LW("Unable to remove NSX mapping for Neutron router "
+                         "%(router_id)s because of the following exception:"
+                         "%(d_exc)s"), {'router_id': router_id,
+                                        'd_exc': str(d_exc)})
         # Perform the actual delete on the Neutron DB
         super(NsxPluginV2, self).delete_router(context, router_id)
 
@@ -1753,8 +1756,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 raise_on_len_mismatch=False,
                 destination_ip_addresses=subnet['cidr'])
         except n_exc.NotFound:
-            LOG.error(_("Logical router resource %s not found "
-                        "on NSX platform") % router_id)
+            LOG.error(_LE("Logical router resource %s not found "
+                          "on NSX platform"), router_id)
         except api_exc.NsxApiException:
             raise nsx_exc.NsxPluginException(
                 err_msg=(_("Unable to update logical router"
@@ -1789,13 +1792,13 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 
         except api_exc.NsxApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("An error occurred while removing NAT rules "
-                                "on the NSX platform for floating ip:%s"),
+                LOG.exception(_LE("An error occurred while removing NAT rules "
+                                  "on the NSX platform for floating ip:%s"),
                               floating_ip_address)
         except nsx_exc.NatRuleMismatch:
             # Do not surface to the user
-            LOG.warning(_("An incorrect number of matching NAT rules "
-                          "was found on the NSX platform"))
+            LOG.warning(_LW("An incorrect number of matching NAT rules "
+                            "was found on the NSX platform"))
 
     def _remove_floatingip_address(self, context, fip_db):
         # Remove floating IP address from logical router port
@@ -1936,10 +1939,10 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                         self.cluster, nsx_router_id, nsx_gw_port_id,
                         ips_to_add=nsx_floating_ips, ips_to_remove=[])
                 except api_exc.NsxApiException:
-                    LOG.exception(_("An error occurred while creating NAT "
-                                    "rules on the NSX platform for floating "
-                                    "ip:%(floating_ip)s mapped to "
-                                    "internal ip:%(internal_ip)s"),
+                    LOG.exception(_LE("An error occurred while creating NAT "
+                                      "rules on the NSX platform for floating "
+                                      "ip:%(floating_ip)s mapped to "
+                                      "internal ip:%(internal_ip)s"),
                                   {'floating_ip': floating_ip,
                                    'internal_ip': internal_ip})
                     msg = _("Failed to update NAT rules for floatingip update")
@@ -1984,7 +1987,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             LOG.debug("The port '%s' is not associated with floating IPs",
                       port_id)
         except n_exc.NotFound:
-            LOG.warning(_("Nat rules not found in nsx for port: %s"), id)
+            LOG.warning(_LW("Nat rules not found in nsx for port: %s"), id)
 
         # NOTE(ihrachys): L3 agent notifications don't make sense for
         # NSX VMWare plugin since there is no L3 agent in such setup, so
@@ -2048,8 +2051,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             except api_exc.ResourceNotFound:
                 # Do not cause a 500 to be returned to the user if
                 # the corresponding NSX resource does not exist
-                LOG.exception(_("Unable to remove gateway service from "
-                                "NSX plaform - the resource was not found"))
+                LOG.exception(_LE("Unable to remove gateway service from "
+                                  "NSX platform - the resource was not found"))
 
     def get_network_gateway(self, context, id, fields=None):
         # Ensure the default gateway in the config file is in sync with the db
@@ -2077,8 +2080,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             except api_exc.NsxApiException:
                 # Consider backend failures as non-fatal, but still warn
                 # because this might indicate something dodgy is going on
-                LOG.warn(_("Unable to update name on NSX backend "
-                           "for network gateway: %s"), id)
+                LOG.warn(_LW("Unable to update name on NSX backend "
+                             "for network gateway: %s"), id)
         return super(NsxPluginV2, self).update_network_gateway(
             context, id, network_gateway)
 
@@ -2284,16 +2287,16 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         try:
             l2gwlib.delete_gateway_device(self.cluster, nsx_device_id)
         except n_exc.NotFound:
-            LOG.warn(_("Removal of gateway device: %(neutron_id)s failed on "
-                       "NSX backend (NSX id:%(nsx_id)s) because the NSX "
-                       "resource was not found"),
+            LOG.warn(_LW("Removal of gateway device: %(neutron_id)s failed on "
+                         "NSX backend (NSX id:%(nsx_id)s) because the NSX "
+                         "resource was not found"),
                      {'neutron_id': device_id, 'nsx_id': nsx_device_id})
         except api_exc.NsxApiException:
             with excutils.save_and_reraise_exception():
                 # In this case a 500 should be returned
-                LOG.exception(_("Removal of gateway device: %(neutron_id)s "
-                                "failed on NSX backend (NSX id:%(nsx_id)s). "
-                                "Neutron and NSX states have diverged."),
+                LOG.exception(_LE("Removal of gateway device: %(neutron_id)s "
+                                  "failed on NSX backend (NSX id:%(nsx_id)s). "
+                                  "Neutron and NSX states have diverged."),
                               {'neutron_id': device_id,
                                'nsx_id': nsx_device_id})
 
@@ -2339,9 +2342,9 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 # Reverting the DB change is not really worthwhile
                 # for a mismatch between names. It's the rules that
                 # we care about.
-                LOG.error(_('Error while updating security profile '
-                            '%(uuid)s with name %(name)s: %(error)s.')
-                          {'uuid': secgroup_id, 'name': name, 'error': e})
+                LOG.error(_LE('Error while updating security profile '
+                              '%(uuid)s with name %(name)s: %(error)s.'),
+                          {'uuid': secgroup_id, 'name': name, 'error': e})
         return secgroup
 
     def delete_security_group(self, context, security_group_id):
@@ -2371,18 +2374,19 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             except n_exc.NotFound:
                 # The security profile was not found on the backend
                 # do not fail in this case.
-                LOG.warning(_("The NSX security profile %(sec_profile_id)s, "
-                              "associated with the Neutron security group "
-                              "%(sec_group_id)s was not found on the backend"),
+                LOG.warning(_LW("The NSX security profile %(sec_profile_id)s, "
+                                "associated with the Neutron security group "
+                                "%(sec_group_id)s was not found on the "
+                                "backend"),
                             {'sec_profile_id': nsx_sec_profile_id,
                              'sec_group_id': security_group_id})
             except api_exc.NsxApiException:
                 # Raise and fail the operation, as there is a problem which
                 # prevented the sec group from being removed from the backend
-                LOG.exception(_("An exception occurred while removing the "
-                                "NSX security profile %(sec_profile_id)s, "
-                                "associated with Netron security group "
-                                "%(sec_group_id)s"),
+                LOG.exception(_LE("An exception occurred while removing the "
+                                  "NSX security profile %(sec_profile_id)s, "
+                                  "associated with Neutron security group "
+                                  "%(sec_group_id)s"),
                               {'sec_profile_id': nsx_sec_profile_id,
                                'sec_group_id': security_group_id})
                 raise nsx_exc.NsxPluginException(
index 32299b333767bb3144b02d0f26bf4f7f4656bc14..aede05ef6dde38534cd40403c0756decbb778bf9 100644 (file)
@@ -29,6 +29,7 @@ from neutron.extensions import firewall as fw_ext
 from neutron.extensions import l3
 from neutron.extensions import routedserviceinsertion as rsi
 from neutron.extensions import vpnaas as vpn_ext
+from neutron.i18n import _LE, _LW
 from neutron.openstack.common import log as logging
 from neutron.plugins.common import constants as service_constants
 from neutron.plugins.vmware.api_client import exception as api_exc
@@ -538,7 +539,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
             try:
                 self.vcns_driver.delete_lswitch(lswitch_id)
             except exceptions.ResourceNotFound:
-                LOG.warning(_("Did not found lswitch %s in NSX"), lswitch_id)
+                LOG.warning(_LW("Did not find lswitch %s in NSX"), lswitch_id)
 
             # delete edge
             jobdata = {
@@ -884,15 +885,14 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
         except exceptions.VcnsApiException as e:
             self._firewall_set_status(
                 context, fw['id'], service_constants.ERROR)
-            msg = (_("Failed to create firewall on vShield Edge "
-                     "bound on router %s") % router_id)
-            LOG.exception(msg)
+            LOG.exception(_LE("Failed to create firewall on vShield Edge "
+                              "bound on router %s"), router_id)
             raise e
 
         except exceptions.VcnsBadRequest as e:
             self._firewall_set_status(
                 context, fw['id'], service_constants.ERROR)
-            LOG.exception(_("Bad Firewall request Input"))
+            LOG.exception(_LE("Bad Firewall request Input"))
             raise e
 
     def _vcns_delete_firewall(self, context, router_id=None, **kwargs):
@@ -1113,8 +1113,8 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
                 context, loadbalancer_db.Vip, resource_id=vip_id)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to find the edge with "
-                                "vip_id: %s"), vip_id)
+                LOG.exception(_LE("Failed to find the edge with "
+                                  "vip_id: %s"), vip_id)
         return self._get_edge_id_by_vcns_edge_binding(
             context, service_router_binding.router_id)
 
@@ -1184,8 +1184,8 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
                     context, edge_id, hm)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_("Failed to create healthmonitor "
-                                    "associated with pool id: %s!") % pool_id)
+                    LOG.exception(_LE("Failed to create healthmonitor "
+                                      "associated with pool id: %s!"), pool_id)
                     for monitor_ide in pool.get('health_monitors'):
                         if monitor_ide == monitor_id:
                             break
@@ -1201,7 +1201,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
             self.vcns_driver.create_pool(context, edge_id, pool, members)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to create pool on vshield edge"))
+                LOG.exception(_LE("Failed to create pool on vshield edge"))
                 self.vcns_driver.delete_pool(
                     context, pool_id, edge_id)
                 for monitor_id in pool.get('health_monitors'):
@@ -1261,7 +1261,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
             self.vcns_driver.create_vip(context, edge_id, v)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to create vip!"))
+                LOG.exception(_LE("Failed to create vip!"))
                 self._delete_resource_router_id_binding(
                     context, v['id'], loadbalancer_db.Vip)
                 super(NsxAdvancedPlugin, self).delete_vip(context, v['id'])
@@ -1301,7 +1301,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
             self.vcns_driver.update_vip(context, v, session_persistence_update)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to update vip with id: %s!"), id)
+                LOG.exception(_LE("Failed to update vip with id: %s!"), id)
                 self._resource_set_status(context, loadbalancer_db.Vip,
                                           id, service_constants.ERROR, v)
 
@@ -1318,7 +1318,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
             self.vcns_driver.delete_vip(context, id)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to delete vip with id: %s!"), id)
+                LOG.exception(_LE("Failed to delete vip with id: %s!"), id)
                 self._resource_set_status(context, loadbalancer_db.Vip,
                                           id, service_constants.ERROR)
         edge_id = self._get_edge_id_by_vip_id(context, id)
@@ -1374,7 +1374,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
             self._vcns_update_pool(context, p)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to update pool with id: %s!"), id)
+                LOG.exception(_LE("Failed to update pool with id: %s!"), id)
                 self._resource_set_status(context, loadbalancer_db.Pool,
                                           p['id'], service_constants.ERROR, p)
         self._resource_set_status(context, loadbalancer_db.Pool,
@@ -1396,7 +1396,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
             self._vcns_update_pool(context, pool)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to update pool with the member"))
+                LOG.exception(_LE("Failed to update pool with the member"))
                 super(NsxAdvancedPlugin, self).delete_member(context, m['id'])
 
         self._resource_set_status(context, loadbalancer_db.Pool,
@@ -1422,8 +1422,8 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
                     self._vcns_update_pool(context, old_pool)
                 except Exception:
                     with excutils.save_and_reraise_exception():
-                        LOG.exception(_("Failed to update old pool "
-                                        "with the member"))
+                        LOG.exception(_LE("Failed to update old pool "
+                                          "with the member"))
                         super(NsxAdvancedPlugin, self).delete_member(
                             context, m['id'])
                 self._resource_set_status(
@@ -1443,7 +1443,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
             self._vcns_update_pool(context, pool)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to update pool with the member"))
+                LOG.exception(_LE("Failed to update pool with the member"))
                 super(NsxAdvancedPlugin, self).delete_member(
                     context, m['id'])
 
@@ -1466,7 +1466,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
             self._vcns_update_pool(context, pool)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to update pool with the member"))
+                LOG.exception(_LE("Failed to update pool with the member"))
         self._resource_set_status(context, loadbalancer_db.Pool,
                                   pool_id, service_constants.ACTIVE)
 
@@ -1486,8 +1486,8 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
                         context, edge_id, old_hm, hm)
                 except Exception:
                     with excutils.save_and_reraise_exception():
-                        LOG.exception(_("Failed to update monitor "
-                                        "with id: %s!"), id)
+                        LOG.exception(_LE("Failed to update monitor "
+                                          "with id: %s!"), id)
         return hm
 
     def create_pool_health_monitor(self, context,
@@ -1525,7 +1525,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
             self._vcns_update_pool(context, pool)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to associate monitor with pool!"))
+                LOG.exception(_LE("Failed to associate monitor with pool!"))
                 self._resource_set_status(
                     context, loadbalancer_db.Pool,
                     pool_id, service_constants.ERROR)
@@ -1556,7 +1556,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
             except Exception:
                 with excutils.save_and_reraise_exception():
                     LOG.exception(
-                        _("Failed to update pool with pool_monitor!"))
+                        _LE("Failed to update pool with pool_monitor!"))
                     self._resource_set_status(
                         context, loadbalancer_db.Pool,
                         pool_id, service_constants.ERROR)
@@ -1598,14 +1598,14 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
                 edge_id, sites, enabled=vpn_service.admin_state_up)
         except exceptions.VcnsBadRequest:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Bad or unsupported Input request!"))
+                LOG.exception(_LE("Bad or unsupported Input request!"))
         except exceptions.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                msg = (_("Failed to update ipsec VPN configuration "
-                         "with vpnservice: %(vpnservice_id)s on vShield Edge: "
-                         "%(edge_id)s") % {'vpnservice_id': vpnservice_id,
-                                           'edge_id': edge_id})
-                LOG.exception(msg)
+                LOG.exception(_LE("Failed to update ipsec VPN configuration "
+                                  "with vpnservice: %(vpnservice_id)s on "
+                                  "vShield Edge: %(edge_id)s"),
+                              {'vpnservice_id': vpnservice_id,
+                               'edge_id': edge_id})
 
     def create_vpnservice(self, context, vpnservice):
         LOG.debug("create_vpnservice() called")
@@ -1753,7 +1753,7 @@ class VcnsCallbacks(object):
                 context, neutron_router_id)
         except l3.RouterNotFound:
             # Router might have been deleted before deploy finished
-            LOG.exception(_("Router %s not found"), lrouter['uuid'])
+            LOG.exception(_LE("Router %s not found"), lrouter['uuid'])
 
         if task.status == tasks_const.TaskStatus.COMPLETED:
             LOG.debug("Successfully deployed %(edge_id)s for "
index c9f59ec6162bfea6b53330b7e97c1d042e391029..26c62620400f1ded97ec2c0f2b88dc0787f8f832 100644 (file)
@@ -15,6 +15,7 @@
 from oslo.serialization import jsonutils
 from oslo.utils import excutils
 
+from neutron.i18n import _LE
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.common import utils
 from neutron.plugins.vmware.vshield.common import constants as vcns_const
@@ -128,7 +129,7 @@ class EdgeApplianceDriver(object):
             status_level = self._edge_status_to_level(
                 response['edgeStatus'])
         except exceptions.VcnsApiException as e:
-            LOG.exception(_("VCNS: Failed to get edge status:\n%s"),
+            LOG.exception(_LE("VCNS: Failed to get edge status:\n%s"),
                           e.response)
             status_level = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
             try:
@@ -160,13 +161,13 @@ class EdgeApplianceDriver(object):
             self.vcns.update_interface(edge_id, config)
         except exceptions.VcnsApiException as e:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("VCNS: Failed to update vnic %(config)s:\n"
-                                "%(response)s"), {
+                LOG.exception(_LE("VCNS: Failed to update vnic %(config)s:\n"
+                                  "%(response)s"), {
                                     'config': config,
                                     'response': e.response})
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("VCNS: Failed to update vnic %d"),
+                LOG.exception(_LE("VCNS: Failed to update vnic %d"),
                               config['index'])
 
         return constants.TaskStatus.COMPLETED
@@ -217,7 +218,7 @@ class EdgeApplianceDriver(object):
             status = constants.TaskStatus.PENDING
         except exceptions.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("VCNS: deploy edge failed for router %s."),
+                LOG.exception(_LE("VCNS: deploy edge failed for router %s."),
                               name)
 
         return status
@@ -236,21 +237,20 @@ class EdgeApplianceDriver(object):
                 status = constants.TaskStatus.ERROR
         except exceptions.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("VCNS: Edge %s status query failed."), edge_id)
+                LOG.exception(_LE("VCNS: Edge %s status query failed."),
+                              edge_id)
         except Exception:
             retries = task.userdata.get('retries', 0) + 1
             if retries < 3:
                 task.userdata['retries'] = retries
-                msg = _("VCNS: Unable to retrieve edge %(edge_id)s status. "
-                        "Retry %(retries)d.") % {
-                            'edge_id': edge_id,
-                            'retries': retries}
-                LOG.exception(msg)
+                LOG.exception(_LE("VCNS: Unable to retrieve edge %(edge_id)s "
+                                  "status. Retry %(retries)d."),
+                              {'edge_id': edge_id,
+                               'retries': retries})
                 status = constants.TaskStatus.PENDING
             else:
-                msg = _("VCNS: Unable to retrieve edge %s status. "
-                        "Abort.") % edge_id
-                LOG.exception(msg)
+                LOG.exception(_LE("VCNS: Unable to retrieve edge %s status. "
+                                  "Abort."), edge_id)
                 status = constants.TaskStatus.ERROR
         LOG.debug("VCNS: Edge %s status", edge_id)
         return status
@@ -259,8 +259,8 @@ class EdgeApplianceDriver(object):
         router_name = task.userdata['router_name']
         edge_id = task.userdata.get('edge_id')
         if task.status != constants.TaskStatus.COMPLETED:
-            LOG.error(_("VCNS: Failed to deploy edge %(edge_id)s "
-                        "for %(name)s, status %(status)d"), {
+            LOG.error(_LE("VCNS: Failed to deploy edge %(edge_id)s "
+                          "for %(name)s, status %(status)d"), {
                             'edge_id': edge_id,
                             'name': router_name,
                             'status': task.status
@@ -281,13 +281,12 @@ class EdgeApplianceDriver(object):
             except exceptions.ResourceNotFound:
                 pass
             except exceptions.VcnsApiException as e:
-                msg = _("VCNS: Failed to delete %(edge_id)s:\n"
-                        "%(response)s") % {
-                            'edge_id': edge_id, 'response': e.response}
-                LOG.exception(msg)
+                LOG.exception(_LE("VCNS: Failed to delete %(edge_id)s:\n"
+                                  "%(response)s"),
+                              {'edge_id': edge_id, 'response': e.response})
                 status = constants.TaskStatus.ERROR
             except Exception:
-                LOG.exception(_("VCNS: Failed to delete %s"), edge_id)
+                LOG.exception(_LE("VCNS: Failed to delete %s"), edge_id)
                 status = constants.TaskStatus.ERROR
 
         return status
@@ -297,7 +296,8 @@ class EdgeApplianceDriver(object):
             return self.vcns.get_edges()[1]
         except exceptions.VcnsApiException as e:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("VCNS: Failed to get edges:\n%s"), e.response)
+                LOG.exception(_LE("VCNS: Failed to get edges:\n%s"),
+                              e.response)
 
     def deploy_edge(self, router_id, name, internal_network, jobdata=None,
                     wait_for_exec=False, loadbalancer_enable=True):
@@ -375,7 +375,7 @@ class EdgeApplianceDriver(object):
             return self.vcns.get_nat_config(edge_id)[1]
         except exceptions.VcnsApiException as e:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("VCNS: Failed to get nat config:\n%s"),
+                LOG.exception(_LE("VCNS: Failed to get nat config:\n%s"),
                               e.response)
 
     def _create_nat_rule(self, task):
@@ -398,7 +398,7 @@ class EdgeApplianceDriver(object):
             self.vcns.update_nat_config(edge_id, nat)
             status = constants.TaskStatus.COMPLETED
         except exceptions.VcnsApiException as e:
-            LOG.exception(_("VCNS: Failed to create snat rule:\n%s"),
+            LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
                           e.response)
             status = constants.TaskStatus.ERROR
 
@@ -440,8 +440,8 @@ class EdgeApplianceDriver(object):
                 try:
                     self.vcns.delete_nat_rule(edge_id, rule_id)
                 except exceptions.VcnsApiException as e:
-                    LOG.exception(_("VCNS: Failed to delete snat rule:\n"
-                                    "%s"), e.response)
+                    LOG.exception(_LE("VCNS: Failed to delete snat rule:\n"
+                                      "%s"), e.response)
                     status = constants.TaskStatus.ERROR
 
         return status
@@ -523,7 +523,7 @@ class EdgeApplianceDriver(object):
             self.vcns.update_nat_config(edge_id, nat)
             status = constants.TaskStatus.COMPLETED
         except exceptions.VcnsApiException as e:
-            LOG.exception(_("VCNS: Failed to create snat rule:\n%s"),
+            LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
                           e.response)
             status = constants.TaskStatus.ERROR
 
@@ -593,7 +593,7 @@ class EdgeApplianceDriver(object):
             self.vcns.update_routes(edge_id, request)
             status = constants.TaskStatus.COMPLETED
         except exceptions.VcnsApiException as e:
-            LOG.exception(_("VCNS: Failed to update routes:\n%s"),
+            LOG.exception(_LE("VCNS: Failed to update routes:\n%s"),
                           e.response)
             status = constants.TaskStatus.ERROR
 
@@ -645,7 +645,7 @@ class EdgeApplianceDriver(object):
                 edge_id)
         except exceptions.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to get service config"))
+                LOG.exception(_LE("Failed to get service config"))
         return response
 
     def enable_service_loadbalancer(self, edge_id):
@@ -657,5 +657,5 @@ class EdgeApplianceDriver(object):
             self.vcns.enable_service_loadbalancer(edge_id, config)
         except exceptions.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to enable loadbalancer "
-                                "service config"))
+                LOG.exception(_LE("Failed to enable loadbalancer "
+                                  "service config"))
index ad998516841b213536390321e0cf8232cc03c7c4..6a6843fa98a7cca647871c5835426d7d41c9e608 100644 (file)
@@ -15,6 +15,7 @@
 from oslo.utils import excutils
 
 from neutron.db import db_base_plugin_v2
+from neutron.i18n import _LE
 from neutron.openstack.common import log as logging
 from neutron.plugins.common import constants
 from neutron.plugins.vmware.dbexts import vcns_db
@@ -186,8 +187,8 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
             return self.vcns.get_firewall(edge_id)[1]
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to get firewall with edge "
-                                "id: %s"), edge_id)
+                LOG.exception(_LE("Failed to get firewall with edge "
+                                  "id: %s"), edge_id)
 
     def _get_firewall_rule_next(self, context, edge_id, rule_vseid):
         # Return the firewall rule below 'rule_vseid'
@@ -214,8 +215,8 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
                 edge_id, vcns_rule_id)[1]
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to get firewall rule: %(rule_id)s "
-                                "with edge_id: %(edge_id)s"), {
+                LOG.exception(_LE("Failed to get firewall rule: %(rule_id)s "
+                                  "with edge_id: %(edge_id)s"), {
                                     'rule_id': id,
                                     'edge_id': edge_id})
         return self._restore_firewall_rule(context, edge_id, response)
@@ -230,8 +231,8 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
             self.vcns.update_firewall(edge_id, fw_req)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to update firewall "
-                                "with edge_id: %s"), edge_id)
+                LOG.exception(_LE("Failed to update firewall "
+                                  "with edge_id: %s"), edge_id)
         fw_res = self._get_firewall(context, edge_id)
         vcns_db.cleanup_vcns_edge_firewallrule_binding(
             context.session, edge_id)
@@ -242,8 +243,8 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
             self.vcns.delete_firewall(edge_id)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to delete firewall "
-                                "with edge_id:%s"), edge_id)
+                LOG.exception(_LE("Failed to delete firewall "
+                                  "with edge_id:%s"), edge_id)
         vcns_db.cleanup_vcns_edge_firewallrule_binding(
             context.session, edge_id)
 
@@ -256,8 +257,8 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
             self.vcns.update_firewall_rule(edge_id, vcns_rule_id, fwr_req)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to update firewall rule: %(rule_id)s "
-                                "with edge_id: %(edge_id)s"),
+                LOG.exception(_LE("Failed to update firewall rule: "
+                                  "%(rule_id)s with edge_id: %(edge_id)s"),
                               {'rule_id': id,
                                'edge_id': edge_id})
 
@@ -269,8 +270,8 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
             self.vcns.delete_firewall_rule(edge_id, vcns_rule_id)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to delete firewall rule: %(rule_id)s "
-                                "with edge_id: %(edge_id)s"),
+                LOG.exception(_LE("Failed to delete firewall rule: "
+                                  "%(rule_id)s with edge_id: %(edge_id)s"),
                               {'rule_id': id,
                                'edge_id': edge_id})
         vcns_db.delete_vcns_edge_firewallrule_binding(
@@ -286,8 +287,8 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
                 edge_id, ref_vcns_rule_id, fwr_req)[0]
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to add firewall rule above: "
-                                "%(rule_id)s with edge_id: %(edge_id)s"),
+                LOG.exception(_LE("Failed to add firewall rule above: "
+                                  "%(rule_id)s with edge_id: %(edge_id)s"),
                               {'rule_id': ref_vcns_rule_id,
                                'edge_id': edge_id})
 
@@ -314,8 +315,8 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
                     edge_id, int(ref_vcns_rule_id), fwr_req)[0]
             except vcns_exc.VcnsApiException:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_("Failed to add firewall rule above: "
-                                    "%(rule_id)s with edge_id: %(edge_id)s"),
+                    LOG.exception(_LE("Failed to add firewall rule above: "
+                                      "%(rule_id)s with edge_id: %(edge_id)s"),
                                   {'rule_id': ref_vcns_rule_id,
                                    'edge_id': edge_id})
         else:
@@ -325,8 +326,8 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
                     edge_id, fwr_req)[0]
             except vcns_exc.VcnsApiException:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_("Failed to append a firewall rule"
-                                    "with edge_id: %s"), edge_id)
+                    LOG.exception(_LE("Failed to append a firewall rule "
+                                      "with edge_id: %s"), edge_id)
 
         objuri = header['location']
         fwr_vseid = objuri[objuri.rfind("/") + 1:]
index 6ae188910b56aec38e7520994529a318096db426..4b0993fdb40e32ecd581072c1c75beeea265b990 100644 (file)
@@ -14,6 +14,7 @@
 
 from oslo.utils import excutils
 
+from neutron.i18n import _LE, _LW
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.vshield.common import (
     exceptions as vcns_exc)
@@ -62,9 +63,9 @@ class EdgeIPsecVpnDriver():
             ikepolicy['encryption_algorithm'] != ipsecpolicy[
                 'encryption_algorithm'] or
             ikepolicy['pfs'] != ipsecpolicy['pfs']):
-            msg = _("IKEPolicy and IPsecPolicy should have consistent "
-                    "auth_algorithm, encryption_algorithm and pfs for VSE!")
-            LOG.warning(msg)
+            LOG.warning(_LW(
+                "IKEPolicy and IPsecPolicy should have consistent "
+                "auth_algorithm, encryption_algorithm and pfs for VSE!"))
 
         # Check whether encryption_algorithm is allowed.
         encryption_algorithm = ENCRYPTION_ALGORITHM_MAP.get(
@@ -134,18 +135,19 @@ class EdgeIPsecVpnDriver():
             self.vcns.update_ipsec_config(edge_id, ipsec_config)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to update ipsec vpn configuration "
-                                "with edge_id: %s"), edge_id)
+                LOG.exception(_LE("Failed to update ipsec vpn "
+                                  "configuration with edge_id: %s"),
+                              edge_id)
 
     def delete_ipsec_config(self, edge_id):
         try:
             self.vcns.delete_ipsec_config(edge_id)
         except vcns_exc.ResourceNotFound:
-            LOG.warning(_("IPsec config not found on edge: %s"), edge_id)
+            LOG.warning(_LW("IPsec config not found on edge: %s"), edge_id)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to delete ipsec vpn configuration "
-                                "with edge_id: %s"), edge_id)
+                LOG.exception(_LE("Failed to delete ipsec vpn configuration "
+                                  "with edge_id: %s"), edge_id)
 
     def get_ipsec_config(self, edge_id):
         return self.vcns.get_ipsec_config(edge_id)
index 1b3ce55bd37179ce35a0faaf2071ac2e60aec598..d1ce2c9df70ddf5a4c689692561f120b401d2dd8 100644 (file)
@@ -14,6 +14,7 @@
 
 from oslo.utils import excutils
 
+from neutron.i18n import _LE
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.dbexts import vcns_db
 from neutron.plugins.vmware.vshield.common import (
@@ -176,7 +177,7 @@ class EdgeLbDriver():
                 edge_id, app_profile)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to create app profile on edge: %s"),
+                LOG.exception(_LE("Failed to create app profile on edge: %s"),
                               edge_id)
         objuri = header['location']
         app_profileid = objuri[objuri.rfind("/") + 1:]
@@ -187,7 +188,7 @@ class EdgeLbDriver():
                 edge_id, vip_new)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to create vip on vshield edge: %s"),
+                LOG.exception(_LE("Failed to create vip on vshield edge: %s"),
                               edge_id)
                 self.vcns.delete_app_profile(edge_id, app_profileid)
         objuri = header['location']
@@ -222,7 +223,7 @@ class EdgeLbDriver():
             response = self.vcns.get_vip(edge_id, vip_vseid)[1]
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to get vip on edge"))
+                LOG.exception(_LE("Failed to get vip on edge"))
         return self._restore_lb_vip(context, edge_id, response)
 
     def update_vip(self, context, vip, session_persistence_update=True):
@@ -239,15 +240,15 @@ class EdgeLbDriver():
                     edge_id, app_profileid, app_profile)
             except vcns_exc.VcnsApiException:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_("Failed to update app profile on "
-                                    "edge: %s") % edge_id)
+                    LOG.exception(_LE("Failed to update app profile on "
+                                      "edge: %s"), edge_id)
 
         vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid)
         try:
             self.vcns.update_vip(edge_id, vip_vseid, vip_new)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to update vip on edge: %s") % edge_id)
+                LOG.exception(_LE("Failed to update vip on edge: %s"), edge_id)
 
     def delete_vip(self, context, id):
         vip_binding = self._get_vip_binding(context.session, id)
@@ -258,18 +259,18 @@ class EdgeLbDriver():
         try:
             self.vcns.delete_vip(edge_id, vip_vseid)
         except vcns_exc.ResourceNotFound:
-            LOG.exception(_("vip not found on edge: %s") % edge_id)
+            LOG.exception(_LE("vip not found on edge: %s"), edge_id)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to delete vip on edge: %s") % edge_id)
+                LOG.exception(_LE("Failed to delete vip on edge: %s"), edge_id)
 
         try:
             self.vcns.delete_app_profile(edge_id, app_profileid)
         except vcns_exc.ResourceNotFound:
-            LOG.exception(_("app profile not found on edge: %s") % edge_id)
+            LOG.exception(_LE("app profile not found on edge: %s"), edge_id)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to delete app profile on edge: %s") %
+                LOG.exception(_LE("Failed to delete app profile on edge: %s"),
                               edge_id)
 
         vcns_db.delete_vcns_edge_vip_binding(context.session, id)
@@ -280,7 +281,7 @@ class EdgeLbDriver():
             header = self.vcns.create_pool(edge_id, pool_new)[0]
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to create pool"))
+                LOG.exception(_LE("Failed to create pool"))
 
         objuri = header['location']
         pool_vseid = objuri[objuri.rfind("/") + 1:]
@@ -307,7 +308,7 @@ class EdgeLbDriver():
             response = self.vcns.get_pool(edge_id, pool_vseid)[1]
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to get pool on edge"))
+                LOG.exception(_LE("Failed to get pool on edge"))
         return self._restore_lb_pool(context, edge_id, response)
 
     def update_pool(self, context, edge_id, pool, members):
@@ -319,7 +320,7 @@ class EdgeLbDriver():
             self.vcns.update_pool(edge_id, pool_vseid, pool_new)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to update pool"))
+                LOG.exception(_LE("Failed to update pool"))
 
     def delete_pool(self, context, id, edge_id):
         pool_binding = vcns_db.get_vcns_edge_pool_binding(
@@ -329,7 +330,7 @@ class EdgeLbDriver():
             self.vcns.delete_pool(edge_id, pool_vseid)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to delete pool"))
+                LOG.exception(_LE("Failed to delete pool"))
         vcns_db.delete_vcns_edge_pool_binding(
             context.session, id, edge_id)
 
@@ -339,7 +340,7 @@ class EdgeLbDriver():
             header = self.vcns.create_health_monitor(edge_id, monitor_new)[0]
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to create monitor on edge: %s"),
+                LOG.exception(_LE("Failed to create monitor on edge: %s"),
                               edge_id)
 
         objuri = header['location']
@@ -367,7 +368,7 @@ class EdgeLbDriver():
             response = self.vcns.get_health_monitor(edge_id, monitor_vseid)[1]
         except vcns_exc.VcnsApiException as e:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to get monitor on edge: %s"),
+                LOG.exception(_LE("Failed to get monitor on edge: %s"),
                               e.response)
         return self._restore_lb_monitor(context, edge_id, response)
 
@@ -384,7 +385,7 @@ class EdgeLbDriver():
                 edge_id, monitor_vseid, monitor_new)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to update monitor on edge: %s"),
+                LOG.exception(_LE("Failed to update monitor on edge: %s"),
                               edge_id)
 
     def delete_health_monitor(self, context, id, edge_id):
@@ -395,6 +396,6 @@ class EdgeLbDriver():
             self.vcns.delete_health_monitor(edge_id, monitor_vseid)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to delete monitor"))
+                LOG.exception(_LE("Failed to delete monitor"))
         vcns_db.delete_vcns_edge_monitor_binding(
             context.session, id, edge_id)
index 424736f71e6aaad34b29dafad8ded271bb2c3142..3f9ac23680669f0f69d0d3960fb51ab68745a7c2 100644 (file)
@@ -20,6 +20,7 @@ from eventlet import event
 from eventlet import greenthread
 
 from neutron.common import exceptions
+from neutron.i18n import _LE, _LI
 from neutron.openstack.common import log as logging
 from neutron.openstack.common import loopingcall
 from neutron.plugins.vmware.vshield.tasks import constants
@@ -93,12 +94,11 @@ class Task():
             try:
                 func(self)
             except Exception:
-                msg = _("Task %(task)s encountered exception in %(func)s "
-                        "at state %(state)s") % {
-                            'task': str(self),
-                            'func': str(func),
-                            'state': state}
-                LOG.exception(msg)
+                LOG.exception(_LE("Task %(task)s encountered exception in "
+                                  "%(func)s at state %(state)s"),
+                              {'task': str(self),
+                               'func': str(func),
+                               'state': state})
 
         self._move_state(state)
 
@@ -179,16 +179,14 @@ class TaskManager():
 
     def _execute(self, task):
         """Execute task."""
-        msg = _("Start task %s") % str(task)
-        LOG.debug(msg)
+        LOG.debug("Start task %s", str(task))
         task._start()
         try:
             status = task._execute_callback(task)
         except Exception:
-            msg = _("Task %(task)s encountered exception in %(cb)s") % {
-                'task': str(task),
-                'cb': str(task._execute_callback)}
-            LOG.exception(msg)
+            LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
+                          {'task': str(task),
+                           'cb': str(task._execute_callback)})
             status = constants.TaskStatus.ERROR
 
         LOG.debug("Task %(task)s return %(status)s", {
@@ -205,10 +203,9 @@ class TaskManager():
         try:
             task._result_callback(task)
         except Exception:
-            msg = _("Task %(task)s encountered exception in %(cb)s") % {
-                'task': str(task),
-                'cb': str(task._result_callback)}
-            LOG.exception(msg)
+            LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
+                          {'task': str(task),
+                           'cb': str(task._result_callback)})
 
         LOG.debug("Task %(task)s return %(status)s",
                   {'task': str(task), 'status': task.status})
@@ -228,10 +225,10 @@ class TaskManager():
             try:
                 status = task._status_callback(task)
             except Exception:
-                msg = _("Task %(task)s encountered exception in %(cb)s") % {
-                    'task': str(task),
-                    'cb': str(task._status_callback)}
-                LOG.exception(msg)
+                LOG.exception(_LE("Task %(task)s encountered exception in "
+                                  "%(cb)s"),
+                              {'task': str(task),
+                               'cb': str(task._status_callback)})
                 status = constants.TaskStatus.ERROR
             task._update_status(status)
             if status != constants.TaskStatus.PENDING:
@@ -293,7 +290,7 @@ class TaskManager():
                 if self._stopped:
                     # Gracefully terminate this thread if the _stopped
                     # attribute was set to true
-                    LOG.info(_("Stopping TaskManager"))
+                    LOG.info(_LI("Stopping TaskManager"))
                     break
 
                 # get a task from queue, or timeout for periodic status check
@@ -318,8 +315,8 @@ class TaskManager():
                     else:
                         self._enqueue(task)
             except Exception:
-                LOG.exception(_("TaskManager terminating because "
-                                "of an exception"))
+                LOG.exception(_LE("TaskManager terminating because "
+                                  "of an exception"))
                 break
 
     def add(self, task):
@@ -340,7 +337,7 @@ class TaskManager():
         if self._monitor_busy:
             self._monitor.wait()
         self._abort()
-        LOG.info(_("TaskManager terminated"))
+        LOG.info(_LI("TaskManager terminated"))
 
     def has_pending_task(self):
         if self._tasks_queue or self._tasks or self._main_thread_exec_task:
@@ -372,7 +369,7 @@ class TaskManager():
             try:
                 self._check_pending_tasks()
             except Exception:
-                LOG.exception(_("Exception in _check_pending_tasks"))
+                LOG.exception(_LE("Exception in _check_pending_tasks"))
             self._monitor_busy = False
 
         if self._thread is not None: